From 70bf549e5ab99787d98c763fd980cb52545ceedd Mon Sep 17 00:00:00 2001
From: River Riddle
Date: Sun, 22 Dec 2019 21:59:55 -0800
Subject: [PATCH] NFC: Introduce new ValuePtr/ValueRef typedefs to simplify
 the transition to Value being value-typed.

This is an initial step to refactoring the representation of OpResult as
proposed in:
https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ

This change will make it much simpler to incrementally transition all of the
existing code to use value-typed semantics.

PiperOrigin-RevId: 286844725
---
 bindings/python/pybind.cpp | 2 +-
 examples/toy/Ch2/include/toy/Ops.td | 8 +-
 examples/toy/Ch2/mlir/Dialect.cpp | 9 +-
 examples/toy/Ch2/mlir/MLIRGen.cpp | 41 +--
 examples/toy/Ch3/include/toy/Ops.td | 8 +-
 examples/toy/Ch3/mlir/Dialect.cpp | 9 +-
 examples/toy/Ch3/mlir/MLIRGen.cpp | 41 +--
 examples/toy/Ch3/mlir/ToyCombine.cpp | 2 +-
 examples/toy/Ch4/include/toy/Ops.td | 8 +-
 examples/toy/Ch4/mlir/Dialect.cpp | 13 +-
 examples/toy/Ch4/mlir/MLIRGen.cpp | 41 +--
 examples/toy/Ch4/mlir/ToyCombine.cpp | 2 +-
 examples/toy/Ch5/include/toy/Ops.td | 8 +-
 examples/toy/Ch5/mlir/Dialect.cpp | 13 +-
 examples/toy/Ch5/mlir/LowerToAffineLoops.cpp | 36 +--
 examples/toy/Ch5/mlir/MLIRGen.cpp | 41 +--
 examples/toy/Ch5/mlir/ToyCombine.cpp | 2 +-
 examples/toy/Ch6/include/toy/Ops.td | 8 +-
 examples/toy/Ch6/mlir/Dialect.cpp | 13 +-
 examples/toy/Ch6/mlir/LowerToAffineLoops.cpp | 36 +--
 examples/toy/Ch6/mlir/LowerToLLVM.cpp | 24 +-
 examples/toy/Ch6/mlir/MLIRGen.cpp | 41 +--
 examples/toy/Ch6/mlir/ToyCombine.cpp | 2 +-
 examples/toy/Ch7/include/toy/Ops.td | 10 +-
 examples/toy/Ch7/mlir/Dialect.cpp | 15 +-
 examples/toy/Ch7/mlir/LowerToAffineLoops.cpp | 36 +--
 examples/toy/Ch7/mlir/LowerToLLVM.cpp | 24 +-
 examples/toy/Ch7/mlir/MLIRGen.cpp | 40 +--
 examples/toy/Ch7/mlir/ToyCombine.cpp | 2 +-
 g3doc/DeclarativeRewrites.md | 6 +-
 g3doc/DialectConversion.md | 6 +-
 g3doc/EDSC.md | 8 +-
 g3doc/GenericDAGRewriter.md | 2 +-
 g3doc/OpDefinitions.md | 14 +-
 g3doc/QuickstartRewrites.md | 4 +-
 g3doc/Rationale.md | 2 +-
 g3doc/Tutorials/Toy/Ch-3.md | 2 +-
 g3doc/Tutorials/Toy/Ch-4.md | 4 +-
 g3doc/Tutorials/Toy/Ch-5.md | 10 +-
 g3doc/UsageOfConst.md | 8 +-
 include/mlir/Analysis/AffineAnalysis.h | 9 +-
 include/mlir/Analysis/AffineStructures.h | 72 ++---
 include/mlir/Analysis/CallInterfaces.h | 4 +-
 include/mlir/Analysis/Dominance.h | 4 +-
 include/mlir/Analysis/Liveness.h | 17 +-
 include/mlir/Analysis/LoopAnalysis.h | 9 +-
 include/mlir/Analysis/Utils.h | 10 +-
 .../AffineToStandard/AffineToStandard.h | 13 +-
 .../mlir/Conversion/LoopsToGPU/LoopsToGPU.h | 7 +-
 .../StandardToLLVM/ConvertStandardToLLVM.h | 57 ++--
 include/mlir/Dialect/AffineOps/AffineOps.h | 105 +++---
 include/mlir/Dialect/AffineOps/AffineOps.td | 8 +-
 include/mlir/Dialect/GPU/GPUDialect.h | 6 +-
 include/mlir/Dialect/GPU/GPUOps.td | 16 +-
 include/mlir/Dialect/LLVMIR/LLVMDialect.h | 6 +-
 include/mlir/Dialect/LLVMIR/LLVMOps.td | 22 +-
 .../Linalg/Analysis/DependenceAnalysis.h | 16 +-
 include/mlir/Dialect/Linalg/EDSC/Builders.h | 20 +-
 .../Dialect/Linalg/IR/LinalgLibraryOps.td | 10 +-
 include/mlir/Dialect/Linalg/IR/LinalgOps.td | 16 +-
 .../Dialect/Linalg/IR/LinalgStructuredOps.td | 10 +-
 include/mlir/Dialect/Linalg/IR/LinalgTraits.h | 8 +-
 .../Transforms/LinalgTransformPatterns.td | 2 +-
 .../Linalg/Transforms/LinalgTransforms.h | 4 +-
 include/mlir/Dialect/Linalg/Utils/Utils.h | 36 +--
 include/mlir/Dialect/LoopOps/LoopOps.h | 2 +-
 include/mlir/Dialect/LoopOps/LoopOps.td | 12 +-
 .../mlir/Dialect/SPIRV/SPIRVCompositeOps.td | 2 +-
.../mlir/Dialect/SPIRV/SPIRVControlFlowOps.td | 2 +- include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td | 4 +- include/mlir/Dialect/SPIRV/SPIRVLowering.h | 4 +- include/mlir/Dialect/SPIRV/SPIRVOps.td | 6 +- include/mlir/Dialect/StandardOps/Ops.h | 35 +- include/mlir/Dialect/StandardOps/Ops.td | 78 ++--- include/mlir/Dialect/VectorOps/Utils.h | 5 +- include/mlir/Dialect/VectorOps/VectorOps.td | 22 +- .../mlir/Dialect/VectorOps/VectorTransforms.h | 5 +- include/mlir/EDSC/Builders.h | 32 +- include/mlir/EDSC/Helpers.h | 10 +- include/mlir/EDSC/Intrinsics.h | 26 +- include/mlir/IR/Block.h | 8 +- include/mlir/IR/BlockAndValueMapping.h | 8 +- include/mlir/IR/Builders.h | 10 +- include/mlir/IR/FunctionSupport.h | 2 +- include/mlir/IR/Matchers.h | 14 +- include/mlir/IR/OpDefinition.h | 40 +-- include/mlir/IR/OpImplementation.h | 30 +- include/mlir/IR/Operation.h | 22 +- include/mlir/IR/OperationSupport.h | 45 +-- include/mlir/IR/TypeUtilities.h | 12 +- include/mlir/IR/Value.h | 22 +- .../Support/ConstraintAnalysisGraph.h | 10 +- .../mlir/Target/LLVMIR/ModuleTranslation.h | 2 +- include/mlir/Transforms/DialectConversion.h | 46 +-- include/mlir/Transforms/FoldUtils.h | 10 +- include/mlir/Transforms/InliningUtils.h | 14 +- include/mlir/Transforms/LoopLikeInterface.td | 2 +- include/mlir/Transforms/LoopUtils.h | 12 +- include/mlir/Transforms/RegionUtils.h | 8 +- include/mlir/Transforms/Utils.h | 20 +- lib/Analysis/AffineAnalysis.cpp | 60 ++-- lib/Analysis/AffineStructures.cpp | 94 +++--- lib/Analysis/CallGraph.cpp | 2 +- lib/Analysis/Dominance.cpp | 2 +- lib/Analysis/Liveness.cpp | 34 +- lib/Analysis/LoopAnalysis.cpp | 30 +- lib/Analysis/SliceAnalysis.cpp | 4 +- lib/Analysis/Utils.cpp | 42 +-- lib/Analysis/VectorAnalysis.cpp | 4 +- lib/Analysis/Verifier.cpp | 6 +- .../AffineToStandard/AffineToStandard.cpp | 139 ++++---- .../GPUCommon/IndexIntrinsicsOpLowering.h | 4 +- .../GPUCommon/OpToFuncCallLowering.h | 6 +- .../ConvertLaunchFuncToCudaCalls.cpp | 46 +-- .../GPUToNVVM/LowerGpuOpsToNVVMOps.cpp | 217 ++++++------- .../GPUToSPIRV/ConvertGPUToSPIRV.cpp | 30 +- lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp | 68 ++-- .../LoopToStandard/ConvertLoopToStandard.cpp | 18 +- lib/Conversion/LoopsToGPU/LoopsToGPU.cpp | 111 +++---- lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp | 2 +- .../StandardToLLVM/ConvertStandardToLLVM.cpp | 301 +++++++++--------- .../ConvertStandardToSPIRV.cpp | 37 +-- .../ConvertStandardToSPIRVPass.cpp | 4 +- .../LegalizeStandardForSPIRV.cpp | 8 +- .../VectorToLLVM/ConvertVectorToLLVM.cpp | 108 ++++--- lib/Dialect/AffineOps/AffineOps.cpp | 147 ++++----- .../Transforms/LowerUniformRealMath.cpp | 64 ++-- .../Transforms/UniformKernelUtils.h | 6 +- lib/Dialect/GPU/IR/GPUDialect.cpp | 47 +-- .../GPU/Transforms/KernelOutlining.cpp | 12 +- lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 26 +- .../Linalg/Analysis/DependenceAnalysis.cpp | 20 +- lib/Dialect/Linalg/EDSC/Builders.cpp | 18 +- lib/Dialect/Linalg/IR/LinalgOps.cpp | 4 +- lib/Dialect/Linalg/Transforms/Fusion.cpp | 32 +- .../Linalg/Transforms/LinalgToLoops.cpp | 44 +-- .../Linalg/Transforms/LinalgTransforms.cpp | 6 +- lib/Dialect/Linalg/Transforms/Promotion.cpp | 35 +- lib/Dialect/Linalg/Transforms/Tiling.cpp | 53 +-- lib/Dialect/Linalg/Utils/Utils.cpp | 24 +- lib/Dialect/LoopOps/LoopOps.cpp | 12 +- lib/Dialect/SPIRV/SPIRVDialect.cpp | 2 +- lib/Dialect/SPIRV/SPIRVLowering.cpp | 8 +- lib/Dialect/SPIRV/SPIRVOps.cpp | 39 +-- .../SPIRV/Serialization/Deserializer.cpp | 38 +-- .../SPIRV/Serialization/Serializer.cpp | 18 +- 
.../Transforms/LowerABIAttributesPass.cpp | 6 +- lib/Dialect/StandardOps/Ops.cpp | 66 ++-- lib/Dialect/VectorOps/VectorOps.cpp | 30 +- lib/Dialect/VectorOps/VectorTransforms.cpp | 76 +++-- lib/EDSC/Builders.cpp | 23 +- lib/EDSC/Helpers.cpp | 6 +- lib/EDSC/Intrinsics.cpp | 12 +- lib/IR/AsmPrinter.cpp | 50 +-- lib/IR/Block.cpp | 4 +- lib/IR/Builders.cpp | 4 +- lib/IR/Operation.cpp | 26 +- lib/IR/OperationSupport.cpp | 13 +- lib/IR/Region.cpp | 6 +- lib/IR/TypeUtilities.cpp | 12 +- lib/IR/Value.cpp | 4 +- lib/Parser/Parser.cpp | 65 ++-- lib/Pass/IRPrinting.cpp | 4 +- .../Support/ConstraintAnalysisGraph.cpp | 2 +- .../Transforms/AddDefaultStatsTestPass.cpp | 2 +- .../Transforms/InferQuantizedTypesPass.cpp | 14 +- lib/TableGen/Pattern.cpp | 2 +- lib/Target/LLVMIR/ConvertFromLLVMIR.cpp | 38 +-- lib/Target/LLVMIR/ModuleTranslation.cpp | 10 +- lib/Transforms/AffineDataCopyGeneration.cpp | 2 +- .../AffineLoopInvariantCodeMotion.cpp | 21 +- lib/Transforms/DialectConversion.cpp | 58 ++-- lib/Transforms/LoopFusion.cpp | 93 +++--- lib/Transforms/LoopInvariantCodeMotion.cpp | 4 +- lib/Transforms/LoopTiling.cpp | 11 +- lib/Transforms/LoopUnrollAndJam.cpp | 4 +- lib/Transforms/MemRefDataFlowOpt.cpp | 6 +- lib/Transforms/PipelineDataTransfer.cpp | 14 +- lib/Transforms/Utils/FoldUtils.cpp | 8 +- .../Utils/GreedyPatternRewriteDriver.cpp | 8 +- lib/Transforms/Utils/InliningUtils.cpp | 36 +-- lib/Transforms/Utils/LoopFusionUtils.cpp | 16 +- lib/Transforms/Utils/LoopUtils.cpp | 169 +++++----- lib/Transforms/Utils/RegionUtils.cpp | 24 +- lib/Transforms/Utils/Utils.cpp | 57 ++-- lib/Transforms/Vectorize.cpp | 40 +-- test/EDSC/builder-api-test.cpp | 4 +- test/lib/TestDialect/TestDialect.cpp | 8 +- test/lib/TestDialect/TestOps.td | 2 +- test/lib/TestDialect/TestPatterns.cpp | 33 +- test/lib/Transforms/TestLoopMapping.cpp | 2 +- .../lib/Transforms/TestVectorizationUtils.cpp | 2 +- test/mlir-tblgen/op-attribute.td | 6 +- test/mlir-tblgen/op-decl.td | 24 +- test/mlir-tblgen/op-operand.td | 10 +- test/mlir-tblgen/op-result.td | 6 +- test/mlir-tblgen/predicate.td | 4 +- tools/mlir-tblgen/OpDefinitionsGen.cpp | 27 +- tools/mlir-tblgen/RewriterGen.cpp | 20 +- tools/mlir-tblgen/SPIRVUtilsGen.cpp | 2 +- unittests/IR/OperationSupportTest.cpp | 8 +- 201 files changed, 2493 insertions(+), 2413 deletions(-) diff --git a/bindings/python/pybind.cpp b/bindings/python/pybind.cpp index 825f800c0bd6..54646cbe8000 100644 --- a/bindings/python/pybind.cpp +++ b/bindings/python/pybind.cpp @@ -103,7 +103,7 @@ struct PythonValueHandle { assert(value.hasType() && value.getType().isa() && "can only call function-typed values"); - std::vector argValues; + std::vector argValues; argValues.reserve(args.size()); for (auto arg : args) argValues.push_back(arg.value.getValue()); diff --git a/examples/toy/Ch2/include/toy/Ops.td b/examples/toy/Ch2/include/toy/Ops.td index f7c011915ffd..dd88b097ab13 100644 --- a/examples/toy/Ch2/include/toy/Ops.td +++ b/examples/toy/Ch2/include/toy/Ops.td @@ -98,7 +98,7 @@ def AddOp : Toy_Op<"add"> { // Allow building an AddOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -129,7 +129,7 @@ def GenericCallOp : Toy_Op<"generic_call"> { // Add custom build methods for the generic call operation. 
let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "StringRef callee, ArrayRef arguments"> + "StringRef callee, ArrayRef arguments"> ]; } @@ -145,7 +145,7 @@ def MulOp : Toy_Op<"mul"> { // Allow building a MulOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -219,7 +219,7 @@ def TransposeOp : Toy_Op<"transpose"> { // Allow building a TransposeOp with from the input operand. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input"> ]; // Invoke a static verify method to verify this transpose operation. diff --git a/examples/toy/Ch2/mlir/Dialect.cpp b/examples/toy/Ch2/mlir/Dialect.cpp index 86f648dbe0e7..4a3232dabe3d 100644 --- a/examples/toy/Ch2/mlir/Dialect.cpp +++ b/examples/toy/Ch2/mlir/Dialect.cpp @@ -94,7 +94,7 @@ static mlir::LogicalResult verify(ConstantOp op) { // AddOp void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -103,7 +103,8 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, // GenericCallOp void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, - StringRef callee, ArrayRef arguments) { + StringRef callee, + ArrayRef arguments) { // Generic call always returns an unranked Tensor initially. state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(arguments); @@ -114,7 +115,7 @@ void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, // MulOp void MulOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -161,7 +162,7 @@ static mlir::LogicalResult verify(ReturnOp op) { // TransposeOp void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *value) { + mlir::ValuePtr value) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(value); } diff --git a/examples/toy/Ch2/mlir/MLIRGen.cpp b/examples/toy/Ch2/mlir/MLIRGen.cpp index da474e809b30..902c634a9546 100644 --- a/examples/toy/Ch2/mlir/MLIRGen.cpp +++ b/examples/toy/Ch2/mlir/MLIRGen.cpp @@ -99,7 +99,7 @@ class MLIRGenImpl { /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, the /// scope is destroyed and the mappings created in this scope are dropped. - llvm::ScopedHashTable symbolTable; + llvm::ScopedHashTable symbolTable; /// Helper conversion for a Toy AST location to an MLIR location. mlir::Location loc(Location loc) { @@ -109,7 +109,7 @@ class MLIRGenImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) { + mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) { if (symbolTable.count(var)) return mlir::failure(); symbolTable.insert(var, value); @@ -132,7 +132,8 @@ class MLIRGenImpl { /// Emit a new function and add it to the MLIR module. 
mlir::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope( + symbolTable); // Create an MLIR function for the given prototype. mlir::FuncOp function(mlirGen(*funcAST.getProto())); @@ -183,7 +184,7 @@ class MLIRGenImpl { } /// Emit a binary operation - mlir::Value *mlirGen(BinaryExprAST &binop) { + mlir::ValuePtr mlirGen(BinaryExprAST &binop) { // First emit the operations for each side of the operation before emitting // the operation itself. For example if the expression is `a + foo(a)` // 1) First it will visiting the LHS, which will return a reference to the @@ -195,10 +196,10 @@ class MLIRGenImpl { // and the result value is returned. If an error occurs we get a nullptr // and propagate. // - mlir::Value *lhs = mlirGen(*binop.getLHS()); + mlir::ValuePtr lhs = mlirGen(*binop.getLHS()); if (!lhs) return nullptr; - mlir::Value *rhs = mlirGen(*binop.getRHS()); + mlir::ValuePtr rhs = mlirGen(*binop.getRHS()); if (!rhs) return nullptr; auto location = loc(binop.loc()); @@ -219,8 +220,8 @@ class MLIRGenImpl { /// This is a reference to a variable in an expression. The variable is /// expected to have been declared and so should have a value in the symbol /// table, otherwise emit an error and return nullptr. - mlir::Value *mlirGen(VariableExprAST &expr) { - if (auto *variable = symbolTable.lookup(expr.getName())) + mlir::ValuePtr mlirGen(VariableExprAST &expr) { + if (auto variable = symbolTable.lookup(expr.getName())) return variable; emitError(loc(expr.loc()), "error: unknown variable '") @@ -233,7 +234,7 @@ class MLIRGenImpl { auto location = loc(ret.loc()); // 'return' takes an optional expression, handle that case here. - mlir::Value *expr = nullptr; + mlir::ValuePtr expr = nullptr; if (ret.getExpr().hasValue()) { if (!(expr = mlirGen(*ret.getExpr().getValue()))) return mlir::failure(); @@ -241,7 +242,7 @@ class MLIRGenImpl { // Otherwise, this return operation has zero operands. builder.create(location, expr ? makeArrayRef(expr) - : ArrayRef()); + : ArrayRef()); return mlir::success(); } @@ -263,7 +264,7 @@ class MLIRGenImpl { /// [[1.000000e+00, 2.000000e+00, 3.000000e+00], /// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64> /// - mlir::Value *mlirGen(LiteralExprAST &lit) { + mlir::ValuePtr mlirGen(LiteralExprAST &lit) { auto type = getType(lit.getDims()); // The attribute is a vector with a floating point value per element @@ -309,14 +310,14 @@ class MLIRGenImpl { /// Emit a call expression. It emits specific operations for the `transpose` /// builtin. Other identifiers are assumed to be user-defined functions. - mlir::Value *mlirGen(CallExprAST &call) { + mlir::ValuePtr mlirGen(CallExprAST &call) { llvm::StringRef callee = call.getCallee(); auto location = loc(call.loc()); // Codegen the operands first. - SmallVector operands; + SmallVector operands; for (auto &expr : call.getArgs()) { - auto *arg = mlirGen(*expr); + auto arg = mlirGen(*expr); if (!arg) return nullptr; operands.push_back(arg); @@ -342,7 +343,7 @@ class MLIRGenImpl { /// Emit a print expression. It emits specific operations for two builtins: /// transpose(x) and print(x). mlir::LogicalResult mlirGen(PrintExprAST &call) { - auto *arg = mlirGen(*call.getArg()); + auto arg = mlirGen(*call.getArg()); if (!arg) return mlir::failure(); @@ -351,12 +352,12 @@ class MLIRGenImpl { } /// Emit a constant for a single number (FIXME: semantic? broadcast?) 
- mlir::Value *mlirGen(NumberExprAST &num) { + mlir::ValuePtr mlirGen(NumberExprAST &num) { return builder.create(loc(num.loc()), num.getValue()); } /// Dispatch codegen for the right expression subclass using RTTI. - mlir::Value *mlirGen(ExprAST &expr) { + mlir::ValuePtr mlirGen(ExprAST &expr) { switch (expr.getKind()) { case toy::ExprAST::Expr_BinOp: return mlirGen(cast(expr)); @@ -380,7 +381,7 @@ class MLIRGenImpl { /// initializer and record the value in the symbol table before returning it. /// Future expressions will be able to reference this variable through symbol /// table lookup. - mlir::Value *mlirGen(VarDeclExprAST &vardecl) { + mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) { auto init = vardecl.getInitVal(); if (!init) { emitError(loc(vardecl.loc()), @@ -388,7 +389,7 @@ class MLIRGenImpl { return nullptr; } - mlir::Value *value = mlirGen(*init); + mlir::ValuePtr value = mlirGen(*init); if (!value) return nullptr; @@ -408,7 +409,7 @@ class MLIRGenImpl { /// Codegen a list of expression, return failure if one of them hit an error. mlir::LogicalResult mlirGen(ExprASTList &blockAST) { - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope(symbolTable); for (auto &expr : blockAST) { // Specific handling for variable declarations, return statement, and // print. These can only appear in block list and not in nested diff --git a/examples/toy/Ch3/include/toy/Ops.td b/examples/toy/Ch3/include/toy/Ops.td index 921e503e4160..6c400169da23 100644 --- a/examples/toy/Ch3/include/toy/Ops.td +++ b/examples/toy/Ch3/include/toy/Ops.td @@ -98,7 +98,7 @@ def AddOp : Toy_Op<"add", [NoSideEffect]> { // Allow building an AddOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -129,7 +129,7 @@ def GenericCallOp : Toy_Op<"generic_call"> { // Add custom build methods for the generic call operation. let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "StringRef callee, ArrayRef arguments"> + "StringRef callee, ArrayRef arguments"> ]; } @@ -145,7 +145,7 @@ def MulOp : Toy_Op<"mul", [NoSideEffect]> { // Allow building a MulOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -225,7 +225,7 @@ def TransposeOp : Toy_Op<"transpose", [NoSideEffect]> { // Allow building a TransposeOp with from the input operand. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input"> ]; // Invoke a static verify method to verify this transpose operation. 
diff --git a/examples/toy/Ch3/mlir/Dialect.cpp b/examples/toy/Ch3/mlir/Dialect.cpp index 86f648dbe0e7..4a3232dabe3d 100644 --- a/examples/toy/Ch3/mlir/Dialect.cpp +++ b/examples/toy/Ch3/mlir/Dialect.cpp @@ -94,7 +94,7 @@ static mlir::LogicalResult verify(ConstantOp op) { // AddOp void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -103,7 +103,8 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, // GenericCallOp void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, - StringRef callee, ArrayRef arguments) { + StringRef callee, + ArrayRef arguments) { // Generic call always returns an unranked Tensor initially. state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(arguments); @@ -114,7 +115,7 @@ void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, // MulOp void MulOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -161,7 +162,7 @@ static mlir::LogicalResult verify(ReturnOp op) { // TransposeOp void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *value) { + mlir::ValuePtr value) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(value); } diff --git a/examples/toy/Ch3/mlir/MLIRGen.cpp b/examples/toy/Ch3/mlir/MLIRGen.cpp index da474e809b30..902c634a9546 100644 --- a/examples/toy/Ch3/mlir/MLIRGen.cpp +++ b/examples/toy/Ch3/mlir/MLIRGen.cpp @@ -99,7 +99,7 @@ class MLIRGenImpl { /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, the /// scope is destroyed and the mappings created in this scope are dropped. - llvm::ScopedHashTable symbolTable; + llvm::ScopedHashTable symbolTable; /// Helper conversion for a Toy AST location to an MLIR location. mlir::Location loc(Location loc) { @@ -109,7 +109,7 @@ class MLIRGenImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) { + mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) { if (symbolTable.count(var)) return mlir::failure(); symbolTable.insert(var, value); @@ -132,7 +132,8 @@ class MLIRGenImpl { /// Emit a new function and add it to the MLIR module. mlir::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope( + symbolTable); // Create an MLIR function for the given prototype. mlir::FuncOp function(mlirGen(*funcAST.getProto())); @@ -183,7 +184,7 @@ class MLIRGenImpl { } /// Emit a binary operation - mlir::Value *mlirGen(BinaryExprAST &binop) { + mlir::ValuePtr mlirGen(BinaryExprAST &binop) { // First emit the operations for each side of the operation before emitting // the operation itself. For example if the expression is `a + foo(a)` // 1) First it will visiting the LHS, which will return a reference to the @@ -195,10 +196,10 @@ class MLIRGenImpl { // and the result value is returned. 
If an error occurs we get a nullptr // and propagate. // - mlir::Value *lhs = mlirGen(*binop.getLHS()); + mlir::ValuePtr lhs = mlirGen(*binop.getLHS()); if (!lhs) return nullptr; - mlir::Value *rhs = mlirGen(*binop.getRHS()); + mlir::ValuePtr rhs = mlirGen(*binop.getRHS()); if (!rhs) return nullptr; auto location = loc(binop.loc()); @@ -219,8 +220,8 @@ class MLIRGenImpl { /// This is a reference to a variable in an expression. The variable is /// expected to have been declared and so should have a value in the symbol /// table, otherwise emit an error and return nullptr. - mlir::Value *mlirGen(VariableExprAST &expr) { - if (auto *variable = symbolTable.lookup(expr.getName())) + mlir::ValuePtr mlirGen(VariableExprAST &expr) { + if (auto variable = symbolTable.lookup(expr.getName())) return variable; emitError(loc(expr.loc()), "error: unknown variable '") @@ -233,7 +234,7 @@ class MLIRGenImpl { auto location = loc(ret.loc()); // 'return' takes an optional expression, handle that case here. - mlir::Value *expr = nullptr; + mlir::ValuePtr expr = nullptr; if (ret.getExpr().hasValue()) { if (!(expr = mlirGen(*ret.getExpr().getValue()))) return mlir::failure(); @@ -241,7 +242,7 @@ class MLIRGenImpl { // Otherwise, this return operation has zero operands. builder.create(location, expr ? makeArrayRef(expr) - : ArrayRef()); + : ArrayRef()); return mlir::success(); } @@ -263,7 +264,7 @@ class MLIRGenImpl { /// [[1.000000e+00, 2.000000e+00, 3.000000e+00], /// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64> /// - mlir::Value *mlirGen(LiteralExprAST &lit) { + mlir::ValuePtr mlirGen(LiteralExprAST &lit) { auto type = getType(lit.getDims()); // The attribute is a vector with a floating point value per element @@ -309,14 +310,14 @@ class MLIRGenImpl { /// Emit a call expression. It emits specific operations for the `transpose` /// builtin. Other identifiers are assumed to be user-defined functions. - mlir::Value *mlirGen(CallExprAST &call) { + mlir::ValuePtr mlirGen(CallExprAST &call) { llvm::StringRef callee = call.getCallee(); auto location = loc(call.loc()); // Codegen the operands first. - SmallVector operands; + SmallVector operands; for (auto &expr : call.getArgs()) { - auto *arg = mlirGen(*expr); + auto arg = mlirGen(*expr); if (!arg) return nullptr; operands.push_back(arg); @@ -342,7 +343,7 @@ class MLIRGenImpl { /// Emit a print expression. It emits specific operations for two builtins: /// transpose(x) and print(x). mlir::LogicalResult mlirGen(PrintExprAST &call) { - auto *arg = mlirGen(*call.getArg()); + auto arg = mlirGen(*call.getArg()); if (!arg) return mlir::failure(); @@ -351,12 +352,12 @@ class MLIRGenImpl { } /// Emit a constant for a single number (FIXME: semantic? broadcast?) - mlir::Value *mlirGen(NumberExprAST &num) { + mlir::ValuePtr mlirGen(NumberExprAST &num) { return builder.create(loc(num.loc()), num.getValue()); } /// Dispatch codegen for the right expression subclass using RTTI. - mlir::Value *mlirGen(ExprAST &expr) { + mlir::ValuePtr mlirGen(ExprAST &expr) { switch (expr.getKind()) { case toy::ExprAST::Expr_BinOp: return mlirGen(cast(expr)); @@ -380,7 +381,7 @@ class MLIRGenImpl { /// initializer and record the value in the symbol table before returning it. /// Future expressions will be able to reference this variable through symbol /// table lookup. 
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) { + mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) { auto init = vardecl.getInitVal(); if (!init) { emitError(loc(vardecl.loc()), @@ -388,7 +389,7 @@ class MLIRGenImpl { return nullptr; } - mlir::Value *value = mlirGen(*init); + mlir::ValuePtr value = mlirGen(*init); if (!value) return nullptr; @@ -408,7 +409,7 @@ class MLIRGenImpl { /// Codegen a list of expression, return failure if one of them hit an error. mlir::LogicalResult mlirGen(ExprASTList &blockAST) { - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope(symbolTable); for (auto &expr : blockAST) { // Specific handling for variable declarations, return statement, and // print. These can only appear in block list and not in nested diff --git a/examples/toy/Ch3/mlir/ToyCombine.cpp b/examples/toy/Ch3/mlir/ToyCombine.cpp index 1b9dcd202919..42a103975136 100644 --- a/examples/toy/Ch3/mlir/ToyCombine.cpp +++ b/examples/toy/Ch3/mlir/ToyCombine.cpp @@ -48,7 +48,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern { matchAndRewrite(TransposeOp op, mlir::PatternRewriter &rewriter) const override { // Look through the input of the current transpose. - mlir::Value *transposeInput = op.getOperand(); + mlir::ValuePtr transposeInput = op.getOperand(); TransposeOp transposeInputOp = llvm::dyn_cast_or_null(transposeInput->getDefiningOp()); diff --git a/examples/toy/Ch4/include/toy/Ops.td b/examples/toy/Ch4/include/toy/Ops.td index aec1cc3cfc9c..ef5b30a862b6 100644 --- a/examples/toy/Ch4/include/toy/Ops.td +++ b/examples/toy/Ch4/include/toy/Ops.td @@ -100,7 +100,7 @@ def AddOp : Toy_Op<"add", // Allow building an AddOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -151,7 +151,7 @@ def GenericCallOp : Toy_Op<"generic_call", // Add custom build methods for the generic call operation. let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "StringRef callee, ArrayRef arguments"> + "StringRef callee, ArrayRef arguments"> ]; } @@ -168,7 +168,7 @@ def MulOp : Toy_Op<"mul", // Allow building a MulOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -245,7 +245,7 @@ def TransposeOp : Toy_Op<"transpose", // Allow building a TransposeOp with from the input operand. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input"> ]; // Invoke a static verify method to verify this transpose operation. diff --git a/examples/toy/Ch4/mlir/Dialect.cpp b/examples/toy/Ch4/mlir/Dialect.cpp index 7003cbdcc810..8be1094cf152 100644 --- a/examples/toy/Ch4/mlir/Dialect.cpp +++ b/examples/toy/Ch4/mlir/Dialect.cpp @@ -55,7 +55,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator(toy.return) by replacing it with a new /// operation as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only "toy.return" needs to be handled here. auto returnOp = cast(op); @@ -70,7 +70,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// operation that takes 'input' as the only operand, and produces a single /// result of 'resultType'. 
If a conversion can not be generated, nullptr /// should be returned. - Operation *materializeCallConversion(OpBuilder &builder, Value *input, + Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input, Type resultType, Location conversionLoc) const final { return builder.create(conversionLoc, resultType, input); @@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ConstantOp op) { // AddOp void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -164,7 +164,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); } // GenericCallOp void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, - StringRef callee, ArrayRef arguments) { + StringRef callee, + ArrayRef arguments) { // Generic call always returns an unranked Tensor initially. state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(arguments); @@ -185,7 +186,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); } // MulOp void MulOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -236,7 +237,7 @@ static mlir::LogicalResult verify(ReturnOp op) { // TransposeOp void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *value) { + mlir::ValuePtr value) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(value); } diff --git a/examples/toy/Ch4/mlir/MLIRGen.cpp b/examples/toy/Ch4/mlir/MLIRGen.cpp index da474e809b30..902c634a9546 100644 --- a/examples/toy/Ch4/mlir/MLIRGen.cpp +++ b/examples/toy/Ch4/mlir/MLIRGen.cpp @@ -99,7 +99,7 @@ class MLIRGenImpl { /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, the /// scope is destroyed and the mappings created in this scope are dropped. - llvm::ScopedHashTable symbolTable; + llvm::ScopedHashTable symbolTable; /// Helper conversion for a Toy AST location to an MLIR location. mlir::Location loc(Location loc) { @@ -109,7 +109,7 @@ class MLIRGenImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) { + mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) { if (symbolTable.count(var)) return mlir::failure(); symbolTable.insert(var, value); @@ -132,7 +132,8 @@ class MLIRGenImpl { /// Emit a new function and add it to the MLIR module. mlir::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope( + symbolTable); // Create an MLIR function for the given prototype. mlir::FuncOp function(mlirGen(*funcAST.getProto())); @@ -183,7 +184,7 @@ class MLIRGenImpl { } /// Emit a binary operation - mlir::Value *mlirGen(BinaryExprAST &binop) { + mlir::ValuePtr mlirGen(BinaryExprAST &binop) { // First emit the operations for each side of the operation before emitting // the operation itself. 
For example if the expression is `a + foo(a)` // 1) First it will visiting the LHS, which will return a reference to the @@ -195,10 +196,10 @@ class MLIRGenImpl { // and the result value is returned. If an error occurs we get a nullptr // and propagate. // - mlir::Value *lhs = mlirGen(*binop.getLHS()); + mlir::ValuePtr lhs = mlirGen(*binop.getLHS()); if (!lhs) return nullptr; - mlir::Value *rhs = mlirGen(*binop.getRHS()); + mlir::ValuePtr rhs = mlirGen(*binop.getRHS()); if (!rhs) return nullptr; auto location = loc(binop.loc()); @@ -219,8 +220,8 @@ class MLIRGenImpl { /// This is a reference to a variable in an expression. The variable is /// expected to have been declared and so should have a value in the symbol /// table, otherwise emit an error and return nullptr. - mlir::Value *mlirGen(VariableExprAST &expr) { - if (auto *variable = symbolTable.lookup(expr.getName())) + mlir::ValuePtr mlirGen(VariableExprAST &expr) { + if (auto variable = symbolTable.lookup(expr.getName())) return variable; emitError(loc(expr.loc()), "error: unknown variable '") @@ -233,7 +234,7 @@ class MLIRGenImpl { auto location = loc(ret.loc()); // 'return' takes an optional expression, handle that case here. - mlir::Value *expr = nullptr; + mlir::ValuePtr expr = nullptr; if (ret.getExpr().hasValue()) { if (!(expr = mlirGen(*ret.getExpr().getValue()))) return mlir::failure(); @@ -241,7 +242,7 @@ class MLIRGenImpl { // Otherwise, this return operation has zero operands. builder.create(location, expr ? makeArrayRef(expr) - : ArrayRef()); + : ArrayRef()); return mlir::success(); } @@ -263,7 +264,7 @@ class MLIRGenImpl { /// [[1.000000e+00, 2.000000e+00, 3.000000e+00], /// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64> /// - mlir::Value *mlirGen(LiteralExprAST &lit) { + mlir::ValuePtr mlirGen(LiteralExprAST &lit) { auto type = getType(lit.getDims()); // The attribute is a vector with a floating point value per element @@ -309,14 +310,14 @@ class MLIRGenImpl { /// Emit a call expression. It emits specific operations for the `transpose` /// builtin. Other identifiers are assumed to be user-defined functions. - mlir::Value *mlirGen(CallExprAST &call) { + mlir::ValuePtr mlirGen(CallExprAST &call) { llvm::StringRef callee = call.getCallee(); auto location = loc(call.loc()); // Codegen the operands first. - SmallVector operands; + SmallVector operands; for (auto &expr : call.getArgs()) { - auto *arg = mlirGen(*expr); + auto arg = mlirGen(*expr); if (!arg) return nullptr; operands.push_back(arg); @@ -342,7 +343,7 @@ class MLIRGenImpl { /// Emit a print expression. It emits specific operations for two builtins: /// transpose(x) and print(x). mlir::LogicalResult mlirGen(PrintExprAST &call) { - auto *arg = mlirGen(*call.getArg()); + auto arg = mlirGen(*call.getArg()); if (!arg) return mlir::failure(); @@ -351,12 +352,12 @@ class MLIRGenImpl { } /// Emit a constant for a single number (FIXME: semantic? broadcast?) - mlir::Value *mlirGen(NumberExprAST &num) { + mlir::ValuePtr mlirGen(NumberExprAST &num) { return builder.create(loc(num.loc()), num.getValue()); } /// Dispatch codegen for the right expression subclass using RTTI. - mlir::Value *mlirGen(ExprAST &expr) { + mlir::ValuePtr mlirGen(ExprAST &expr) { switch (expr.getKind()) { case toy::ExprAST::Expr_BinOp: return mlirGen(cast(expr)); @@ -380,7 +381,7 @@ class MLIRGenImpl { /// initializer and record the value in the symbol table before returning it. /// Future expressions will be able to reference this variable through symbol /// table lookup. 
- mlir::Value *mlirGen(VarDeclExprAST &vardecl) { + mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) { auto init = vardecl.getInitVal(); if (!init) { emitError(loc(vardecl.loc()), @@ -388,7 +389,7 @@ class MLIRGenImpl { return nullptr; } - mlir::Value *value = mlirGen(*init); + mlir::ValuePtr value = mlirGen(*init); if (!value) return nullptr; @@ -408,7 +409,7 @@ class MLIRGenImpl { /// Codegen a list of expression, return failure if one of them hit an error. mlir::LogicalResult mlirGen(ExprASTList &blockAST) { - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope(symbolTable); for (auto &expr : blockAST) { // Specific handling for variable declarations, return statement, and // print. These can only appear in block list and not in nested diff --git a/examples/toy/Ch4/mlir/ToyCombine.cpp b/examples/toy/Ch4/mlir/ToyCombine.cpp index 47e1abc6c744..604e9fa6c83e 100644 --- a/examples/toy/Ch4/mlir/ToyCombine.cpp +++ b/examples/toy/Ch4/mlir/ToyCombine.cpp @@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern { matchAndRewrite(TransposeOp op, mlir::PatternRewriter &rewriter) const override { // Look through the input of the current transpose. - mlir::Value *transposeInput = op.getOperand(); + mlir::ValuePtr transposeInput = op.getOperand(); TransposeOp transposeInputOp = llvm::dyn_cast_or_null(transposeInput->getDefiningOp()); diff --git a/examples/toy/Ch5/include/toy/Ops.td b/examples/toy/Ch5/include/toy/Ops.td index e40b661fd34a..b3bda1d647b4 100644 --- a/examples/toy/Ch5/include/toy/Ops.td +++ b/examples/toy/Ch5/include/toy/Ops.td @@ -100,7 +100,7 @@ def AddOp : Toy_Op<"add", // Allow building an AddOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -151,7 +151,7 @@ def GenericCallOp : Toy_Op<"generic_call", // Add custom build methods for the generic call operation. let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "StringRef callee, ArrayRef arguments"> + "StringRef callee, ArrayRef arguments"> ]; } @@ -168,7 +168,7 @@ def MulOp : Toy_Op<"mul", // Allow building a MulOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -246,7 +246,7 @@ def TransposeOp : Toy_Op<"transpose", // Allow building a TransposeOp with from the input operand. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input"> ]; // Invoke a static verify method to verify this transpose operation. diff --git a/examples/toy/Ch5/mlir/Dialect.cpp b/examples/toy/Ch5/mlir/Dialect.cpp index 7003cbdcc810..8be1094cf152 100644 --- a/examples/toy/Ch5/mlir/Dialect.cpp +++ b/examples/toy/Ch5/mlir/Dialect.cpp @@ -55,7 +55,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator(toy.return) by replacing it with a new /// operation as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only "toy.return" needs to be handled here. auto returnOp = cast(op); @@ -70,7 +70,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// operation that takes 'input' as the only operand, and produces a single /// result of 'resultType'. 
If a conversion can not be generated, nullptr /// should be returned. - Operation *materializeCallConversion(OpBuilder &builder, Value *input, + Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input, Type resultType, Location conversionLoc) const final { return builder.create(conversionLoc, resultType, input); @@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ConstantOp op) { // AddOp void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -164,7 +164,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); } // GenericCallOp void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, - StringRef callee, ArrayRef arguments) { + StringRef callee, + ArrayRef arguments) { // Generic call always returns an unranked Tensor initially. state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(arguments); @@ -185,7 +186,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); } // MulOp void MulOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -236,7 +237,7 @@ static mlir::LogicalResult verify(ReturnOp op) { // TransposeOp void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *value) { + mlir::ValuePtr value) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(value); } diff --git a/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp index 4ab8c5b501c9..3fa761c74048 100644 --- a/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp +++ b/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp @@ -43,8 +43,8 @@ static MemRefType convertTensorToMemRef(TensorType type) { } /// Insert an allocation and deallocation for the given MemRefType. -static Value *insertAllocAndDealloc(MemRefType type, Location loc, - PatternRewriter &rewriter) { +static ValuePtr insertAllocAndDealloc(MemRefType type, Location loc, + PatternRewriter &rewriter) { auto alloc = rewriter.create(loc, type); // Make sure to allocate at the beginning of the block. @@ -63,11 +63,11 @@ static Value *insertAllocAndDealloc(MemRefType type, Location loc, /// to the operands of the input operation, and the set of loop induction /// variables for the iteration. It returns a value to store at the current /// index of the iteration. -using LoopIterationFn = function_ref memRefOperands, - ArrayRef loopIvs)>; +using LoopIterationFn = function_ref memRefOperands, + ArrayRef loopIvs)>; -static void lowerOpToLoops(Operation *op, ArrayRef operands, +static void lowerOpToLoops(Operation *op, ArrayRef operands, PatternRewriter &rewriter, LoopIterationFn processIteration) { auto tensorType = (*op->result_type_begin()).cast(); @@ -78,7 +78,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef operands, auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter); // Create an empty affine loop for each of the dimensions within the shape. 
- SmallVector loopIvs; + SmallVector loopIvs; for (auto dim : tensorType.getShape()) { auto loop = rewriter.create(loc, /*lb=*/0, dim, /*step=*/1); loop.getBody()->clear(); @@ -94,7 +94,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef operands, // Generate a call to the processing function with the rewriter, the memref // operands, and the loop induction variables. This function will return the // value to store at the current index. - Value *valueToStore = processIteration(rewriter, operands, loopIvs); + ValuePtr valueToStore = processIteration(rewriter, operands, loopIvs); rewriter.create(loc, valueToStore, alloc, llvm::makeArrayRef(loopIvs)); @@ -113,13 +113,13 @@ struct BinaryOpLowering : public ConversionPattern { : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); lowerOpToLoops( op, operands, rewriter, - [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, - ArrayRef loopIvs) { + [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the BinaryOp. This // allows for using the nice named accessors that are generated by the // ODS. @@ -163,7 +163,7 @@ struct ConstantOpLowering : public OpRewritePattern { // Create these constants up-front to avoid large amounts of redundant // operations. auto valueShape = memRefType.getShape(); - SmallVector constantIndices; + SmallVector constantIndices; for (auto i : llvm::seq( 0, *std::max_element(valueShape.begin(), valueShape.end()))) constantIndices.push_back(rewriter.create(loc, i)); @@ -172,7 +172,7 @@ struct ConstantOpLowering : public OpRewritePattern { // will need to generate a store for each of the elements. The following // functor recursively walks the dimensions of the constant shape, // generating a store when the recursion hits the base case. - SmallVector indices; + SmallVector indices; auto valueIt = constantValue.getValues().begin(); std::function storeElements = [&](uint64_t dimension) { // The last dimension is the base case of the recursion, at this point @@ -231,22 +231,22 @@ struct TransposeOpLowering : public ConversionPattern { : ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); lowerOpToLoops( op, operands, rewriter, - [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, - ArrayRef loopIvs) { + [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the TransposeOp. // This allows for using the nice named accessors that are generated // by the ODS. toy::TransposeOpOperandAdaptor transposeAdaptor(memRefOperands); - Value *input = transposeAdaptor.input(); + ValuePtr input = transposeAdaptor.input(); // Transpose the elements by generating a load from the reverse // indices. 
- SmallVector reverseIvs(llvm::reverse(loopIvs)); + SmallVector reverseIvs(llvm::reverse(loopIvs)); return rewriter.create(loc, input, reverseIvs); }); return matchSuccess(); diff --git a/examples/toy/Ch5/mlir/MLIRGen.cpp b/examples/toy/Ch5/mlir/MLIRGen.cpp index da474e809b30..902c634a9546 100644 --- a/examples/toy/Ch5/mlir/MLIRGen.cpp +++ b/examples/toy/Ch5/mlir/MLIRGen.cpp @@ -99,7 +99,7 @@ class MLIRGenImpl { /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, the /// scope is destroyed and the mappings created in this scope are dropped. - llvm::ScopedHashTable symbolTable; + llvm::ScopedHashTable symbolTable; /// Helper conversion for a Toy AST location to an MLIR location. mlir::Location loc(Location loc) { @@ -109,7 +109,7 @@ class MLIRGenImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) { + mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) { if (symbolTable.count(var)) return mlir::failure(); symbolTable.insert(var, value); @@ -132,7 +132,8 @@ class MLIRGenImpl { /// Emit a new function and add it to the MLIR module. mlir::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope( + symbolTable); // Create an MLIR function for the given prototype. mlir::FuncOp function(mlirGen(*funcAST.getProto())); @@ -183,7 +184,7 @@ class MLIRGenImpl { } /// Emit a binary operation - mlir::Value *mlirGen(BinaryExprAST &binop) { + mlir::ValuePtr mlirGen(BinaryExprAST &binop) { // First emit the operations for each side of the operation before emitting // the operation itself. For example if the expression is `a + foo(a)` // 1) First it will visiting the LHS, which will return a reference to the @@ -195,10 +196,10 @@ class MLIRGenImpl { // and the result value is returned. If an error occurs we get a nullptr // and propagate. // - mlir::Value *lhs = mlirGen(*binop.getLHS()); + mlir::ValuePtr lhs = mlirGen(*binop.getLHS()); if (!lhs) return nullptr; - mlir::Value *rhs = mlirGen(*binop.getRHS()); + mlir::ValuePtr rhs = mlirGen(*binop.getRHS()); if (!rhs) return nullptr; auto location = loc(binop.loc()); @@ -219,8 +220,8 @@ class MLIRGenImpl { /// This is a reference to a variable in an expression. The variable is /// expected to have been declared and so should have a value in the symbol /// table, otherwise emit an error and return nullptr. - mlir::Value *mlirGen(VariableExprAST &expr) { - if (auto *variable = symbolTable.lookup(expr.getName())) + mlir::ValuePtr mlirGen(VariableExprAST &expr) { + if (auto variable = symbolTable.lookup(expr.getName())) return variable; emitError(loc(expr.loc()), "error: unknown variable '") @@ -233,7 +234,7 @@ class MLIRGenImpl { auto location = loc(ret.loc()); // 'return' takes an optional expression, handle that case here. - mlir::Value *expr = nullptr; + mlir::ValuePtr expr = nullptr; if (ret.getExpr().hasValue()) { if (!(expr = mlirGen(*ret.getExpr().getValue()))) return mlir::failure(); @@ -241,7 +242,7 @@ class MLIRGenImpl { // Otherwise, this return operation has zero operands. builder.create(location, expr ? 
makeArrayRef(expr) - : ArrayRef()); + : ArrayRef()); return mlir::success(); } @@ -263,7 +264,7 @@ class MLIRGenImpl { /// [[1.000000e+00, 2.000000e+00, 3.000000e+00], /// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64> /// - mlir::Value *mlirGen(LiteralExprAST &lit) { + mlir::ValuePtr mlirGen(LiteralExprAST &lit) { auto type = getType(lit.getDims()); // The attribute is a vector with a floating point value per element @@ -309,14 +310,14 @@ class MLIRGenImpl { /// Emit a call expression. It emits specific operations for the `transpose` /// builtin. Other identifiers are assumed to be user-defined functions. - mlir::Value *mlirGen(CallExprAST &call) { + mlir::ValuePtr mlirGen(CallExprAST &call) { llvm::StringRef callee = call.getCallee(); auto location = loc(call.loc()); // Codegen the operands first. - SmallVector operands; + SmallVector operands; for (auto &expr : call.getArgs()) { - auto *arg = mlirGen(*expr); + auto arg = mlirGen(*expr); if (!arg) return nullptr; operands.push_back(arg); @@ -342,7 +343,7 @@ class MLIRGenImpl { /// Emit a print expression. It emits specific operations for two builtins: /// transpose(x) and print(x). mlir::LogicalResult mlirGen(PrintExprAST &call) { - auto *arg = mlirGen(*call.getArg()); + auto arg = mlirGen(*call.getArg()); if (!arg) return mlir::failure(); @@ -351,12 +352,12 @@ class MLIRGenImpl { } /// Emit a constant for a single number (FIXME: semantic? broadcast?) - mlir::Value *mlirGen(NumberExprAST &num) { + mlir::ValuePtr mlirGen(NumberExprAST &num) { return builder.create(loc(num.loc()), num.getValue()); } /// Dispatch codegen for the right expression subclass using RTTI. - mlir::Value *mlirGen(ExprAST &expr) { + mlir::ValuePtr mlirGen(ExprAST &expr) { switch (expr.getKind()) { case toy::ExprAST::Expr_BinOp: return mlirGen(cast(expr)); @@ -380,7 +381,7 @@ class MLIRGenImpl { /// initializer and record the value in the symbol table before returning it. /// Future expressions will be able to reference this variable through symbol /// table lookup. - mlir::Value *mlirGen(VarDeclExprAST &vardecl) { + mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) { auto init = vardecl.getInitVal(); if (!init) { emitError(loc(vardecl.loc()), @@ -388,7 +389,7 @@ class MLIRGenImpl { return nullptr; } - mlir::Value *value = mlirGen(*init); + mlir::ValuePtr value = mlirGen(*init); if (!value) return nullptr; @@ -408,7 +409,7 @@ class MLIRGenImpl { /// Codegen a list of expression, return failure if one of them hit an error. mlir::LogicalResult mlirGen(ExprASTList &blockAST) { - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope(symbolTable); for (auto &expr : blockAST) { // Specific handling for variable declarations, return statement, and // print. These can only appear in block list and not in nested diff --git a/examples/toy/Ch5/mlir/ToyCombine.cpp b/examples/toy/Ch5/mlir/ToyCombine.cpp index 47e1abc6c744..604e9fa6c83e 100644 --- a/examples/toy/Ch5/mlir/ToyCombine.cpp +++ b/examples/toy/Ch5/mlir/ToyCombine.cpp @@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern { matchAndRewrite(TransposeOp op, mlir::PatternRewriter &rewriter) const override { // Look through the input of the current transpose. 
- mlir::Value *transposeInput = op.getOperand(); + mlir::ValuePtr transposeInput = op.getOperand(); TransposeOp transposeInputOp = llvm::dyn_cast_or_null(transposeInput->getDefiningOp()); diff --git a/examples/toy/Ch6/include/toy/Ops.td b/examples/toy/Ch6/include/toy/Ops.td index e40b661fd34a..b3bda1d647b4 100644 --- a/examples/toy/Ch6/include/toy/Ops.td +++ b/examples/toy/Ch6/include/toy/Ops.td @@ -100,7 +100,7 @@ def AddOp : Toy_Op<"add", // Allow building an AddOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -151,7 +151,7 @@ def GenericCallOp : Toy_Op<"generic_call", // Add custom build methods for the generic call operation. let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "StringRef callee, ArrayRef arguments"> + "StringRef callee, ArrayRef arguments"> ]; } @@ -168,7 +168,7 @@ def MulOp : Toy_Op<"mul", // Allow building a MulOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -246,7 +246,7 @@ def TransposeOp : Toy_Op<"transpose", // Allow building a TransposeOp with from the input operand. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input"> ]; // Invoke a static verify method to verify this transpose operation. diff --git a/examples/toy/Ch6/mlir/Dialect.cpp b/examples/toy/Ch6/mlir/Dialect.cpp index 7003cbdcc810..8be1094cf152 100644 --- a/examples/toy/Ch6/mlir/Dialect.cpp +++ b/examples/toy/Ch6/mlir/Dialect.cpp @@ -55,7 +55,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator(toy.return) by replacing it with a new /// operation as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only "toy.return" needs to be handled here. auto returnOp = cast(op); @@ -70,7 +70,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// operation that takes 'input' as the only operand, and produces a single /// result of 'resultType'. If a conversion can not be generated, nullptr /// should be returned. - Operation *materializeCallConversion(OpBuilder &builder, Value *input, + Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input, Type resultType, Location conversionLoc) const final { return builder.create(conversionLoc, resultType, input); @@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ConstantOp op) { // AddOp void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -164,7 +164,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); } // GenericCallOp void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, - StringRef callee, ArrayRef arguments) { + StringRef callee, + ArrayRef arguments) { // Generic call always returns an unranked Tensor initially. 
state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(arguments); @@ -185,7 +186,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); } // MulOp void MulOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -236,7 +237,7 @@ static mlir::LogicalResult verify(ReturnOp op) { // TransposeOp void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *value) { + mlir::ValuePtr value) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(value); } diff --git a/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp index 4ab8c5b501c9..3fa761c74048 100644 --- a/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp +++ b/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp @@ -43,8 +43,8 @@ static MemRefType convertTensorToMemRef(TensorType type) { } /// Insert an allocation and deallocation for the given MemRefType. -static Value *insertAllocAndDealloc(MemRefType type, Location loc, - PatternRewriter &rewriter) { +static ValuePtr insertAllocAndDealloc(MemRefType type, Location loc, + PatternRewriter &rewriter) { auto alloc = rewriter.create(loc, type); // Make sure to allocate at the beginning of the block. @@ -63,11 +63,11 @@ static Value *insertAllocAndDealloc(MemRefType type, Location loc, /// to the operands of the input operation, and the set of loop induction /// variables for the iteration. It returns a value to store at the current /// index of the iteration. -using LoopIterationFn = function_ref memRefOperands, - ArrayRef loopIvs)>; +using LoopIterationFn = function_ref memRefOperands, + ArrayRef loopIvs)>; -static void lowerOpToLoops(Operation *op, ArrayRef operands, +static void lowerOpToLoops(Operation *op, ArrayRef operands, PatternRewriter &rewriter, LoopIterationFn processIteration) { auto tensorType = (*op->result_type_begin()).cast(); @@ -78,7 +78,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef operands, auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter); // Create an empty affine loop for each of the dimensions within the shape. - SmallVector loopIvs; + SmallVector loopIvs; for (auto dim : tensorType.getShape()) { auto loop = rewriter.create(loc, /*lb=*/0, dim, /*step=*/1); loop.getBody()->clear(); @@ -94,7 +94,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef operands, // Generate a call to the processing function with the rewriter, the memref // operands, and the loop induction variables. This function will return the // value to store at the current index. 
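To make the `LoopIterationFn` change above concrete: the callback handed to `lowerOpToLoops` now traffics in `ValuePtr` end to end. The sketch below is roughly what the templated `BinaryOpLowering` further down instantiates to for `toy.add`; the `AddOpOperandAdaptor` name follows the ODS-generated naming used elsewhere in this patch, and the details are illustrative rather than part of the diff:

```c++
lowerOpToLoops(
    op, operands, rewriter,
    [loc](PatternRewriter &rewriter, ArrayRef<ValuePtr> memRefOperands,
          ArrayRef<ValuePtr> loopIvs) -> ValuePtr {
      // Load both inputs at the current induction variables, add them, and
      // hand the result back; lowerOpToLoops stores it into the allocation.
      toy::AddOpOperandAdaptor adaptor(memRefOperands);
      ValuePtr lhs = rewriter.create<AffineLoadOp>(loc, adaptor.lhs(), loopIvs);
      ValuePtr rhs = rewriter.create<AffineLoadOp>(loc, adaptor.rhs(), loopIvs);
      return rewriter.create<AddFOp>(loc, lhs, rhs);
    });
```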
- Value *valueToStore = processIteration(rewriter, operands, loopIvs); + ValuePtr valueToStore = processIteration(rewriter, operands, loopIvs); rewriter.create(loc, valueToStore, alloc, llvm::makeArrayRef(loopIvs)); @@ -113,13 +113,13 @@ struct BinaryOpLowering : public ConversionPattern { : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); lowerOpToLoops( op, operands, rewriter, - [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, - ArrayRef loopIvs) { + [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the BinaryOp. This // allows for using the nice named accessors that are generated by the // ODS. @@ -163,7 +163,7 @@ struct ConstantOpLowering : public OpRewritePattern { // Create these constants up-front to avoid large amounts of redundant // operations. auto valueShape = memRefType.getShape(); - SmallVector constantIndices; + SmallVector constantIndices; for (auto i : llvm::seq( 0, *std::max_element(valueShape.begin(), valueShape.end()))) constantIndices.push_back(rewriter.create(loc, i)); @@ -172,7 +172,7 @@ struct ConstantOpLowering : public OpRewritePattern { // will need to generate a store for each of the elements. The following // functor recursively walks the dimensions of the constant shape, // generating a store when the recursion hits the base case. - SmallVector indices; + SmallVector indices; auto valueIt = constantValue.getValues().begin(); std::function storeElements = [&](uint64_t dimension) { // The last dimension is the base case of the recursion, at this point @@ -231,22 +231,22 @@ struct TransposeOpLowering : public ConversionPattern { : ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); lowerOpToLoops( op, operands, rewriter, - [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, - ArrayRef loopIvs) { + [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the TransposeOp. // This allows for using the nice named accessors that are generated // by the ODS. toy::TransposeOpOperandAdaptor transposeAdaptor(memRefOperands); - Value *input = transposeAdaptor.input(); + ValuePtr input = transposeAdaptor.input(); // Transpose the elements by generating a load from the reverse // indices. 
- SmallVector reverseIvs(llvm::reverse(loopIvs)); + SmallVector reverseIvs(llvm::reverse(loopIvs)); return rewriter.create(loc, input, reverseIvs); }); return matchSuccess(); diff --git a/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/examples/toy/Ch6/mlir/LowerToLLVM.cpp index d35cc5c576ad..c3180b4a92d9 100644 --- a/examples/toy/Ch6/mlir/LowerToLLVM.cpp +++ b/examples/toy/Ch6/mlir/LowerToLLVM.cpp @@ -51,7 +51,7 @@ class PrintOpLowering : public ConversionPattern { : ConversionPattern(toy::PrintOp::getOperationName(), 1, context) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto memRefType = (*op->operand_type_begin()).cast(); auto memRefShape = memRefType.getShape(); @@ -64,14 +64,14 @@ class PrintOpLowering : public ConversionPattern { // Get a symbol reference to the printf function, inserting it if necessary. auto printfRef = getOrInsertPrintf(rewriter, parentModule, llvmDialect); - Value *formatSpecifierCst = getOrCreateGlobalString( + ValuePtr formatSpecifierCst = getOrCreateGlobalString( loc, rewriter, "frmt_spec", StringRef("%f \0", 4), parentModule, llvmDialect); - Value *newLineCst = getOrCreateGlobalString( + ValuePtr newLineCst = getOrCreateGlobalString( loc, rewriter, "nl", StringRef("\n\0", 2), parentModule, llvmDialect); // Create a loop for each of the dimensions within the shape. - SmallVector loopIvs; + SmallVector loopIvs; for (unsigned i = 0, e = memRefShape.size(); i != e; ++i) { auto lowerBound = rewriter.create(loc, 0); auto upperBound = rewriter.create(loc, memRefShape[i]); @@ -97,7 +97,7 @@ class PrintOpLowering : public ConversionPattern { auto elementLoad = rewriter.create(loc, printOp.input(), loopIvs); rewriter.create( loc, printfRef, rewriter.getIntegerType(32), - ArrayRef({formatSpecifierCst, elementLoad})); + ArrayRef({formatSpecifierCst, elementLoad})); // Notify the rewriter that this operation has been removed. rewriter.eraseOp(op); @@ -130,10 +130,10 @@ class PrintOpLowering : public ConversionPattern { /// Return a value representing an access into a global string with the given /// name, creating the string if necessary. - static Value *getOrCreateGlobalString(Location loc, OpBuilder &builder, - StringRef name, StringRef value, - ModuleOp module, - LLVM::LLVMDialect *llvmDialect) { + static ValuePtr getOrCreateGlobalString(Location loc, OpBuilder &builder, + StringRef name, StringRef value, + ModuleOp module, + LLVM::LLVMDialect *llvmDialect) { // Create the global at the entry of the module. LLVM::GlobalOp global; if (!(global = module.lookupSymbol(name))) { @@ -147,13 +147,13 @@ class PrintOpLowering : public ConversionPattern { } // Get the pointer to the first character in the global string. 
- Value *globalPtr = builder.create(loc, global); - Value *cst0 = builder.create( + ValuePtr globalPtr = builder.create(loc, global); + ValuePtr cst0 = builder.create( loc, LLVM::LLVMType::getInt64Ty(llvmDialect), builder.getIntegerAttr(builder.getIndexType(), 0)); return builder.create( loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr, - ArrayRef({cst0, cst0})); + ArrayRef({cst0, cst0})); } }; } // end anonymous namespace diff --git a/examples/toy/Ch6/mlir/MLIRGen.cpp b/examples/toy/Ch6/mlir/MLIRGen.cpp index da474e809b30..902c634a9546 100644 --- a/examples/toy/Ch6/mlir/MLIRGen.cpp +++ b/examples/toy/Ch6/mlir/MLIRGen.cpp @@ -99,7 +99,7 @@ class MLIRGenImpl { /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, the /// scope is destroyed and the mappings created in this scope are dropped. - llvm::ScopedHashTable symbolTable; + llvm::ScopedHashTable symbolTable; /// Helper conversion for a Toy AST location to an MLIR location. mlir::Location loc(Location loc) { @@ -109,7 +109,7 @@ class MLIRGenImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(llvm::StringRef var, mlir::Value *value) { + mlir::LogicalResult declare(llvm::StringRef var, mlir::ValuePtr value) { if (symbolTable.count(var)) return mlir::failure(); symbolTable.insert(var, value); @@ -132,7 +132,8 @@ class MLIRGenImpl { /// Emit a new function and add it to the MLIR module. mlir::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope( + symbolTable); // Create an MLIR function for the given prototype. mlir::FuncOp function(mlirGen(*funcAST.getProto())); @@ -183,7 +184,7 @@ class MLIRGenImpl { } /// Emit a binary operation - mlir::Value *mlirGen(BinaryExprAST &binop) { + mlir::ValuePtr mlirGen(BinaryExprAST &binop) { // First emit the operations for each side of the operation before emitting // the operation itself. For example if the expression is `a + foo(a)` // 1) First it will visiting the LHS, which will return a reference to the @@ -195,10 +196,10 @@ class MLIRGenImpl { // and the result value is returned. If an error occurs we get a nullptr // and propagate. // - mlir::Value *lhs = mlirGen(*binop.getLHS()); + mlir::ValuePtr lhs = mlirGen(*binop.getLHS()); if (!lhs) return nullptr; - mlir::Value *rhs = mlirGen(*binop.getRHS()); + mlir::ValuePtr rhs = mlirGen(*binop.getRHS()); if (!rhs) return nullptr; auto location = loc(binop.loc()); @@ -219,8 +220,8 @@ class MLIRGenImpl { /// This is a reference to a variable in an expression. The variable is /// expected to have been declared and so should have a value in the symbol /// table, otherwise emit an error and return nullptr. - mlir::Value *mlirGen(VariableExprAST &expr) { - if (auto *variable = symbolTable.lookup(expr.getName())) + mlir::ValuePtr mlirGen(VariableExprAST &expr) { + if (auto variable = symbolTable.lookup(expr.getName())) return variable; emitError(loc(expr.loc()), "error: unknown variable '") @@ -233,7 +234,7 @@ class MLIRGenImpl { auto location = loc(ret.loc()); // 'return' takes an optional expression, handle that case here. 
- mlir::Value *expr = nullptr; + mlir::ValuePtr expr = nullptr; if (ret.getExpr().hasValue()) { if (!(expr = mlirGen(*ret.getExpr().getValue()))) return mlir::failure(); @@ -241,7 +242,7 @@ class MLIRGenImpl { // Otherwise, this return operation has zero operands. builder.create(location, expr ? makeArrayRef(expr) - : ArrayRef()); + : ArrayRef()); return mlir::success(); } @@ -263,7 +264,7 @@ class MLIRGenImpl { /// [[1.000000e+00, 2.000000e+00, 3.000000e+00], /// [4.000000e+00, 5.000000e+00, 6.000000e+00]]>} : () -> tensor<2x3xf64> /// - mlir::Value *mlirGen(LiteralExprAST &lit) { + mlir::ValuePtr mlirGen(LiteralExprAST &lit) { auto type = getType(lit.getDims()); // The attribute is a vector with a floating point value per element @@ -309,14 +310,14 @@ class MLIRGenImpl { /// Emit a call expression. It emits specific operations for the `transpose` /// builtin. Other identifiers are assumed to be user-defined functions. - mlir::Value *mlirGen(CallExprAST &call) { + mlir::ValuePtr mlirGen(CallExprAST &call) { llvm::StringRef callee = call.getCallee(); auto location = loc(call.loc()); // Codegen the operands first. - SmallVector operands; + SmallVector operands; for (auto &expr : call.getArgs()) { - auto *arg = mlirGen(*expr); + auto arg = mlirGen(*expr); if (!arg) return nullptr; operands.push_back(arg); @@ -342,7 +343,7 @@ class MLIRGenImpl { /// Emit a print expression. It emits specific operations for two builtins: /// transpose(x) and print(x). mlir::LogicalResult mlirGen(PrintExprAST &call) { - auto *arg = mlirGen(*call.getArg()); + auto arg = mlirGen(*call.getArg()); if (!arg) return mlir::failure(); @@ -351,12 +352,12 @@ class MLIRGenImpl { } /// Emit a constant for a single number (FIXME: semantic? broadcast?) - mlir::Value *mlirGen(NumberExprAST &num) { + mlir::ValuePtr mlirGen(NumberExprAST &num) { return builder.create(loc(num.loc()), num.getValue()); } /// Dispatch codegen for the right expression subclass using RTTI. - mlir::Value *mlirGen(ExprAST &expr) { + mlir::ValuePtr mlirGen(ExprAST &expr) { switch (expr.getKind()) { case toy::ExprAST::Expr_BinOp: return mlirGen(cast(expr)); @@ -380,7 +381,7 @@ class MLIRGenImpl { /// initializer and record the value in the symbol table before returning it. /// Future expressions will be able to reference this variable through symbol /// table lookup. - mlir::Value *mlirGen(VarDeclExprAST &vardecl) { + mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) { auto init = vardecl.getInitVal(); if (!init) { emitError(loc(vardecl.loc()), @@ -388,7 +389,7 @@ class MLIRGenImpl { return nullptr; } - mlir::Value *value = mlirGen(*init); + mlir::ValuePtr value = mlirGen(*init); if (!value) return nullptr; @@ -408,7 +409,7 @@ class MLIRGenImpl { /// Codegen a list of expression, return failure if one of them hit an error. mlir::LogicalResult mlirGen(ExprASTList &blockAST) { - ScopedHashTableScope var_scope(symbolTable); + ScopedHashTableScope var_scope(symbolTable); for (auto &expr : blockAST) { // Specific handling for variable declarations, return statement, and // print. These can only appear in block list and not in nested diff --git a/examples/toy/Ch6/mlir/ToyCombine.cpp b/examples/toy/Ch6/mlir/ToyCombine.cpp index 47e1abc6c744..604e9fa6c83e 100644 --- a/examples/toy/Ch6/mlir/ToyCombine.cpp +++ b/examples/toy/Ch6/mlir/ToyCombine.cpp @@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern { matchAndRewrite(TransposeOp op, mlir::PatternRewriter &rewriter) const override { // Look through the input of the current transpose. 
- mlir::Value *transposeInput = op.getOperand(); + mlir::ValuePtr transposeInput = op.getOperand(); TransposeOp transposeInputOp = llvm::dyn_cast_or_null(transposeInput->getDefiningOp()); diff --git a/examples/toy/Ch7/include/toy/Ops.td b/examples/toy/Ch7/include/toy/Ops.td index 0d48f74e9fec..94f1bcf3e820 100644 --- a/examples/toy/Ch7/include/toy/Ops.td +++ b/examples/toy/Ch7/include/toy/Ops.td @@ -112,7 +112,7 @@ def AddOp : Toy_Op<"add", // Allow building an AddOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -164,7 +164,7 @@ def GenericCallOp : Toy_Op<"generic_call", // Add custom build methods for the generic call operation. let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "StringRef callee, ArrayRef arguments"> + "StringRef callee, ArrayRef arguments"> ]; } @@ -181,7 +181,7 @@ def MulOp : Toy_Op<"mul", // Allow building a MulOp with from the two input operands. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *lhs, Value *rhs"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr lhs, ValuePtr rhs"> ]; } @@ -260,7 +260,7 @@ def StructAccessOp : Toy_Op<"struct_access", [NoSideEffect]> { // Allow building a StructAccessOp with just a struct value and an index. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input, size_t index"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input, size_t index"> ]; let verifier = [{ return ::verify(*this); }]; @@ -299,7 +299,7 @@ def TransposeOp : Toy_Op<"transpose", // Allow building a TransposeOp with from the input operand. let builders = [ - OpBuilder<"Builder *b, OperationState &state, Value *input"> + OpBuilder<"Builder *b, OperationState &state, ValuePtr input"> ]; // Invoke a static verify method to verify this transpose operation. diff --git a/examples/toy/Ch7/mlir/Dialect.cpp b/examples/toy/Ch7/mlir/Dialect.cpp index 2beaa870a89c..0ce896db5dea 100644 --- a/examples/toy/Ch7/mlir/Dialect.cpp +++ b/examples/toy/Ch7/mlir/Dialect.cpp @@ -56,7 +56,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator(toy.return) by replacing it with a new /// operation as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only "toy.return" needs to be handled here. auto returnOp = cast(op); @@ -71,7 +71,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// operation that takes 'input' as the only operand, and produces a single /// result of 'resultType'. If a conversion can not be generated, nullptr /// should be returned. 
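For context on the hook changed in the next hunk: in the tutorial sources it materializes an explicit `toy.cast` between the caller's and the callee's types. Written out with the new typedef it reads roughly as follows (a sketch based on the Toy `CastOp` handled elsewhere in this file's hunks; not part of the diff itself):

```c++
Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input,
                                     Type resultType,
                                     Location conversionLoc) const final {
  // Insert an explicit cast so the inlined callee sees the type it expects.
  return builder.create<CastOp>(conversionLoc, resultType, input);
}
```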
- Operation *materializeCallConversion(OpBuilder &builder, Value *input, + Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input, Type resultType, Location conversionLoc) const final { return builder.create(conversionLoc, resultType, input); @@ -195,7 +195,7 @@ void ConstantOp::inferShapes() { getResult()->setType(value().getType()); } // AddOp void AddOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -215,7 +215,8 @@ void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); } // GenericCallOp void GenericCallOp::build(mlir::Builder *builder, mlir::OperationState &state, - StringRef callee, ArrayRef arguments) { + StringRef callee, + ArrayRef arguments) { // Generic call always returns an unranked Tensor initially. state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(arguments); @@ -236,7 +237,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); } // MulOp void MulOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *lhs, mlir::Value *rhs) { + mlir::ValuePtr lhs, mlir::ValuePtr rhs) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands({lhs, rhs}); } @@ -287,7 +288,7 @@ static mlir::LogicalResult verify(ReturnOp op) { // StructAccessOp void StructAccessOp::build(mlir::Builder *b, mlir::OperationState &state, - mlir::Value *input, size_t index) { + mlir::ValuePtr input, size_t index) { // Extract the result type from the input type. StructType structTy = input->getType().cast(); assert(index < structTy.getNumElementTypes()); @@ -314,7 +315,7 @@ static mlir::LogicalResult verify(StructAccessOp op) { // TransposeOp void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state, - mlir::Value *value) { + mlir::ValuePtr value) { state.addTypes(UnrankedTensorType::get(builder->getF64Type())); state.addOperands(value); } diff --git a/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp index 4ab8c5b501c9..3fa761c74048 100644 --- a/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp +++ b/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp @@ -43,8 +43,8 @@ static MemRefType convertTensorToMemRef(TensorType type) { } /// Insert an allocation and deallocation for the given MemRefType. -static Value *insertAllocAndDealloc(MemRefType type, Location loc, - PatternRewriter &rewriter) { +static ValuePtr insertAllocAndDealloc(MemRefType type, Location loc, + PatternRewriter &rewriter) { auto alloc = rewriter.create(loc, type); // Make sure to allocate at the beginning of the block. @@ -63,11 +63,11 @@ static Value *insertAllocAndDealloc(MemRefType type, Location loc, /// to the operands of the input operation, and the set of loop induction /// variables for the iteration. It returns a value to store at the current /// index of the iteration. 
-using LoopIterationFn = function_ref memRefOperands, - ArrayRef loopIvs)>; +using LoopIterationFn = function_ref memRefOperands, + ArrayRef loopIvs)>; -static void lowerOpToLoops(Operation *op, ArrayRef operands, +static void lowerOpToLoops(Operation *op, ArrayRef operands, PatternRewriter &rewriter, LoopIterationFn processIteration) { auto tensorType = (*op->result_type_begin()).cast(); @@ -78,7 +78,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef operands, auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter); // Create an empty affine loop for each of the dimensions within the shape. - SmallVector loopIvs; + SmallVector loopIvs; for (auto dim : tensorType.getShape()) { auto loop = rewriter.create(loc, /*lb=*/0, dim, /*step=*/1); loop.getBody()->clear(); @@ -94,7 +94,7 @@ static void lowerOpToLoops(Operation *op, ArrayRef operands, // Generate a call to the processing function with the rewriter, the memref // operands, and the loop induction variables. This function will return the // value to store at the current index. - Value *valueToStore = processIteration(rewriter, operands, loopIvs); + ValuePtr valueToStore = processIteration(rewriter, operands, loopIvs); rewriter.create(loc, valueToStore, alloc, llvm::makeArrayRef(loopIvs)); @@ -113,13 +113,13 @@ struct BinaryOpLowering : public ConversionPattern { : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); lowerOpToLoops( op, operands, rewriter, - [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, - ArrayRef loopIvs) { + [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the BinaryOp. This // allows for using the nice named accessors that are generated by the // ODS. @@ -163,7 +163,7 @@ struct ConstantOpLowering : public OpRewritePattern { // Create these constants up-front to avoid large amounts of redundant // operations. auto valueShape = memRefType.getShape(); - SmallVector constantIndices; + SmallVector constantIndices; for (auto i : llvm::seq( 0, *std::max_element(valueShape.begin(), valueShape.end()))) constantIndices.push_back(rewriter.create(loc, i)); @@ -172,7 +172,7 @@ struct ConstantOpLowering : public OpRewritePattern { // will need to generate a store for each of the elements. The following // functor recursively walks the dimensions of the constant shape, // generating a store when the recursion hits the base case. - SmallVector indices; + SmallVector indices; auto valueIt = constantValue.getValues().begin(); std::function storeElements = [&](uint64_t dimension) { // The last dimension is the base case of the recursion, at this point @@ -231,22 +231,22 @@ struct TransposeOpLowering : public ConversionPattern { : ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); lowerOpToLoops( op, operands, rewriter, - [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, - ArrayRef loopIvs) { + [loc](PatternRewriter &rewriter, ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the TransposeOp. 
// This allows for using the nice named accessors that are generated // by the ODS. toy::TransposeOpOperandAdaptor transposeAdaptor(memRefOperands); - Value *input = transposeAdaptor.input(); + ValuePtr input = transposeAdaptor.input(); // Transpose the elements by generating a load from the reverse // indices. - SmallVector reverseIvs(llvm::reverse(loopIvs)); + SmallVector reverseIvs(llvm::reverse(loopIvs)); return rewriter.create(loc, input, reverseIvs); }); return matchSuccess(); diff --git a/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/examples/toy/Ch7/mlir/LowerToLLVM.cpp index d35cc5c576ad..c3180b4a92d9 100644 --- a/examples/toy/Ch7/mlir/LowerToLLVM.cpp +++ b/examples/toy/Ch7/mlir/LowerToLLVM.cpp @@ -51,7 +51,7 @@ class PrintOpLowering : public ConversionPattern { : ConversionPattern(toy::PrintOp::getOperationName(), 1, context) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto memRefType = (*op->operand_type_begin()).cast(); auto memRefShape = memRefType.getShape(); @@ -64,14 +64,14 @@ class PrintOpLowering : public ConversionPattern { // Get a symbol reference to the printf function, inserting it if necessary. auto printfRef = getOrInsertPrintf(rewriter, parentModule, llvmDialect); - Value *formatSpecifierCst = getOrCreateGlobalString( + ValuePtr formatSpecifierCst = getOrCreateGlobalString( loc, rewriter, "frmt_spec", StringRef("%f \0", 4), parentModule, llvmDialect); - Value *newLineCst = getOrCreateGlobalString( + ValuePtr newLineCst = getOrCreateGlobalString( loc, rewriter, "nl", StringRef("\n\0", 2), parentModule, llvmDialect); // Create a loop for each of the dimensions within the shape. - SmallVector loopIvs; + SmallVector loopIvs; for (unsigned i = 0, e = memRefShape.size(); i != e; ++i) { auto lowerBound = rewriter.create(loc, 0); auto upperBound = rewriter.create(loc, memRefShape[i]); @@ -97,7 +97,7 @@ class PrintOpLowering : public ConversionPattern { auto elementLoad = rewriter.create(loc, printOp.input(), loopIvs); rewriter.create( loc, printfRef, rewriter.getIntegerType(32), - ArrayRef({formatSpecifierCst, elementLoad})); + ArrayRef({formatSpecifierCst, elementLoad})); // Notify the rewriter that this operation has been removed. rewriter.eraseOp(op); @@ -130,10 +130,10 @@ class PrintOpLowering : public ConversionPattern { /// Return a value representing an access into a global string with the given /// name, creating the string if necessary. - static Value *getOrCreateGlobalString(Location loc, OpBuilder &builder, - StringRef name, StringRef value, - ModuleOp module, - LLVM::LLVMDialect *llvmDialect) { + static ValuePtr getOrCreateGlobalString(Location loc, OpBuilder &builder, + StringRef name, StringRef value, + ModuleOp module, + LLVM::LLVMDialect *llvmDialect) { // Create the global at the entry of the module. LLVM::GlobalOp global; if (!(global = module.lookupSymbol(name))) { @@ -147,13 +147,13 @@ class PrintOpLowering : public ConversionPattern { } // Get the pointer to the first character in the global string. 
- Value *globalPtr = builder.create(loc, global); - Value *cst0 = builder.create( + ValuePtr globalPtr = builder.create(loc, global); + ValuePtr cst0 = builder.create( loc, LLVM::LLVMType::getInt64Ty(llvmDialect), builder.getIntegerAttr(builder.getIndexType(), 0)); return builder.create( loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr, - ArrayRef({cst0, cst0})); + ArrayRef({cst0, cst0})); } }; } // end anonymous namespace diff --git a/examples/toy/Ch7/mlir/MLIRGen.cpp b/examples/toy/Ch7/mlir/MLIRGen.cpp index b33137a1066e..590b21e53a1e 100644 --- a/examples/toy/Ch7/mlir/MLIRGen.cpp +++ b/examples/toy/Ch7/mlir/MLIRGen.cpp @@ -108,11 +108,11 @@ class MLIRGenImpl { /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, the /// scope is destroyed and the mappings created in this scope are dropped. - llvm::ScopedHashTable> + llvm::ScopedHashTable> symbolTable; using SymbolTableScopeT = llvm::ScopedHashTableScope>; + std::pair>; /// A mapping for the functions that have been code generated to MLIR. llvm::StringMap functionMap; @@ -129,7 +129,7 @@ class MLIRGenImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(VarDeclExprAST &var, mlir::Value *value) { + mlir::LogicalResult declare(VarDeclExprAST &var, mlir::ValuePtr value) { if (symbolTable.count(var.getName())) return mlir::failure(); symbolTable.insert(var.getName(), {value, &var}); @@ -301,7 +301,7 @@ class MLIRGenImpl { } /// Emit a binary operation - mlir::Value *mlirGen(BinaryExprAST &binop) { + mlir::ValuePtr mlirGen(BinaryExprAST &binop) { // First emit the operations for each side of the operation before emitting // the operation itself. For example if the expression is `a + foo(a)` // 1) First it will visiting the LHS, which will return a reference to the @@ -313,7 +313,7 @@ class MLIRGenImpl { // and the result value is returned. If an error occurs we get a nullptr // and propagate. // - mlir::Value *lhs = mlirGen(*binop.getLHS()); + mlir::ValuePtr lhs = mlirGen(*binop.getLHS()); if (!lhs) return nullptr; auto location = loc(binop.loc()); @@ -329,7 +329,7 @@ class MLIRGenImpl { } // Otherwise, this is a normal binary op. - mlir::Value *rhs = mlirGen(*binop.getRHS()); + mlir::ValuePtr rhs = mlirGen(*binop.getRHS()); if (!rhs) return nullptr; @@ -349,8 +349,8 @@ class MLIRGenImpl { /// This is a reference to a variable in an expression. The variable is /// expected to have been declared and so should have a value in the symbol /// table, otherwise emit an error and return nullptr. - mlir::Value *mlirGen(VariableExprAST &expr) { - if (auto *variable = symbolTable.lookup(expr.getName()).first) + mlir::ValuePtr mlirGen(VariableExprAST &expr) { + if (auto variable = symbolTable.lookup(expr.getName()).first) return variable; emitError(loc(expr.loc()), "error: unknown variable '") @@ -363,7 +363,7 @@ class MLIRGenImpl { auto location = loc(ret.loc()); // 'return' takes an optional expression, handle that case here. - mlir::Value *expr = nullptr; + mlir::ValuePtr expr = nullptr; if (ret.getExpr().hasValue()) { if (!(expr = mlirGen(*ret.getExpr().getValue()))) return mlir::failure(); @@ -371,7 +371,7 @@ class MLIRGenImpl { // Otherwise, this return operation has zero operands. builder.create(location, expr ? makeArrayRef(expr) - : ArrayRef()); + : ArrayRef()); return mlir::success(); } @@ -450,7 +450,7 @@ class MLIRGenImpl { } /// Emit an array literal. 
- mlir::Value *mlirGen(LiteralExprAST &lit) { + mlir::ValuePtr mlirGen(LiteralExprAST &lit) { mlir::Type type = getType(lit.getDims()); mlir::DenseElementsAttr dataAttribute = getConstantAttr(lit); @@ -462,7 +462,7 @@ class MLIRGenImpl { /// Emit a struct literal. It will be emitted as an array of /// other literals in an Attribute attached to a `toy.struct_constant` /// operation. - mlir::Value *mlirGen(StructLiteralExprAST &lit) { + mlir::ValuePtr mlirGen(StructLiteralExprAST &lit) { mlir::ArrayAttr dataAttr; mlir::Type dataType; std::tie(dataAttr, dataType) = getConstantAttr(lit); @@ -493,14 +493,14 @@ class MLIRGenImpl { /// Emit a call expression. It emits specific operations for the `transpose` /// builtin. Other identifiers are assumed to be user-defined functions. - mlir::Value *mlirGen(CallExprAST &call) { + mlir::ValuePtr mlirGen(CallExprAST &call) { llvm::StringRef callee = call.getCallee(); auto location = loc(call.loc()); // Codegen the operands first. - SmallVector operands; + SmallVector operands; for (auto &expr : call.getArgs()) { - auto *arg = mlirGen(*expr); + auto arg = mlirGen(*expr); if (!arg) return nullptr; operands.push_back(arg); @@ -534,7 +534,7 @@ class MLIRGenImpl { /// Emit a print expression. It emits specific operations for two builtins: /// transpose(x) and print(x). mlir::LogicalResult mlirGen(PrintExprAST &call) { - auto *arg = mlirGen(*call.getArg()); + auto arg = mlirGen(*call.getArg()); if (!arg) return mlir::failure(); @@ -543,12 +543,12 @@ class MLIRGenImpl { } /// Emit a constant for a single number (FIXME: semantic? broadcast?) - mlir::Value *mlirGen(NumberExprAST &num) { + mlir::ValuePtr mlirGen(NumberExprAST &num) { return builder.create(loc(num.loc()), num.getValue()); } /// Dispatch codegen for the right expression subclass using RTTI. - mlir::Value *mlirGen(ExprAST &expr) { + mlir::ValuePtr mlirGen(ExprAST &expr) { switch (expr.getKind()) { case toy::ExprAST::Expr_BinOp: return mlirGen(cast(expr)); @@ -574,7 +574,7 @@ class MLIRGenImpl { /// initializer and record the value in the symbol table before returning it. /// Future expressions will be able to reference this variable through symbol /// table lookup. - mlir::Value *mlirGen(VarDeclExprAST &vardecl) { + mlir::ValuePtr mlirGen(VarDeclExprAST &vardecl) { auto init = vardecl.getInitVal(); if (!init) { emitError(loc(vardecl.loc()), @@ -582,7 +582,7 @@ class MLIRGenImpl { return nullptr; } - mlir::Value *value = mlirGen(*init); + mlir::ValuePtr value = mlirGen(*init); if (!value) return nullptr; diff --git a/examples/toy/Ch7/mlir/ToyCombine.cpp b/examples/toy/Ch7/mlir/ToyCombine.cpp index ebd4f5d11036..d18396c63bb9 100644 --- a/examples/toy/Ch7/mlir/ToyCombine.cpp +++ b/examples/toy/Ch7/mlir/ToyCombine.cpp @@ -71,7 +71,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern { matchAndRewrite(TransposeOp op, mlir::PatternRewriter &rewriter) const override { // Look through the input of the current transpose. - mlir::Value *transposeInput = op.getOperand(); + mlir::ValuePtr transposeInput = op.getOperand(); TransposeOp transposeInputOp = llvm::dyn_cast_or_null(transposeInput->getDefiningOp()); diff --git a/g3doc/DeclarativeRewrites.md b/g3doc/DeclarativeRewrites.md index 5adcb3209835..9fcd4341611d 100644 --- a/g3doc/DeclarativeRewrites.md +++ b/g3doc/DeclarativeRewrites.md @@ -233,7 +233,7 @@ In the above, we are using `BOp`'s result for building `COp`. Given that `COp` was specified with table-driven op definition, there will be several `build()` methods generated for it. 
One of them has aggregated parameters for result types, operands, and attributes in the signature: `void -COp::build(..., ArrayRef resultTypes, Array operands, +COp::build(..., ArrayRef resultTypes, Array operands, ArrayRef attr)`. The pattern in the above calls this `build()` method for constructing the `COp`. @@ -266,7 +266,7 @@ For example, for the above `AOp`, a possible builder is: ```c++ void AOp::build(Builder *builder, OperationState &state, - Value *input, Attribute attr) { + ValuePtr input, Attribute attr) { state.addOperands({input}); state.addAttribute("a_attr", attr); Type type = ...; // Deduce result type here @@ -422,7 +422,7 @@ op; it can be also used to specify how to build an op entirely. An example: If we have a C++ function for building an op: ```c++ -Operation *createMyOp(OpBuilder builder, Value *input, Attribute attr); +Operation *createMyOp(OpBuilder builder, ValuePtr input, Attribute attr); ``` We can wrap it up and invoke it like: diff --git a/g3doc/DialectConversion.md b/g3doc/DialectConversion.md index b4e309daf1fc..6771860366c7 100644 --- a/g3doc/DialectConversion.md +++ b/g3doc/DialectConversion.md @@ -209,7 +209,7 @@ class TypeConverter { /// the conversion has finished. virtual Operation *materializeConversion(PatternRewriter &rewriter, Type resultType, - ArrayRef inputs, + ArrayRef inputs, Location loc); }; ``` @@ -232,7 +232,7 @@ struct MyConversionPattern : public ConversionPattern { /// `operands` parameter, containing the remapped operands of the original /// operation. virtual PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const; }; ``` @@ -269,7 +269,7 @@ public: /// Remap an input of the original signature to another `replacement` /// value. This drops the original argument. - void remapInput(unsigned origInputNo, Value *replacement); + void remapInput(unsigned origInputNo, ValuePtr replacement); }; ``` diff --git a/g3doc/EDSC.md b/g3doc/EDSC.md index afceac2dfc16..eaaeb6c7009b 100644 --- a/g3doc/EDSC.md +++ b/g3doc/EDSC.md @@ -15,10 +15,10 @@ declarative builders are available within the lifetime of a `ScopedContext`. ## ValueHandle and IndexHandle `mlir::edsc::ValueHandle` and `mlir::edsc::IndexHandle` provide typed -abstractions around an `mlir::Value*`. These abstractions are "delayed", in the -sense that they allow separating declaration from definition. They may -capture IR snippets, as they are built, for programmatic manipulation. -Intuitive operators are provided to allow concise and idiomatic expressions. +abstractions around an `mlir::Value`. These abstractions are "delayed", in the +sense that they allow separating declaration from definition. They may capture +IR snippets, as they are built, for programmatic manipulation. Intuitive +operators are provided to allow concise and idiomatic expressions. 
```c++ ValueHandle zero = constant_index(0); diff --git a/g3doc/GenericDAGRewriter.md b/g3doc/GenericDAGRewriter.md index 3b26c22eb378..64b8f4f7ade3 100644 --- a/g3doc/GenericDAGRewriter.md +++ b/g3doc/GenericDAGRewriter.md @@ -128,7 +128,7 @@ complicated :) if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1)))) if (C1->countTrailingZeros() == 0) if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) { - Value *NewOr = Builder.CreateOr(Z, ~(*C2)); + ValuePtr NewOr = Builder.CreateOr(Z, ~(*C2)); return Builder.CreateSub(RHS, NewOr, "sub"); } ``` diff --git a/g3doc/OpDefinitions.md b/g3doc/OpDefinitions.md index 1f98671d59af..1db18266ee09 100644 --- a/g3doc/OpDefinitions.md +++ b/g3doc/OpDefinitions.md @@ -360,7 +360,7 @@ def MyInterface : OpInterface<"MyInterface"> { // A new non-static method accepting an input argument. InterfaceMethod<"/*insert doc here*/", - "Value *", "bar", (ins "unsigned":$i) + "ValuePtr ", "bar", (ins "unsigned":$i) >, // Query a static property of the derived operation. @@ -438,7 +438,7 @@ static void build(Builder *tblgen_builder, OperationState &tblgen_state, // for attributes are of mlir::Attribute types. static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type i32_result, Type f32_result, ..., - Value *i32_operand, Value *f32_operand, ..., + ValuePtr i32_operand, ValuePtr f32_operand, ..., IntegerAttr i32_attr, FloatAttr f32_attr, ...); // Each result-type/operand/attribute has a separate parameter. The parameters @@ -447,13 +447,13 @@ static void build(Builder *tblgen_builder, OperationState &tblgen_state, // explanation for more details.) static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type i32_result, Type f32_result, ..., - Value *i32_operand, Value *f32_operand, ..., + ValuePtr i32_operand, ValuePtr f32_operand, ..., APInt i32_attr, StringRef f32_attr, ...); // Each operand/attribute has a separate parameter but result type is aggregate. static void build(Builder *tblgen_builder, OperationState &tblgen_state, ArrayRef resultTypes, - Value *i32_operand, Value *f32_operand, ..., + ValuePtr i32_operand, ValuePtr f32_operand, ..., IntegerAttr i32_attr, FloatAttr f32_attr, ...); // All operands/attributes have aggregate parameters. @@ -615,7 +615,7 @@ coding style requirements. For each operation, we automatically generate an _operand adaptor_. This class solves the problem of accessing operands provided as a list of `Value`s without using "magic" constants. The operand adaptor takes a reference to an array of -`Value *` and provides methods with the same names as those in the operation +`ValuePtr` and provides methods with the same names as those in the operation class to access them. For example, for a binary arithmetic operation, it may provide `.lhs()` to access the first operand and `.rhs()` to access the second operand. 
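As a short illustration of the direct use described above, assume an ODS-defined binary op `AddOp` with operands `$lhs` and `$rhs` (the generated class name follows the `<OpName>OperandAdaptor` convention seen elsewhere in this patch); the next hunk shows the same idea inside a function template:

```c++
void processOperands(ArrayRef<ValuePtr> newOperands) {
  // Wrap the raw operand list so the ODS-named accessors can be used
  // instead of magic indices such as newOperands[0].
  AddOpOperandAdaptor adaptor(newOperands);
  ValuePtr lhs = adaptor.lhs();
  ValuePtr rhs = adaptor.rhs();
  (void)lhs;
  (void)rhs;
}
```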
@@ -629,11 +629,11 @@ Operand adaptors can be used in function templates that also process operations: ```c++ template -std::pair zip(BinaryOpTy &&op) { +std::pair zip(BinaryOpTy &&op) { return std::make_pair(op.lhs(), op.rhs());; } -void process(AddOp op, ArrayRef newOperands) { +void process(AddOp op, ArrayRef newOperands) { zip(op); zip(OperandAdaptor(newOperands)); /*...*/ diff --git a/g3doc/QuickstartRewrites.md b/g3doc/QuickstartRewrites.md index d7bf9a543703..6a4a7cca8b88 100644 --- a/g3doc/QuickstartRewrites.md +++ b/g3doc/QuickstartRewrites.md @@ -128,8 +128,8 @@ def : Pat<(TF_LeakyReluOp:$old_value, $arg, F32Attr:$a), ``` ```c++ -static Value* createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op, - Value* operand, Attribute attr) { +static Value createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op, + Value operand, Attribute attr) { return rewriter.create( op->getLoc(), operands[0]->getType(), /*arg=*/operands[0], /*alpha=*/attrs[0].cast()); diff --git a/g3doc/Rationale.md b/g3doc/Rationale.md index 66cf800621d4..763442dce063 100644 --- a/g3doc/Rationale.md +++ b/g3doc/Rationale.md @@ -1099,7 +1099,7 @@ those chunks independently. The problem is that LLVM has several objects in its IR that are globally uniqued and also mutable: notably constants like `i32 0`. In LLVM, these constants are -`Value*r`'s, which allow them to be used as operands to instructions, and that +`Value`'s, which allow them to be used as operands to instructions, and that they also have SSA use lists. Because these things are uniqued, every `i32 0` in any function shares a use list. This means that optimizing multiple functions in parallel won't work (at least without some sort of synchronization on the use diff --git a/g3doc/Tutorials/Toy/Ch-3.md b/g3doc/Tutorials/Toy/Ch-3.md index 07ead64d4559..fb470434d6ff 100644 --- a/g3doc/Tutorials/Toy/Ch-3.md +++ b/g3doc/Tutorials/Toy/Ch-3.md @@ -90,7 +90,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern { matchAndRewrite(TransposeOp op, mlir::PatternRewriter &rewriter) const override { // Look through the input of the current transpose. - mlir::Value *transposeInput = op.getOperand(); + mlir::ValuePtr transposeInput = op.getOperand(); TransposeOp transposeInputOp = llvm::dyn_cast_or_null(transposeInput->getDefiningOp()); // If the input is defined by another Transpose, bingo! diff --git a/g3doc/Tutorials/Toy/Ch-4.md b/g3doc/Tutorials/Toy/Ch-4.md index ac124699c2f3..921e5cdc52af 100644 --- a/g3doc/Tutorials/Toy/Ch-4.md +++ b/g3doc/Tutorials/Toy/Ch-4.md @@ -75,7 +75,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// previously returned by the call operation with the operands of the /// return. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only "toy.return" needs to be handled here. auto returnOp = cast(op); @@ -207,7 +207,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface { /// operation that takes 'input' as the only operand, and produces a single /// result of 'resultType'. If a conversion can not be generated, nullptr /// should be returned. 
- Operation *materializeCallConversion(OpBuilder &builder, Value *input, + Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input, Type resultType, Location conversionLoc) const final { return builder.create(conversionLoc, resultType, input); diff --git a/g3doc/Tutorials/Toy/Ch-5.md b/g3doc/Tutorials/Toy/Ch-5.md index 1124cf14a436..ed62f8954b74 100644 --- a/g3doc/Tutorials/Toy/Ch-5.md +++ b/g3doc/Tutorials/Toy/Ch-5.md @@ -101,7 +101,7 @@ struct TransposeOpLowering : public mlir::ConversionPattern { /// Match and rewrite the given `toy.transpose` operation, with the given /// operands that have been remapped from `tensor<...>` to `memref<...>`. mlir::PatternMatchResult - matchAndRewrite(mlir::Operation *op, ArrayRef operands, + matchAndRewrite(mlir::Operation *op, ArrayRef operands, mlir::ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); @@ -112,18 +112,18 @@ struct TransposeOpLowering : public mlir::ConversionPattern { lowerOpToLoops( op, operands, rewriter, [loc](mlir::PatternRewriter &rewriter, - ArrayRef memRefOperands, - ArrayRef loopIvs) { + ArrayRef memRefOperands, + ArrayRef loopIvs) { // Generate an adaptor for the remapped operands of the TransposeOp. // This allows for using the nice named accessors that are generated // by the ODS. This adaptor is automatically provided by the ODS // framework. TransposeOpOperandAdaptor transposeAdaptor(memRefOperands); - mlir::Value *input = transposeAdaptor.input(); + mlir::ValuePtr input = transposeAdaptor.input(); // Transpose the elements by generating a load from the reverse // indices. - SmallVector reverseIvs(llvm::reverse(loopIvs)); + SmallVector reverseIvs(llvm::reverse(loopIvs)); return rewriter.create(loc, input, reverseIvs); }); return matchSuccess(); diff --git a/g3doc/UsageOfConst.md b/g3doc/UsageOfConst.md index 052f14ddf012..5f6d37931647 100644 --- a/g3doc/UsageOfConst.md +++ b/g3doc/UsageOfConst.md @@ -10,7 +10,7 @@ understood (even though the LLVM implementation is flawed in many ways). The design team since decided to change to a different module, which eschews `const` entirely for the core IR types: you should never see a `const` method on -`Operation`, should never see the type `const Value *`, and you shouldn't feel +`Operation`, should never see the type `const ValuePtr`, and you shouldn't feel bad about this. That said, you *should* use `const` for non-IR types, like `SmallVector`'s and many other things. @@ -39,7 +39,7 @@ into the MLIR codebase, argues that the cost/benefit tradeoff of this design is a poor tradeoff, and proposes switching to a much simpler approach - eliminating the use of const of these IR types entirely. -**Note:** **This document is only discussing things like `const Value*` and +**Note:** **This document is only discussing things like `const Value` and `const Operation*`. There is no proposed change for other types, e.g. `SmallVector` references, the immutable types like `Attribute`, etc.** @@ -130,7 +130,7 @@ const. operand_iterator operand_begin(); operand_iterator operand_end(); - /// Returns an iterator on the underlying Value's (Value *). + /// Returns an iterator on the underlying Value's (ValuePtr ). operand_range getOperands(); // Support const operand iteration. @@ -141,7 +141,7 @@ const. const_operand_iterator operand_begin() const; const_operand_iterator operand_end() const; - /// Returns a const iterator on the underlying Value's (Value *). + /// Returns a const iterator on the underlying Value's (ValuePtr ). 
llvm::iterator_range getOperands() const; ArrayRef getOpOperands() const { diff --git a/include/mlir/Analysis/AffineAnalysis.h b/include/mlir/Analysis/AffineAnalysis.h index 8243d1f6f63f..f506470f36a0 100644 --- a/include/mlir/Analysis/AffineAnalysis.h +++ b/include/mlir/Analysis/AffineAnalysis.h @@ -39,10 +39,13 @@ class FlatAffineConstraints; class Operation; class Value; +// TODO(riverriddle) Remove this after Value is value-typed. +using ValuePtr = Value *; + /// Returns in `affineApplyOps`, the sequence of those AffineApplyOp /// Operations that are reachable via a search starting from `operands` and /// ending at those operands that are not the result of an AffineApplyOp. -void getReachableAffineApplyOps(ArrayRef operands, +void getReachableAffineApplyOps(ArrayRef operands, SmallVectorImpl &affineApplyOps); /// Builds a system of constraints with dimensional identifiers corresponding to @@ -56,9 +59,9 @@ LogicalResult getIndexSet(MutableArrayRef forOps, /// Encapsulates a memref load or store access information. struct MemRefAccess { - Value *memref; + ValuePtr memref; Operation *opInst; - SmallVector indices; + SmallVector indices; /// Constructs a MemRefAccess from a load or store operation. // TODO(b/119949820): add accessors to standard op's load, store, DMA op's to diff --git a/include/mlir/Analysis/AffineStructures.h b/include/mlir/Analysis/AffineStructures.h index e53af5024dac..65cf13a0ce62 100644 --- a/include/mlir/Analysis/AffineStructures.h +++ b/include/mlir/Analysis/AffineStructures.h @@ -123,8 +123,8 @@ class AffineValueMap { // Creates an empty AffineValueMap (users should call 'reset' to reset map // and operands). AffineValueMap() {} - AffineValueMap(AffineMap map, ArrayRef operands, - ArrayRef results = llvm::None); + AffineValueMap(AffineMap map, ArrayRef operands, + ArrayRef results = llvm::None); explicit AffineValueMap(AffineApplyOp applyOp); explicit AffineValueMap(AffineBound bound); @@ -132,8 +132,8 @@ class AffineValueMap { ~AffineValueMap(); // Resets this AffineValueMap with 'map', 'operands', and 'results'. - void reset(AffineMap map, ArrayRef operands, - ArrayRef results = llvm::None); + void reset(AffineMap map, ArrayRef operands, + ArrayRef results = llvm::None); /// Return the value map that is the difference of value maps 'a' and 'b', /// represented as an affine map and its operands. The output map + operands @@ -146,7 +146,7 @@ class AffineValueMap { inline bool isMultipleOf(unsigned idx, int64_t factor) const; /// Return true if the idx^th result depends on 'value', false otherwise. - bool isFunctionOf(unsigned idx, Value *value) const; + bool isFunctionOf(unsigned idx, ValuePtr value) const; /// Return true if the result at 'idx' is a constant, false /// otherwise. @@ -162,8 +162,8 @@ class AffineValueMap { inline unsigned getNumSymbols() const { return map.getNumSymbols(); } inline unsigned getNumResults() const { return map.getNumResults(); } - Value *getOperand(unsigned i) const; - ArrayRef getOperands() const; + ValuePtr getOperand(unsigned i) const; + ArrayRef getOperands() const; AffineMap getAffineMap() const; private: @@ -172,9 +172,9 @@ class AffineValueMap { // TODO: make these trailing objects? /// The SSA operands binding to the dim's and symbols of 'map'. - SmallVector operands; + SmallVector operands; /// The SSA results binding to the results of 'map'. - SmallVector results; + SmallVector results; }; /// An IntegerValueSet is an integer set plus its operands. @@ -207,7 +207,7 @@ class IntegerValueSet { // 'AffineCondition'. 
MutableIntegerSet set; /// The SSA operands binding to the dim's and symbols of 'set'. - SmallVector operands; + SmallVector operands; }; /// A flat list of affine equalities and inequalities in the form. @@ -245,7 +245,7 @@ class FlatAffineConstraints { unsigned numReservedEqualities, unsigned numReservedCols, unsigned numDims = 0, unsigned numSymbols = 0, unsigned numLocals = 0, - ArrayRef> idArgs = {}) + ArrayRef> idArgs = {}) : numReservedCols(numReservedCols), numDims(numDims), numSymbols(numSymbols) { assert(numReservedCols >= numDims + numSymbols + 1); @@ -264,7 +264,7 @@ class FlatAffineConstraints { /// dimensions and symbols. FlatAffineConstraints(unsigned numDims = 0, unsigned numSymbols = 0, unsigned numLocals = 0, - ArrayRef> idArgs = {}) + ArrayRef> idArgs = {}) : numReservedCols(numDims + numSymbols + numLocals + 1), numDims(numDims), numSymbols(numSymbols) { assert(numReservedCols >= numDims + numSymbols + 1); @@ -304,10 +304,10 @@ class FlatAffineConstraints { // Clears any existing data and reserves memory for the specified constraints. void reset(unsigned numReservedInequalities, unsigned numReservedEqualities, unsigned numReservedCols, unsigned numDims, unsigned numSymbols, - unsigned numLocals = 0, ArrayRef idArgs = {}); + unsigned numLocals = 0, ArrayRef idArgs = {}); void reset(unsigned numDims = 0, unsigned numSymbols = 0, - unsigned numLocals = 0, ArrayRef idArgs = {}); + unsigned numLocals = 0, ArrayRef idArgs = {}); /// Appends constraints from 'other' into this. This is equivalent to an /// intersection with no simplification of any sort attempted. @@ -396,7 +396,7 @@ class FlatAffineConstraints { /// operands. If `eq` is true, add a single equality equal to the bound map's /// first result expr. LogicalResult addLowerOrUpperBound(unsigned pos, AffineMap boundMap, - ArrayRef operands, bool eq, + ArrayRef operands, bool eq, bool lower = true); /// Computes the lower and upper bounds of the first 'num' dimensional @@ -415,10 +415,10 @@ class FlatAffineConstraints { /// operand list 'operands'. /// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size'. /// Note that both lower/upper bounds use operands from 'operands'. - LogicalResult addSliceBounds(ArrayRef values, + LogicalResult addSliceBounds(ArrayRef values, ArrayRef lbMaps, ArrayRef ubMaps, - ArrayRef operands); + ArrayRef operands); // Adds an inequality (>= 0) from the coefficients specified in inEq. void addInequality(ArrayRef inEq); @@ -447,25 +447,25 @@ class FlatAffineConstraints { /// Sets the identifier corresponding to the specified Value id to a /// constant. Asserts if the 'id' is not found. - void setIdToConstant(Value &id, int64_t val); + void setIdToConstant(ValueRef id, int64_t val); /// Looks up the position of the identifier with the specified Value. Returns /// true if found (false otherwise). `pos' is set to the (column) position of /// the identifier. - bool findId(Value &id, unsigned *pos) const; + bool findId(ValueRef id, unsigned *pos) const; /// Returns true if an identifier with the specified Value exists, false /// otherwise. - bool containsId(Value &id) const; + bool containsId(ValueRef id) const; // Add identifiers of the specified kind - specified positions are relative to // the kind of identifier. The coefficient column corresponding to the added // identifier is initialized to zero. 'id' is the Value corresponding to the // identifier that can optionally be provided. 
- void addDimId(unsigned pos, Value *id = nullptr); - void addSymbolId(unsigned pos, Value *id = nullptr); + void addDimId(unsigned pos, ValuePtr id = nullptr); + void addSymbolId(unsigned pos, ValuePtr id = nullptr); void addLocalId(unsigned pos); - void addId(IdKind kind, unsigned pos, Value *id = nullptr); + void addId(IdKind kind, unsigned pos, ValuePtr id = nullptr); /// Add the specified values as a dim or symbol id depending on its nature, if /// it already doesn't exist in the system. `id' has to be either a terminal @@ -473,7 +473,7 @@ class FlatAffineConstraints { /// symbols or loop IVs. The identifier is added to the end of the existing /// dims or symbols. Additional information on the identifier is extracted /// from the IR and added to the constraint system. - void addInductionVarOrTerminalSymbol(Value *id); + void addInductionVarOrTerminalSymbol(ValuePtr id); /// Composes the affine value map with this FlatAffineConstrains, adding the /// results of the map as dimensions at the front [0, vMap->getNumResults()) @@ -500,8 +500,8 @@ class FlatAffineConstraints { void projectOut(unsigned pos, unsigned num); inline void projectOut(unsigned pos) { return projectOut(pos, 1); } - /// Projects out the identifier that is associate with Value *. - void projectOut(Value *id); + /// Projects out the identifier that is associate with ValuePtr . + void projectOut(ValuePtr id); void removeId(IdKind idKind, unsigned pos); void removeId(unsigned pos); @@ -577,20 +577,20 @@ class FlatAffineConstraints { return numIds - numDims - numSymbols; } - inline ArrayRef> getIds() const { + inline ArrayRef> getIds() const { return {ids.data(), ids.size()}; } - inline MutableArrayRef> getIds() { + inline MutableArrayRef> getIds() { return {ids.data(), ids.size()}; } /// Returns the optional Value corresponding to the pos^th identifier. - inline Optional getId(unsigned pos) const { return ids[pos]; } - inline Optional &getId(unsigned pos) { return ids[pos]; } + inline Optional getId(unsigned pos) const { return ids[pos]; } + inline Optional &getId(unsigned pos) { return ids[pos]; } /// Returns the Value associated with the pos^th identifier. Asserts if /// no Value identifier was associated. - inline Value *getIdValue(unsigned pos) const { + inline ValuePtr getIdValue(unsigned pos) const { assert(ids[pos].hasValue() && "identifier's Value not set"); return ids[pos].getValue(); } @@ -598,7 +598,7 @@ class FlatAffineConstraints { /// Returns the Values associated with identifiers in range [start, end). /// Asserts if no Value was associated with one of these identifiers. void getIdValues(unsigned start, unsigned end, - SmallVectorImpl *values) const { + SmallVectorImpl *values) const { assert((start < numIds || start == end) && "invalid start position"); assert(end <= numIds && "invalid end position"); values->clear(); @@ -607,17 +607,17 @@ class FlatAffineConstraints { values->push_back(getIdValue(i)); } } - inline void getAllIdValues(SmallVectorImpl *values) const { + inline void getAllIdValues(SmallVectorImpl *values) const { getIdValues(0, numIds, values); } /// Sets Value associated with the pos^th identifier. - inline void setIdValue(unsigned pos, Value *val) { + inline void setIdValue(unsigned pos, ValuePtr val) { assert(pos < numIds && "invalid id position"); ids[pos] = val; } /// Sets Values associated with identifiers in the range [start, end). 
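To ground these Value-binding accessors: each column of the constraint system can optionally be associated with an SSA value, and with the typedefs the API is used along these lines (a sketch; `ivOuter` and `ivInner` stand in for loop induction variables of type `ValuePtr` and are not from the patch):

```c++
FlatAffineConstraints cst(/*numDims=*/2);
cst.setIdValue(0, ivOuter);                 // bind dimension 0 to an SSA value
cst.setIdValue(1, ivInner);                 // bind dimension 1 to an SSA value

SmallVector<ValuePtr, 4> dimValues;
cst.getIdValues(/*start=*/0, /*end=*/2, &dimValues); // read the bindings back
```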
- void setIdValues(unsigned start, unsigned end, ArrayRef values) { + void setIdValues(unsigned start, unsigned end, ArrayRef values) { assert((start < numIds || end == start) && "invalid start position"); assert(end <= numIds && "invalid end position"); assert(values.size() == end - start); @@ -766,7 +766,7 @@ class FlatAffineConstraints { /// system appearing in the order the identifiers correspond to columns. /// Temporary ones or those that aren't associated to any Value are set to /// None. - SmallVector, 8> ids; + SmallVector, 8> ids; /// A parameter that controls detection of an unrealistic number of /// constraints. If the number of constraints is this many times the number of diff --git a/include/mlir/Analysis/CallInterfaces.h b/include/mlir/Analysis/CallInterfaces.h index dd23d77889f5..a18cfa7aba4a 100644 --- a/include/mlir/Analysis/CallInterfaces.h +++ b/include/mlir/Analysis/CallInterfaces.h @@ -30,8 +30,8 @@ namespace mlir { /// A callable is either a symbol, or an SSA value, that is referenced by a /// call-like operation. This represents the destination of the call. -struct CallInterfaceCallable : public PointerUnion { - using PointerUnion::PointerUnion; +struct CallInterfaceCallable : public PointerUnion { + using PointerUnion::PointerUnion; }; #include "mlir/Analysis/CallInterfaces.h.inc" diff --git a/include/mlir/Analysis/Dominance.h b/include/mlir/Analysis/Dominance.h index 09114eafbb14..f46241e2af0a 100644 --- a/include/mlir/Analysis/Dominance.h +++ b/include/mlir/Analysis/Dominance.h @@ -74,10 +74,10 @@ class DominanceInfo : public detail::DominanceInfoBase { } /// Return true if value A properly dominates operation B. - bool properlyDominates(Value *a, Operation *b); + bool properlyDominates(ValuePtr a, Operation *b); /// Return true if operation A dominates operation B. - bool dominates(Value *a, Operation *b) { + bool dominates(ValuePtr a, Operation *b) { return (Operation *)a->getDefiningOp() == b || properlyDominates(a, b); } diff --git a/include/mlir/Analysis/Liveness.h b/include/mlir/Analysis/Liveness.h index 0bdb474fd927..0aa9d9693e4f 100644 --- a/include/mlir/Analysis/Liveness.h +++ b/include/mlir/Analysis/Liveness.h @@ -41,6 +41,9 @@ class Operation; class Region; class Value; +// TODO(riverriddle) Remove this after Value is value-typed. +using ValuePtr = Value *; + /// Represents an analysis for computing liveness information from a /// given top-level operation. The analysis iterates over all associated /// regions that are attached to the given top-level operation. It @@ -57,7 +60,7 @@ class Liveness { public: using OperationListT = std::vector; using BlockMapT = DenseMap; - using ValueSetT = SmallPtrSet; + using ValueSetT = SmallPtrSet; public: /// Creates a new Liveness analysis that computes liveness @@ -72,7 +75,7 @@ class Liveness { /// Note that the operations in this list are not ordered and the current /// implementation is computationally expensive (as it iterates over all /// blocks in which the given value is live). - OperationListT resolveLiveness(Value *value) const; + OperationListT resolveLiveness(ValuePtr value) const; /// Gets liveness info (if any) for the block. const LivenessBlockInfo *getLiveness(Block *block) const; @@ -85,7 +88,7 @@ class Liveness { /// Returns true if the given operation represent the last use of the /// given value. - bool isLastUse(Value *value, Operation *operation) const; + bool isLastUse(ValuePtr value, Operation *operation) const; /// Dumps the liveness information in a human readable format. 
void dump() const; @@ -124,20 +127,20 @@ class LivenessBlockInfo { const ValueSetT &out() const { return outValues; } /// Returns true if the given value is in the live-in set. - bool isLiveIn(Value *value) const; + bool isLiveIn(ValuePtr value) const; /// Returns true if the given value is in the live-out set. - bool isLiveOut(Value *value) const; + bool isLiveOut(ValuePtr value) const; /// Gets the start operation for the given value. This is the first operation /// the given value is considered to be live. This could either be the start /// operation of the current block (in case the value is live-in) or the /// operation that defines the given value (must be referenced in this block). - Operation *getStartOperation(Value *value) const; + Operation *getStartOperation(ValuePtr value) const; /// Gets the end operation for the given value using the start operation /// provided (must be referenced in this block). - Operation *getEndOperation(Value *value, Operation *startOperation) const; + Operation *getEndOperation(ValuePtr value, Operation *startOperation) const; private: /// The underlying block. diff --git a/include/mlir/Analysis/LoopAnalysis.h b/include/mlir/Analysis/LoopAnalysis.h index 47cc22a49236..ad7dc6d60924 100644 --- a/include/mlir/Analysis/LoopAnalysis.h +++ b/include/mlir/Analysis/LoopAnalysis.h @@ -36,6 +36,9 @@ class NestedPattern; class Operation; class Value; +// TODO(riverriddle) Remove this after Value is value-typed. +using ValuePtr = Value *; + /// Returns the trip count of the loop as an affine map with its corresponding /// operands if the latter is expressible as an affine expression, and nullptr /// otherwise. This method always succeeds as long as the lower bound is not a @@ -45,7 +48,7 @@ class Value; // TODO(mlir-team): this should be moved into 'Transforms/' and be replaced by a // pure analysis method relying on FlatAffineConstraints void buildTripCountMapAndOperands(AffineForOp forOp, AffineMap *map, - SmallVectorImpl *operands); + SmallVectorImpl *operands); /// Returns the trip count of the loop if it's a constant, None otherwise. This /// uses affine expression analysis and is able to determine constant trip count @@ -66,8 +69,8 @@ uint64_t getLargestDivisorOfTripCount(AffineForOp forOp); /// /// Emits a note if it encounters a chain of affine.apply and conservatively /// those cases. -DenseSet> -getInvariantAccesses(Value *iv, ArrayRef indices); +DenseSet> +getInvariantAccesses(ValuePtr iv, ArrayRef indices); using VectorizableLoopFun = std::function; diff --git a/include/mlir/Analysis/Utils.h b/include/mlir/Analysis/Utils.h index cffa222154f4..ea0987df3fed 100644 --- a/include/mlir/Analysis/Utils.h +++ b/include/mlir/Analysis/Utils.h @@ -55,7 +55,7 @@ unsigned getNestingDepth(Operation &op); /// Returns in 'sequentialLoops' all sequential loops in loop nest rooted /// at 'forOp'. void getSequentialLoops(AffineForOp forOp, - llvm::SmallDenseSet *sequentialLoops); + llvm::SmallDenseSet *sequentialLoops); /// ComputationSliceState aggregates loop IVs, loop bound AffineMaps and their /// associated operands for a set of loops within a loop nest (typically the @@ -64,15 +64,15 @@ void getSequentialLoops(AffineForOp forOp, struct ComputationSliceState { // List of sliced loop IVs (ordered from outermost to innermost). // EX: 'ivs[i]' has lower bound 'lbs[i]' and upper bound 'ubs[i]'. - SmallVector ivs; + SmallVector ivs; // List of lower bound AffineMaps. SmallVector lbs; // List of upper bound AffineMaps. 
SmallVector ubs; // List of lower bound operands (lbOperands[i] are used by 'lbs[i]'). - std::vector> lbOperands; + std::vector> lbOperands; // List of upper bound operands (ubOperands[i] are used by 'ubs[i]'). - std::vector> ubOperands; + std::vector> ubOperands; // Slice loop nest insertion point in target loop nest. Block::iterator insertPoint; // Adds to 'cst' with constraints which represent the slice bounds on 'ivs' @@ -257,7 +257,7 @@ struct MemRefRegion { unsigned getRank() const; /// Memref that this region corresponds to. - Value *memref; + ValuePtr memref; /// Read or write. bool write; diff --git a/include/mlir/Conversion/AffineToStandard/AffineToStandard.h b/include/mlir/Conversion/AffineToStandard/AffineToStandard.h index b5c51ad4b4c6..4bbe6610e31d 100644 --- a/include/mlir/Conversion/AffineToStandard/AffineToStandard.h +++ b/include/mlir/Conversion/AffineToStandard/AffineToStandard.h @@ -30,14 +30,17 @@ class OpBuilder; class RewritePattern; class Value; +// TODO(riverriddle) Remove this after Value is value-typed. +using ValuePtr = Value *; + // Owning list of rewriting patterns. class OwningRewritePatternList; /// Emit code that computes the given affine expression using standard /// arithmetic operations applied to the provided dimension and symbol values. -Value *expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr, - ArrayRef dimValues, - ArrayRef symbolValues); +ValuePtr expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr, + ArrayRef dimValues, + ArrayRef symbolValues); /// Collect a set of patterns to convert from the Affine dialect to the Standard /// dialect, in particular convert structured affine control flow into CFG @@ -47,11 +50,11 @@ void populateAffineToStdConversionPatterns(OwningRewritePatternList &patterns, /// Emit code that computes the lower bound of the given affine loop using /// standard arithmetic operations. -Value *lowerAffineLowerBound(AffineForOp op, OpBuilder &builder); +ValuePtr lowerAffineLowerBound(AffineForOp op, OpBuilder &builder); /// Emit code that computes the upper bound of the given affine loop using /// standard arithmetic operations. -Value *lowerAffineUpperBound(AffineForOp op, OpBuilder &builder); +ValuePtr lowerAffineUpperBound(AffineForOp op, OpBuilder &builder); } // namespace mlir #endif // MLIR_CONVERSION_AFFINETOSTANDARD_AFFINETOSTANDARD_H diff --git a/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h b/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h index 0aab8723eabd..58d49a133910 100644 --- a/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h +++ b/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h @@ -24,6 +24,9 @@ class AffineForOp; struct LogicalResult; class Value; +// TODO(riverriddle) Remove this after Value is value-typed. +using ValuePtr = Value *; + namespace loop { class ForOp; } // end namespace loop @@ -78,8 +81,8 @@ LogicalResult convertLoopNestToGPULaunch(loop::ForOp forOp, /// The above conditions are assumed to be satisfied by the computation rooted /// at `forOp`. 
LogicalResult convertLoopToGPULaunch(loop::ForOp forOp, - ArrayRef numWorkGroups, - ArrayRef workGroupSizes); + ArrayRef numWorkGroups, + ArrayRef workGroupSizes); } // namespace mlir diff --git a/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h b/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h index e8d16f064a8f..6f41fb686338 100644 --- a/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h +++ b/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h @@ -74,16 +74,16 @@ class LLVMTypeConverter : public TypeConverter { /// Promote the LLVM struct representation of all MemRef descriptors to stack /// and use pointers to struct to avoid the complexity of the /// platform-specific C/C++ ABI lowering related to struct argument passing. - SmallVector promoteMemRefDescriptors(Location loc, - ValueRange opOperands, - ValueRange operands, - OpBuilder &builder); + SmallVector promoteMemRefDescriptors(Location loc, + ValueRange opOperands, + ValueRange operands, + OpBuilder &builder); /// Promote the LLVM struct representation of one MemRef descriptor to stack /// and use pointer to struct to avoid the complexity of the platform-specific /// C/C++ ABI lowering related to struct argument passing. - Value *promoteOneMemRefDescriptor(Location loc, Value *operand, - OpBuilder &builder); + ValuePtr promoteOneMemRefDescriptor(Location loc, ValuePtr operand, + OpBuilder &builder); protected: /// LLVM IR module used to parse/create types. @@ -139,24 +139,24 @@ class LLVMTypeConverter : public TypeConverter { class StructBuilder { public: /// Construct a helper for the given value. - explicit StructBuilder(Value *v); + explicit StructBuilder(ValuePtr v); /// Builds IR creating an `undef` value of the descriptor type. static StructBuilder undef(OpBuilder &builder, Location loc, Type descriptorType); - /*implicit*/ operator Value *() { return value; } + /*implicit*/ operator ValuePtr() { return value; } protected: // LLVM value - Value *value; + ValuePtr value; // Cached struct type. Type structType; protected: /// Builds IR to extract a value from the struct at position pos - Value *extractPtr(OpBuilder &builder, Location loc, unsigned pos); + ValuePtr extractPtr(OpBuilder &builder, Location loc, unsigned pos); /// Builds IR to set a value in the struct at position pos - void setPtr(OpBuilder &builder, Location loc, unsigned pos, Value *ptr); + void setPtr(OpBuilder &builder, Location loc, unsigned pos, ValuePtr ptr); }; /// Helper class to produce LLVM dialect operations extracting or inserting /// elements of a MemRef descriptor. Wraps a Value pointing to the descriptor. @@ -164,7 +164,7 @@ class StructBuilder { class MemRefDescriptor : public StructBuilder { public: /// Construct a helper for the given descriptor value. - explicit MemRefDescriptor(Value *descriptor); + explicit MemRefDescriptor(ValuePtr descriptor); /// Builds IR creating an `undef` value of the descriptor type. static MemRefDescriptor undef(OpBuilder &builder, Location loc, Type descriptorType); @@ -173,39 +173,40 @@ class MemRefDescriptor : public StructBuilder { /// type. static MemRefDescriptor fromStaticShape(OpBuilder &builder, Location loc, LLVMTypeConverter &typeConverter, - MemRefType type, Value *memory); + MemRefType type, ValuePtr memory); /// Builds IR extracting the allocated pointer from the descriptor. 
- Value *allocatedPtr(OpBuilder &builder, Location loc); + ValuePtr allocatedPtr(OpBuilder &builder, Location loc); /// Builds IR inserting the allocated pointer into the descriptor. - void setAllocatedPtr(OpBuilder &builder, Location loc, Value *ptr); + void setAllocatedPtr(OpBuilder &builder, Location loc, ValuePtr ptr); /// Builds IR extracting the aligned pointer from the descriptor. - Value *alignedPtr(OpBuilder &builder, Location loc); + ValuePtr alignedPtr(OpBuilder &builder, Location loc); /// Builds IR inserting the aligned pointer into the descriptor. - void setAlignedPtr(OpBuilder &builder, Location loc, Value *ptr); + void setAlignedPtr(OpBuilder &builder, Location loc, ValuePtr ptr); /// Builds IR extracting the offset from the descriptor. - Value *offset(OpBuilder &builder, Location loc); + ValuePtr offset(OpBuilder &builder, Location loc); /// Builds IR inserting the offset into the descriptor. - void setOffset(OpBuilder &builder, Location loc, Value *offset); + void setOffset(OpBuilder &builder, Location loc, ValuePtr offset); void setConstantOffset(OpBuilder &builder, Location loc, uint64_t offset); /// Builds IR extracting the pos-th size from the descriptor. - Value *size(OpBuilder &builder, Location loc, unsigned pos); + ValuePtr size(OpBuilder &builder, Location loc, unsigned pos); /// Builds IR inserting the pos-th size into the descriptor - void setSize(OpBuilder &builder, Location loc, unsigned pos, Value *size); + void setSize(OpBuilder &builder, Location loc, unsigned pos, ValuePtr size); void setConstantSize(OpBuilder &builder, Location loc, unsigned pos, uint64_t size); /// Builds IR extracting the pos-th size from the descriptor. - Value *stride(OpBuilder &builder, Location loc, unsigned pos); + ValuePtr stride(OpBuilder &builder, Location loc, unsigned pos); /// Builds IR inserting the pos-th stride into the descriptor - void setStride(OpBuilder &builder, Location loc, unsigned pos, Value *stride); + void setStride(OpBuilder &builder, Location loc, unsigned pos, + ValuePtr stride); void setConstantStride(OpBuilder &builder, Location loc, unsigned pos, uint64_t stride); @@ -220,19 +221,19 @@ class MemRefDescriptor : public StructBuilder { class UnrankedMemRefDescriptor : public StructBuilder { public: /// Construct a helper for the given descriptor value. - explicit UnrankedMemRefDescriptor(Value *descriptor); + explicit UnrankedMemRefDescriptor(ValuePtr descriptor); /// Builds IR creating an `undef` value of the descriptor type. static UnrankedMemRefDescriptor undef(OpBuilder &builder, Location loc, Type descriptorType); /// Builds IR extracting the rank from the descriptor - Value *rank(OpBuilder &builder, Location loc); + ValuePtr rank(OpBuilder &builder, Location loc); /// Builds IR setting the rank in the descriptor - void setRank(OpBuilder &builder, Location loc, Value *value); + void setRank(OpBuilder &builder, Location loc, ValuePtr value); /// Builds IR extracting ranked memref descriptor ptr - Value *memRefDescPtr(OpBuilder &builder, Location loc); + ValuePtr memRefDescPtr(OpBuilder &builder, Location loc); /// Builds IR setting ranked memref descriptor ptr - void setMemRefDescPtr(OpBuilder &builder, Location loc, Value *value); + void setMemRefDescPtr(OpBuilder &builder, Location loc, ValuePtr value); }; /// Base class for operation conversions targeting the LLVM IR dialect. 
Provides /// conversion patterns with an access to the containing LLVMLowering for the diff --git a/include/mlir/Dialect/AffineOps/AffineOps.h b/include/mlir/Dialect/AffineOps/AffineOps.h index 36b4e55e77c0..764f439e0204 100644 --- a/include/mlir/Dialect/AffineOps/AffineOps.h +++ b/include/mlir/Dialect/AffineOps/AffineOps.h @@ -41,7 +41,7 @@ class OpBuilder; /// A utility function to check if a value is defined at the top level of a /// function. A value of index type defined at the top level is always a valid /// symbol. -bool isTopLevelValue(Value *value); +bool isTopLevelValue(ValuePtr value); class AffineOpsDialect : public Dialect { public: @@ -148,18 +148,19 @@ class AffineDmaStartOp : public OpgetType().cast(); } @@ -191,7 +192,7 @@ class AffineDmaStartOp : public OpgetType().cast(); } @@ -225,7 +226,7 @@ class AffineDmaStartOp : public OpgetType().cast(); } @@ -249,13 +250,13 @@ class AffineDmaStartOp : public OpgetType().cast(); } @@ -367,14 +368,16 @@ class AffineDmaWaitOp : public OpgetType().cast(); } @@ -435,7 +438,7 @@ class AffineLoadOp : public OpgetType().cast(); @@ -506,7 +509,7 @@ class AffineStoreOp : public Op *operands); + SmallVectorImpl *operands); /// Canonicalizes an integer set the same way canonicalizeMapAndOperands does /// for affine maps. void canonicalizeSetAndOperands(IntegerSet *set, - SmallVectorImpl *operands); + SmallVectorImpl *operands); /// Returns a composed AffineApplyOp by composing `map` and `operands` with /// other AffineApplyOps supplying those operands. The operands of the resulting /// AffineApplyOp do not change the length of AffineApplyOp chains. AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map, - ArrayRef operands); + ArrayRef operands); /// Given an affine map `map` and its input `operands`, this method composes /// into `map`, maps of AffineApplyOps whose results are the values in @@ -558,22 +561,22 @@ AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map, /// terminal symbol, i.e., a symbol defined at the top level or a block/function /// argument. void fullyComposeAffineMapAndOperands(AffineMap *map, - SmallVectorImpl *operands); + SmallVectorImpl *operands); #define GET_OP_CLASSES #include "mlir/Dialect/AffineOps/AffineOps.h.inc" /// Returns if the provided value is the induction variable of a AffineForOp. -bool isForInductionVar(Value *val); +bool isForInductionVar(ValuePtr val); /// Returns the loop parent of an induction variable. If the provided value is /// not an induction variable, then return nullptr. -AffineForOp getForInductionVarOwner(Value *val); +AffineForOp getForInductionVarOwner(ValuePtr val); /// Extracts the induction variables from a list of AffineForOps and places them /// in the output argument `ivs`. void extractForInductionVars(ArrayRef forInsts, - SmallVectorImpl *ivs); + SmallVectorImpl *ivs); /// AffineBound represents a lower or upper bound in the for operation. /// This class does not own the underlying operands. 
Instead, it refers @@ -588,7 +591,7 @@ class AffineBound { AffineValueMap getAsAffineValueMap(); unsigned getNumOperands() { return opEnd - opStart; } - Value *getOperand(unsigned idx) { return op.getOperand(opStart + idx); } + ValuePtr getOperand(unsigned idx) { return op.getOperand(opStart + idx); } using operand_iterator = AffineForOp::operand_iterator; using operand_range = AffineForOp::operand_range; @@ -613,7 +616,7 @@ class AffineBound { }; /// An `AffineApplyNormalizer` is a helper class that supports renumbering -/// operands of AffineApplyOp. This acts as a reindexing map of Value* to +/// operands of AffineApplyOp. This acts as a reindexing map of Value to /// positional dims or symbols and allows simplifications such as: /// /// ```mlir @@ -626,13 +629,13 @@ class AffineBound { /// %1 = affine.apply () -> (0) /// ``` struct AffineApplyNormalizer { - AffineApplyNormalizer(AffineMap map, ArrayRef operands); + AffineApplyNormalizer(AffineMap map, ArrayRef operands); /// Returns the AffineMap resulting from normalization. AffineMap getAffineMap() { return affineMap; } - SmallVector getOperands() { - SmallVector res(reorderedDims); + SmallVector getOperands() { + SmallVector res(reorderedDims); res.append(concatenatedSymbols.begin(), concatenatedSymbols.end()); return res; } @@ -642,13 +645,13 @@ struct AffineApplyNormalizer { /// Normalizes 'otherMap' and its operands 'otherOperands' to map to this /// normalizer's coordinate space. - void normalize(AffineMap *otherMap, SmallVectorImpl *otherOperands); + void normalize(AffineMap *otherMap, SmallVectorImpl *otherOperands); private: /// Helper function to insert `v` into the coordinate system of the current /// AffineApplyNormalizer. Returns the AffineDimExpr with the corresponding /// renumbered position. - AffineDimExpr renumberOneDim(Value *v); + AffineDimExpr renumberOneDim(ValuePtr v); /// Given an `other` normalizer, this rewrites `other.affineMap` in the /// coordinate system of the current AffineApplyNormalizer. @@ -656,13 +659,13 @@ struct AffineApplyNormalizer { /// `this`. AffineMap renumber(const AffineApplyNormalizer &other); - /// Maps of Value* to position in `affineMap`. - DenseMap dimValueToPosition; + /// Maps of Value to position in `affineMap`. + DenseMap dimValueToPosition; /// Ordered dims and symbols matching positional dims and symbols in /// `affineMap`. 
- SmallVector reorderedDims; - SmallVector concatenatedSymbols; + SmallVector reorderedDims; + SmallVector concatenatedSymbols; AffineMap affineMap; diff --git a/include/mlir/Dialect/AffineOps/AffineOps.td b/include/mlir/Dialect/AffineOps/AffineOps.td index b40990ecb5de..befdc2f62376 100644 --- a/include/mlir/Dialect/AffineOps/AffineOps.td +++ b/include/mlir/Dialect/AffineOps/AffineOps.td @@ -101,7 +101,7 @@ def AffineForOp : Affine_Op<"for", static StringRef getUpperBoundAttrName() { return "upper_bound"; } Block *getBody() { return ®ion().front(); } - Value *getInductionVar() { return getBody()->getArgument(0); } + ValuePtr getInductionVar() { return getBody()->getArgument(0); } OpBuilder getBodyBuilder() { return OpBuilder(getBody(), std::prev(getBody()->end())); } @@ -286,8 +286,8 @@ def AffinePrefetchOp : Affine_Op<"prefetch"> { BoolAttr:$isDataCache); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *memref," - "AffineMap map, ArrayRef mapOperands, bool isWrite," + "Builder *builder, OperationState &result, ValuePtr memref," + "AffineMap map, ArrayRef mapOperands, bool isWrite," "unsigned localityHint, bool isDataCache", [{ assert(map.getNumInputs() == mapOperands.size() @@ -315,7 +315,7 @@ def AffinePrefetchOp : Affine_Op<"prefetch"> { } /// Returns the AffineMapAttr associated with 'memref'. - NamedAttribute getAffineMapAttrForMemRef(Value *mref) { + NamedAttribute getAffineMapAttrForMemRef(ValuePtr mref) { assert(mref == memref()); return {Identifier::get(getMapAttrName(), getContext()), getAffineMapAttr()}; diff --git a/include/mlir/Dialect/GPU/GPUDialect.h b/include/mlir/Dialect/GPU/GPUDialect.h index 93c0b13ee3ed..12c2aa1bbd1f 100644 --- a/include/mlir/Dialect/GPU/GPUDialect.h +++ b/include/mlir/Dialect/GPU/GPUDialect.h @@ -77,9 +77,9 @@ class GPUDialect : public Dialect { /// Utility class for the GPU dialect to represent triples of `Value`s /// accessible through `.x`, `.y`, and `.z` similarly to CUDA notation. struct KernelDim3 { - Value *x; - Value *y; - Value *z; + ValuePtr x; + ValuePtr y; + ValuePtr z; }; #define GET_OP_CLASSES diff --git a/include/mlir/Dialect/GPU/GPUOps.td b/include/mlir/Dialect/GPU/GPUOps.td index 6751f0a3f708..def1ff2b8a16 100644 --- a/include/mlir/Dialect/GPU/GPUOps.td +++ b/include/mlir/Dialect/GPU/GPUOps.td @@ -157,7 +157,7 @@ def GPU_GPUFuncOp : GPU_Op<"func", [FunctionLike, IsolatedFromAbove, Symbol]> { /// Returns a list of block arguments that correspond to buffers located in /// the workgroup memory - ArrayRef getWorkgroupAttributions() { + ArrayRef getWorkgroupAttributions() { auto begin = std::next(getBody().front().args_begin(), getType().getNumInputs()); auto end = std::next(begin, getNumWorkgroupAttributions()); @@ -166,7 +166,7 @@ def GPU_GPUFuncOp : GPU_Op<"func", [FunctionLike, IsolatedFromAbove, Symbol]> { /// Returns a list of block arguments that correspond to buffers located in /// the private memory. 
- ArrayRef getPrivateAttributions() { + ArrayRef getPrivateAttributions() { auto begin = std::next(getBody().front().args_begin(), getType().getNumInputs() + getNumWorkgroupAttributions()); @@ -282,8 +282,8 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func">, let builders = [ OpBuilder<"Builder *builder, OperationState &result, GPUFuncOp kernelFunc, " - "Value *gridSizeX, Value *gridSizeY, Value *gridSizeZ, " - "Value *blockSizeX, Value *blockSizeY, Value *blockSizeZ, " + "ValuePtr gridSizeX, ValuePtr gridSizeY, ValuePtr gridSizeZ, " + "ValuePtr blockSizeX, ValuePtr blockSizeY, ValuePtr blockSizeZ, " "ValueRange kernelOperands">, OpBuilder<"Builder *builder, OperationState &result, GPUFuncOp kernelFunc, " "KernelDim3 gridSize, KernelDim3 blockSize, " @@ -302,7 +302,7 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func">, StringRef getKernelModuleName(); /// The i-th operand passed to the kernel function. - Value *getKernelOperand(unsigned i); + ValuePtr getKernelOperand(unsigned i); /// Get the SSA values passed as operands to specify the grid size. KernelDim3 getGridSizeOperandValues(); @@ -415,9 +415,9 @@ def GPU_LaunchOp : GPU_Op<"launch", [IsolatedFromAbove]>, let skipDefaultBuilders = 1; let builders = [ - OpBuilder<"Builder *builder, OperationState &result, Value *gridSizeX," - "Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX," - "Value *blockSizeY, Value *blockSizeZ," + OpBuilder<"Builder *builder, OperationState &result, ValuePtr gridSizeX," + "ValuePtr gridSizeY, ValuePtr gridSizeZ, ValuePtr blockSizeX," + "ValuePtr blockSizeY, ValuePtr blockSizeZ," "ValueRange operands"> ]; diff --git a/include/mlir/Dialect/LLVMIR/LLVMDialect.h b/include/mlir/Dialect/LLVMIR/LLVMDialect.h index dae27d00e5a7..a599d51b31f2 100644 --- a/include/mlir/Dialect/LLVMIR/LLVMDialect.h +++ b/include/mlir/Dialect/LLVMIR/LLVMDialect.h @@ -194,9 +194,9 @@ class LLVMDialect : public Dialect { /// surrounding the insertion point of builder. Obtain the address of that /// global and use it to compute the address of the first character in the /// string (operations inserted at the builder insertion point). -Value *createGlobalString(Location loc, OpBuilder &builder, StringRef name, - StringRef value, LLVM::Linkage linkage, - LLVM::LLVMDialect *llvmDialect); +ValuePtr createGlobalString(Location loc, OpBuilder &builder, StringRef name, + StringRef value, LLVM::Linkage linkage, + LLVM::LLVMDialect *llvmDialect); /// LLVM requires some operations to be inside of a Module operation. This /// function confirms that the Operation has the desired properties. 
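(For readers following the transition, a minimal sketch of the aliases the renamed signatures above rely on. Only the per-header `using ValuePtr = Value *;` declarations and the `ValueRef` spelling appear in the diffs; the namespace placement, the `ValueRef` definition, and the comments below are assumptions made for illustration.)

namespace mlir {
class Value;
// Transitional aliases: today they mean exactly Value* / Value&. Once Value
// is value-typed, code already written against the aliases needs no further
// mechanical churn.
using ValuePtr = Value *;
using ValueRef = Value &;
} // namespace mlir

// A declaration such as createGlobalString then migrates one-for-one:
//   before: Value *createGlobalString(Location loc, OpBuilder &builder, ...);
//   after:  ValuePtr createGlobalString(Location loc, OpBuilder &builder, ...);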
diff --git a/include/mlir/Dialect/LLVMIR/LLVMOps.td b/include/mlir/Dialect/LLVMIR/LLVMOps.td index 00acc539dab6..cfbbf7da65dc 100644 --- a/include/mlir/Dialect/LLVMIR/LLVMOps.td +++ b/include/mlir/Dialect/LLVMIR/LLVMOps.td @@ -185,8 +185,8 @@ def LLVM_ICmpOp : LLVM_OneResultOp<"icmp", [NoSideEffect]>, $res = builder.CreateICmp(getLLVMCmpPredicate($predicate), $lhs, $rhs); }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, ICmpPredicate predicate, Value *lhs, " - "Value *rhs", [{ + "Builder *b, OperationState &result, ICmpPredicate predicate, ValuePtr lhs, " + "ValuePtr rhs", [{ LLVMDialect *dialect = &lhs->getType().cast().getDialect(); build(b, result, LLVMType::getInt1Ty(dialect), b->getI64IntegerAttr(static_cast(predicate)), lhs, rhs); @@ -232,8 +232,8 @@ def LLVM_FCmpOp : LLVM_OneResultOp<"fcmp", [NoSideEffect]>, $res = builder.CreateFCmp(getLLVMCmpPredicate($predicate), $lhs, $rhs); }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, FCmpPredicate predicate, Value *lhs, " - "Value *rhs", [{ + "Builder *b, OperationState &result, FCmpPredicate predicate, ValuePtr lhs, " + "ValuePtr rhs", [{ LLVMDialect *dialect = &lhs->getType().cast().getDialect(); build(b, result, LLVMType::getInt1Ty(dialect), b->getI64IntegerAttr(static_cast(predicate)), lhs, rhs); @@ -265,7 +265,7 @@ def LLVM_AllocaOp : $res = alloca; }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, Type resultType, Value *arraySize, " + "Builder *b, OperationState &result, Type resultType, ValuePtr arraySize, " "unsigned alignment", [{ if (alignment == 0) @@ -292,7 +292,7 @@ def LLVM_GEPOp : LLVM_OneResultOp<"getelementptr", [NoSideEffect]>, def LLVM_LoadOp : LLVM_OneResultOp<"load">, Arguments<(ins LLVM_Type:$addr)>, LLVM_Builder<"$res = builder.CreateLoad($addr);"> { let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *addr", + "Builder *b, OperationState &result, ValuePtr addr", [{ auto type = addr->getType().cast().getPointerElementTy(); build(b, result, type, addr); @@ -353,7 +353,7 @@ def LLVM_ExtractElementOp : LLVM_OneResultOp<"extractelement", [NoSideEffect]>, $res = builder.CreateExtractElement($vector, $position); }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *vector, Value *position," + "Builder *b, OperationState &result, ValuePtr vector, ValuePtr position," "ArrayRef attrs = {}">]; let parser = [{ return parseExtractElementOp(parser, result); }]; let printer = [{ printExtractElementOp(p, *this); }]; @@ -384,7 +384,7 @@ def LLVM_InsertValueOp : LLVM_OneResultOp<"insertvalue", [NoSideEffect]>, extractPosition($position)); }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *container, Value *value, " + "Builder *b, OperationState &result, ValuePtr container, ValuePtr value, " "ArrayAttr position", [{ build(b, result, container->getType(), container, value, position); @@ -398,7 +398,7 @@ def LLVM_ShuffleVectorOp LLVM_Builder< "$res = builder.CreateShuffleVector($v1, $v2, extractPosition($mask));"> { let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *v1, Value *v2, " + "Builder *b, OperationState &result, ValuePtr v1, ValuePtr v2, " "ArrayAttr mask, ArrayRef attrs = {}">]; let verifier = [{ auto wrappedVectorType1 = v1()->getType().cast(); @@ -422,8 +422,8 @@ def LLVM_SelectOp LLVM_Builder< "$res = builder.CreateSelect($condition, $trueValue, $falseValue);"> { let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *condition, Value *lhs, " - "Value 
*rhs", [{ + "Builder *b, OperationState &result, ValuePtr condition, ValuePtr lhs, " + "ValuePtr rhs", [{ build(b, result, lhs->getType(), condition, lhs, rhs); }]>]; let parser = [{ return parseSelectOp(parser, result); }]; diff --git a/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h b/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h index 01d3e4b239c3..426708b14a87 100644 --- a/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h +++ b/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h @@ -37,15 +37,15 @@ class LinalgOp; class Aliases { public: /// Returns true if v1 and v2 alias. - bool alias(Value *v1, Value *v2) { return find(v1) == find(v2); } + bool alias(ValuePtr v1, ValuePtr v2) { return find(v1) == find(v2); } private: /// Returns the base buffer or block argument into which the view `v` aliases. /// This lazily records the new aliases discovered while walking back the /// use-def chain. - Value *find(Value *v); + ValuePtr find(ValuePtr v); - DenseMap aliases; + DenseMap aliases; }; /// Data structure for holding a dependence graph that operates on LinalgOp and @@ -54,7 +54,7 @@ class LinalgDependenceGraph { public: struct LinalgOpView { Operation *op; - Value *view; + ValuePtr view; }; struct LinalgDependenceGraphElem { // dependentOpView may be either: @@ -64,7 +64,7 @@ class LinalgDependenceGraph { // View in the op that is used to index in the graph: // 1. src in the case of dependencesFromDstGraphs. // 2. dst in the case of dependencesIntoGraphs. - Value *indexingView; + ValuePtr indexingView; }; using LinalgDependences = SmallVector; using DependenceGraph = DenseMap; @@ -97,14 +97,14 @@ class LinalgDependenceGraph { /// Dependences are restricted to views aliasing `view`. SmallVector findCoveringReads(LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, - Value *view) const; + ValuePtr view) const; /// Returns the operations that are interleaved between `srcLinalgOp` and /// `dstLinalgOp` and that are involved in a WAR or WAW with `srcLinalgOp`. /// Dependences are restricted to views aliasing `view`. SmallVector findCoveringWrites(LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, - Value *view) const; + ValuePtr view) const; private: // Keep dependences in both directions, this is not just a performance gain @@ -130,7 +130,7 @@ class LinalgDependenceGraph { /// Implementation detail for findCoveringxxx. SmallVector findOperationsWithCoveringDependences(LinalgOp srcLinalgOp, - LinalgOp dstLinalgOp, Value *view, + LinalgOp dstLinalgOp, ValuePtr view, ArrayRef types) const; Aliases &aliases; diff --git a/include/mlir/Dialect/Linalg/EDSC/Builders.h b/include/mlir/Dialect/Linalg/EDSC/Builders.h index cf6335278b7c..8375e750a5c9 100644 --- a/include/mlir/Dialect/Linalg/EDSC/Builders.h +++ b/include/mlir/Dialect/Linalg/EDSC/Builders.h @@ -55,34 +55,34 @@ inline StringRef toString(IterType t) { /// makeLinalgGenericOp({A({m, n}), B({k, n})}, {C({m, n})}, ... 
); /// ``` struct StructuredIndexed { - StructuredIndexed(Value *v) : value(v) {} + StructuredIndexed(ValuePtr v) : value(v) {} StructuredIndexed operator()(ArrayRef indexings) { return StructuredIndexed(value, indexings); } - operator Value *() const /* implicit */ { return value; } + operator ValuePtr() const /* implicit */ { return value; } ArrayRef getExprs() { return exprs; } private: - StructuredIndexed(Value *v, ArrayRef indexings) + StructuredIndexed(ValuePtr v, ArrayRef indexings) : value(v), exprs(indexings.begin(), indexings.end()) { assert(v->getType().isa() && "MemRefType expected"); } StructuredIndexed(ValueHandle v, ArrayRef indexings) : StructuredIndexed(v.getValue(), indexings) {} - Value *value; + ValuePtr value; SmallVector exprs; }; -inline void defaultRegionBuilder(ArrayRef args) {} +inline void defaultRegionBuilder(ArrayRef args) {} Operation *makeLinalgGenericOp(ArrayRef iteratorTypes, ArrayRef inputs, ArrayRef outputs, - function_ref)> + function_ref)> regionBuilder = defaultRegionBuilder, - ArrayRef otherValues = {}, + ArrayRef otherValues = {}, ArrayRef otherAttributes = {}); namespace ops { @@ -96,7 +96,7 @@ using edsc::intrinsics::linalg_yield; /// Build the body of a region to compute a multiply-accumulate, under the /// current ScopedContext, at the current insert point. -void macRegionBuilder(ArrayRef args); +void macRegionBuilder(ArrayRef args); /// TODO(ntv): In the future we should tie these implementations to something in /// Tablegen that generates the proper interfaces and the proper sugared named @@ -120,7 +120,7 @@ void macRegionBuilder(ArrayRef args); /// with in-place semantics and parallelism. /// Unary pointwise operation (with broadcast) entry point. -using UnaryPointwiseOpBuilder = function_ref; +using UnaryPointwiseOpBuilder = function_ref; Operation *linalg_pointwise(UnaryPointwiseOpBuilder unaryOp, StructuredIndexed I, StructuredIndexed O); @@ -131,7 +131,7 @@ Operation *linalg_pointwise_tanh(StructuredIndexed I, StructuredIndexed O); /// Binary pointwise operation (with broadcast) entry point. using BinaryPointwiseOpBuilder = - function_ref; + function_ref; Operation *linalg_pointwise(BinaryPointwiseOpBuilder binaryOp, StructuredIndexed I1, StructuredIndexed I2, StructuredIndexed O); diff --git a/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td b/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td index 12318a244dfa..18ca31cc3762 100644 --- a/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td +++ b/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td @@ -92,22 +92,22 @@ def LinalgLibraryInterface : OpInterface<"LinalgOp"> { "Query the number of loops within the current operation.", "unsigned", "getNumLoops">, InterfaceMethod<"Query the input view at the given index.", - "Value *", "getInput", (ins "unsigned":$i) + "ValuePtr ", "getInput", (ins "unsigned":$i) >, InterfaceMethod<"Query the output view at the given index.", - "Value *", "getOutput", (ins "unsigned":$i) + "ValuePtr ", "getOutput", (ins "unsigned":$i) >, InterfaceMethod<[{ Query the index of the given input value, or `None` if the value is not an input. }], - "Optional", "getIndexOfInput", (ins "Value *":$view) + "Optional", "getIndexOfInput", (ins "ValuePtr ":$view) >, InterfaceMethod<[{ Query the index of the given view value, or `None` if the value is not an view. }], - "Optional", "getIndexOfOutput", (ins "Value *":$view) + "Optional", "getIndexOfOutput", (ins "ValuePtr ":$view) >, InterfaceMethod<[{ Query the type of the input view at the given index. 
@@ -228,7 +228,7 @@ def CopyOp : LinalgLibrary_Op<"copy", [NInputs<1>, NOutputs<1>]> { // TODO(ntv) this should go away once the usage of OptionalAttr triggers // emission of builders with default arguments left unspecified. let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *input, Value *output", [{ + "Builder *builder, OperationState &result, ValuePtr input, ValuePtr output", [{ return build( builder, result, input, output, AffineMapAttr(), AffineMapAttr()); }]>]; diff --git a/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/include/mlir/Dialect/Linalg/IR/LinalgOps.td index b806d7548fb1..5d402a9ded97 100644 --- a/include/mlir/Dialect/Linalg/IR/LinalgOps.td +++ b/include/mlir/Dialect/Linalg/IR/LinalgOps.td @@ -56,8 +56,8 @@ def Linalg_RangeOp : ```` }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *min, Value *max, " - "Value *step", + "Builder *builder, OperationState &result, ValuePtr min, ValuePtr max, " + "ValuePtr step", [{ auto rangeType = RangeType::get(builder->getContext()); build(builder, result, rangeType, min, max, step); @@ -112,7 +112,7 @@ def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>, }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *base, " + "Builder *b, OperationState &result, ValuePtr base, " "ValueRange indexings">]; let extraClassDeclaration = [{ @@ -124,12 +124,12 @@ def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>, MemRefType getBaseViewType() { return view()->getType().cast(); } // Get the underlying indexing at a given rank. - Value *indexing(unsigned rank) { return *(indexings().begin() + rank); } + ValuePtr indexing(unsigned rank) { return *(indexings().begin() + rank); } // Get the subset of indexings that are of RangeType. - SmallVector getRanges() { - SmallVector res; - for (auto *operand : indexings()) + SmallVector getRanges() { + SmallVector res; + for (auto operand : indexings()) if (!operand->getType().isa()) res.push_back(operand); return res; @@ -154,7 +154,7 @@ def Linalg_TransposeOp : Linalg_Op<"transpose", [NoSideEffect]>, }]; let builders = [OpBuilder< - "Builder *b, OperationState &result, Value *view, " + "Builder *b, OperationState &result, ValuePtr view, " "AffineMapAttr permutation, ArrayRef attrs = {}">]; let verifier = [{ diff --git a/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td index 75b63c93cd80..774be6616cd7 100644 --- a/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td +++ b/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td @@ -92,22 +92,22 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> { "Query the number of loops within the current operation.", "unsigned", "getNumLoops">, InterfaceMethod<"Query the input view at the given index.", - "Value *", "getInput", (ins "unsigned":$i) + "ValuePtr ", "getInput", (ins "unsigned":$i) >, InterfaceMethod<"Query the output view at the given index.", - "Value *", "getOutput", (ins "unsigned":$i) + "ValuePtr ", "getOutput", (ins "unsigned":$i) >, InterfaceMethod<[{ Query the index of the given input value, or `None` if the value is not an input. }], - "llvm::Optional", "getIndexOfInput", (ins "Value *":$view) + "llvm::Optional", "getIndexOfInput", (ins "ValuePtr ":$view) >, InterfaceMethod<[{ Query the index of the given view value, or `None` if the value is not an view. 
}], - "llvm::Optional", "getIndexOfOutput", (ins "Value *":$view) + "llvm::Optional", "getIndexOfOutput", (ins "ValuePtr ":$view) >, InterfaceMethod<[{ Query the type of the input view at the given index. @@ -228,7 +228,7 @@ def CopyOp : LinalgStructured_Op<"copy", [NInputs<1>, NOutputs<1>]> { // TODO(ntv) this should go away once the usage of OptionalAttr triggers // emission of builders with default arguments left unspecified. let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *input, Value *output", [{ + "Builder *builder, OperationState &result, ValuePtr input, ValuePtr output", [{ return build( builder, result, input, output, AffineMapAttr(), AffineMapAttr()); }]>]; diff --git a/include/mlir/Dialect/Linalg/IR/LinalgTraits.h b/include/mlir/Dialect/Linalg/IR/LinalgTraits.h index a24c1ca63c49..d196e6ccf947 100644 --- a/include/mlir/Dialect/Linalg/IR/LinalgTraits.h +++ b/include/mlir/Dialect/Linalg/IR/LinalgTraits.h @@ -77,13 +77,13 @@ class ViewTraits : public OpTrait::TraitBase { public: /// Return the `i`-th input view. - Value *getInput(unsigned i) { + ValuePtr getInput(unsigned i) { assert(i < nInputs()); return this->getOperation()->getOperand(i); } /// Return the index of `view` in the list of input views if found, llvm::None /// otherwise. - Optional getIndexOfInput(Value *view) { + Optional getIndexOfInput(ValuePtr view) { auto it = llvm::find(getInputs(), view); if (it != getInputs().end()) return it - getInputs().begin(); @@ -99,12 +99,12 @@ class ViewTraits : public OpTrait::TraitBase { return {range.begin(), range.begin() + nInputs()}; } /// Return the `i`-th output view. - Value *getOutput(unsigned i) { + ValuePtr getOutput(unsigned i) { return this->getOperation()->getOperand(nInputs() + i); } /// Return the index of `view` in the list of output views if found, /// llvm::None otherwise. - Optional getIndexOfOutput(Value *view) { + Optional getIndexOfOutput(ValuePtr view) { auto it = llvm::find(getOutputs(), view); if (it != getOutputs().end()) return it - getOutputs().begin(); diff --git a/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td b/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td index 415dd918f743..dbc162f4132e 100644 --- a/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td +++ b/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td @@ -45,7 +45,7 @@ class AffineMapDomainHasDim : CPred<[{ class HasOperandsOfType: CPred<[{ llvm::any_of($0.getOperands(), - [](Value* v) { + [](ValuePtr v) { return dyn_cast_or_null<}] # type # [{>(v->getDefiningOp()); }) }]>; diff --git a/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h b/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h index dfbac5ac1933..a1a7458ae7fa 100644 --- a/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h +++ b/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h @@ -38,7 +38,7 @@ struct LinalgTransforms { namespace detail { // Implementation detail of isProducedByOpOfType avoids the need for explicit // template instantiations. -bool isProducedByOpOfTypeImpl(Operation *consumerOp, Value *consumedView, +bool isProducedByOpOfTypeImpl(Operation *consumerOp, ValuePtr consumedView, function_ref isaOpType); } // namespace detail @@ -46,7 +46,7 @@ bool isProducedByOpOfTypeImpl(Operation *consumerOp, Value *consumedView, // an op of type `OpTy`. This is used to implement use-def type information on // buffers. 
template -bool isProducedByOpOfType(Operation *consumerOp, Value *consumedView) { +bool isProducedByOpOfType(Operation *consumerOp, ValuePtr consumedView) { return detail::isProducedByOpOfTypeImpl( consumerOp, consumedView, [](Operation *op) { return isa(op); }); } diff --git a/include/mlir/Dialect/Linalg/Utils/Utils.h b/include/mlir/Dialect/Linalg/Utils/Utils.h index f8d10ecfa576..50039dd93369 100644 --- a/include/mlir/Dialect/Linalg/Utils/Utils.h +++ b/include/mlir/Dialect/Linalg/Utils/Utils.h @@ -34,7 +34,7 @@ namespace edsc { /// A LoopRangeBuilder is a generic NestedBuilder for loop.for operations. /// More specifically it is meant to be used as a temporary object for -/// representing any nested MLIR construct that is "related to" an mlir::Value* +/// representing any nested MLIR construct that is "related to" an mlir::Value /// (for now an induction variable). class LoopRangeBuilder : public NestedBuilder { public: @@ -42,7 +42,7 @@ class LoopRangeBuilder : public NestedBuilder { /// variable. A ValueHandle pointer is passed as the first argument and is the /// *only* way to capture the loop induction variable. LoopRangeBuilder(ValueHandle *iv, ValueHandle range); - LoopRangeBuilder(ValueHandle *iv, Value *range); + LoopRangeBuilder(ValueHandle *iv, ValuePtr range); LoopRangeBuilder(ValueHandle *iv, SubViewOp::Range range); LoopRangeBuilder(const LoopRangeBuilder &) = delete; @@ -65,7 +65,7 @@ class LoopNestRangeBuilder { LoopNestRangeBuilder(ArrayRef ivs, ArrayRef ranges); LoopNestRangeBuilder(ArrayRef ivs, - ArrayRef ranges); + ArrayRef ranges); LoopNestRangeBuilder(ArrayRef ivs, ArrayRef ranges); edsc::ValueHandle operator()(std::function fun = nullptr); @@ -88,14 +88,14 @@ struct FusionInfo { /// whole `consumedView`. This checks structural dominance, that the dependence /// is a RAW without any interleaved write to any piece of `consumedView`. bool isProducerLastWriteOfView(const LinalgDependenceGraph &graph, - LinalgOp consumer, Value *consumedView, + LinalgOp consumer, ValuePtr consumedView, LinalgOp producer); /// Checks whether fusing the specific `producer` of the `consumedView` is /// feasible. This checks `producer` is the last write of `consumedView` and /// that no interleaved dependence would be violated (RAW, WAR or WAW). bool isFusableInto(const LinalgDependenceGraph &graph, LinalgOp consumer, - Value *consumedView, LinalgOp producer); + ValuePtr consumedView, LinalgOp producer); /// Fuses producer into consumer if the producer is structurally feasible and /// the fusion would not violate dependencies. @@ -111,8 +111,8 @@ Optional fuseProducerOf(OpBuilder &b, LinalgOp consumer, /// the inverse, concatenated loopToOperandRangeMaps to this list allows the /// derivation of loop ranges for any linalgOp. template -SmallVector getViewSizes(ConcreteOp linalgOp) { - SmallVector res; +SmallVector getViewSizes(ConcreteOp linalgOp) { + SmallVector res; for (auto v : linalgOp.getInputsAndOutputs()) { MemRefType t = v->getType().template cast(); for (unsigned i = 0; i < t.getRank(); ++i) @@ -125,10 +125,10 @@ SmallVector getViewSizes(ConcreteOp linalgOp) { /// When non-null, the optional pointer `folder` is used to call into the /// `createAndFold` builder method. If `folder` is null, the regular `create` /// method is called. 
-SmallVector applyMapToValues(OpBuilder &b, Location loc, - AffineMap map, - ArrayRef values, - OperationFolder *folder = nullptr); +SmallVector applyMapToValues(OpBuilder &b, Location loc, + AffineMap map, + ArrayRef values, + OperationFolder *folder = nullptr); struct TiledLinalgOp { LinalgOp op; @@ -151,7 +151,7 @@ struct TiledLinalgOp { /// `createAndFold` builder method. If `folder` is null, the regular `create` /// method is called. Optional tileLinalgOp(OpBuilder &b, LinalgOp op, - ArrayRef tileSizes, + ArrayRef tileSizes, ArrayRef permutation = {}, OperationFolder *folder = nullptr); @@ -182,9 +182,9 @@ Optional tileLinalgOperation(OpBuilder &b, Operation *op, } struct PromotionInfo { - Value *buffer; - Value *fullLocalView; - Value *partialLocalView; + ValuePtr buffer; + ValuePtr fullLocalView; + ValuePtr partialLocalView; }; /// Promotes the `subViews` into a new buffer allocated at the insertion point @@ -199,13 +199,13 @@ struct PromotionInfo { /// Returns a list of PromotionInfo which hold the promoted buffer and the /// full and partial views indexing into the buffer. SmallVector -promoteSubViews(OpBuilder &b, Location loc, ArrayRef subViews, +promoteSubViews(OpBuilder &b, Location loc, ArrayRef subViews, bool dynamicBuffers = false, OperationFolder *folder = nullptr); /// Returns all the operands of `linalgOp` that are not views. /// Asserts that these operands are value types to allow transformations like /// tiling to just use the values when cloning `linalgOp`. -SmallVector getAssumedNonViewOperands(LinalgOp linalgOp); +SmallVector getAssumedNonViewOperands(LinalgOp linalgOp); /// Apply the permutation defined by `permutation` to `inVec`. /// Element `i` in `inVec` is mapped to location `j = permutation[i]`. @@ -226,7 +226,7 @@ void applyPermutationToVector(SmallVector &inVec, /// It is the entry point for declarative transformation /// Returns the cloned `LinalgOp` with the new operands LinalgOp promoteSubViewOperands(OpBuilder &b, LinalgOp op, - llvm::SetVector subViews, + llvm::SetVector subViews, bool dynamicBuffers = false, OperationFolder *folder = nullptr); diff --git a/include/mlir/Dialect/LoopOps/LoopOps.h b/include/mlir/Dialect/LoopOps/LoopOps.h index fdadf4a40dd5..e7ff6f849772 100644 --- a/include/mlir/Dialect/LoopOps/LoopOps.h +++ b/include/mlir/Dialect/LoopOps/LoopOps.h @@ -50,7 +50,7 @@ void ensureLoopTerminator(Region ®ion, Builder &builder, Location loc); /// Returns the loop parent of an induction variable. If the provided value is /// not an induction variable, then return nullptr. 
-ForOp getForInductionVarOwner(Value *val); +ForOp getForInductionVarOwner(ValuePtr val); } // end namespace loop } // end namespace mlir diff --git a/include/mlir/Dialect/LoopOps/LoopOps.td b/include/mlir/Dialect/LoopOps/LoopOps.td index 5e0b80984119..e0f5b896309d 100644 --- a/include/mlir/Dialect/LoopOps/LoopOps.td +++ b/include/mlir/Dialect/LoopOps/LoopOps.td @@ -74,18 +74,18 @@ def ForOp : Loop_Op<"for", let skipDefaultBuilders = 1; let builders = [ OpBuilder<"Builder *builder, OperationState &result, " - "Value *lowerBound, Value *upperBound, Value *step"> + "ValuePtr lowerBound, ValuePtr upperBound, ValuePtr step"> ]; let extraClassDeclaration = [{ Block *getBody() { return ®ion().front(); } - Value *getInductionVar() { return getBody()->getArgument(0); } + ValuePtr getInductionVar() { return getBody()->getArgument(0); } OpBuilder getBodyBuilder() { return OpBuilder(getBody(), std::prev(getBody()->end())); } - void setLowerBound(Value *bound) { getOperation()->setOperand(0, bound); } - void setUpperBound(Value *bound) { getOperation()->setOperand(1, bound); } - void setStep(Value *step) { getOperation()->setOperand(2, step); } + void setLowerBound(ValuePtr bound) { getOperation()->setOperand(0, bound); } + void setUpperBound(ValuePtr bound) { getOperation()->setOperand(1, bound); } + void setStep(ValuePtr step) { getOperation()->setOperand(2, step); } }]; } @@ -116,7 +116,7 @@ def IfOp : Loop_Op<"if", let skipDefaultBuilders = 1; let builders = [ OpBuilder<"Builder *builder, OperationState &result, " - "Value *cond, bool withElseRegion"> + "ValuePtr cond, bool withElseRegion"> ]; let extraClassDeclaration = [{ diff --git a/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td b/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td index d6e2e1c6fdad..d19fd974684e 100644 --- a/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td +++ b/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td @@ -120,7 +120,7 @@ def SPV_CompositeExtractOp : SPV_Op<"CompositeExtract", [NoSideEffect]> { let builders = [ OpBuilder<[{Builder *builder, OperationState &state, - Value *composite, ArrayRef indices}]> + ValuePtr composite, ArrayRef indices}]> ]; let hasFolder = 1; diff --git a/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td b/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td index 464b670dae9a..32a78024560a 100644 --- a/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td +++ b/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td @@ -132,7 +132,7 @@ def SPV_BranchConditionalOp : SPV_Op<"BranchConditional", let builders = [ OpBuilder< - "Builder *builder, OperationState &state, Value *condition, " + "Builder *builder, OperationState &state, ValuePtr condition, " "Block *trueBlock, ValueRange trueArguments, " "Block *falseBlock, ValueRange falseArguments, " "Optional> weights = {}", diff --git a/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td b/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td index 0c4b2902a128..e1e94bcd8619 100644 --- a/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td +++ b/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td @@ -858,8 +858,8 @@ def SPV_SelectOp : SPV_Op<"Select", [NoSideEffect]> { ); let builders = [OpBuilder<[{Builder *builder, OperationState &state, - Value *cond, Value *trueValue, - Value *falseValue}]>]; + ValuePtr cond, ValuePtr trueValue, + ValuePtr falseValue}]>]; } // ----- diff --git a/include/mlir/Dialect/SPIRV/SPIRVLowering.h b/include/mlir/Dialect/SPIRV/SPIRVLowering.h index f48a1d0b1294..37b4ee242370 100644 --- a/include/mlir/Dialect/SPIRV/SPIRVLowering.h +++ 
b/include/mlir/Dialect/SPIRV/SPIRVLowering.h @@ -64,8 +64,8 @@ class SPIRVOpLowering : public OpConversionPattern { namespace spirv { /// Returns a value that represents a builtin variable value within the SPIR-V /// module. -Value *getBuiltinVariableValue(Operation *op, spirv::BuiltIn builtin, - OpBuilder &builder); +ValuePtr getBuiltinVariableValue(Operation *op, spirv::BuiltIn builtin, + OpBuilder &builder); /// Attribute name for specifying argument ABI information. StringRef getInterfaceVarABIAttrName(); diff --git a/include/mlir/Dialect/SPIRV/SPIRVOps.td b/include/mlir/Dialect/SPIRV/SPIRVOps.td index 91ea8d7d6761..777e5750486a 100644 --- a/include/mlir/Dialect/SPIRV/SPIRVOps.td +++ b/include/mlir/Dialect/SPIRV/SPIRVOps.td @@ -102,7 +102,7 @@ def SPV_AccessChainOp : SPV_Op<"AccessChain", [NoSideEffect]> { ); let builders = [OpBuilder<[{Builder *builder, OperationState &state, - Value *basePtr, ValueRange indices}]>]; + ValuePtr basePtr, ValueRange indices}]>]; let hasCanonicalizer = 1; } @@ -272,7 +272,7 @@ def SPV_LoadOp : SPV_Op<"Load", []> { ); let builders = [OpBuilder<[{Builder *builder, OperationState &state, - Value *basePtr, /*optional*/IntegerAttr memory_access, + ValuePtr basePtr, /*optional*/IntegerAttr memory_access, /*optional*/IntegerAttr alignment}]>]; } @@ -367,7 +367,7 @@ def SPV_StoreOp : SPV_Op<"Store", []> { let builders = [ OpBuilder<"Builder *builder, OperationState &state, " - "Value *ptr, Value *value, ArrayRef namedAttrs", [{ + "ValuePtr ptr, ValuePtr value, ArrayRef namedAttrs", [{ state.addOperands(ptr); state.addOperands(value); state.addAttributes(namedAttrs); diff --git a/include/mlir/Dialect/StandardOps/Ops.h b/include/mlir/Dialect/StandardOps/Ops.h index 1b1cf02d2046..563116823d98 100644 --- a/include/mlir/Dialect/StandardOps/Ops.h +++ b/include/mlir/Dialect/StandardOps/Ops.h @@ -182,15 +182,15 @@ class DmaStartOp public: using Op::Op; - static void build(Builder *builder, OperationState &result, Value *srcMemRef, - ValueRange srcIndices, Value *destMemRef, - ValueRange destIndices, Value *numElements, - Value *tagMemRef, ValueRange tagIndices, - Value *stride = nullptr, - Value *elementsPerStride = nullptr); + static void build(Builder *builder, OperationState &result, + ValuePtr srcMemRef, ValueRange srcIndices, + ValuePtr destMemRef, ValueRange destIndices, + ValuePtr numElements, ValuePtr tagMemRef, + ValueRange tagIndices, ValuePtr stride = nullptr, + ValuePtr elementsPerStride = nullptr); // Returns the source MemRefType for this DMA operation. - Value *getSrcMemRef() { return getOperand(0); } + ValuePtr getSrcMemRef() { return getOperand(0); } // Returns the rank (number of indices) of the source MemRefType. unsigned getSrcMemRefRank() { return getSrcMemRef()->getType().cast().getRank(); @@ -202,7 +202,7 @@ class DmaStartOp } // Returns the destination MemRefType for this DMA operations. - Value *getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); } + ValuePtr getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); } // Returns the rank (number of indices) of the destination MemRefType. unsigned getDstMemRefRank() { return getDstMemRef()->getType().cast().getRank(); @@ -222,12 +222,12 @@ class DmaStartOp } // Returns the number of elements being transferred by this DMA operation. - Value *getNumElements() { + ValuePtr getNumElements() { return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank()); } // Returns the Tag MemRef for this DMA operation. 
- Value *getTagMemRef() { + ValuePtr getTagMemRef() { return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1); } // Returns the rank (number of indices) of the tag MemRefType. @@ -276,13 +276,13 @@ class DmaStartOp 1 + 1 + getTagMemRefRank(); } - Value *getStride() { + ValuePtr getStride() { if (!isStrided()) return nullptr; return getOperand(getNumOperands() - 1 - 1); } - Value *getNumElementsPerStride() { + ValuePtr getNumElementsPerStride() { if (!isStrided()) return nullptr; return getOperand(getNumOperands() - 1); @@ -307,13 +307,14 @@ class DmaWaitOp public: using Op::Op; - static void build(Builder *builder, OperationState &result, Value *tagMemRef, - ValueRange tagIndices, Value *numElements); + static void build(Builder *builder, OperationState &result, + ValuePtr tagMemRef, ValueRange tagIndices, + ValuePtr numElements); static StringRef getOperationName() { return "std.dma_wait"; } // Returns the Tag MemRef associated with the DMA operation being waited on. - Value *getTagMemRef() { return getOperand(0); } + ValuePtr getTagMemRef() { return getOperand(0); } // Returns the tag memref index for this DMA operation. operand_range getTagIndices() { @@ -327,7 +328,7 @@ class DmaWaitOp } // Returns the number of elements transferred in the associated DMA operation. - Value *getNumElements() { return getOperand(1 + getTagMemRefRank()); } + ValuePtr getNumElements() { return getOperand(1 + getTagMemRefRank()); } static ParseResult parse(OpAsmParser &parser, OperationState &result); void print(OpAsmPrinter &p); @@ -342,7 +343,7 @@ void printDimAndSymbolList(Operation::operand_iterator begin, /// Parses dimension and symbol list and returns true if parsing failed. ParseResult parseDimAndSymbolList(OpAsmParser &parser, - SmallVectorImpl &operands, + SmallVectorImpl &operands, unsigned &numDims); raw_ostream &operator<<(raw_ostream &os, SubViewOp::Range &range); diff --git a/include/mlir/Dialect/StandardOps/Ops.td b/include/mlir/Dialect/StandardOps/Ops.td index c26baf6a76e1..e00674708f65 100644 --- a/include/mlir/Dialect/StandardOps/Ops.td +++ b/include/mlir/Dialect/StandardOps/Ops.td @@ -52,7 +52,7 @@ class CastOp traits = []> : let results = (outs AnyType); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *source, Type destType", [{ + "Builder *builder, OperationState &result, ValuePtr source, Type destType", [{ impl::buildCastOp(builder, result, source, destType); }]>]; @@ -191,7 +191,7 @@ def AllocOp : Std_Op<"alloc"> { }]>, OpBuilder< "Builder *builder, OperationState &result, MemRefType memrefType, " # - "ArrayRef operands, IntegerAttr alignment = IntegerAttr()", [{ + "ArrayRef operands, IntegerAttr alignment = IntegerAttr()", [{ result.addOperands(operands); result.types.push_back(memrefType); if (alignment) @@ -330,7 +330,7 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> { let results = (outs Variadic); let builders = [OpBuilder< - "Builder *, OperationState &result, Value *callee," + "Builder *, OperationState &result, ValuePtr callee," "ValueRange operands = {}", [{ result.operands.push_back(callee); result.addOperands(operands); @@ -338,7 +338,7 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> { }]>]; let extraClassDeclaration = [{ - Value *getCallee() { return getOperand(0); } + ValuePtr getCallee() { return getOperand(0); } /// Get the argument operands to the called function. 
operand_range getArgOperands() { @@ -395,7 +395,7 @@ def CmpFOp : Std_Op<"cmpf", let builders = [OpBuilder< "Builder *builder, OperationState &result, CmpFPredicate predicate," - "Value *lhs, Value *rhs", [{ + "ValuePtr lhs, ValuePtr rhs", [{ ::buildCmpFOp(builder, result, predicate, lhs, rhs); }]>]; @@ -463,7 +463,7 @@ def CmpIOp : Std_Op<"cmpi", let builders = [OpBuilder< "Builder *builder, OperationState &result, CmpIPredicate predicate," - "Value *lhs, Value *rhs", [{ + "ValuePtr lhs, ValuePtr rhs", [{ ::buildCmpIOp(builder, result, predicate, lhs, rhs); }]>]; @@ -502,7 +502,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> { let arguments = (ins I1:$condition, Variadic:$branchOperands); let builders = [OpBuilder< - "Builder *, OperationState &result, Value *condition," + "Builder *, OperationState &result, ValuePtr condition," "Block *trueDest, ValueRange trueOperands," "Block *falseDest, ValueRange falseOperands", [{ result.addOperands(condition); @@ -518,7 +518,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> { enum { trueIndex = 0, falseIndex = 1 }; // The condition operand is the first operand in the list. - Value *getCondition() { return getOperand(0); } + ValuePtr getCondition() { return getOperand(0); } /// Return the destination if the condition is true. Block *getTrueDest() { @@ -531,12 +531,12 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> { } // Accessors for operands to the 'true' destination. - Value *getTrueOperand(unsigned idx) { + ValuePtr getTrueOperand(unsigned idx) { assert(idx < getNumTrueOperands()); return getOperand(getTrueDestOperandIndex() + idx); } - void setTrueOperand(unsigned idx, Value *value) { + void setTrueOperand(unsigned idx, ValuePtr value) { assert(idx < getNumTrueOperands()); setOperand(getTrueDestOperandIndex() + idx, value); } @@ -561,11 +561,11 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> { } // Accessors for operands to the 'false' destination. 
- Value *getFalseOperand(unsigned idx) { + ValuePtr getFalseOperand(unsigned idx) { assert(idx < getNumFalseOperands()); return getOperand(getFalseDestOperandIndex() + idx); } - void setFalseOperand(unsigned idx, Value *value) { + void setFalseOperand(unsigned idx, ValuePtr value) { assert(idx < getNumFalseOperands()); setOperand(getFalseDestOperandIndex() + idx, value); } @@ -678,7 +678,7 @@ def DimOp : Std_Op<"dim", [NoSideEffect]> { let results = (outs Index); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *memrefOrTensor," + "Builder *builder, OperationState &result, ValuePtr memrefOrTensor," "unsigned index", [{ auto indexType = builder->getIndexType(); auto indexAttr = builder->getIntegerAttr(indexType, index); @@ -730,7 +730,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> { let results = (outs AnyType); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *aggregate," + "Builder *builder, OperationState &result, ValuePtr aggregate," "ValueRange indices = {}", [{ auto resType = aggregate->getType().cast() .getElementType(); @@ -738,7 +738,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> { }]>]; let extraClassDeclaration = [{ - Value *getAggregate() { return getOperand(0); } + ValuePtr getAggregate() { return getOperand(0); } operand_range getIndices() { return {operand_begin() + 1, operand_end()}; @@ -816,7 +816,7 @@ def LoadOp : Std_Op<"load"> { let results = (outs AnyType); let builders = [OpBuilder< - "Builder *, OperationState &result, Value *memref," + "Builder *, OperationState &result, ValuePtr memref," "ValueRange indices = {}", [{ auto memrefType = memref->getType().cast(); result.addOperands(memref); @@ -825,8 +825,8 @@ def LoadOp : Std_Op<"load"> { }]>]; let extraClassDeclaration = [{ - Value *getMemRef() { return getOperand(0); } - void setMemRef(Value *value) { setOperand(0, value); } + ValuePtr getMemRef() { return getOperand(0); } + void setMemRef(ValuePtr value) { setOperand(0, value); } MemRefType getMemRefType() { return getMemRef()->getType().cast(); } @@ -952,8 +952,8 @@ def PrefetchOp : Std_Op<"prefetch"> { BoolAttr:$isDataCache); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *memref," - "ArrayRef indices, bool isWrite, unsigned hint, bool isData", + "Builder *builder, OperationState &result, ValuePtr memref," + "ArrayRef indices, bool isWrite, unsigned hint, bool isData", [{ auto hintAttr = builder->getI32IntegerAttr(hint); auto isWriteAttr = builder->getBoolAttr(isWrite); @@ -990,7 +990,7 @@ def RankOp : Std_Op<"rank", [NoSideEffect]> { let verifier = ?; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *tensor", [{ + "Builder *builder, OperationState &result, ValuePtr tensor", [{ auto indexType = builder->getIndexType(); build(builder, result, indexType, tensor); }]>]; @@ -1052,16 +1052,16 @@ def SelectOp : Std_Op<"select", [NoSideEffect, SameOperandsAndResultShape]> { let results = (outs AnyType); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *condition," - "Value *trueValue, Value *falseValue", [{ + "Builder *builder, OperationState &result, ValuePtr condition," + "ValuePtr trueValue, ValuePtr falseValue", [{ result.addOperands({condition, trueValue, falseValue}); result.addTypes(trueValue->getType()); }]>]; let extraClassDeclaration = [{ - Value *getCondition() { return condition(); } - Value *getTrueValue() { return true_value(); } - Value *getFalseValue() { return 
false_value(); } + ValuePtr getCondition() { return condition(); } + ValuePtr getTrueValue() { return true_value(); } + ValuePtr getFalseValue() { return false_value(); } }]; let hasFolder = 1; @@ -1089,7 +1089,7 @@ def SignExtendIOp : Std_Op<"sexti", let results = (outs IntegerLike); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *value, Type destType", [{ + "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{ result.addOperands(value); result.addTypes(destType); }]>]; @@ -1189,7 +1189,7 @@ def SplatOp : Std_Op<"splat", [NoSideEffect]> { let results = (outs AnyTypeOf<[AnyVector, AnyStaticShapeTensor]>:$aggregate); let builders = - [OpBuilder<"Builder *builder, OperationState &result, Value *element, " + [OpBuilder<"Builder *builder, OperationState &result, ValuePtr element, " "Type aggregateType", [{ build(builder, result, aggregateType, element); }]>]; @@ -1213,16 +1213,16 @@ def StoreOp : Std_Op<"store"> { Variadic:$indices); let builders = [OpBuilder< - "Builder *, OperationState &result, Value *valueToStore, Value *memref", [{ + "Builder *, OperationState &result, ValuePtr valueToStore, ValuePtr memref", [{ result.addOperands(valueToStore); result.addOperands(memref); }]>]; let extraClassDeclaration = [{ - Value *getValueToStore() { return getOperand(0); } + ValuePtr getValueToStore() { return getOperand(0); } - Value *getMemRef() { return getOperand(1); } - void setMemRef(Value *value) { setOperand(1, value); } + ValuePtr getMemRef() { return getOperand(1); } + void setMemRef(ValuePtr value) { setOperand(1, value); } MemRefType getMemRefType() { return getMemRef()->getType().cast(); } @@ -1364,13 +1364,13 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> { let builders = [ OpBuilder< - "Builder *b, OperationState &result, Value *source, " + "Builder *b, OperationState &result, ValuePtr source, " "ValueRange offsets, ValueRange sizes, " "ValueRange strides, Type resultType = Type(), " "ArrayRef attrs = {}">, OpBuilder< "Builder *builder, OperationState &result, " - "Type resultType, Value *source"> + "Type resultType, ValuePtr source"> ]; let extraClassDeclaration = [{ @@ -1403,7 +1403,7 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> { // offset, size and stride operands of the SubViewOp into a list of triples. // Such a list of triple is sometimes more convenient to manipulate. struct Range { - Value *offset, *size, *stride; + ValuePtr offset, size, stride; }; SmallVector getRanges(); }]; @@ -1465,7 +1465,7 @@ def TensorLoadOp : Std_Op<"tensor_load", let verifier = ?; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *memref", [{ + "Builder *builder, OperationState &result, ValuePtr memref", [{ auto memrefType = memref->getType().cast(); auto resultType = RankedTensorType::get(memrefType.getShape(), memrefType.getElementType()); @@ -1519,7 +1519,7 @@ def TruncateIOp : Std_Op<"trunci", [NoSideEffect, SameOperandsAndResultShape]> { let results = (outs IntegerLike); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *value, Type destType", [{ + "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{ result.addOperands(value); result.addTypes(destType); }]>]; @@ -1578,7 +1578,7 @@ def ViewOp : Std_Op<"view", [NoSideEffect]> { /// Returns the dynamic offset for this view operation if specified. /// Returns nullptr if no dynamic offset was specified. 
- Value *getDynamicOffset(); + ValuePtr getDynamicOffset(); /// Returns the starting operand list position of the dynamic size operands. unsigned getDynamicSizesOperandStart() { @@ -1619,7 +1619,7 @@ def ZeroExtendIOp : Std_Op<"zexti", [NoSideEffect, SameOperandsAndResultShape]> let results = (outs IntegerLike); let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *value, Type destType", [{ + "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{ result.addOperands(value); result.addTypes(destType); }]>]; diff --git a/include/mlir/Dialect/VectorOps/Utils.h b/include/mlir/Dialect/VectorOps/Utils.h index f61a813855d2..68c62cc7ec75 100644 --- a/include/mlir/Dialect/VectorOps/Utils.h +++ b/include/mlir/Dialect/VectorOps/Utils.h @@ -34,6 +34,9 @@ class Operation; class Value; class VectorType; +// TODO(riverriddle) Remove this after Value is value-typed. +using ValuePtr = Value *; + /// Computes and returns the multi-dimensional ratio of `superShape` to /// `subShape`. This is calculated by performing a traversal from minor to major /// dimensions (i.e. in reverse shape order). If integral division is not @@ -122,7 +125,7 @@ Optional> shapeRatio(VectorType superVectorType, /// `%arg0[%c0, %c0]` into vector<128xf32> which needs a 1-D vector broadcast. /// AffineMap -makePermutationMap(Operation *op, ArrayRef indices, +makePermutationMap(Operation *op, ArrayRef indices, const DenseMap &loopToVectorDim); namespace matcher { diff --git a/include/mlir/Dialect/VectorOps/VectorOps.td b/include/mlir/Dialect/VectorOps/VectorOps.td index 5fd19498350a..94262e6f1ff3 100644 --- a/include/mlir/Dialect/VectorOps/VectorOps.td +++ b/include/mlir/Dialect/VectorOps/VectorOps.td @@ -128,8 +128,8 @@ def Vector_ContractionOp : : vector<7x8x16x15xf32>, vector<8x16x7x5xf32> into vector<8x15x8x5xf32> }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *lhs, Value *rhs, " - "Value *acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">]; + "Builder *builder, OperationState &result, ValuePtr lhs, ValuePtr rhs, " + "ValuePtr acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">]; let extraClassDeclaration = [{ VectorType getLhsType() { return lhs()->getType().cast(); @@ -252,7 +252,8 @@ def Vector_ShuffleOp : ``` }]; - let builders = [OpBuilder<"Builder *builder, OperationState &result, Value *v1, Value *v2, ArrayRef">]; + let builders = [OpBuilder<"Builder *builder, OperationState &result," + "ValuePtr v1, ValuePtr v2, ArrayRef">]; let extraClassDeclaration = [{ static StringRef getMaskAttrName() { return "mask"; } VectorType getV1VectorType() { @@ -312,7 +313,8 @@ def Vector_ExtractOp : ``` }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *source, ArrayRef">]; + "Builder *builder, OperationState &result, ValuePtr source," + "ArrayRef">]; let extraClassDeclaration = [{ static StringRef getPositionAttrName() { return "position"; } VectorType getVectorType() { @@ -357,7 +359,7 @@ def Vector_ExtractSlicesOp : }]; let builders = [OpBuilder< "Builder *builder, OperationState &result, TupleType tupleType, " # - "Value *vector, ArrayRef sizes, " # + "ValuePtr vector, ArrayRef sizes, " # "ArrayRef strides">]; let extraClassDeclaration = [{ VectorType getSourceVectorType() { @@ -428,8 +430,8 @@ def Vector_InsertOp : ``` }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *source, " # - "Value *dest, ArrayRef">]; + "Builder *builder, OperationState &result, ValuePtr source, " # + "ValuePtr 
dest, ArrayRef">]; let extraClassDeclaration = [{ static StringRef getPositionAttrName() { return "position"; } Type getSourceType() { return source()->getType(); } @@ -521,7 +523,7 @@ def Vector_InsertStridedSliceOp : ``` }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *source, Value *dest, " # + "Builder *builder, OperationState &result, ValuePtr source, ValuePtr dest, " # "ArrayRef offsets, ArrayRef strides">]; let extraClassDeclaration = [{ static StringRef getOffsetsAttrName() { return "offsets"; } @@ -723,7 +725,7 @@ def Vector_StridedSliceOp : vector<4x8x16xf32> to vector<2x4x16xf32> }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *source, " # + "Builder *builder, OperationState &result, ValuePtr source, " # "ArrayRef offsets, ArrayRef sizes, " # "ArrayRef strides">]; let extraClassDeclaration = [{ @@ -975,7 +977,7 @@ def Vector_TypeCastOp : }]; let builders = [OpBuilder< - "Builder *builder, OperationState &result, Value *source">]; + "Builder *builder, OperationState &result, ValuePtr source">]; let parser = [{ return impl::parseCastOp(parser, result); diff --git a/include/mlir/Dialect/VectorOps/VectorTransforms.h b/include/mlir/Dialect/VectorOps/VectorTransforms.h index 2c2e4e7c4fae..b48cb51533fd 100644 --- a/include/mlir/Dialect/VectorOps/VectorTransforms.h +++ b/include/mlir/Dialect/VectorOps/VectorTransforms.h @@ -73,8 +73,9 @@ namespace vector { // // This will be extended in the future to support more advanced use cases than // simple pointwise ops. -Value *unrollSingleResultOpMatchingType(PatternRewriter &builder, Operation *op, - ArrayRef targetShape); +ValuePtr unrollSingleResultOpMatchingType(PatternRewriter &builder, + Operation *op, + ArrayRef targetShape); } // namespace vector } // namespace mlir diff --git a/include/mlir/EDSC/Builders.h b/include/mlir/EDSC/Builders.h index 69c72a50870f..11ee0bff3427 100644 --- a/include/mlir/EDSC/Builders.h +++ b/include/mlir/EDSC/Builders.h @@ -152,7 +152,7 @@ class NestedBuilder { /// A LoopBuilder is a generic NestedBuilder for loop-like MLIR operations. /// More specifically it is meant to be used as a temporary object for -/// representing any nested MLIR construct that is "related to" an mlir::Value* +/// representing any nested MLIR construct that is "related to" an mlir::Value /// (for now an induction variable). /// This is extensible and will evolve in the future as MLIR evolves, hence /// the name LoopBuilder (as opposed to say ForBuilder or AffineForBuilder). @@ -242,7 +242,7 @@ class Append {}; /// A BlockBuilder is a NestedBuilder for mlir::Block*. /// This exists by opposition to LoopBuilder which is not related to an -/// mlir::Block* but to a mlir::Value*. +/// mlir::Block* but to a mlir::Value. /// It is meant to be used as a temporary object for representing any nested /// MLIR construct that is "related to" an mlir::Block*. class BlockBuilder : public NestedBuilder { @@ -257,7 +257,7 @@ class BlockBuilder : public NestedBuilder { /// /// Prerequisites: /// The ValueHandle `args` are typed delayed ValueHandles; i.e. they are - /// not yet bound to mlir::Value*. + /// not yet bound to mlir::Value. BlockBuilder(BlockHandle *bh, ArrayRef args); /// The only purpose of this operator is to serve as a sequence point so that @@ -291,10 +291,10 @@ class CapturableHandle { /// typed "delayed" value that can be hold a Value in the future; /// 3. constructed state,in which case it holds a Value. 
/// -/// A ValueHandle is meant to capture a single Value* and should be used for +/// A ValueHandle is meant to capture a single Value and should be used for /// operations that have a single result. For convenience of use, we also /// include AffineForOp in this category although it does not return a value. -/// In the case of AffineForOp, the captured Value* is the loop induction +/// In the case of AffineForOp, the captured Value is the loop induction /// variable. class ValueHandle : public CapturableHandle { public: @@ -304,15 +304,15 @@ class ValueHandle : public CapturableHandle { /// A ValueHandle that is constructed from a Type represents a typed "delayed" /// Value. A delayed Value can only capture Values of the specified type. /// Such a delayed value represents the declaration (in the PL sense) of a - /// placeholder for an mlir::Value* that will be constructed and captured at + /// placeholder for an mlir::Value that will be constructed and captured at /// some later point in the program. explicit ValueHandle(Type t) : t(t), v(nullptr) {} - /// A ValueHandle that is constructed from an mlir::Value* is an "eager" + /// A ValueHandle that is constructed from an mlir::Value is an "eager" /// Value. An eager Value represents both the declaration and the definition - /// (in the PL sense) of a placeholder for an mlir::Value* that has already + /// (in the PL sense) of a placeholder for an mlir::Value that has already /// been constructed in the past and that is captured "now" in the program. - explicit ValueHandle(Value *v) : t(v->getType()), v(v) {} + explicit ValueHandle(ValuePtr v) : t(v->getType()), v(v) {} /// Builds a ConstantIndexOp of value `cst`. The constant is created at the /// current insertion point. @@ -336,8 +336,8 @@ class ValueHandle : public CapturableHandle { std::swap(v, other.v); } - /// Implicit conversion useful for automatic conversion to Container. - operator Value *() const { return getValue(); } + /// Implicit conversion useful for automatic conversion to Container. + operator ValuePtr() const { return getValue(); } /// Generic mlir::Op create. This is the key to being extensible to the whole /// of MLIR without duplicating the type system or the op definitions. @@ -355,7 +355,7 @@ class ValueHandle : public CapturableHandle { /// Special case to build composed AffineApply operations. // TODO: createOrFold when available and move inside of the `create` method. static ValueHandle createComposedAffineApply(AffineMap map, - ArrayRef operands); + ArrayRef operands); /// Generic create for a named operation producing a single value. static ValueHandle create(StringRef name, ArrayRef operands, @@ -363,7 +363,7 @@ class ValueHandle : public CapturableHandle { ArrayRef attributes = {}); bool hasValue() const { return v != nullptr; } - Value *getValue() const { + ValuePtr getValue() const { assert(hasValue() && "Unexpected null value;"); return v; } @@ -380,12 +380,12 @@ class ValueHandle : public CapturableHandle { ValueHandle() : t(), v(nullptr) {} Type t; - Value *v; + ValuePtr v; }; /// An OperationHandle can be used in lieu of ValueHandle to capture the /// operation in cases when one does not care about, or cannot extract, a -/// unique Value* from the operation. +/// unique Value from the operation. /// This can be used for capturing zero result operations as well as /// multi-result operations that are not supported by ValueHandle. 
/// We do not distinguish further between zero and multi-result operations at @@ -529,7 +529,7 @@ ValueHandle operator>=(ValueHandle lhs, ValueHandle rhs); } // namespace op -/// Entry point to build multiple ValueHandle from a `Container` of Value* or +/// Entry point to build multiple ValueHandle from a `Container` of Value or /// Type. template inline SmallVector makeValueHandles(Container values) { diff --git a/include/mlir/EDSC/Helpers.h b/include/mlir/EDSC/Helpers.h index 423c92b2d064..c18307e71214 100644 --- a/include/mlir/EDSC/Helpers.h +++ b/include/mlir/EDSC/Helpers.h @@ -75,7 +75,7 @@ class View { // TODO(ntv): Support MemRefs with layoutMaps. class MemRefView : public View { public: - explicit MemRefView(Value *v); + explicit MemRefView(ValuePtr v); MemRefView(const MemRefView &) = default; MemRefView &operator=(const MemRefView &) = default; @@ -91,7 +91,7 @@ class MemRefView : public View { /// a MemRefView but for vectors. This exists purely for boilerplate avoidance. class VectorView : public View { public: - explicit VectorView(Value *v); + explicit VectorView(ValuePtr v); VectorView(const VectorView &) = default; VectorView &operator=(const VectorView &) = default; @@ -120,7 +120,7 @@ class VectorView : public View { template class TemplatedIndexedValue { public: explicit TemplatedIndexedValue(Type t) : base(t) {} - explicit TemplatedIndexedValue(Value *v) + explicit TemplatedIndexedValue(ValuePtr v) : TemplatedIndexedValue(ValueHandle(v)) {} explicit TemplatedIndexedValue(ValueHandle v) : base(v) {} @@ -161,8 +161,8 @@ template class TemplatedIndexedValue { return Load(getBase(), {indices.begin(), indices.end()}); } - /// Emits a `load` when converting to a Value*. - Value *operator*(void)const { + /// Emits a `load` when converting to a Value. + ValuePtr operator*(void) const { return Load(getBase(), {indices.begin(), indices.end()}).getValue(); } diff --git a/include/mlir/EDSC/Intrinsics.h b/include/mlir/EDSC/Intrinsics.h index 06c75505cb75..dc0c1186c7a9 100644 --- a/include/mlir/EDSC/Intrinsics.h +++ b/include/mlir/EDSC/Intrinsics.h @@ -44,7 +44,7 @@ struct IndexHandle : public ValueHandle { explicit IndexHandle() : ValueHandle(ScopedContext::getBuilder().getIndexType()) {} explicit IndexHandle(index_t v) : ValueHandle(v) {} - explicit IndexHandle(Value *v) : ValueHandle(v) { + explicit IndexHandle(ValuePtr v) : ValueHandle(v) { assert(v->getType() == ScopedContext::getBuilder().getIndexType() && "Expected index type"); } @@ -79,9 +79,9 @@ makeHandlePointers(MutableArrayRef ivs) { return pivs; } -/// Returns a vector of the underlying Value* from `ivs`. -inline SmallVector extractValues(ArrayRef ivs) { - SmallVector vals; +/// Returns a vector of the underlying Value from `ivs`. +inline SmallVector extractValues(ArrayRef ivs) { + SmallVector vals; vals.reserve(ivs.size()); for (auto &iv : ivs) { vals.push_back(iv.getValue()); @@ -96,7 +96,7 @@ namespace intrinsics { namespace detail { /// Helper structure to be used with ValueBuilder / OperationBuilder. /// It serves the purpose of removing boilerplate specialization for the sole -/// purpose of implicitly converting ArrayRef -> ArrayRef. +/// purpose of implicitly converting ArrayRef -> ArrayRef. 
class ValueHandleArray { public: ValueHandleArray(ArrayRef vals) { @@ -109,11 +109,11 @@ class ValueHandleArray { SmallVector tmp(vals.begin(), vals.end()); values.append(tmp.begin(), tmp.end()); } - operator ArrayRef() { return values; } + operator ArrayRef() { return values; } private: ValueHandleArray() = default; - SmallVector values; + SmallVector values; }; template inline T unpack(T value) { return value; } @@ -128,8 +128,8 @@ inline detail::ValueHandleArray unpack(ArrayRef values) { /// boilerplate or Tablegen. /// Arguably a builder is not a ValueHandle but in practice it is only used as /// an alias to a notional ValueHandle. -/// Implementing it as a subclass allows it to compose all the way to Value*. -/// Without subclassing, implicit conversion to Value* would fail when composing +/// Implementing it as a subclass allows it to compose all the way to Value. +/// Without subclassing, implicit conversion to Value would fail when composing /// in patterns such as: `select(a, b, select(c, d, e))`. template struct ValueBuilder : public ValueHandle { // Builder-based @@ -238,8 +238,8 @@ OperationHandle br(BlockHandle bh, ArrayRef operands); /// /// Prerequisites: /// `b` has not yet captured an mlir::Block*. -/// No `captures` have captured any mlir::Value*. -/// All `operands` have already captured an mlir::Value* +/// No `captures` have captured any mlir::Value. +/// All `operands` have already captured an mlir::Value /// captures.size() == operands.size() /// captures and operands are pairwise of the same type. OperationHandle br(BlockHandle *bh, ArrayRef captures, @@ -266,8 +266,8 @@ OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch, /// /// Prerequisites: /// `trueBranch`/`falseBranch` has not yet captured an mlir::Block*. -/// No `trueCaptures`/`falseCaptures` have captured any mlir::Value*. -/// All `trueOperands`/`trueOperands` have already captured an mlir::Value* +/// No `trueCaptures`/`falseCaptures` have captured any mlir::Value. +/// All `trueOperands`/`trueOperands` have already captured an mlir::Value /// `trueCaptures`.size() == `trueOperands`.size() /// `falseCaptures`.size() == `falseOperands`.size() /// `trueCaptures` and `trueOperands` are pairwise of the same type diff --git a/include/mlir/IR/Block.h b/include/mlir/IR/Block.h index 6c5099b06da7..87c77160e1d2 100644 --- a/include/mlir/IR/Block.h +++ b/include/mlir/IR/Block.h @@ -72,7 +72,7 @@ class Block : public IRObjectWithUseList, //===--------------------------------------------------------------------===// // This is the list of arguments to the block. - using BlockArgListType = ArrayRef; + using BlockArgListType = ArrayRef; BlockArgListType getArguments() { return arguments; } @@ -86,7 +86,7 @@ class Block : public IRObjectWithUseList, bool args_empty() { return arguments.empty(); } /// Add one value to the argument list. - BlockArgument *addArgument(Type type); + BlockArgumentPtr addArgument(Type type); /// Add one argument to the argument list for each type specified in the list. 
iterator_range addArguments(ArrayRef types); @@ -97,7 +97,7 @@ class Block : public IRObjectWithUseList, void eraseArgument(unsigned index, bool updatePredTerms = true); unsigned getNumArguments() { return arguments.size(); } - BlockArgument *getArgument(unsigned i) { return arguments[i]; } + BlockArgumentPtr getArgument(unsigned i) { return arguments[i]; } //===--------------------------------------------------------------------===// // Operation list management @@ -332,7 +332,7 @@ class Block : public IRObjectWithUseList, OpListType operations; /// This is the list of arguments to the block. - std::vector arguments; + std::vector arguments; Block(Block &) = delete; void operator=(Block &) = delete; diff --git a/include/mlir/IR/BlockAndValueMapping.h b/include/mlir/IR/BlockAndValueMapping.h index cd15d457a774..287dd508fa68 100644 --- a/include/mlir/IR/BlockAndValueMapping.h +++ b/include/mlir/IR/BlockAndValueMapping.h @@ -37,7 +37,7 @@ class BlockAndValueMapping { /// Inserts a new mapping for 'from' to 'to'. If there is an existing mapping, /// it is overwritten. void map(Block *from, Block *to) { valueMap[from] = to; } - void map(Value *from, Value *to) { valueMap[from] = to; } + void map(ValuePtr from, ValuePtr to) { valueMap[from] = to; } /// Erases a mapping for 'from'. void erase(IRObjectWithUseList *from) { valueMap.erase(from); } @@ -52,8 +52,8 @@ class BlockAndValueMapping { Block *lookupOrNull(Block *from) const { return lookupOrValue(from, (Block *)nullptr); } - Value *lookupOrNull(Value *from) const { - return lookupOrValue(from, (Value *)nullptr); + ValuePtr lookupOrNull(ValuePtr from) const { + return lookupOrValue(from, (ValuePtr) nullptr); } /// Lookup a mapped value within the map. If a mapping for the provided value @@ -61,7 +61,7 @@ class BlockAndValueMapping { Block *lookupOrDefault(Block *from) const { return lookupOrValue(from, from); } - Value *lookupOrDefault(Value *from) const { + ValuePtr lookupOrDefault(ValuePtr from) const { return lookupOrValue(from, from); } diff --git a/include/mlir/IR/Builders.h b/include/mlir/IR/Builders.h index 766902fabfab..c199c09feb56 100644 --- a/include/mlir/IR/Builders.h +++ b/include/mlir/IR/Builders.h @@ -313,7 +313,7 @@ class OpBuilder : public Builder { /// and immediately try to fold it. This functions populates 'results' with /// the results after folding the operation. template - void createOrFold(SmallVectorImpl &results, Location location, + void createOrFold(SmallVectorImpl &results, Location location, Args &&... args) { // Create the operation without using 'createOperation' as we don't want to // insert it yet. @@ -331,9 +331,9 @@ class OpBuilder : public Builder { /// Overload to create or fold a single result operation. template typename std::enable_if(), - Value *>::type + ValuePtr>::type createOrFold(Location location, Args &&... args) { - SmallVector results; + SmallVector results; createOrFold(results, location, std::forward(args)...); return results.front(); } @@ -344,7 +344,7 @@ class OpBuilder : public Builder { OpTy>::type createOrFold(Location location, Args &&... args) { auto op = create(location, std::forward(args)...); - SmallVector unused; + SmallVector unused; tryFold(op.getOperation(), unused); // Folding cannot remove a zero-result operation, so for convenience we @@ -355,7 +355,7 @@ class OpBuilder : public Builder { /// Attempts to fold the given operation and places new results within /// 'results'. Returns success if the operation was folded, failure otherwise. 
/// Note: This function does not erase the operation on a successful fold. - LogicalResult tryFold(Operation *op, SmallVectorImpl &results); + LogicalResult tryFold(Operation *op, SmallVectorImpl &results); /// Creates a deep copy of the specified operation, remapping any operands /// that use values outside of the operation using the map that is provided diff --git a/include/mlir/IR/FunctionSupport.h b/include/mlir/IR/FunctionSupport.h index b15b056a3ecf..1ba85d73df9b 100644 --- a/include/mlir/IR/FunctionSupport.h +++ b/include/mlir/IR/FunctionSupport.h @@ -183,7 +183,7 @@ class FunctionLike : public OpTrait::TraitBase { } /// Gets argument. - BlockArgument *getArgument(unsigned idx) { + BlockArgumentPtr getArgument(unsigned idx) { return getBlocks().front().getArgument(idx); } diff --git a/include/mlir/IR/Matchers.h b/include/mlir/IR/Matchers.h index 1261916dae2a..3b36f2fb5eb0 100644 --- a/include/mlir/IR/Matchers.h +++ b/include/mlir/IR/Matchers.h @@ -142,7 +142,7 @@ using has_operation_or_value_matcher_t = /// Statically switch to a Value matcher. template typename std::enable_if_t::value, + MatcherClass, ValuePtr>::value, bool> matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) { return matcher.match(op->getOperand(idx)); @@ -161,14 +161,14 @@ matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) { /// Terminal matcher, always returns true. struct AnyValueMatcher { - bool match(Value *op) const { return true; } + bool match(ValuePtr op) const { return true; } }; /// Binds to a specific value and matches it. struct PatternMatcherValue { - PatternMatcherValue(Value *val) : value(val) {} - bool match(Value *val) const { return val == value; } - Value *value; + PatternMatcherValue(ValuePtr val) : value(val) {} + bool match(ValuePtr val) const { return val == value; } + ValuePtr value; }; template @@ -235,7 +235,7 @@ inline detail::constant_int_not_value_matcher<0> m_NonZero() { /// Entry point for matching a pattern over a Value. template -inline bool matchPattern(Value *value, const Pattern &pattern) { +inline bool matchPattern(ValuePtr value, const Pattern &pattern) { // TODO: handle other cases if (auto *op = value->getDefiningOp()) return const_cast(pattern).match(op); @@ -262,7 +262,7 @@ auto m_Op(Matchers... matchers) { namespace matchers { inline auto m_Any() { return detail::AnyValueMatcher(); } -inline auto m_Val(Value *v) { return detail::PatternMatcherValue(v); } +inline auto m_Val(ValuePtr v) { return detail::PatternMatcherValue(v); } } // namespace matchers } // end namespace mlir diff --git a/include/mlir/IR/OpDefinition.h b/include/mlir/IR/OpDefinition.h index c220120b3372..437540117c43 100644 --- a/include/mlir/IR/OpDefinition.h +++ b/include/mlir/IR/OpDefinition.h @@ -257,8 +257,8 @@ inline bool operator!=(OpState lhs, OpState rhs) { } /// This class represents a single result from folding an operation. -class OpFoldResult : public PointerUnion { - using PointerUnion::PointerUnion; +class OpFoldResult : public PointerUnion { + using PointerUnion::PointerUnion; }; /// This template defines the foldHook as used by AbstractOperation. @@ -311,8 +311,8 @@ class FoldingHook::type> { public: /// If the operation returns a single value, then the Op can be implicitly - /// converted to an Value*. This yields the value of the only result. - operator Value *() { + /// converted to an Value. This yields the value of the only result. 
+ operator ValuePtr() { return static_cast(this)->getOperation()->getResult(0); } @@ -326,7 +326,7 @@ class FoldingHook() != op->getResult(0)) + if (result.template dyn_cast() != op->getResult(0)) results.push_back(result); return success(); } @@ -428,10 +428,12 @@ struct MultiOperandTraitBase : public TraitBase { unsigned getNumOperands() { return this->getOperation()->getNumOperands(); } /// Return the operand at index 'i'. - Value *getOperand(unsigned i) { return this->getOperation()->getOperand(i); } + ValuePtr getOperand(unsigned i) { + return this->getOperation()->getOperand(i); + } /// Set the operand at index 'i' to 'value'. - void setOperand(unsigned i, Value *value) { + void setOperand(unsigned i, ValuePtr value) { this->getOperation()->setOperand(i, value); } @@ -475,9 +477,11 @@ class ZeroOperands : public TraitBase { template class OneOperand : public TraitBase { public: - Value *getOperand() { return this->getOperation()->getOperand(0); } + ValuePtr getOperand() { return this->getOperation()->getOperand(0); } - void setOperand(Value *value) { this->getOperation()->setOperand(0, value); } + void setOperand(ValuePtr value) { + this->getOperation()->setOperand(0, value); + } static LogicalResult verifyTrait(Operation *op) { return impl::verifyOneOperand(op); @@ -550,7 +554,7 @@ struct MultiResultTraitBase : public TraitBase { unsigned getNumResults() { return this->getOperation()->getNumResults(); } /// Return the result at index 'i'. - Value *getResult(unsigned i) { return this->getOperation()->getResult(i); } + ValuePtr getResult(unsigned i) { return this->getOperation()->getResult(i); } /// Replace all uses of results of this operation with the provided 'values'. /// 'values' may correspond to an existing operation, or a range of 'Value'. @@ -586,13 +590,13 @@ struct MultiResultTraitBase : public TraitBase { template class OneResult : public TraitBase { public: - Value *getResult() { return this->getOperation()->getResult(0); } + ValuePtr getResult() { return this->getOperation()->getResult(0); } Type getType() { return getResult()->getType(); } /// Replace all uses of 'this' value with the new value, updating anything in /// the IR that uses 'this' to use the other value instead. When this returns /// there are zero uses of 'this'. - void replaceAllUsesWith(Value *newValue) { + void replaceAllUsesWith(ValuePtr newValue) { getResult()->replaceAllUsesWith(newValue); } @@ -820,10 +824,10 @@ class IsTerminator : public TraitBase { return this->getOperation()->setSuccessor(block, index); } - void addSuccessorOperand(unsigned index, Value *value) { + void addSuccessorOperand(unsigned index, ValuePtr value) { return this->getOperation()->addSuccessorOperand(index, value); } - void addSuccessorOperands(unsigned index, ArrayRef values) { + void addSuccessorOperands(unsigned index, ArrayRef values) { return this->getOperation()->addSuccessorOperand(index, values); } }; @@ -1209,8 +1213,8 @@ namespace impl { ParseResult parseOneResultOneOperandTypeOp(OpAsmParser &parser, OperationState &result); -void buildBinaryOp(Builder *builder, OperationState &result, Value *lhs, - Value *rhs); +void buildBinaryOp(Builder *builder, OperationState &result, ValuePtr lhs, + ValuePtr rhs); ParseResult parseOneResultSameOperandTypeOp(OpAsmParser &parser, OperationState &result); @@ -1223,11 +1227,11 @@ void printOneResultOp(Operation *op, OpAsmPrinter &p); // These functions are out-of-line implementations of the methods in CastOp, // which avoids them being template instantiated/duplicated. 
namespace impl { -void buildCastOp(Builder *builder, OperationState &result, Value *source, +void buildCastOp(Builder *builder, OperationState &result, ValuePtr source, Type destType); ParseResult parseCastOp(OpAsmParser &parser, OperationState &result); void printCastOp(Operation *op, OpAsmPrinter &p); -Value *foldCastOp(Operation *op); +ValuePtr foldCastOp(Operation *op); } // namespace impl } // end namespace mlir diff --git a/include/mlir/IR/OpImplementation.h b/include/mlir/IR/OpImplementation.h index 7dd11d089c2c..fcadce9ab162 100644 --- a/include/mlir/IR/OpImplementation.h +++ b/include/mlir/IR/OpImplementation.h @@ -45,7 +45,7 @@ class OpAsmPrinter { virtual raw_ostream &getStream() const = 0; /// Print implementations for various things an operation contains. - virtual void printOperand(Value *value) = 0; + virtual void printOperand(ValuePtr value) = 0; /// Print a comma separated list of operands. template @@ -121,7 +121,7 @@ class OpAsmPrinter { void printFunctionalType(Operation *op) { auto &os = getStream(); os << "("; - interleaveComma(op->getNonSuccessorOperands(), os, [&](Value *operand) { + interleaveComma(op->getNonSuccessorOperands(), os, [&](ValuePtr operand) { if (operand) printType(operand->getType()); else @@ -150,18 +150,18 @@ class OpAsmPrinter { }; // Make the implementations convenient to use. -inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Value &value) { +inline OpAsmPrinter &operator<<(OpAsmPrinter &p, ValueRef value) { p.printOperand(&value); return p; } -inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Value *value) { +inline OpAsmPrinter &operator<<(OpAsmPrinter &p, ValuePtr value) { return p << *value; } -template ::value && - !std::is_convertible::value, - T>::type * = nullptr> +template ::value && + !std::is_convertible::value, + T>::type * = nullptr> inline OpAsmPrinter &operator<<(OpAsmPrinter &p, const T &values) { p.printOperands(values); return p; @@ -181,8 +181,8 @@ inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Attribute attr) { // even if it isn't exactly one of them. For example, we want to print // FunctionType with the Type version above, not have it match this. template ::value && - !std::is_convertible::value && + !std::is_convertible::value && + !std::is_convertible::value && !std::is_convertible::value && !std::is_convertible::value && !std::is_convertible::value && @@ -467,13 +467,13 @@ class OpAsmParser { /// Resolve an operand to an SSA value, emitting an error on failure. virtual ParseResult resolveOperand(const OperandType &operand, Type type, - SmallVectorImpl &result) = 0; + SmallVectorImpl &result) = 0; /// Resolve a list of operands to SSA values, emitting an error on failure, or /// appending the results to the list on success. This method should be used /// when all operands have the same type. ParseResult resolveOperands(ArrayRef operands, Type type, - SmallVectorImpl &result) { + SmallVectorImpl &result) { for (auto elt : operands) if (resolveOperand(elt, type, result)) return failure(); @@ -485,7 +485,7 @@ class OpAsmParser { /// to the list on success. ParseResult resolveOperands(ArrayRef operands, ArrayRef types, llvm::SMLoc loc, - SmallVectorImpl &result) { + SmallVectorImpl &result) { if (operands.size() != types.size()) return emitError(loc) << operands.size() << " operands present, but expected " @@ -556,7 +556,7 @@ class OpAsmParser { /// Parse a single operation successor and its operand list. 
virtual ParseResult parseSuccessorAndUseList(Block *&dest, - SmallVectorImpl &operands) = 0; + SmallVectorImpl &operands) = 0; //===--------------------------------------------------------------------===// // Type Parsing @@ -634,7 +634,7 @@ class OpAsmParser { /// A functor used to set the name of the start of a result group of an /// operation. See 'getAsmResultNames' below for more details. -using OpAsmSetValueNameFn = function_ref; +using OpAsmSetValueNameFn = function_ref; class OpAsmDialectInterface : public DialectInterface::Base { diff --git a/include/mlir/IR/Operation.h b/include/mlir/IR/Operation.h index 2159d10fd2a4..ad0dc600f8f4 100644 --- a/include/mlir/IR/Operation.h +++ b/include/mlir/IR/Operation.h @@ -44,7 +44,7 @@ class Operation final /// Create a new Operation with the specific fields. static Operation *create(Location location, OperationName name, ArrayRef resultTypes, - ArrayRef operands, + ArrayRef operands, ArrayRef attributes, ArrayRef successors, unsigned numRegions, bool resizableOperandList); @@ -53,7 +53,7 @@ class Operation final /// unnecessarily uniquing a list of attributes. static Operation *create(Location location, OperationName name, ArrayRef resultTypes, - ArrayRef operands, + ArrayRef operands, NamedAttributeList attributes, ArrayRef successors, unsigned numRegions, bool resizableOperandList); @@ -64,7 +64,7 @@ class Operation final /// Create a new Operation with the specific fields. static Operation * create(Location location, OperationName name, ArrayRef resultTypes, - ArrayRef operands, NamedAttributeList attributes, + ArrayRef operands, NamedAttributeList attributes, ArrayRef successors = {}, RegionRange regions = {}, bool resizableOperandList = false); @@ -149,7 +149,7 @@ class Operation final } /// Replace any uses of 'from' with 'to' within this operation. - void replaceUsesOfWith(Value *from, Value *to); + void replaceUsesOfWith(ValuePtr from, ValuePtr to); /// Replace all uses of results of this operation with the provided 'values'. template > decomposeSuccessorOperandIndex(unsigned operandIndex); - /// Returns the `BlockArgument*` corresponding to operand `operandIndex` in + /// Returns the `BlockArgument` corresponding to operand `operandIndex` in /// some successor, or None if `operandIndex` isn't a successor operand index. - Optional getSuccessorBlockArgument(unsigned operandIndex) { + Optional getSuccessorBlockArgument(unsigned operandIndex) { auto decomposed = decomposeSuccessorOperandIndex(operandIndex); if (!decomposed.hasValue()) return None; diff --git a/include/mlir/IR/OperationSupport.h b/include/mlir/IR/OperationSupport.h index 23ef0ce59372..b7f63218ba5e 100644 --- a/include/mlir/IR/OperationSupport.h +++ b/include/mlir/IR/OperationSupport.h @@ -270,7 +270,7 @@ inline llvm::hash_code hash_value(OperationName arg) { struct OperationState { Location location; OperationName name; - SmallVector operands; + SmallVector operands; /// Types of the results of this operation. SmallVector types; SmallVector attributes; @@ -534,8 +534,8 @@ class OpPrintingFlags { /// This class implements iteration on the types of a given range of values. template class ValueTypeIterator final - : public llvm::mapped_iterator { - static Type unwrap(Value *value) { return value->getType(); } + : public llvm::mapped_iterator { + static Type unwrap(ValuePtr value) { return value->getType(); } public: using reference = Type; @@ -545,7 +545,8 @@ class ValueTypeIterator final /// Initializes the type iterator to the specified value iterator. 
ValueTypeIterator(ValueIteratorT it) - : llvm::mapped_iterator(it, &unwrap) {} + : llvm::mapped_iterator(it, &unwrap) { + } }; //===----------------------------------------------------------------------===// @@ -554,7 +555,7 @@ class ValueTypeIterator final /// This class implements the operand iterators for the Operation class. class OperandRange final : public detail::indexed_accessor_range_base { + ValuePtr, ValuePtr, ValuePtr> { public: using RangeBaseT::RangeBaseT; OperandRange(Operation *op); @@ -569,7 +570,7 @@ class OperandRange final return object + index; } /// See `detail::indexed_accessor_range_base` for details. - static Value *dereference_iterator(OpOperand *object, ptrdiff_t index) { + static ValuePtr dereference_iterator(OpOperand *object, ptrdiff_t index) { return object[index].get(); } @@ -582,8 +583,8 @@ class OperandRange final /// This class implements the result iterators for the Operation class. class ResultRange final - : public detail::indexed_accessor_range_base { + : public detail::indexed_accessor_range_base { public: using RangeBaseT::RangeBaseT; ResultRange(Operation *op); @@ -594,11 +595,11 @@ class ResultRange final private: /// See `detail::indexed_accessor_range_base` for details. - static OpResult *offset_base(OpResult *object, ptrdiff_t index) { + static OpResultPtr offset_base(OpResultPtr object, ptrdiff_t index) { return object + index; } /// See `detail::indexed_accessor_range_base` for details. - static Value *dereference_iterator(OpResult *object, ptrdiff_t index) { + static ValuePtr dereference_iterator(OpResultPtr object, ptrdiff_t index) { return &object[index]; } @@ -610,31 +611,31 @@ class ResultRange final // ValueRange /// This class provides an abstraction over the different types of ranges over -/// Value*s. In many cases, this prevents the need to explicitly materialize a +/// Values. In many cases, this prevents the need to explicitly materialize a /// SmallVector/std::vector. This class should be used in places that are not /// suitable for a more derived type (e.g. ArrayRef) or a template range /// parameter. class ValueRange final : public detail::indexed_accessor_range_base< - ValueRange, PointerUnion, - Value *, Value *, Value *> { + ValueRange, PointerUnion, + ValuePtr, ValuePtr, ValuePtr> { public: using RangeBaseT::RangeBaseT; template , Arg>::value && - !std::is_convertible::value>> + std::is_constructible, Arg>::value && + !std::is_convertible::value>> ValueRange(Arg &&arg) - : ValueRange(ArrayRef(std::forward(arg))) {} - ValueRange(Value *const &value) : ValueRange(&value, /*count=*/1) {} - ValueRange(const std::initializer_list &values) - : ValueRange(ArrayRef(values)) {} + : ValueRange(ArrayRef(std::forward(arg))) {} + ValueRange(ValuePtr const &value) : ValueRange(&value, /*count=*/1) {} + ValueRange(const std::initializer_list &values) + : ValueRange(ArrayRef(values)) {} ValueRange(iterator_range values) : ValueRange(OperandRange(values)) {} ValueRange(iterator_range values) : ValueRange(ResultRange(values)) {} - ValueRange(ArrayRef values = llvm::None); + ValueRange(ArrayRef values = llvm::None); ValueRange(OperandRange values); ValueRange(ResultRange values); @@ -645,12 +646,12 @@ class ValueRange final private: /// The type representing the owner of this range. This is either a list of /// values, operands, or results. - using OwnerT = PointerUnion; + using OwnerT = PointerUnion; /// See `detail::indexed_accessor_range_base` for details. 
static OwnerT offset_base(const OwnerT &owner, ptrdiff_t index); /// See `detail::indexed_accessor_range_base` for details. - static Value *dereference_iterator(const OwnerT &owner, ptrdiff_t index); + static ValuePtr dereference_iterator(const OwnerT &owner, ptrdiff_t index); /// Allow access to `offset_base` and `dereference_iterator`. friend RangeBaseT; diff --git a/include/mlir/IR/TypeUtilities.h b/include/mlir/IR/TypeUtilities.h index 2cce4dbb6cff..af22f9c4a9f9 100644 --- a/include/mlir/IR/TypeUtilities.h +++ b/include/mlir/IR/TypeUtilities.h @@ -41,8 +41,8 @@ Type getElementTypeOrSelf(Type type); /// Return the element type or return the type itself. Type getElementTypeOrSelf(Attribute attr); -Type getElementTypeOrSelf(Value *val); -Type getElementTypeOrSelf(Value &val); +Type getElementTypeOrSelf(ValuePtr val); +Type getElementTypeOrSelf(ValueRef val); /// Get the types within a nested Tuple. A helper for the class method that /// handles storage concerns, which is tricky to do in tablegen. @@ -72,7 +72,7 @@ LogicalResult verifyCompatibleShape(Type type1, Type type2); // An iterator for the element types of an op's operands of shaped types. class OperandElementTypeIterator final : public llvm::mapped_iterator { + Type (*)(ValuePtr)> { public: using reference = Type; @@ -81,7 +81,7 @@ class OperandElementTypeIterator final explicit OperandElementTypeIterator(Operation::operand_iterator it); private: - static Type unwrap(Value *value); + static Type unwrap(ValuePtr value); }; using OperandElementTypeRange = iterator_range; @@ -89,7 +89,7 @@ using OperandElementTypeRange = iterator_range; // An iterator for the tensor element types of an op's results of shaped types. class ResultElementTypeIterator final : public llvm::mapped_iterator { + Type (*)(ValuePtr)> { public: using reference = Type; @@ -98,7 +98,7 @@ class ResultElementTypeIterator final explicit ResultElementTypeIterator(Operation::result_iterator it); private: - static Type unwrap(Value *value); + static Type unwrap(ValuePtr value); }; using ResultElementTypeRange = iterator_range; diff --git a/include/mlir/IR/Value.h b/include/mlir/IR/Value.h index 34c74c888cba..11cb8cdcbc71 100644 --- a/include/mlir/IR/Value.h +++ b/include/mlir/IR/Value.h @@ -28,10 +28,18 @@ namespace mlir { class Block; +class BlockArgument; class Operation; +class OpResult; class Region; class Value; +/// Using directives that simplify the transition of Value to being value typed. +using BlockArgumentPtr = BlockArgument *; +using OpResultPtr = OpResult *; +using ValueRef = Value &; +using ValuePtr = Value *; + /// Operands contain a Value. using OpOperand = IROperandImpl; @@ -48,6 +56,15 @@ class Value : public IRObjectWithUseList { ~Value() {} + template bool isa() const { return U::classof(this); } + template U *dyn_cast() const { + return isa() ? (U *)this : nullptr; + } + template U *cast() const { + assert(isa()); + return (U *)this; + } + Kind getKind() const { return typeAndKind.getInt(); } Type getType() const { return typeAndKind.getPointer(); } @@ -66,7 +83,7 @@ class Value : public IRObjectWithUseList { /// Replace all uses of 'this' value with the new value, updating anything in /// the IR that uses 'this' to use the other value instead. When this returns /// there are zero uses of 'this'. 
- void replaceAllUsesWith(Value *newValue) { + void replaceAllUsesWith(ValuePtr newValue) { IRObjectWithUseList::replaceAllUsesWith(newValue); } @@ -100,7 +117,7 @@ class Value : public IRObjectWithUseList { llvm::PointerIntPair typeAndKind; }; -inline raw_ostream &operator<<(raw_ostream &os, Value &value) { +inline raw_ostream &operator<<(raw_ostream &os, ValueRef value) { value.print(os); return os; } @@ -160,7 +177,6 @@ class OpResult : public Value { /// through bitpacking shenanigans. Operation *const owner; }; - } // namespace mlir #endif diff --git a/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h b/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h index 070b3c36e8c9..202e86566fcb 100644 --- a/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h +++ b/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h @@ -163,7 +163,7 @@ class CAGAnchorNode : public CAGNode { } virtual Operation *getOp() const = 0; - virtual Value *getValue() const = 0; + virtual ValuePtr getValue() const = 0; static bool classof(const CAGNode *n) { return n->getKind() >= Kind::Anchor && n->getKind() <= Kind::LastAnchor; @@ -210,7 +210,7 @@ class CAGOperandAnchor : public CAGAnchorNode { return n->getKind() == Kind::Anchor || n->getKind() == Kind::OperandAnchor; } - Value *getValue() const final { return op->getOperand(operandIdx); } + ValuePtr getValue() const final { return op->getOperand(operandIdx); } void printLabel(raw_ostream &os) const override; @@ -221,7 +221,7 @@ class CAGOperandAnchor : public CAGAnchorNode { /// An anchor tied to a specific result. /// Since a result is already anchored to its defining op, result anchors refer -/// directly to the underlying Value*. +/// directly to the underlying Value. class CAGResultAnchor : public CAGAnchorNode { public: CAGResultAnchor(Operation *op, unsigned resultIdx); @@ -231,12 +231,12 @@ class CAGResultAnchor : public CAGAnchorNode { } Operation *getOp() const final { return resultValue->getDefiningOp(); } - Value *getValue() const final { return resultValue; } + ValuePtr getValue() const final { return resultValue; } void printLabel(raw_ostream &os) const override; private: - Value *resultValue; + ValuePtr resultValue; }; /// Base class for constraint nodes. diff --git a/include/mlir/Target/LLVMIR/ModuleTranslation.h b/include/mlir/Target/LLVMIR/ModuleTranslation.h index 7adb4aac2e21..7464e2a347dd 100644 --- a/include/mlir/Target/LLVMIR/ModuleTranslation.h +++ b/include/mlir/Target/LLVMIR/ModuleTranslation.h @@ -113,7 +113,7 @@ class ModuleTranslation { protected: // Mappings between original and translated values, used for lookups. llvm::StringMap functionMapping; - DenseMap valueMapping; + DenseMap valueMapping; DenseMap blockMapping; }; diff --git a/include/mlir/Transforms/DialectConversion.h b/include/mlir/Transforms/DialectConversion.h index 814f2202f012..f9f1207c0a08 100644 --- a/include/mlir/Transforms/DialectConversion.h +++ b/include/mlir/Transforms/DialectConversion.h @@ -60,7 +60,7 @@ class TypeConverter { /// remaps an existing signature input. struct InputMapping { size_t inputNo, size; - Value *replacementValue; + ValuePtr replacementValue; }; /// Return the argument types for the new signature. @@ -90,7 +90,7 @@ class TypeConverter { /// Remap an input of the original signature to another `replacement` /// value. This drops the original argument. 
- void remapInput(unsigned origInputNo, Value *replacement); + void remapInput(unsigned origInputNo, ValuePtr replacement); private: /// The remapping information for each of the original arguments. @@ -143,7 +143,7 @@ class TypeConverter { /// the conversion has finished. virtual Operation *materializeConversion(PatternRewriter &rewriter, Type resultType, - ArrayRef inputs, + ArrayRef inputs, Location loc) { llvm_unreachable("expected 'materializeConversion' to be overridden"); } @@ -172,7 +172,7 @@ class ConversionPattern : public RewritePattern { /// ConversionPattern ever needs to replace an operation that does not /// have successors. This function should not fail. If some specific cases of /// the operation are not supported, these cases should not be matched. - virtual void rewrite(Operation *op, ArrayRef operands, + virtual void rewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const { llvm_unreachable("unimplemented rewrite"); } @@ -187,18 +187,18 @@ class ConversionPattern : public RewritePattern { /// terminator operation that has successors. This function should not fail /// the pass. If some specific cases of the operation are not supported, /// these cases should not be matched. - virtual void rewrite(Operation *op, ArrayRef properOperands, + virtual void rewrite(Operation *op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const { llvm_unreachable("unimplemented rewrite for terminators"); } /// Hook for derived classes to implement combined matching and rewriting. virtual PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef properOperands, + matchAndRewrite(Operation *op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const { if (!match(op)) return matchFailure(); @@ -208,7 +208,7 @@ class ConversionPattern : public RewritePattern { /// Hook for derived classes to implement combined matching and rewriting. virtual PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!match(op)) return matchFailure(); @@ -234,27 +234,27 @@ struct OpConversionPattern : public ConversionPattern { /// Wrappers around the ConversionPattern methods that pass the derived op /// type. 
- void rewrite(Operation *op, ArrayRef operands, + void rewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { rewrite(cast(op), operands, rewriter); } - void rewrite(Operation *op, ArrayRef properOperands, + void rewrite(Operation *op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const final { rewrite(cast(op), properOperands, destinations, operands, rewriter); } PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef properOperands, + matchAndRewrite(Operation *op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const final { return matchAndRewrite(cast(op), properOperands, destinations, operands, rewriter); } PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { return matchAndRewrite(cast(op), operands, rewriter); } @@ -264,22 +264,22 @@ struct OpConversionPattern : public ConversionPattern { /// Rewrite and Match methods that operate on the SourceOp type. These must be /// overridden by the derived pattern class. - virtual void rewrite(SourceOp op, ArrayRef operands, + virtual void rewrite(SourceOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const { llvm_unreachable("must override matchAndRewrite or a rewrite method"); } - virtual void rewrite(SourceOp op, ArrayRef properOperands, + virtual void rewrite(SourceOp op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const { llvm_unreachable("unimplemented rewrite for terminators"); } virtual PatternMatchResult - matchAndRewrite(SourceOp op, ArrayRef properOperands, + matchAndRewrite(SourceOp op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const { if (!match(op)) return matchFailure(); @@ -288,7 +288,7 @@ struct OpConversionPattern : public ConversionPattern { } virtual PatternMatchResult - matchAndRewrite(SourceOp op, ArrayRef operands, + matchAndRewrite(SourceOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!match(op)) return matchFailure(); @@ -330,11 +330,11 @@ class ConversionPatternRewriter final : public PatternRewriter { TypeConverter::SignatureConversion &conversion); /// Replace all the uses of the block argument `from` with value `to`. - void replaceUsesOfBlockArgument(BlockArgument *from, Value *to); + void replaceUsesOfBlockArgument(BlockArgumentPtr from, ValuePtr to); /// Return the converted value that replaces 'key'. Return 'key' if there is /// no such a converted value. - Value *getRemappedValue(Value *key); + ValuePtr getRemappedValue(ValuePtr key); //===--------------------------------------------------------------------===// // PatternRewriter Hooks diff --git a/include/mlir/Transforms/FoldUtils.h b/include/mlir/Transforms/FoldUtils.h index bdf88d3bfb2b..65dd1b6df165 100644 --- a/include/mlir/Transforms/FoldUtils.h +++ b/include/mlir/Transforms/FoldUtils.h @@ -82,7 +82,7 @@ class OperationFolder { /// and immediately try to fold it. This function populates 'results' with /// the results after folding the operation. template - void create(OpBuilder &builder, SmallVectorImpl &results, + void create(OpBuilder &builder, SmallVectorImpl &results, Location location, Args &&... 
args) { Operation *op = builder.create(location, std::forward(args)...); if (failed(tryToFold(op, results))) @@ -94,9 +94,9 @@ class OperationFolder { /// Overload to create or fold a single result operation. template typename std::enable_if(), - Value *>::type + ValuePtr>::type create(OpBuilder &builder, Location location, Args &&... args) { - SmallVector results; + SmallVector results; create(builder, results, location, std::forward(args)...); return results.front(); } @@ -107,7 +107,7 @@ class OperationFolder { OpTy>::type create(OpBuilder &builder, Location location, Args &&... args) { auto op = builder.create(location, std::forward(args)...); - SmallVector unused; + SmallVector unused; (void)tryToFold(op.getOperation(), unused); // Folding cannot remove a zero-result operation, so for convenience we @@ -126,7 +126,7 @@ class OperationFolder { /// Tries to perform folding on the given `op`. If successful, populates /// `results` with the results of the folding. LogicalResult tryToFold( - Operation *op, SmallVectorImpl &results, + Operation *op, SmallVectorImpl &results, function_ref processGeneratedConstants = nullptr); /// Try to get or create a new constant entry. On success this returns the diff --git a/include/mlir/Transforms/InliningUtils.h b/include/mlir/Transforms/InliningUtils.h index 590b46a5d12f..47c4f48f4687 100644 --- a/include/mlir/Transforms/InliningUtils.h +++ b/include/mlir/Transforms/InliningUtils.h @@ -105,7 +105,7 @@ class DialectInlinerInterface /// operation). The given 'op' will be removed by the caller, after this /// function has been called. virtual void handleTerminator(Operation *op, - ArrayRef valuesToReplace) const { + ArrayRef valuesToReplace) const { llvm_unreachable( "must implement handleTerminator in the case of one inlined block"); } @@ -125,8 +125,8 @@ class DialectInlinerInterface /// ... = foo.call @foo(%input : i32) -> i16 /// /// NOTE: This hook may be invoked before the 'isLegal' checks above. - virtual Operation *materializeCallConversion(OpBuilder &builder, Value *input, - Type resultType, + virtual Operation *materializeCallConversion(OpBuilder &builder, + ValuePtr input, Type resultType, Location conversionLoc) const { return nullptr; } @@ -165,7 +165,7 @@ class InlinerInterface virtual void handleTerminator(Operation *op, Block *newDest) const; virtual void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const; + ArrayRef valuesToRepl) const; }; //===----------------------------------------------------------------------===// @@ -187,7 +187,7 @@ class InlinerInterface /// be cloned into the 'inlinePoint' or spliced directly. LogicalResult inlineRegion(InlinerInterface &interface, Region *src, Operation *inlinePoint, BlockAndValueMapping &mapper, - ArrayRef resultsToReplace, + ArrayRef resultsToReplace, Optional inlineLoc = llvm::None, bool shouldCloneInlinedRegion = true); @@ -196,8 +196,8 @@ LogicalResult inlineRegion(InlinerInterface &interface, Region *src, /// in-favor of the region arguments when inlining. 
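As a usage sketch for the create-or-fold API above (an OpBuilder `builder` and a Location `loc` are assumed to be in scope; the constant values are illustrative only):

// Single-result ops come back as a ValuePtr, which may refer to an existing
// constant rather than a newly created operation.
OperationFolder folder(builder.getContext());
ValuePtr lhs = folder.create<ConstantIndexOp>(builder, loc, /*value=*/4);
ValuePtr rhs = folder.create<ConstantIndexOp>(builder, loc, /*value=*/8);
ValuePtr sum = folder.create<AddIOp>(builder, loc, lhs, rhs); // folds to a constant 12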
LogicalResult inlineRegion(InlinerInterface &interface, Region *src, Operation *inlinePoint, - ArrayRef inlinedOperands, - ArrayRef resultsToReplace, + ArrayRef inlinedOperands, + ArrayRef resultsToReplace, Optional inlineLoc = llvm::None, bool shouldCloneInlinedRegion = true); diff --git a/include/mlir/Transforms/LoopLikeInterface.td b/include/mlir/Transforms/LoopLikeInterface.td index 5c324b79f670..583cfe26d872 100644 --- a/include/mlir/Transforms/LoopLikeInterface.td +++ b/include/mlir/Transforms/LoopLikeInterface.td @@ -38,7 +38,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> { explicit capture of dependencies, an implementation could check whether the value corresponds to a captured dependency. }], - "bool", "isDefinedOutsideOfLoop", (ins "Value *":$value) + "bool", "isDefinedOutsideOfLoop", (ins "ValuePtr ":$value) >, InterfaceMethod<[{ Returns the region that makes up the body of the loop and should be diff --git a/include/mlir/Transforms/LoopUtils.h b/include/mlir/Transforms/LoopUtils.h index 5ca3f7f65108..37434ea2ea88 100644 --- a/include/mlir/Transforms/LoopUtils.h +++ b/include/mlir/Transforms/LoopUtils.h @@ -85,7 +85,7 @@ void promoteSingleIterationLoops(FuncOp f); /// expression. void getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, AffineMap *map, - SmallVectorImpl *operands, + SmallVectorImpl *operands, OpBuilder &builder); /// Skew the operations in the body of a 'affine.for' operation with the @@ -140,7 +140,7 @@ SmallVector, 8> tile(ArrayRef forOps, ArrayRef sizes, ArrayRef targets); SmallVector tile(ArrayRef forOps, - ArrayRef sizes, + ArrayRef sizes, ArrayRef targets); /// Performs tiling (with interchange) by strip-mining the `forOps` by `sizes` @@ -149,7 +149,7 @@ SmallVector tile(ArrayRef forOps, /// `target`. SmallVector tile(ArrayRef forOps, ArrayRef sizes, AffineForOp target); -Loops tile(ArrayRef forOps, ArrayRef sizes, +Loops tile(ArrayRef forOps, ArrayRef sizes, loop::ForOp target); /// Tile a nest of loop::ForOp loops rooted at `rootForOp` with the given @@ -157,7 +157,7 @@ Loops tile(ArrayRef forOps, ArrayRef sizes, /// runtime. If more sizes than loops are provided, discard the trailing values /// in sizes. Assumes the loop nest is permutable. /// Returns the newly created intra-tile loops. -Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef sizes); +Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef sizes); /// Explicit copy / DMA generation options for mlir::affineDataCopyGenerate. struct AffineCopyOptions { @@ -229,8 +229,8 @@ void coalesceLoops(MutableArrayRef loops); /// ... /// } /// ``` -void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef processorId, - ArrayRef numProcessors); +void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef processorId, + ArrayRef numProcessors); } // end namespace mlir #endif // MLIR_TRANSFORMS_LOOP_UTILS_H diff --git a/include/mlir/Transforms/RegionUtils.h b/include/mlir/Transforms/RegionUtils.h index 48080b26c2cd..63236d6a5a09 100644 --- a/include/mlir/Transforms/RegionUtils.h +++ b/include/mlir/Transforms/RegionUtils.h @@ -30,14 +30,14 @@ namespace mlir { /// of `limit`. template bool areValuesDefinedAbove(Range values, Region &limit) { - for (Value *v : values) + for (ValuePtr v : values) if (!v->getParentRegion()->isProperAncestor(&limit)) return false; return true; } /// Replace all uses of `orig` within the given region with `replacement`. 
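For the LoopLikeOpInterface method shown above, a plausible implementation on a loop op is sketched below; the op and its getLoopBody accessor are assumptions here, and the ancestry check mirrors the one used by areValuesDefinedAbove:

// Sketch: a value is defined outside the loop iff its enclosing region
// strictly contains the loop body region.
bool MyLoopOp::isDefinedOutsideOfLoop(ValuePtr value) {
  return value->getParentRegion()->isProperAncestor(&getLoopBody());
}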
-void replaceAllUsesInRegionWith(Value *orig, Value *replacement, +void replaceAllUsesInRegionWith(ValuePtr orig, ValuePtr replacement, Region ®ion); /// Calls `callback` for each use of a value within `region` or its descendants @@ -53,12 +53,12 @@ void visitUsedValuesDefinedAbove(MutableArrayRef regions, /// Fill `values` with a list of values defined at the ancestors of the `limit` /// region and used within `region` or its descendants. void getUsedValuesDefinedAbove(Region ®ion, Region &limit, - llvm::SetVector &values); + llvm::SetVector &values); /// Fill `values` with a list of values used within any of the regions provided /// but defined in one of the ancestors. void getUsedValuesDefinedAbove(MutableArrayRef regions, - llvm::SetVector &values); + llvm::SetVector &values); /// Run a set of structural simplifications over the given regions. This /// includes transformations like unreachable block elimination, dead argument diff --git a/include/mlir/Transforms/Utils.h b/include/mlir/Transforms/Utils.h index c682b48f331c..02c368ec4964 100644 --- a/include/mlir/Transforms/Utils.h +++ b/include/mlir/Transforms/Utils.h @@ -66,22 +66,22 @@ class OpBuilder; // extra operands, note that 'indexRemap' would just be applied to existing // indices (%i, %j). // TODO(bondhugula): allow extraIndices to be added at any position. -LogicalResult replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, - ArrayRef extraIndices = {}, +LogicalResult replaceAllMemRefUsesWith(ValuePtr oldMemRef, ValuePtr newMemRef, + ArrayRef extraIndices = {}, AffineMap indexRemap = AffineMap(), - ArrayRef extraOperands = {}, - ArrayRef symbolOperands = {}, + ArrayRef extraOperands = {}, + ArrayRef symbolOperands = {}, Operation *domInstFilter = nullptr, Operation *postDomInstFilter = nullptr); /// Performs the same replacement as the other version above but only for the /// dereferencing uses of `oldMemRef` in `op`. -LogicalResult replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, +LogicalResult replaceAllMemRefUsesWith(ValuePtr oldMemRef, ValuePtr newMemRef, Operation *op, - ArrayRef extraIndices = {}, + ArrayRef extraIndices = {}, AffineMap indexRemap = AffineMap(), - ArrayRef extraOperands = {}, - ArrayRef symbolOperands = {}); + ArrayRef extraOperands = {}, + ArrayRef symbolOperands = {}); /// Rewrites the memref defined by this alloc op to have an identity layout map /// and updates all its indexing uses. Returns failure if any of its uses @@ -96,9 +96,9 @@ LogicalResult normalizeMemRef(AllocOp op); /// The final results of the composed AffineApplyOp are returned in output /// parameter 'results'. Returns the affine apply op created. Operation *createComposedAffineApplyOp(OpBuilder &builder, Location loc, - ArrayRef operands, + ArrayRef operands, ArrayRef affineApplyOps, - SmallVectorImpl *results); + SmallVectorImpl *results); /// Given an operation, inserts one or more single result affine apply /// operations, results of which are exclusively used by this operation. diff --git a/lib/Analysis/AffineAnalysis.cpp b/lib/Analysis/AffineAnalysis.cpp index 97868a565247..60b2f17292b1 100644 --- a/lib/Analysis/AffineAnalysis.cpp +++ b/lib/Analysis/AffineAnalysis.cpp @@ -48,15 +48,15 @@ using llvm::dbgs; // TODO(andydavis) Add a method to AffineApplyOp which forward substitutes // the AffineApplyOp into any user AffineApplyOps. 
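A rough sketch of how the two region utilities above combine, for example when isolating a region from implicitly captured values; `region` is assumed to be in scope, and turning each captured value into an entry-block argument is just one hypothetical choice of replacement:

// Collect values used in 'region' but defined above it, then rewire their
// in-region uses to fresh entry-block arguments.
llvm::SetVector<ValuePtr> captured;
getUsedValuesDefinedAbove(region, region, captured);
for (ValuePtr value : captured) {
  ValuePtr replacement = region.front().addArgument(value->getType());
  replaceAllUsesInRegionWith(value, replacement, region);
}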
void mlir::getReachableAffineApplyOps( - ArrayRef operands, SmallVectorImpl &affineApplyOps) { + ArrayRef operands, SmallVectorImpl &affineApplyOps) { struct State { // The ssa value for this node in the DFS traversal. - Value *value; + ValuePtr value; // The operand index of 'value' to explore next during DFS traversal. unsigned operandIndex; }; SmallVector worklist; - for (auto *operand : operands) { + for (auto operand : operands) { worklist.push_back({operand, 0}); } @@ -77,7 +77,7 @@ void mlir::getReachableAffineApplyOps( if (state.operandIndex < opInst->getNumOperands()) { // Visit: Add next 'affineApplyOp' operand to worklist. // Get next operand to visit at 'operandIndex'. - auto *nextOperand = opInst->getOperand(state.operandIndex); + auto nextOperand = opInst->getOperand(state.operandIndex); // Increment 'operandIndex' in 'state'. ++state.operandIndex; // Add 'nextOperand' to worklist. @@ -99,7 +99,7 @@ void mlir::getReachableAffineApplyOps( // setExprStride(ArrayRef expr, int64_t stride) LogicalResult mlir::getIndexSet(MutableArrayRef forOps, FlatAffineConstraints *domain) { - SmallVector indices; + SmallVector indices; extractForInductionVars(forOps, &indices); // Reset while associated Values in 'indices' to the domain. domain->reset(forOps.size(), /*numSymbols=*/0, /*numLocals=*/0, indices); @@ -146,25 +146,25 @@ static LogicalResult getInstIndexSet(Operation *op, // of maps to check. So getSrcDimOrSymPos would be "getPos(value, {0, 2})". class ValuePositionMap { public: - void addSrcValue(Value *value) { + void addSrcValue(ValuePtr value) { if (addValueAt(value, &srcDimPosMap, numSrcDims)) ++numSrcDims; } - void addDstValue(Value *value) { + void addDstValue(ValuePtr value) { if (addValueAt(value, &dstDimPosMap, numDstDims)) ++numDstDims; } - void addSymbolValue(Value *value) { + void addSymbolValue(ValuePtr value) { if (addValueAt(value, &symbolPosMap, numSymbols)) ++numSymbols; } - unsigned getSrcDimOrSymPos(Value *value) const { + unsigned getSrcDimOrSymPos(ValuePtr value) const { return getDimOrSymPos(value, srcDimPosMap, 0); } - unsigned getDstDimOrSymPos(Value *value) const { + unsigned getDstDimOrSymPos(ValuePtr value) const { return getDimOrSymPos(value, dstDimPosMap, numSrcDims); } - unsigned getSymPos(Value *value) const { + unsigned getSymPos(ValuePtr value) const { auto it = symbolPosMap.find(value); assert(it != symbolPosMap.end()); return numSrcDims + numDstDims + it->second; @@ -176,7 +176,7 @@ class ValuePositionMap { unsigned getNumSymbols() const { return numSymbols; } private: - bool addValueAt(Value *value, DenseMap *posMap, + bool addValueAt(ValuePtr value, DenseMap *posMap, unsigned position) { auto it = posMap->find(value); if (it == posMap->end()) { @@ -185,8 +185,8 @@ class ValuePositionMap { } return false; } - unsigned getDimOrSymPos(Value *value, - const DenseMap &dimPosMap, + unsigned getDimOrSymPos(ValuePtr value, + const DenseMap &dimPosMap, unsigned dimPosOffset) const { auto it = dimPosMap.find(value); if (it != dimPosMap.end()) { @@ -200,9 +200,9 @@ class ValuePositionMap { unsigned numSrcDims = 0; unsigned numDstDims = 0; unsigned numSymbols = 0; - DenseMap srcDimPosMap; - DenseMap dstDimPosMap; - DenseMap symbolPosMap; + DenseMap srcDimPosMap; + DenseMap dstDimPosMap; + DenseMap symbolPosMap; }; // Builds a map from Value to identifier position in a new merged identifier @@ -219,9 +219,9 @@ static void buildDimAndSymbolPositionMaps( const FlatAffineConstraints &dstDomain, const AffineValueMap &srcAccessMap, const AffineValueMap 
&dstAccessMap, ValuePositionMap *valuePosMap, FlatAffineConstraints *dependenceConstraints) { - auto updateValuePosMap = [&](ArrayRef values, bool isSrc) { + auto updateValuePosMap = [&](ArrayRef values, bool isSrc) { for (unsigned i = 0, e = values.size(); i < e; ++i) { - auto *value = values[i]; + auto value = values[i]; if (!isForInductionVar(values[i])) { assert(isValidSymbol(values[i]) && "access operand has to be either a loop IV or a symbol"); @@ -234,7 +234,7 @@ static void buildDimAndSymbolPositionMaps( } }; - SmallVector srcValues, destValues; + SmallVector srcValues, destValues; srcDomain.getIdValues(0, srcDomain.getNumDimAndSymbolIds(), &srcValues); dstDomain.getIdValues(0, dstDomain.getNumDimAndSymbolIds(), &destValues); // Update value position map with identifiers from src iteration domain. @@ -273,7 +273,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain, numLocals); // Set values corresponding to dependence constraint identifiers. - SmallVector srcLoopIVs, dstLoopIVs; + SmallVector srcLoopIVs, dstLoopIVs; srcDomain.getIdValues(0, srcDomain.getNumDimIds(), &srcLoopIVs); dstDomain.getIdValues(0, dstDomain.getNumDimIds(), &dstLoopIVs); @@ -282,8 +282,8 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain, srcLoopIVs.size(), srcLoopIVs.size() + dstLoopIVs.size(), dstLoopIVs); // Set values for the symbolic identifier dimensions. - auto setSymbolIds = [&](ArrayRef values) { - for (auto *value : values) { + auto setSymbolIds = [&](ArrayRef values) { + for (auto value : values) { if (!isForInductionVar(value)) { assert(isValidSymbol(value) && "expected symbol"); dependenceConstraints->setIdValue(valuePosMap.getSymPos(value), value); @@ -294,7 +294,7 @@ void initDependenceConstraints(const FlatAffineConstraints &srcDomain, setSymbolIds(srcAccessMap.getOperands()); setSymbolIds(dstAccessMap.getOperands()); - SmallVector srcSymbolValues, dstSymbolValues; + SmallVector srcSymbolValues, dstSymbolValues; srcDomain.getIdValues(srcDomain.getNumDimIds(), srcDomain.getNumDimAndSymbolIds(), &srcSymbolValues); dstDomain.getIdValues(dstDomain.getNumDimIds(), @@ -398,10 +398,10 @@ addMemRefAccessConstraints(const AffineValueMap &srcAccessMap, unsigned numResults = srcMap.getNumResults(); unsigned srcNumIds = srcMap.getNumDims() + srcMap.getNumSymbols(); - ArrayRef srcOperands = srcAccessMap.getOperands(); + ArrayRef srcOperands = srcAccessMap.getOperands(); unsigned dstNumIds = dstMap.getNumDims() + dstMap.getNumSymbols(); - ArrayRef dstOperands = dstAccessMap.getOperands(); + ArrayRef dstOperands = dstAccessMap.getOperands(); std::vector> srcFlatExprs; std::vector> destFlatExprs; @@ -457,11 +457,11 @@ addMemRefAccessConstraints(const AffineValueMap &srcAccessMap, } // Add equality constraints for any operands that are defined by constant ops. - auto addEqForConstOperands = [&](ArrayRef operands) { + auto addEqForConstOperands = [&](ArrayRef operands) { for (unsigned i = 0, e = operands.size(); i < e; ++i) { if (isForInductionVar(operands[i])) continue; - auto *symbol = operands[i]; + auto symbol = operands[i]; assert(isValidSymbol(symbol)); // Check if the symbol is a constant. 
if (auto cOp = dyn_cast_or_null(symbol->getDefiningOp())) @@ -553,7 +553,7 @@ static Block *getCommonBlock(const MemRefAccess &srcAccess, } return block; } - auto *commonForValue = srcDomain.getIdValue(numCommonLoops - 1); + auto commonForValue = srcDomain.getIdValue(numCommonLoops - 1); auto forOp = getForInductionVarOwner(commonForValue); assert(forOp && "commonForValue was not an induction variable"); return forOp.getBody(); @@ -675,7 +675,7 @@ void MemRefAccess::getAccessMap(AffineValueMap *accessMap) const { map = loadOp.getAffineMap(); else if (auto storeOp = dyn_cast(opInst)) map = storeOp.getAffineMap(); - SmallVector operands(indices.begin(), indices.end()); + SmallVector operands(indices.begin(), indices.end()); fullyComposeAffineMapAndOperands(&map, &operands); map = simplifyAffineMap(map); canonicalizeMapAndOperands(&map, &operands); diff --git a/lib/Analysis/AffineStructures.cpp b/lib/Analysis/AffineStructures.cpp index d678355880e7..21c2830c0167 100644 --- a/lib/Analysis/AffineStructures.cpp +++ b/lib/Analysis/AffineStructures.cpp @@ -204,8 +204,8 @@ MutableIntegerSet::MutableIntegerSet(unsigned numDims, unsigned numSymbols, // AffineValueMap. //===----------------------------------------------------------------------===// -AffineValueMap::AffineValueMap(AffineMap map, ArrayRef operands, - ArrayRef results) +AffineValueMap::AffineValueMap(AffineMap map, ArrayRef operands, + ArrayRef results) : map(map), operands(operands.begin(), operands.end()), results(results.begin(), results.end()) {} @@ -219,8 +219,8 @@ AffineValueMap::AffineValueMap(AffineBound bound) : map(bound.getMap()), operands(bound.operand_begin(), bound.operand_end()) {} -void AffineValueMap::reset(AffineMap map, ArrayRef operands, - ArrayRef results) { +void AffineValueMap::reset(AffineMap map, ArrayRef operands, + ArrayRef results) { this->map.reset(map); this->operands.assign(operands.begin(), operands.end()); this->results.assign(results.begin(), results.end()); @@ -232,14 +232,14 @@ void AffineValueMap::difference(const AffineValueMap &a, // Fully compose A's map + operands. auto aMap = a.getAffineMap(); - SmallVector aOperands(a.getOperands().begin(), - a.getOperands().end()); + SmallVector aOperands(a.getOperands().begin(), + a.getOperands().end()); fullyComposeAffineMapAndOperands(&aMap, &aOperands); // Use the affine apply normalizer to get B's map into A's coordinate space. AffineApplyNormalizer normalizer(aMap, aOperands); - SmallVector bOperands(b.getOperands().begin(), - b.getOperands().end()); + SmallVector bOperands(b.getOperands().begin(), + b.getOperands().end()); auto bMap = b.getAffineMap(); normalizer.normalize(&bMap, &bOperands); @@ -263,7 +263,7 @@ void AffineValueMap::difference(const AffineValueMap &a, // Returns true and sets 'indexOfMatch' if 'valueToMatch' is found in // 'valuesToSearch' beginning at 'indexStart'. Returns false otherwise. -static bool findIndex(Value *valueToMatch, ArrayRef valuesToSearch, +static bool findIndex(ValuePtr valueToMatch, ArrayRef valuesToSearch, unsigned indexStart, unsigned *indexOfMatch) { unsigned size = valuesToSearch.size(); for (unsigned i = indexStart; i < size; ++i) { @@ -281,7 +281,7 @@ inline bool AffineValueMap::isMultipleOf(unsigned idx, int64_t factor) const { /// This method uses the invariant that operands are always positionally aligned /// with the AffineDimExpr in the underlying AffineMap. 
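The compose-simplify-canonicalize idiom used above recurs throughout this patch; as a standalone sketch, starting from an AffineValueMap `accessValueMap`:

// Fold any affine.apply chains feeding the operands into the map itself, so
// that the remaining operands are only terminal symbols or loop IVs.
AffineMap map = accessValueMap.getAffineMap();
SmallVector<ValuePtr, 8> operands(accessValueMap.getOperands().begin(),
                                  accessValueMap.getOperands().end());
fullyComposeAffineMapAndOperands(&map, &operands);
map = simplifyAffineMap(map);
canonicalizeMapAndOperands(&map, &operands);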
-bool AffineValueMap::isFunctionOf(unsigned idx, Value *value) const { +bool AffineValueMap::isFunctionOf(unsigned idx, ValuePtr value) const { unsigned index; if (!findIndex(value, operands, /*indexStart=*/0, &index)) { return false; @@ -292,12 +292,12 @@ bool AffineValueMap::isFunctionOf(unsigned idx, Value *value) const { return expr.isFunctionOfDim(index); } -Value *AffineValueMap::getOperand(unsigned i) const { - return static_cast(operands[i]); +ValuePtr AffineValueMap::getOperand(unsigned i) const { + return static_cast(operands[i]); } -ArrayRef AffineValueMap::getOperands() const { - return ArrayRef(operands); +ArrayRef AffineValueMap::getOperands() const { + return ArrayRef(operands); } AffineMap AffineValueMap::getAffineMap() const { return map.getAffineMap(); } @@ -378,7 +378,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities, unsigned newNumReservedCols, unsigned newNumDims, unsigned newNumSymbols, unsigned newNumLocals, - ArrayRef idArgs) { + ArrayRef idArgs) { assert(newNumReservedCols >= newNumDims + newNumSymbols + newNumLocals + 1 && "minimum 1 column"); numReservedCols = newNumReservedCols; @@ -401,7 +401,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities, void FlatAffineConstraints::reset(unsigned newNumDims, unsigned newNumSymbols, unsigned newNumLocals, - ArrayRef idArgs) { + ArrayRef idArgs) { reset(0, 0, newNumDims + newNumSymbols + newNumLocals + 1, newNumDims, newNumSymbols, newNumLocals, idArgs); } @@ -428,17 +428,17 @@ void FlatAffineConstraints::addLocalId(unsigned pos) { addId(IdKind::Local, pos); } -void FlatAffineConstraints::addDimId(unsigned pos, Value *id) { +void FlatAffineConstraints::addDimId(unsigned pos, ValuePtr id) { addId(IdKind::Dimension, pos, id); } -void FlatAffineConstraints::addSymbolId(unsigned pos, Value *id) { +void FlatAffineConstraints::addSymbolId(unsigned pos, ValuePtr id) { addId(IdKind::Symbol, pos, id); } /// Adds a dimensional identifier. The added column is initialized to /// zero. -void FlatAffineConstraints::addId(IdKind kind, unsigned pos, Value *id) { +void FlatAffineConstraints::addId(IdKind kind, unsigned pos, ValuePtr id) { if (kind == IdKind::Dimension) { assert(pos <= getNumDimIds()); } else if (kind == IdKind::Symbol) { @@ -527,7 +527,7 @@ bool FlatAffineConstraints::areIdsAlignedWithOther( /// Checks if the SSA values associated with `cst''s identifiers are unique. static bool LLVM_ATTRIBUTE_UNUSED areIdsUnique(const FlatAffineConstraints &cst) { - SmallPtrSet uniqueIds; + SmallPtrSet uniqueIds; for (auto id : cst.getIds()) { if (id.hasValue() && !uniqueIds.insert(id.getValue()).second) return false; @@ -571,11 +571,11 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A, assert(std::all_of(A->getIds().begin() + offset, A->getIds().begin() + A->getNumDimAndSymbolIds(), - [](Optional id) { return id.hasValue(); })); + [](Optional id) { return id.hasValue(); })); assert(std::all_of(B->getIds().begin() + offset, B->getIds().begin() + B->getNumDimAndSymbolIds(), - [](Optional id) { return id.hasValue(); })); + [](Optional id) { return id.hasValue(); })); // Place local id's of A after local id's of B. 
for (unsigned l = 0, e = A->getNumLocalIds(); l < e; l++) { @@ -586,13 +586,13 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A, A->addLocalId(A->getNumLocalIds()); } - SmallVector aDimValues, aSymValues; + SmallVector aDimValues, aSymValues; A->getIdValues(offset, A->getNumDimIds(), &aDimValues); A->getIdValues(A->getNumDimIds(), A->getNumDimAndSymbolIds(), &aSymValues); { // Merge dims from A into B. unsigned d = offset; - for (auto *aDimValue : aDimValues) { + for (auto aDimValue : aDimValues) { unsigned loc; if (B->findId(*aDimValue, &loc)) { assert(loc >= offset && "A's dim appears in B's aligned range"); @@ -615,7 +615,7 @@ static void mergeAndAlignIds(unsigned offset, FlatAffineConstraints *A, { // Merge symbols: merge A's symbols into B first. unsigned s = B->getNumDimIds(); - for (auto *aSymValue : aSymValues) { + for (auto aSymValue : aSymValues) { unsigned loc; if (B->findId(*aSymValue, &loc)) { assert(loc >= B->getNumDimIds() && loc < B->getNumDimAndSymbolIds() && @@ -785,7 +785,7 @@ LogicalResult FlatAffineConstraints::composeMatchingMap(AffineMap other) { } // Turn a dimension into a symbol. -static void turnDimIntoSymbol(FlatAffineConstraints *cst, Value &id) { +static void turnDimIntoSymbol(FlatAffineConstraints *cst, ValueRef id) { unsigned pos; if (cst->findId(id, &pos) && pos < cst->getNumDimIds()) { swapId(cst, pos, cst->getNumDimIds() - 1); @@ -794,7 +794,7 @@ static void turnDimIntoSymbol(FlatAffineConstraints *cst, Value &id) { } // Turn a symbol into a dimension. -static void turnSymbolIntoDim(FlatAffineConstraints *cst, Value &id) { +static void turnSymbolIntoDim(FlatAffineConstraints *cst, ValueRef id) { unsigned pos; if (cst->findId(id, &pos) && pos >= cst->getNumDimIds() && pos < cst->getNumDimAndSymbolIds()) { @@ -806,18 +806,18 @@ static void turnSymbolIntoDim(FlatAffineConstraints *cst, Value &id) { // Changes all symbol identifiers which are loop IVs to dim identifiers. void FlatAffineConstraints::convertLoopIVSymbolsToDims() { // Gather all symbols which are loop IVs. - SmallVector loopIVs; + SmallVector loopIVs; for (unsigned i = getNumDimIds(), e = getNumDimAndSymbolIds(); i < e; i++) { if (ids[i].hasValue() && getForInductionVarOwner(ids[i].getValue())) loopIVs.push_back(ids[i].getValue()); } // Turn each symbol in 'loopIVs' into a dim identifier. - for (auto *iv : loopIVs) { + for (auto iv : loopIVs) { turnSymbolIntoDim(this, *iv); } } -void FlatAffineConstraints::addInductionVarOrTerminalSymbol(Value *id) { +void FlatAffineConstraints::addInductionVarOrTerminalSymbol(ValuePtr id) { if (containsId(*id)) return; @@ -876,8 +876,8 @@ LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) { addConstantLowerBound(pos, forOp.getConstantLowerBound()); } else { // Non-constant lower bound case. - SmallVector lbOperands(forOp.getLowerBoundOperands().begin(), - forOp.getLowerBoundOperands().end()); + SmallVector lbOperands(forOp.getLowerBoundOperands().begin(), + forOp.getLowerBoundOperands().end()); if (failed(addLowerOrUpperBound(pos, forOp.getLowerBoundMap(), lbOperands, /*eq=*/false, /*lower=*/true))) return failure(); @@ -888,8 +888,8 @@ LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) { return success(); } // Non-constant upper bound case. 
- SmallVector ubOperands(forOp.getUpperBoundOperands().begin(), - forOp.getUpperBoundOperands().end()); + SmallVector ubOperands(forOp.getUpperBoundOperands().begin(), + forOp.getUpperBoundOperands().end()); return addLowerOrUpperBound(pos, forOp.getUpperBoundMap(), ubOperands, /*eq=*/false, /*lower=*/false); } @@ -1757,7 +1757,7 @@ void FlatAffineConstraints::getSliceBounds(unsigned offset, unsigned num, LogicalResult FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap, - ArrayRef boundOperands, + ArrayRef boundOperands, bool eq, bool lower) { assert(pos < getNumDimAndSymbolIds() && "invalid position"); // Equality follows the logic of lower bound except that we add an equality @@ -1769,11 +1769,11 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap, // Fully compose map and operands; canonicalize and simplify so that we // transitively get to terminal symbols or loop IVs. auto map = boundMap; - SmallVector operands(boundOperands.begin(), boundOperands.end()); + SmallVector operands(boundOperands.begin(), boundOperands.end()); fullyComposeAffineMapAndOperands(&map, &operands); map = simplifyAffineMap(map); canonicalizeMapAndOperands(&map, &operands); - for (auto *operand : operands) + for (auto operand : operands) addInductionVarOrTerminalSymbol(operand); FlatAffineConstraints localVarCst; @@ -1787,7 +1787,7 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap, if (localVarCst.getNumLocalIds() > 0) { // Set values for localVarCst. localVarCst.setIdValues(0, localVarCst.getNumDimAndSymbolIds(), operands); - for (auto *operand : operands) { + for (auto operand : operands) { unsigned pos; if (findId(*operand, &pos)) { if (pos >= getNumDimIds() && pos < getNumDimAndSymbolIds()) { @@ -1807,7 +1807,7 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap, // this here since the constraint system changes after a bound is added. SmallVector positions; unsigned numOperands = operands.size(); - for (auto *operand : operands) { + for (auto operand : operands) { unsigned pos; if (!findId(*operand, &pos)) assert(0 && "expected to be found"); @@ -1848,8 +1848,8 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap, // Returns failure for unimplemented cases such as semi-affine expressions or // expressions with mod/floordiv. 
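A short client-side sketch of addAffineForOpDomain above, building the iteration domain of a loop nest `forOps`; the helper name is hypothetical, and the setup mirrors what getIndexSet does:

// One dimension per induction variable; each loop then contributes its
// lower/upper bound constraints to the domain.
LogicalResult buildLoopNestDomain(MutableArrayRef<AffineForOp> forOps,
                                  FlatAffineConstraints &domain) {
  SmallVector<ValuePtr, 4> ivs;
  extractForInductionVars(forOps, &ivs);
  domain.reset(/*numDims=*/forOps.size(), /*numSymbols=*/0, /*numLocals=*/0, ivs);
  for (AffineForOp forOp : forOps)
    if (failed(domain.addAffineForOpDomain(forOp)))
      return failure();
  return success();
}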
LogicalResult FlatAffineConstraints::addSliceBounds( - ArrayRef values, ArrayRef lbMaps, - ArrayRef ubMaps, ArrayRef operands) { + ArrayRef values, ArrayRef lbMaps, + ArrayRef ubMaps, ArrayRef operands) { assert(values.size() == lbMaps.size()); assert(lbMaps.size() == ubMaps.size()); @@ -1971,7 +1971,7 @@ void FlatAffineConstraints::addLocalFloorDiv(ArrayRef dividend, addInequality(bound); } -bool FlatAffineConstraints::findId(Value &id, unsigned *pos) const { +bool FlatAffineConstraints::findId(ValueRef id, unsigned *pos) const { unsigned i = 0; for (const auto &mayBeId : ids) { if (mayBeId.hasValue() && mayBeId.getValue() == &id) { @@ -1983,8 +1983,8 @@ bool FlatAffineConstraints::findId(Value &id, unsigned *pos) const { return false; } -bool FlatAffineConstraints::containsId(Value &id) const { - return llvm::any_of(ids, [&](const Optional &mayBeId) { +bool FlatAffineConstraints::containsId(ValueRef id) const { + return llvm::any_of(ids, [&](const Optional &mayBeId) { return mayBeId.hasValue() && mayBeId.getValue() == &id; }); } @@ -2008,7 +2008,7 @@ void FlatAffineConstraints::setIdToConstant(unsigned pos, int64_t val) { /// Sets the specified identifier to a constant value; asserts if the id is not /// found. -void FlatAffineConstraints::setIdToConstant(Value &id, int64_t val) { +void FlatAffineConstraints::setIdToConstant(ValueRef id, int64_t val) { unsigned pos; if (!findId(id, &pos)) // This is a pre-condition for this method. @@ -2573,7 +2573,7 @@ void FlatAffineConstraints::FourierMotzkinEliminate( unsigned newNumDims = dimsSymbols.first; unsigned newNumSymbols = dimsSymbols.second; - SmallVector, 8> newIds; + SmallVector, 8> newIds; newIds.reserve(numIds - 1); newIds.append(ids.begin(), ids.begin() + pos); newIds.append(ids.begin() + pos + 1, ids.end()); @@ -2709,7 +2709,7 @@ void FlatAffineConstraints::projectOut(unsigned pos, unsigned num) { normalizeConstraintsByGCD(); } -void FlatAffineConstraints::projectOut(Value *id) { +void FlatAffineConstraints::projectOut(ValuePtr id) { unsigned pos; bool ret = findId(*id, &pos); assert(ret); diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp index 93017ca3b57c..6ec7c0595269 100644 --- a/lib/Analysis/CallGraph.cpp +++ b/lib/Analysis/CallGraph.cpp @@ -188,7 +188,7 @@ CallGraphNode *CallGraph::resolveCallable(CallInterfaceCallable callable, callee = SymbolTable::lookupNearestSymbolFrom(from, symbolRef.getRootReference()); else - callee = callable.get()->getDefiningOp(); + callee = callable.get()->getDefiningOp(); // If the callee is non-null and is a valid callable object, try to get the // called region from it. diff --git a/lib/Analysis/Dominance.cpp b/lib/Analysis/Dominance.cpp index c422578320ff..532972b771b4 100644 --- a/lib/Analysis/Dominance.cpp +++ b/lib/Analysis/Dominance.cpp @@ -127,7 +127,7 @@ bool DominanceInfo::properlyDominates(Operation *a, Operation *b) { } /// Return true if value A properly dominates operation B. -bool DominanceInfo::properlyDominates(Value *a, Operation *b) { +bool DominanceInfo::properlyDominates(ValuePtr a, Operation *b) { if (auto *aOp = a->getDefiningOp()) { // The values defined by an operation do *not* dominate any nested // operations. diff --git a/lib/Analysis/Liveness.cpp b/lib/Analysis/Liveness.cpp index 6aaec4cc719d..edb18e5645d1 100644 --- a/lib/Analysis/Liveness.cpp +++ b/lib/Analysis/Liveness.cpp @@ -40,13 +40,13 @@ struct BlockInfoBuilder { /// Fills the block builder with initial liveness information. 
BlockInfoBuilder(Block *block) : block(block) { // Mark all block arguments (phis) as defined. - for (BlockArgument *argument : block->getArguments()) + for (BlockArgumentPtr argument : block->getArguments()) defValues.insert(argument); // Check all result values and whether their uses // are inside this block or not (see outValues). for (Operation &operation : *block) - for (Value *result : operation.getResults()) { + for (ValuePtr result : operation.getResults()) { defValues.insert(result); // Check whether this value will be in the outValues @@ -63,7 +63,7 @@ struct BlockInfoBuilder { // Check all operations for used operands. for (Operation &operation : block->getOperations()) - for (Value *operand : operation.getOperands()) { + for (ValuePtr operand : operation.getOperands()) { // If the operand is already defined in the scope of this // block, we can skip the value in the use set. if (!defValues.count(operand)) @@ -173,7 +173,7 @@ void Liveness::build(MutableArrayRef regions) { } /// Gets liveness info (if any) for the given value. -Liveness::OperationListT Liveness::resolveLiveness(Value *value) const { +Liveness::OperationListT Liveness::resolveLiveness(ValuePtr value) const { OperationListT result; SmallPtrSet visited; SmallVector toProcess; @@ -238,7 +238,7 @@ const Liveness::ValueSetT &Liveness::getLiveOut(Block *block) const { /// Returns true if the given operation represent the last use of the /// given value. -bool Liveness::isLastUse(Value *value, Operation *operation) const { +bool Liveness::isLastUse(ValuePtr value, Operation *operation) const { Block *block = operation->getBlock(); const LivenessBlockInfo *blockInfo = getLiveness(block); @@ -263,21 +263,21 @@ void Liveness::print(raw_ostream &os) const { // Builds unique block/value mappings for testing purposes. 
DenseMap blockIds; DenseMap operationIds; - DenseMap valueIds; + DenseMap valueIds; for (Region ®ion : operation->getRegions()) for (Block &block : region) { blockIds.insert({&block, blockIds.size()}); - for (BlockArgument *argument : block.getArguments()) + for (BlockArgumentPtr argument : block.getArguments()) valueIds.insert({argument, valueIds.size()}); for (Operation &operation : block) { operationIds.insert({&operation, operationIds.size()}); - for (Value *result : operation.getResults()) + for (ValuePtr result : operation.getResults()) valueIds.insert({result, valueIds.size()}); } } // Local printing helpers - auto printValueRef = [&](Value *value) { + auto printValueRef = [&](ValuePtr value) { if (Operation *defOp = value->getDefiningOp()) os << "val_" << defOp->getName(); else { @@ -289,12 +289,12 @@ void Liveness::print(raw_ostream &os) const { }; auto printValueRefs = [&](const ValueSetT &values) { - std::vector orderedValues(values.begin(), values.end()); + std::vector orderedValues(values.begin(), values.end()); std::sort(orderedValues.begin(), orderedValues.end(), - [&](Value *left, Value *right) { + [&](ValuePtr left, ValuePtr right) { return valueIds[left] < valueIds[right]; }); - for (Value *value : orderedValues) + for (ValuePtr value : orderedValues) printValueRef(value); }; @@ -315,7 +315,7 @@ void Liveness::print(raw_ostream &os) const { if (op.getNumResults() < 1) continue; os << "\n"; - for (Value *result : op.getResults()) { + for (ValuePtr result : op.getResults()) { os << "// "; printValueRef(result); os << ":"; @@ -340,18 +340,18 @@ void Liveness::print(raw_ostream &os) const { //===----------------------------------------------------------------------===// /// Returns true if the given value is in the live-in set. -bool LivenessBlockInfo::isLiveIn(Value *value) const { +bool LivenessBlockInfo::isLiveIn(ValuePtr value) const { return inValues.count(value); } /// Returns true if the given value is in the live-out set. -bool LivenessBlockInfo::isLiveOut(Value *value) const { +bool LivenessBlockInfo::isLiveOut(ValuePtr value) const { return outValues.count(value); } /// Gets the start operation for the given value /// (must be referenced in this block). -Operation *LivenessBlockInfo::getStartOperation(Value *value) const { +Operation *LivenessBlockInfo::getStartOperation(ValuePtr value) const { Operation *definingOp = value->getDefiningOp(); // The given value is either live-in or is defined // in the scope of this block. @@ -362,7 +362,7 @@ Operation *LivenessBlockInfo::getStartOperation(Value *value) const { /// Gets the end operation for the given value using the start operation /// provided (must be referenced in this block). -Operation *LivenessBlockInfo::getEndOperation(Value *value, +Operation *LivenessBlockInfo::getEndOperation(ValuePtr value, Operation *startOperation) const { // The given value is either dying in this block or live-out. if (isLiveOut(value)) diff --git a/lib/Analysis/LoopAnalysis.cpp b/lib/Analysis/LoopAnalysis.cpp index a81116579ce5..9dfbfe0c5423 100644 --- a/lib/Analysis/LoopAnalysis.cpp +++ b/lib/Analysis/LoopAnalysis.cpp @@ -43,7 +43,7 @@ using namespace mlir; // be more powerful (since both inequalities and equalities will be considered). 
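As a usage sketch for the liveness queries above (a function-like operation `funcOp` and a ValuePtr `value` of interest are assumed):

// Resolve the operations across which 'value' is live, and flag its last use.
Liveness liveness(funcOp);
for (Operation *op : liveness.resolveLiveness(value))
  if (liveness.isLastUse(value, op))
    op->emitRemark("last use of the inspected value");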
void mlir::buildTripCountMapAndOperands( AffineForOp forOp, AffineMap *tripCountMap, - SmallVectorImpl *tripCountOperands) { + SmallVectorImpl *tripCountOperands) { int64_t loopSpan; int64_t step = forOp.getStep(); @@ -65,8 +65,8 @@ void mlir::buildTripCountMapAndOperands( *tripCountMap = AffineMap(); return; } - SmallVector lbOperands(forOp.getLowerBoundOperands()); - SmallVector ubOperands(forOp.getUpperBoundOperands()); + SmallVector lbOperands(forOp.getLowerBoundOperands()); + SmallVector ubOperands(forOp.getUpperBoundOperands()); // Difference of each upper bound expression from the single lower bound // expression (divided by the step) provides the expressions for the trip @@ -98,7 +98,7 @@ void mlir::buildTripCountMapAndOperands( // works with analysis structures (FlatAffineConstraints) and thus doesn't // update the IR. Optional mlir::getConstantTripCount(AffineForOp forOp) { - SmallVector operands; + SmallVector operands; AffineMap map; buildTripCountMapAndOperands(forOp, &map, &operands); @@ -124,7 +124,7 @@ Optional mlir::getConstantTripCount(AffineForOp forOp) { /// expression analysis is used (indirectly through getTripCount), and /// this method is thus able to determine non-trivial divisors. uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) { - SmallVector operands; + SmallVector operands; AffineMap map; buildTripCountMapAndOperands(forOp, &map, &operands); @@ -173,7 +173,7 @@ uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) { /// /// Returns false in cases with more than one AffineApplyOp, this is /// conservative. -static bool isAccessIndexInvariant(Value *iv, Value *index) { +static bool isAccessIndexInvariant(ValuePtr iv, ValuePtr index) { assert(isForInductionVar(iv) && "iv must be a AffineForOp"); assert(index->getType().isa() && "index must be of IndexType"); SmallVector affineApplyOps; @@ -197,11 +197,11 @@ static bool isAccessIndexInvariant(Value *iv, Value *index) { return !(AffineValueMap(composeOp).isFunctionOf(0, iv)); } -DenseSet mlir::getInvariantAccesses(Value *iv, - ArrayRef indices) { - DenseSet res; +DenseSet mlir::getInvariantAccesses(ValuePtr iv, + ArrayRef indices) { + DenseSet res; for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) { - auto *val = indices[idx]; + auto val = indices[idx]; if (isAccessIndexInvariant(iv, val)) { res.insert(val); } @@ -229,7 +229,7 @@ DenseSet mlir::getInvariantAccesses(Value *iv, /// // TODO(ntv): check strides. template -static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp, +static bool isContiguousAccess(ValuePtr iv, LoadOrStoreOp memoryOp, int *memRefDim) { static_assert(std::is_same::value || std::is_same::value, @@ -250,11 +250,11 @@ static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp, int uniqueVaryingIndexAlongIv = -1; auto accessMap = memoryOp.getAffineMap(); - SmallVector mapOperands(memoryOp.getMapOperands()); + SmallVector mapOperands(memoryOp.getMapOperands()); unsigned numDims = accessMap.getNumDims(); for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) { // Gather map operands used result expr 'i' in 'exprOperands'. - SmallVector exprOperands; + SmallVector exprOperands; auto resultExpr = accessMap.getResult(i); resultExpr.walk([&](AffineExpr expr) { if (auto dimExpr = expr.dyn_cast()) @@ -263,7 +263,7 @@ static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp, exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]); }); // Check access invariance of each operand in 'exprOperands'. 
- for (auto *exprOperand : exprOperands) { + for (auto exprOperand : exprOperands) { if (!isAccessIndexInvariant(iv, exprOperand)) { if (uniqueVaryingIndexAlongIv != -1) { // 2+ varying indices -> do not vectorize along iv. @@ -382,7 +382,7 @@ bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef shifts) { // Validate the results of this operation if it were to be shifted. for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) { - Value *result = op.getResult(i); + ValuePtr result = op.getResult(i); for (auto *user : result->getUsers()) { // If an ancestor operation doesn't lie in the block of forOp, // there is no shift to check. diff --git a/lib/Analysis/SliceAnalysis.cpp b/lib/Analysis/SliceAnalysis.cpp index 700321ebb400..b09bddddd66d 100644 --- a/lib/Analysis/SliceAnalysis.cpp +++ b/lib/Analysis/SliceAnalysis.cpp @@ -104,8 +104,8 @@ static void getBackwardSliceImpl(Operation *op, } for (auto en : llvm::enumerate(op->getOperands())) { - auto *operand = en.value(); - if (auto *blockArg = dyn_cast(operand)) { + auto operand = en.value(); + if (auto blockArg = dyn_cast(operand)) { if (auto affIv = getForInductionVarOwner(operand)) { auto *affOp = affIv.getOperation(); if (backwardSlice->count(affOp) == 0) diff --git a/lib/Analysis/Utils.cpp b/lib/Analysis/Utils.cpp index 3ba27bbb2995..73aa07e7d7b9 100644 --- a/lib/Analysis/Utils.cpp +++ b/lib/Analysis/Utils.cpp @@ -60,7 +60,7 @@ ComputationSliceState::getAsConstraints(FlatAffineConstraints *cst) { // Adds operands (dst ivs and symbols) as symbols in 'cst'. unsigned numSymbols = lbOperands[0].size(); - SmallVector values(ivs); + SmallVector values(ivs); // Append 'ivs' then 'operands' to 'values'. values.append(lbOperands[0].begin(), lbOperands[0].end()); cst->reset(numDims, numSymbols, 0, values); @@ -185,7 +185,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth, if (rank == 0) { SmallVector ivs; getLoopIVs(*op, &ivs); - SmallVector regionSymbols; + SmallVector regionSymbols; extractForInductionVars(ivs, ®ionSymbols); // A rank 0 memref has a 0-d region. cst.reset(rank, loopDepth, 0, regionSymbols); @@ -201,7 +201,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth, unsigned numSymbols = accessMap.getNumSymbols(); unsigned numOperands = accessValueMap.getNumOperands(); // Merge operands with slice operands. - SmallVector operands; + SmallVector operands; operands.resize(numOperands); for (unsigned i = 0; i < numOperands; ++i) operands[i] = accessValueMap.getOperand(i); @@ -224,7 +224,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth, // Add equality constraints. // Add inequalities for loop lower/upper bounds. for (unsigned i = 0; i < numDims + numSymbols; ++i) { - auto *operand = operands[i]; + auto operand = operands[i]; if (auto loop = getForInductionVarOwner(operand)) { // Note that cst can now have more dimensions than accessMap if the // bounds expressions involve outer loops or other symbols. @@ -234,7 +234,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth, return failure(); } else { // Has to be a valid symbol. - auto *symbol = operand; + auto symbol = operand; assert(isValidSymbol(symbol)); // Check if the symbol is a constant. 
if (auto *op = symbol->getDefiningOp()) { @@ -278,9 +278,9 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth, getLoopIVs(*op, &enclosingIVs); assert(loopDepth <= enclosingIVs.size() && "invalid loop depth"); enclosingIVs.resize(loopDepth); - SmallVector ids; + SmallVector ids; cst.getIdValues(cst.getNumDimIds(), cst.getNumDimAndSymbolIds(), &ids); - for (auto *id : ids) { + for (auto id : ids) { AffineForOp iv; if ((iv = getForInductionVarOwner(id)) && llvm::is_contained(enclosingIVs, iv) == false) { @@ -345,9 +345,9 @@ Optional MemRefRegion::getRegionSize() { // Indices to use for the DmaStart op. // Indices for the original memref being DMAed from/to. - SmallVector memIndices; + SmallVector memIndices; // Indices for the faster buffer being DMAed into/from. - SmallVector bufIndices; + SmallVector bufIndices; // Compute the extents of the buffer. Optional numElements = getConstantBoundingSizeAndShape(); @@ -480,10 +480,10 @@ static Operation *getInstAtPosition(ArrayRef positions, } // Adds loop IV bounds to 'cst' for loop IVs not found in 'ivs'. -LogicalResult addMissingLoopIVBounds(SmallPtrSet &ivs, +LogicalResult addMissingLoopIVBounds(SmallPtrSet &ivs, FlatAffineConstraints *cst) { for (unsigned i = 0, e = cst->getNumDimIds(); i < e; ++i) { - auto *value = cst->getIdValue(i); + auto value = cst->getIdValue(i); if (ivs.count(value) == 0) { assert(isForInductionVar(value)); auto loop = getForInductionVarOwner(value); @@ -596,10 +596,10 @@ LogicalResult mlir::computeSliceUnion(ArrayRef opsA, // Pre-constraint id alignment: record loop IVs used in each constraint // system. - SmallPtrSet sliceUnionIVs; + SmallPtrSet sliceUnionIVs; for (unsigned k = 0, l = sliceUnionCst.getNumDimIds(); k < l; ++k) sliceUnionIVs.insert(sliceUnionCst.getIdValue(k)); - SmallPtrSet tmpSliceIVs; + SmallPtrSet tmpSliceIVs; for (unsigned k = 0, l = tmpSliceCst.getNumDimIds(); k < l; ++k) tmpSliceIVs.insert(tmpSliceCst.getIdValue(k)); @@ -659,7 +659,7 @@ LogicalResult mlir::computeSliceUnion(ArrayRef opsA, &sliceUnion->ubs); // Add slice bound operands of union. - SmallVector sliceBoundOperands; + SmallVector sliceBoundOperands; sliceUnionCst.getIdValues(numSliceLoopIVs, sliceUnionCst.getNumDimAndSymbolIds(), &sliceBoundOperands); @@ -725,7 +725,7 @@ void mlir::getComputationSliceState( &sliceState->lbs, &sliceState->ubs); // Set up bound operands for the slice's lower and upper bounds. - SmallVector sliceBoundOperands; + SmallVector sliceBoundOperands; unsigned numDimsAndSymbols = dependenceConstraints->getNumDimAndSymbolIds(); for (unsigned i = 0; i < numDimsAndSymbols; ++i) { if (i < offset || i >= offset + numSliceLoopIVs) { @@ -743,7 +743,7 @@ void mlir::getComputationSliceState( isBackwardSlice ? dstLoopIVs[loopDepth - 1].getBody()->begin() : std::prev(srcLoopIVs[loopDepth - 1].getBody()->end()); - llvm::SmallDenseSet sequentialLoops; + llvm::SmallDenseSet sequentialLoops; if (isa(depSourceOp) && isa(depSinkOp)) { // For read-read access pairs, clear any slice bounds on sequential loops. // Get sequential loops in loop nest rooted at 'srcLoopIVs[0]'. @@ -758,7 +758,7 @@ void mlir::getComputationSliceState( return isBackwardSlice ? 
srcLoopIVs[i] : dstLoopIVs[i]; }; for (unsigned i = 0; i < numSliceLoopIVs; ++i) { - Value *iv = getSliceLoop(i).getInductionVar(); + ValuePtr iv = getSliceLoop(i).getInductionVar(); if (sequentialLoops.count(iv) == 0 && getSliceLoop(i).getAttr(kSliceFusionBarrierAttrName) == nullptr) continue; @@ -846,7 +846,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) { opInst = loadOrStoreOpInst; auto loadMemrefType = loadOp.getMemRefType(); indices.reserve(loadMemrefType.getRank()); - for (auto *index : loadOp.getMapOperands()) { + for (auto index : loadOp.getMapOperands()) { indices.push_back(index); } } else { @@ -856,7 +856,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) { memref = storeOp.getMemRef(); auto storeMemrefType = storeOp.getMemRefType(); indices.reserve(storeMemrefType.getRank()); - for (auto *index : storeOp.getMapOperands()) { + for (auto index : storeOp.getMapOperands()) { indices.push_back(index); } } @@ -919,7 +919,7 @@ static Optional getMemoryFootprintBytes(Block &block, Block::iterator start, Block::iterator end, int memorySpace) { - SmallDenseMap, 4> regions; + SmallDenseMap, 4> regions; // Walk this 'affine.for' operation to gather all memory regions. auto result = block.walk(start, end, [&](Operation *opInst) -> WalkResult { @@ -970,7 +970,7 @@ Optional mlir::getMemoryFootprintBytes(AffineForOp forOp, /// Returns in 'sequentialLoops' all sequential loops in loop nest rooted /// at 'forOp'. void mlir::getSequentialLoops( - AffineForOp forOp, llvm::SmallDenseSet *sequentialLoops) { + AffineForOp forOp, llvm::SmallDenseSet *sequentialLoops) { forOp.getOperation()->walk([&](Operation *op) { if (auto innerFor = dyn_cast(op)) if (!isLoopParallel(innerFor)) diff --git a/lib/Analysis/VectorAnalysis.cpp b/lib/Analysis/VectorAnalysis.cpp index 42d3f10b14c7..a7917eba5033 100644 --- a/lib/Analysis/VectorAnalysis.cpp +++ b/lib/Analysis/VectorAnalysis.cpp @@ -109,7 +109,7 @@ Optional> mlir::shapeRatio(VectorType superVectorType, /// Examples can be found in the documentation of `makePermutationMap`, in the /// header file. static AffineMap makePermutationMap( - ArrayRef indices, + ArrayRef indices, const DenseMap &enclosingLoopToVectorDim) { if (enclosingLoopToVectorDim.empty()) return AffineMap(); @@ -167,7 +167,7 @@ static SetVector getEnclosingforOps(Operation *op) { } AffineMap mlir::makePermutationMap( - Operation *op, ArrayRef indices, + Operation *op, ArrayRef indices, const DenseMap &loopToVectorDim) { DenseMap enclosingLoopToVectorDim; auto enclosingLoops = getEnclosingforOps(op); diff --git a/lib/Analysis/Verifier.cpp b/lib/Analysis/Verifier.cpp index 82f5aa5e01c8..be499a938986 100644 --- a/lib/Analysis/Verifier.cpp +++ b/lib/Analysis/Verifier.cpp @@ -138,7 +138,7 @@ LogicalResult OperationVerifier::verifyRegion(Region ®ion) { } LogicalResult OperationVerifier::verifyBlock(Block &block) { - for (auto *arg : block.getArguments()) + for (auto arg : block.getArguments()) if (arg->getOwner() != &block) return emitError(block, "block argument not owned by block"); @@ -175,7 +175,7 @@ LogicalResult OperationVerifier::verifyBlock(Block &block) { LogicalResult OperationVerifier::verifyOperation(Operation &op) { // Check that operands are non-nil and structurally ok. - for (auto *operand : op.getOperands()) + for (auto operand : op.getOperands()) if (!operand) return op.emitError("null operand found"); @@ -244,7 +244,7 @@ LogicalResult OperationVerifier::verifyDominance(Operation &op) { // Check that operands properly dominate this use. 
for (unsigned operandNo = 0, e = op.getNumOperands(); operandNo != e; ++operandNo) { - auto *operand = op.getOperand(operandNo); + auto operand = op.getOperand(operandNo); if (domInfo->properlyDominates(operand, &op)) continue; diff --git a/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/lib/Conversion/AffineToStandard/AffineToStandard.cpp index 3f613c6bfb5c..144b4a97e874 100644 --- a/lib/Conversion/AffineToStandard/AffineToStandard.cpp +++ b/lib/Conversion/AffineToStandard/AffineToStandard.cpp @@ -42,16 +42,16 @@ namespace { // that correspond to it. Visitation functions return an Value of the // expression subtree they visited or `nullptr` on error. class AffineApplyExpander - : public AffineExprVisitor { + : public AffineExprVisitor { public: // This internal class expects arguments to be non-null, checks must be // performed at the call site. - AffineApplyExpander(OpBuilder &builder, ArrayRef dimValues, - ArrayRef symbolValues, Location loc) + AffineApplyExpander(OpBuilder &builder, ArrayRef dimValues, + ArrayRef symbolValues, Location loc) : builder(builder), dimValues(dimValues), symbolValues(symbolValues), loc(loc) {} - template Value *buildBinaryExpr(AffineBinaryOpExpr expr) { + template ValuePtr buildBinaryExpr(AffineBinaryOpExpr expr) { auto lhs = visit(expr.getLHS()); auto rhs = visit(expr.getRHS()); if (!lhs || !rhs) @@ -60,11 +60,11 @@ class AffineApplyExpander return op.getResult(); } - Value *visitAddExpr(AffineBinaryOpExpr expr) { + ValuePtr visitAddExpr(AffineBinaryOpExpr expr) { return buildBinaryExpr(expr); } - Value *visitMulExpr(AffineBinaryOpExpr expr) { + ValuePtr visitMulExpr(AffineBinaryOpExpr expr) { return buildBinaryExpr(expr); } @@ -77,7 +77,7 @@ class AffineApplyExpander // let remainder = srem a, b; // negative = a < 0 in // select negative, remainder + b, remainder. - Value *visitModExpr(AffineBinaryOpExpr expr) { + ValuePtr visitModExpr(AffineBinaryOpExpr expr) { auto rhsConst = expr.getRHS().dyn_cast(); if (!rhsConst) { emitError( @@ -94,13 +94,13 @@ class AffineApplyExpander auto rhs = visit(expr.getRHS()); assert(lhs && rhs && "unexpected affine expr lowering failure"); - Value *remainder = builder.create(loc, lhs, rhs); - Value *zeroCst = builder.create(loc, 0); - Value *isRemainderNegative = + ValuePtr remainder = builder.create(loc, lhs, rhs); + ValuePtr zeroCst = builder.create(loc, 0); + ValuePtr isRemainderNegative = builder.create(loc, CmpIPredicate::slt, remainder, zeroCst); - Value *correctedRemainder = builder.create(loc, remainder, rhs); - Value *result = builder.create(loc, isRemainderNegative, - correctedRemainder, remainder); + ValuePtr correctedRemainder = builder.create(loc, remainder, rhs); + ValuePtr result = builder.create(loc, isRemainderNegative, + correctedRemainder, remainder); return result; } @@ -114,7 +114,7 @@ class AffineApplyExpander // let absolute = negative ? -a - 1 : a in // let quotient = absolute / b in // negative ? 
-quotient - 1 : quotient - Value *visitFloorDivExpr(AffineBinaryOpExpr expr) { + ValuePtr visitFloorDivExpr(AffineBinaryOpExpr expr) { auto rhsConst = expr.getRHS().dyn_cast(); if (!rhsConst) { emitError( @@ -131,16 +131,16 @@ class AffineApplyExpander auto rhs = visit(expr.getRHS()); assert(lhs && rhs && "unexpected affine expr lowering failure"); - Value *zeroCst = builder.create(loc, 0); - Value *noneCst = builder.create(loc, -1); - Value *negative = + ValuePtr zeroCst = builder.create(loc, 0); + ValuePtr noneCst = builder.create(loc, -1); + ValuePtr negative = builder.create(loc, CmpIPredicate::slt, lhs, zeroCst); - Value *negatedDecremented = builder.create(loc, noneCst, lhs); - Value *dividend = + ValuePtr negatedDecremented = builder.create(loc, noneCst, lhs); + ValuePtr dividend = builder.create(loc, negative, negatedDecremented, lhs); - Value *quotient = builder.create(loc, dividend, rhs); - Value *correctedQuotient = builder.create(loc, noneCst, quotient); - Value *result = + ValuePtr quotient = builder.create(loc, dividend, rhs); + ValuePtr correctedQuotient = builder.create(loc, noneCst, quotient); + ValuePtr result = builder.create(loc, negative, correctedQuotient, quotient); return result; } @@ -155,7 +155,7 @@ class AffineApplyExpander // let absolute = negative ? -a : a - 1 in // let quotient = absolute / b in // negative ? -quotient : quotient + 1 - Value *visitCeilDivExpr(AffineBinaryOpExpr expr) { + ValuePtr visitCeilDivExpr(AffineBinaryOpExpr expr) { auto rhsConst = expr.getRHS().dyn_cast(); if (!rhsConst) { emitError(loc) << "semi-affine expressions (division by non-const) are " @@ -170,23 +170,24 @@ class AffineApplyExpander auto rhs = visit(expr.getRHS()); assert(lhs && rhs && "unexpected affine expr lowering failure"); - Value *zeroCst = builder.create(loc, 0); - Value *oneCst = builder.create(loc, 1); - Value *nonPositive = + ValuePtr zeroCst = builder.create(loc, 0); + ValuePtr oneCst = builder.create(loc, 1); + ValuePtr nonPositive = builder.create(loc, CmpIPredicate::sle, lhs, zeroCst); - Value *negated = builder.create(loc, zeroCst, lhs); - Value *decremented = builder.create(loc, lhs, oneCst); - Value *dividend = + ValuePtr negated = builder.create(loc, zeroCst, lhs); + ValuePtr decremented = builder.create(loc, lhs, oneCst); + ValuePtr dividend = builder.create(loc, nonPositive, negated, decremented); - Value *quotient = builder.create(loc, dividend, rhs); - Value *negatedQuotient = builder.create(loc, zeroCst, quotient); - Value *incrementedQuotient = builder.create(loc, quotient, oneCst); - Value *result = builder.create(loc, nonPositive, negatedQuotient, - incrementedQuotient); + ValuePtr quotient = builder.create(loc, dividend, rhs); + ValuePtr negatedQuotient = builder.create(loc, zeroCst, quotient); + ValuePtr incrementedQuotient = + builder.create(loc, quotient, oneCst); + ValuePtr result = builder.create( + loc, nonPositive, negatedQuotient, incrementedQuotient); return result; } - Value *visitConstantExpr(AffineConstantExpr expr) { + ValuePtr visitConstantExpr(AffineConstantExpr expr) { auto valueAttr = builder.getIntegerAttr(builder.getIndexType(), expr.getValue()); auto op = @@ -194,13 +195,13 @@ class AffineApplyExpander return op.getResult(); } - Value *visitDimExpr(AffineDimExpr expr) { + ValuePtr visitDimExpr(AffineDimExpr expr) { assert(expr.getPosition() < dimValues.size() && "affine dim position out of range"); return dimValues[expr.getPosition()]; } - Value *visitSymbolExpr(AffineSymbolExpr expr) { + ValuePtr 
visitSymbolExpr(AffineSymbolExpr expr) { assert(expr.getPosition() < symbolValues.size() && "symbol dim position out of range"); return symbolValues[expr.getPosition()]; @@ -208,8 +209,8 @@ class AffineApplyExpander private: OpBuilder &builder; - ArrayRef dimValues; - ArrayRef symbolValues; + ArrayRef dimValues; + ArrayRef symbolValues; Location loc; }; @@ -217,18 +218,18 @@ class AffineApplyExpander // Create a sequence of operations that implement the `expr` applied to the // given dimension and symbol values. -mlir::Value *mlir::expandAffineExpr(OpBuilder &builder, Location loc, - AffineExpr expr, - ArrayRef dimValues, - ArrayRef symbolValues) { +mlir::ValuePtr mlir::expandAffineExpr(OpBuilder &builder, Location loc, + AffineExpr expr, + ArrayRef dimValues, + ArrayRef symbolValues) { return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr); } // Create a sequence of operations that implement the `affineMap` applied to // the given `operands` (as it it were an AffineApplyOp). -Optional> static expandAffineMap( +Optional> static expandAffineMap( OpBuilder &builder, Location loc, AffineMap affineMap, - ArrayRef operands) { + ArrayRef operands) { auto numDims = affineMap.getNumDims(); auto expanded = functional::map( [numDims, &builder, loc, operands](AffineExpr expr) { @@ -237,7 +238,7 @@ Optional> static expandAffineMap( operands.drop_front(numDims)); }, affineMap.getResults()); - if (llvm::all_of(expanded, [](Value *v) { return v; })) + if (llvm::all_of(expanded, [](ValuePtr v) { return v; })) return expanded; return None; } @@ -253,13 +254,13 @@ Optional> static expandAffineMap( // Multiple values are scanned in a linear sequence. This creates a data // dependences that wouldn't exist in a tree reduction, but is easier to // recognize as a reduction by the subsequent passes. -static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate, - ArrayRef values, - OpBuilder &builder) { +static ValuePtr buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate, + ArrayRef values, + OpBuilder &builder) { assert(!llvm::empty(values) && "empty min/max chain"); auto valueIt = values.begin(); - Value *value = *valueIt++; + ValuePtr value = *valueIt++; for (; valueIt != values.end(); ++valueIt) { auto cmpOp = builder.create(loc, predicate, value, *valueIt); value = builder.create(loc, cmpOp.getResult(), value, *valueIt); @@ -271,8 +272,8 @@ static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate, // Emit instructions that correspond to the affine map in the lower bound // applied to the respective operands, and compute the maximum value across // the results. -Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) { - SmallVector boundOperands(op.getLowerBoundOperands()); +ValuePtr mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) { + SmallVector boundOperands(op.getLowerBoundOperands()); auto lbValues = expandAffineMap(builder, op.getLoc(), op.getLowerBoundMap(), boundOperands); if (!lbValues) @@ -284,8 +285,8 @@ Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) { // Emit instructions that correspond to the affine map in the upper bound // applied to the respective operands, and compute the minimum value across // the results. 
-Value *mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) { - SmallVector boundOperands(op.getUpperBoundOperands()); +ValuePtr mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) { + SmallVector boundOperands(op.getUpperBoundOperands()); auto ubValues = expandAffineMap(builder, op.getLoc(), op.getUpperBoundMap(), boundOperands); if (!ubValues) @@ -314,9 +315,9 @@ class AffineForLowering : public OpRewritePattern { PatternMatchResult matchAndRewrite(AffineForOp op, PatternRewriter &rewriter) const override { Location loc = op.getLoc(); - Value *lowerBound = lowerAffineLowerBound(op, rewriter); - Value *upperBound = lowerAffineUpperBound(op, rewriter); - Value *step = rewriter.create(loc, op.getStep()); + ValuePtr lowerBound = lowerAffineLowerBound(op, rewriter); + ValuePtr upperBound = lowerAffineUpperBound(op, rewriter); + ValuePtr step = rewriter.create(loc, op.getStep()); auto f = rewriter.create(loc, lowerBound, upperBound, step); f.region().getBlocks().clear(); rewriter.inlineRegionBefore(op.region(), f.region(), f.region().end()); @@ -335,25 +336,25 @@ class AffineIfLowering : public OpRewritePattern { // Now we just have to handle the condition logic. auto integerSet = op.getIntegerSet(); - Value *zeroConstant = rewriter.create(loc, 0); - SmallVector operands(op.getOperands()); + ValuePtr zeroConstant = rewriter.create(loc, 0); + SmallVector operands(op.getOperands()); auto operandsRef = llvm::makeArrayRef(operands); // Calculate cond as a conjunction without short-circuiting. - Value *cond = nullptr; + ValuePtr cond = nullptr; for (unsigned i = 0, e = integerSet.getNumConstraints(); i < e; ++i) { AffineExpr constraintExpr = integerSet.getConstraint(i); bool isEquality = integerSet.isEq(i); // Build and apply an affine expression auto numDims = integerSet.getNumDims(); - Value *affResult = expandAffineExpr(rewriter, loc, constraintExpr, - operandsRef.take_front(numDims), - operandsRef.drop_front(numDims)); + ValuePtr affResult = expandAffineExpr(rewriter, loc, constraintExpr, + operandsRef.take_front(numDims), + operandsRef.drop_front(numDims)); if (!affResult) return matchFailure(); auto pred = isEquality ? CmpIPredicate::eq : CmpIPredicate::sge; - Value *cmpVal = + ValuePtr cmpVal = rewriter.create(loc, pred, affResult, zeroConstant); cond = cond ? rewriter.create(loc, cond, cmpVal).getResult() : cmpVal; @@ -404,7 +405,7 @@ class AffineLoadLowering : public OpRewritePattern { PatternMatchResult matchAndRewrite(AffineLoadOp op, PatternRewriter &rewriter) const override { // Expand affine map from 'affineLoadOp'. - SmallVector indices(op.getMapOperands()); + SmallVector indices(op.getMapOperands()); auto resultOperands = expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices); if (!resultOperands) @@ -426,7 +427,7 @@ class AffinePrefetchLowering : public OpRewritePattern { PatternMatchResult matchAndRewrite(AffinePrefetchOp op, PatternRewriter &rewriter) const override { // Expand affine map from 'affinePrefetchOp'. - SmallVector indices(op.getMapOperands()); + SmallVector indices(op.getMapOperands()); auto resultOperands = expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices); if (!resultOperands) @@ -450,7 +451,7 @@ class AffineStoreLowering : public OpRewritePattern { PatternMatchResult matchAndRewrite(AffineStoreOp op, PatternRewriter &rewriter) const override { // Expand affine map from 'affineStoreOp'. 
- SmallVector indices(op.getMapOperands()); + SmallVector indices(op.getMapOperands()); auto maybeExpandedMap = expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices); if (!maybeExpandedMap) @@ -472,7 +473,7 @@ class AffineDmaStartLowering : public OpRewritePattern { PatternMatchResult matchAndRewrite(AffineDmaStartOp op, PatternRewriter &rewriter) const override { - SmallVector operands(op.getOperands()); + SmallVector operands(op.getOperands()); auto operandsRef = llvm::makeArrayRef(operands); // Expand affine map for DMA source memref. @@ -513,7 +514,7 @@ class AffineDmaWaitLowering : public OpRewritePattern { PatternMatchResult matchAndRewrite(AffineDmaWaitOp op, PatternRewriter &rewriter) const override { // Expand affine map for DMA tag memref. - SmallVector indices(op.getTagIndices()); + SmallVector indices(op.getTagIndices()); auto maybeExpandedTagMap = expandAffineMap(rewriter, op.getLoc(), op.getTagMap(), indices); if (!maybeExpandedTagMap) diff --git a/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h b/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h index 6a1a580e3694..a408ab5b5d96 100644 --- a/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h +++ b/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h @@ -57,11 +57,11 @@ struct GPUIndexIntrinsicOpLowering : public LLVMOpLowering { // Convert the kernel arguments to an LLVM type, preserve the rest. PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto dialect = lowering.getDialect(); - Value *newOp; + ValuePtr newOp; switch (dimensionToIndex(cast(op))) { case X: newOp = rewriter.create(loc, LLVM::LLVMType::getInt32Ty(dialect)); diff --git a/lib/Conversion/GPUCommon/OpToFuncCallLowering.h b/lib/Conversion/GPUCommon/OpToFuncCallLowering.h index 23bfa303708d..3ab8e75633e8 100644 --- a/lib/Conversion/GPUCommon/OpToFuncCallLowering.h +++ b/lib/Conversion/GPUCommon/OpToFuncCallLowering.h @@ -44,7 +44,7 @@ struct OpToFuncCallLowering : public LLVMOpLowering { f32Func(f32Func), f64Func(f64Func) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { using LLVM::LLVMFuncOp; using LLVM::LLVMType; @@ -69,10 +69,10 @@ struct OpToFuncCallLowering : public LLVMOpLowering { private: LLVM::LLVMType getFunctionType(LLVM::LLVMType resultType, - ArrayRef operands) const { + ArrayRef operands) const { using LLVM::LLVMType; SmallVector operandTypes; - for (Value *operand : operands) { + for (ValuePtr operand : operands) { operandTypes.push_back(operand->getType().cast()); } return LLVMType::getFunctionTy(resultType, operandTypes, diff --git a/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp b/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp index f342083bee75..840ad6ba701b 100644 --- a/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp +++ b/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp @@ -114,7 +114,7 @@ class GpuLaunchFuncToCudaCallsPass } // Allocate a void pointer on the stack. 
- Value *allocatePointer(OpBuilder &builder, Location loc) { + ValuePtr allocatePointer(OpBuilder &builder, Location loc) { auto one = builder.create(loc, getInt32Type(), builder.getI32IntegerAttr(1)); return builder.create(loc, getPointerPointerType(), one, @@ -122,9 +122,9 @@ class GpuLaunchFuncToCudaCallsPass } void declareCudaFunctions(Location loc); - Value *setupParamsArray(gpu::LaunchFuncOp launchOp, OpBuilder &builder); - Value *generateKernelNameConstant(StringRef name, Location loc, - OpBuilder &builder); + ValuePtr setupParamsArray(gpu::LaunchFuncOp launchOp, OpBuilder &builder); + ValuePtr generateKernelNameConstant(StringRef name, Location loc, + OpBuilder &builder); void translateGpuLaunchCalls(mlir::gpu::LaunchFuncOp launchOp); public: @@ -248,7 +248,7 @@ void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) { // for (i : [0, NumKernelOperands)) // %array[i] = cast(KernelOperand[i]) // return %array -Value * +ValuePtr GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp, OpBuilder &builder) { auto numKernelOperands = launchOp.getNumKernelOperands(); @@ -264,7 +264,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp, for (unsigned idx = 0; idx < numKernelOperands; ++idx) { auto operand = launchOp.getKernelOperand(idx); auto llvmType = operand->getType().cast(); - Value *memLocation = builder.create( + ValuePtr memLocation = builder.create( loc, llvmType.getPointerTo(), one, /*alignment=*/1); builder.create(loc, operand, memLocation); auto casted = @@ -280,12 +280,12 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp, getModule().lookupSymbol(kMcuMemHostRegister); auto nullPtr = builder.create(loc, llvmType.getPointerTo()); auto gep = builder.create(loc, llvmType.getPointerTo(), - ArrayRef{nullPtr, one}); + ArrayRef{nullPtr, one}); auto size = builder.create(loc, getInt64Type(), gep); builder.create(loc, ArrayRef{}, builder.getSymbolRefAttr(registerFunc), - ArrayRef{casted, size}); - Value *memLocation = builder.create( + ArrayRef{casted, size}); + ValuePtr memLocation = builder.create( loc, getPointerPointerType(), one, /*alignment=*/1); builder.create(loc, casted, memLocation); casted = @@ -295,7 +295,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp, auto index = builder.create( loc, getInt32Type(), builder.getI32IntegerAttr(idx)); auto gep = builder.create(loc, getPointerPointerType(), array, - ArrayRef{index}); + ArrayRef{index}); builder.create(loc, casted, gep); } return array; @@ -311,7 +311,7 @@ GpuLaunchFuncToCudaCallsPass::setupParamsArray(gpu::LaunchFuncOp launchOp, // %1 = llvm.constant (0 : index) // %2 = llvm.getelementptr %0[%1, %1] : !llvm<"i8*"> // } -Value *GpuLaunchFuncToCudaCallsPass::generateKernelNameConstant( +ValuePtr GpuLaunchFuncToCudaCallsPass::generateKernelNameConstant( StringRef name, Location loc, OpBuilder &builder) { // Make sure the trailing zero is included in the constant. 
std::vector kernelName(name.begin(), name.end()); @@ -367,7 +367,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls( assert(kernelModule.getName() && "expected a named module"); SmallString<128> nameBuffer(*kernelModule.getName()); nameBuffer.append(kCubinStorageSuffix); - Value *data = LLVM::createGlobalString( + ValuePtr data = LLVM::createGlobalString( loc, builder, nameBuffer.str(), cubinAttr.getValue(), LLVM::Linkage::Internal, getLLVMDialect()); @@ -378,7 +378,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls( getModule().lookupSymbol(cuModuleLoadName); builder.create(loc, ArrayRef{getCUResultType()}, builder.getSymbolRefAttr(cuModuleLoad), - ArrayRef{cuModule, data}); + ArrayRef{cuModule, data}); // Get the function from the module. The name corresponds to the name of // the kernel function. auto cuOwningModuleRef = @@ -390,13 +390,13 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls( builder.create( loc, ArrayRef{getCUResultType()}, builder.getSymbolRefAttr(cuModuleGetFunction), - ArrayRef{cuFunction, cuOwningModuleRef, kernelName}); + ArrayRef{cuFunction, cuOwningModuleRef, kernelName}); // Grab the global stream needed for execution. auto cuGetStreamHelper = getModule().lookupSymbol(cuGetStreamHelperName); auto cuStream = builder.create( loc, ArrayRef{getPointerType()}, - builder.getSymbolRefAttr(cuGetStreamHelper), ArrayRef{}); + builder.getSymbolRefAttr(cuGetStreamHelper), ArrayRef{}); // Invoke the function with required arguments. auto cuLaunchKernel = getModule().lookupSymbol(cuLaunchKernelName); @@ -408,19 +408,19 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls( builder.create( loc, ArrayRef{getCUResultType()}, builder.getSymbolRefAttr(cuLaunchKernel), - ArrayRef{cuFunctionRef, launchOp.getOperand(0), - launchOp.getOperand(1), launchOp.getOperand(2), - launchOp.getOperand(3), launchOp.getOperand(4), - launchOp.getOperand(5), zero, /* sharedMemBytes */ - cuStream.getResult(0), /* stream */ - paramsArray, /* kernel params */ - nullpointer /* extra */}); + ArrayRef{cuFunctionRef, launchOp.getOperand(0), + launchOp.getOperand(1), launchOp.getOperand(2), + launchOp.getOperand(3), launchOp.getOperand(4), + launchOp.getOperand(5), zero, /* sharedMemBytes */ + cuStream.getResult(0), /* stream */ + paramsArray, /* kernel params */ + nullpointer /* extra */}); // Sync on the stream to make it synchronous. auto cuStreamSync = getModule().lookupSymbol(cuStreamSynchronizeName); builder.create(loc, ArrayRef{getCUResultType()}, builder.getSymbolRefAttr(cuStreamSync), - ArrayRef(cuStream.getResult(0))); + ArrayRef(cuStream.getResult(0))); launchOp.erase(); } diff --git a/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp index 220df53b977c..bf18ea03dab0 100644 --- a/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp +++ b/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp @@ -60,8 +60,8 @@ class NVVMTypeConverter : public LLVMTypeConverter { /// Converts all_reduce op to LLVM/NVVM ops. 
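/// The lowering below reduces in two stages: each warp combines its values
/// with shuffle operations, the first lane of every warp writes its partial
/// result to a workgroup-shared array, and the first numWarps threads then
/// combine those per-warp partials into the block-wide result (see
/// createWarpReduce and createBlockReduce).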
struct GPUAllReduceOpLowering : public LLVMOpLowering { - using AccumulatorFactory = std::function; + using AccumulatorFactory = std::function; explicit GPUAllReduceOpLowering(LLVMTypeConverter &lowering_) : LLVMOpLowering(gpu::AllReduceOp::getOperationName(), @@ -69,10 +69,10 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { int32Type(LLVM::LLVMType::getInt32Ty(lowering_.getDialect())) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { Location loc = op->getLoc(); - Value *operand = operands.front(); + ValuePtr operand = operands.front(); // TODO(csigg): Generalize to other types of accumulation. assert(op->getOperand(0)->getType().isIntOrFloat()); @@ -81,7 +81,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { AccumulatorFactory factory = getFactory(cast(op), operand); assert(factory && "failed to create accumulator factory"); - Value *result = createBlockReduce(loc, operand, factory, rewriter); + ValuePtr result = createBlockReduce(loc, operand, factory, rewriter); rewriter.replaceOp(op, {result}); return matchSuccess(); @@ -91,7 +91,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { /// Returns an accumulator factory using either the op attribute or the body /// region. AccumulatorFactory getFactory(gpu::AllReduceOp allReduce, - Value *operand) const { + ValuePtr operand) const { if (!allReduce.body().empty()) { return getFactory(allReduce.body()); } @@ -106,7 +106,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { /// block is expected to have 2 arguments. The gpu.yield return the /// accumulated value of the same type. AccumulatorFactory getFactory(Region &body) const { - return AccumulatorFactory([&](Location loc, Value *lhs, Value *rhs, + return AccumulatorFactory([&](Location loc, ValuePtr lhs, ValuePtr rhs, ConversionPatternRewriter &rewriter) { Block *block = rewriter.getInsertionBlock(); Block *split = rewriter.splitBlock(block, rewriter.getInsertionPoint()); @@ -120,7 +120,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { // Add branch before inserted body, into body. block = block->getNextNode(); - rewriter.create(loc, ArrayRef{}, + rewriter.create(loc, ArrayRef{}, llvm::makeArrayRef(block), ValueRange()); // Replace all gpu.yield ops with branch out of body. @@ -130,7 +130,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { continue; rewriter.setInsertionPointToEnd(block); rewriter.replaceOpWithNewOp( - terminator, ArrayRef{}, llvm::makeArrayRef(split), + terminator, ArrayRef{}, llvm::makeArrayRef(split), ValueRange(terminator->getOperand(0))); } @@ -161,7 +161,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { /// Returns an accumulator factory that creates an op of type T. 
template AccumulatorFactory getFactory() const { - return [](Location loc, Value *lhs, Value *rhs, + return [](Location loc, ValuePtr lhs, ValuePtr rhs, ConversionPatternRewriter &rewriter) { return rewriter.create(loc, lhs->getType(), lhs, rhs); }; @@ -203,60 +203,60 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { /// %result = llvm.load %result_ptr /// return %result /// - Value *createBlockReduce(Location loc, Value *operand, - AccumulatorFactory &accumFactory, - ConversionPatternRewriter &rewriter) const { + ValuePtr createBlockReduce(Location loc, ValuePtr operand, + AccumulatorFactory &accumFactory, + ConversionPatternRewriter &rewriter) const { auto type = operand->getType().cast(); // Create shared memory array to store the warp reduction. auto module = operand->getDefiningOp()->getParentOfType(); assert(module && "op must belong to a module"); - Value *sharedMemPtr = + ValuePtr sharedMemPtr = createSharedMemoryArray(loc, module, type, kWarpSize, rewriter); - Value *zero = rewriter.create( + ValuePtr zero = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(0u)); - Value *laneId = rewriter.create(loc, int32Type); - Value *isFirstLane = rewriter.create( + ValuePtr laneId = rewriter.create(loc, int32Type); + ValuePtr isFirstLane = rewriter.create( loc, LLVM::ICmpPredicate::eq, laneId, zero); - Value *threadIdx = getLinearThreadIndex(loc, rewriter); - Value *blockSize = getBlockSize(loc, rewriter); - Value *activeWidth = getActiveWidth(loc, threadIdx, blockSize, rewriter); + ValuePtr threadIdx = getLinearThreadIndex(loc, rewriter); + ValuePtr blockSize = getBlockSize(loc, rewriter); + ValuePtr activeWidth = getActiveWidth(loc, threadIdx, blockSize, rewriter); // Reduce elements within each warp to produce the intermediate results. - Value *warpReduce = createWarpReduce(loc, activeWidth, laneId, operand, - accumFactory, rewriter); + ValuePtr warpReduce = createWarpReduce(loc, activeWidth, laneId, operand, + accumFactory, rewriter); // Write the intermediate results to shared memory, using the first lane of // each warp. createPredicatedBlock(loc, rewriter, isFirstLane, [&] { - Value *warpId = getDivideByWarpSize(threadIdx, rewriter); - Value *storeDst = rewriter.create( - loc, type, sharedMemPtr, ArrayRef({zero, warpId})); + ValuePtr warpId = getDivideByWarpSize(threadIdx, rewriter); + ValuePtr storeDst = rewriter.create( + loc, type, sharedMemPtr, ArrayRef({zero, warpId})); rewriter.create(loc, warpReduce, storeDst); }); rewriter.create(loc); - Value *numWarps = getNumWarps(loc, blockSize, rewriter); - Value *isValidWarp = rewriter.create( + ValuePtr numWarps = getNumWarps(loc, blockSize, rewriter); + ValuePtr isValidWarp = rewriter.create( loc, LLVM::ICmpPredicate::slt, threadIdx, numWarps); - Value *resultPtr = rewriter.create( - loc, type, sharedMemPtr, ArrayRef({zero, zero})); + ValuePtr resultPtr = rewriter.create( + loc, type, sharedMemPtr, ArrayRef({zero, zero})); // Use the first numWarps threads to reduce the intermediate results from // shared memory. The final result is written to shared memory again. 
createPredicatedBlock(loc, rewriter, isValidWarp, [&] { - Value *loadSrc = rewriter.create( - loc, type, sharedMemPtr, ArrayRef({zero, threadIdx})); - Value *value = rewriter.create(loc, type, loadSrc); - Value *result = createWarpReduce(loc, numWarps, laneId, value, - accumFactory, rewriter); + ValuePtr loadSrc = rewriter.create( + loc, type, sharedMemPtr, ArrayRef({zero, threadIdx})); + ValuePtr value = rewriter.create(loc, type, loadSrc); + ValuePtr result = createWarpReduce(loc, numWarps, laneId, value, + accumFactory, rewriter); rewriter.create(loc, result, resultPtr); }); rewriter.create(loc); // Load and return result from shared memory. - Value *result = rewriter.create(loc, type, resultPtr); + ValuePtr result = rewriter.create(loc, type, resultPtr); return result; } @@ -274,7 +274,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { /// template void createIf(Location loc, ConversionPatternRewriter &rewriter, - Value *condition, ThenOpsFactory &&thenOpsFactory, + ValuePtr condition, ThenOpsFactory &&thenOpsFactory, ElseOpsFactory &&elseOpsFactory) const { Block *currentBlock = rewriter.getInsertionBlock(); auto currentPoint = rewriter.getInsertionPoint(); @@ -288,7 +288,7 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { ArrayRef{thenBlock, elseBlock}); auto addBranch = [&](ValueRange operands) { - rewriter.create(loc, ArrayRef{}, + rewriter.create(loc, ArrayRef{}, llvm::makeArrayRef(continueBlock), llvm::makeArrayRef(operands)); }; @@ -303,32 +303,32 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { assert(thenOperands.size() == elseOperands.size()); rewriter.setInsertionPointToStart(continueBlock); - for (auto *operand : thenOperands) + for (auto operand : thenOperands) continueBlock->addArgument(operand->getType()); } /// Shortcut for createIf with empty else block and no block operands. template void createPredicatedBlock(Location loc, ConversionPatternRewriter &rewriter, - Value *condition, + ValuePtr condition, Factory &&predicatedOpsFactory) const { createIf( loc, rewriter, condition, [&] { predicatedOpsFactory(); - return ArrayRef(); + return ArrayRef(); }, - [&] { return ArrayRef(); }); + [&] { return ArrayRef(); }); } /// Creates a reduction across the first activeWidth lanes of a warp. /// The first lane returns the result, all others return values are undefined. - Value *createWarpReduce(Location loc, Value *activeWidth, Value *laneId, - Value *operand, AccumulatorFactory accumFactory, - ConversionPatternRewriter &rewriter) const { - Value *warpSize = rewriter.create( + ValuePtr createWarpReduce(Location loc, ValuePtr activeWidth, ValuePtr laneId, + ValuePtr operand, AccumulatorFactory accumFactory, + ConversionPatternRewriter &rewriter) const { + ValuePtr warpSize = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize)); - Value *isPartialWarp = rewriter.create( + ValuePtr isPartialWarp = rewriter.create( loc, LLVM::ICmpPredicate::slt, activeWidth, warpSize); auto type = operand->getType().cast(); @@ -336,16 +336,16 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { loc, rewriter, isPartialWarp, // Generate reduction over a (potentially) partial warp. [&] { - Value *value = operand; - Value *one = rewriter.create( + ValuePtr value = operand; + ValuePtr one = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(1)); // Bit mask of active lanes: `(1 << activeWidth) - 1`. 
- Value *activeMask = rewriter.create( + ValuePtr activeMask = rewriter.create( loc, int32Type, rewriter.create(loc, int32Type, one, activeWidth), one); // Clamp lane: `activeWidth - 1` - Value *maskAndClamp = + ValuePtr maskAndClamp = rewriter.create(loc, int32Type, activeWidth, one); auto dialect = lowering.getDialect(); auto predTy = LLVM::LLVMType::getInt1Ty(dialect); @@ -356,53 +356,53 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { // lane is within the active range. All lanes contain the final // result, but only the first lane's result is used. for (int i = 1; i < kWarpSize; i <<= 1) { - Value *offset = rewriter.create( + ValuePtr offset = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(i)); - Value *shfl = rewriter.create( + ValuePtr shfl = rewriter.create( loc, shflTy, activeMask, value, offset, maskAndClamp, returnValueAndIsValidAttr); - Value *isActiveSrcLane = rewriter.create( + ValuePtr isActiveSrcLane = rewriter.create( loc, predTy, shfl, rewriter.getIndexArrayAttr(1)); // Skip the accumulation if the shuffle op read from a lane outside // of the active range. createIf( loc, rewriter, isActiveSrcLane, [&] { - Value *shflValue = rewriter.create( + ValuePtr shflValue = rewriter.create( loc, type, shfl, rewriter.getIndexArrayAttr(0)); - return SmallVector{ + return SmallVector{ accumFactory(loc, value, shflValue, rewriter)}; }, [&] { return llvm::makeArrayRef(value); }); value = rewriter.getInsertionBlock()->getArgument(0); } - return SmallVector{value}; + return SmallVector{value}; }, // Generate a reduction over the entire warp. This is a specialization // of the above reduction with unconditional accumulation. [&] { - Value *value = operand; - Value *activeMask = rewriter.create( + ValuePtr value = operand; + ValuePtr activeMask = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(~0u)); - Value *maskAndClamp = rewriter.create( + ValuePtr maskAndClamp = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize - 1)); for (int i = 1; i < kWarpSize; i <<= 1) { - Value *offset = rewriter.create( + ValuePtr offset = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(i)); - Value *shflValue = rewriter.create( + ValuePtr shflValue = rewriter.create( loc, type, activeMask, value, offset, maskAndClamp, /*return_value_and_is_valid=*/UnitAttr()); value = accumFactory(loc, value, shflValue, rewriter); } - return SmallVector{value}; + return SmallVector{value}; }); return rewriter.getInsertionBlock()->getArgument(0); } /// Creates a global array stored in shared memory. - Value *createSharedMemoryArray(Location loc, ModuleOp module, - LLVM::LLVMType elementType, int numElements, - ConversionPatternRewriter &rewriter) const { + ValuePtr createSharedMemoryArray(Location loc, ModuleOp module, + LLVM::LLVMType elementType, int numElements, + ConversionPatternRewriter &rewriter) const { OpBuilder builder(module.getBodyRegion()); auto arrayType = LLVM::LLVMType::getArrayTy(elementType, numElements); @@ -416,31 +416,32 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { } /// Returns the index of the thread within the block. 
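/// Computed from the per-dimension ids and sizes as
/// ((idZ * dimY + idY) * dimX + idX), so the x dimension varies fastest.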
- Value *getLinearThreadIndex(Location loc, - ConversionPatternRewriter &rewriter) const { - Value *dimX = rewriter.create(loc, int32Type); - Value *dimY = rewriter.create(loc, int32Type); - Value *idX = rewriter.create(loc, int32Type); - Value *idY = rewriter.create(loc, int32Type); - Value *idZ = rewriter.create(loc, int32Type); - Value *tmp1 = rewriter.create(loc, int32Type, idZ, dimY); - Value *tmp2 = rewriter.create(loc, int32Type, tmp1, idY); - Value *tmp3 = rewriter.create(loc, int32Type, tmp2, dimX); + ValuePtr getLinearThreadIndex(Location loc, + ConversionPatternRewriter &rewriter) const { + ValuePtr dimX = rewriter.create(loc, int32Type); + ValuePtr dimY = rewriter.create(loc, int32Type); + ValuePtr idX = rewriter.create(loc, int32Type); + ValuePtr idY = rewriter.create(loc, int32Type); + ValuePtr idZ = rewriter.create(loc, int32Type); + ValuePtr tmp1 = rewriter.create(loc, int32Type, idZ, dimY); + ValuePtr tmp2 = rewriter.create(loc, int32Type, tmp1, idY); + ValuePtr tmp3 = rewriter.create(loc, int32Type, tmp2, dimX); return rewriter.create(loc, int32Type, tmp3, idX); } /// Returns the number of threads in the block. - Value *getBlockSize(Location loc, ConversionPatternRewriter &rewriter) const { - Value *dimX = rewriter.create(loc, int32Type); - Value *dimY = rewriter.create(loc, int32Type); - Value *dimZ = rewriter.create(loc, int32Type); - Value *dimXY = rewriter.create(loc, int32Type, dimX, dimY); + ValuePtr getBlockSize(Location loc, + ConversionPatternRewriter &rewriter) const { + ValuePtr dimX = rewriter.create(loc, int32Type); + ValuePtr dimY = rewriter.create(loc, int32Type); + ValuePtr dimZ = rewriter.create(loc, int32Type); + ValuePtr dimXY = rewriter.create(loc, int32Type, dimX, dimY); return rewriter.create(loc, int32Type, dimXY, dimZ); } /// Returns the number of warps in the block. - Value *getNumWarps(Location loc, Value *blockSize, - ConversionPatternRewriter &rewriter) const { + ValuePtr getNumWarps(Location loc, ValuePtr blockSize, + ConversionPatternRewriter &rewriter) const { auto warpSizeMinusOne = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize - 1)); auto biasedBlockSize = rewriter.create( @@ -449,19 +450,19 @@ struct GPUAllReduceOpLowering : public LLVMOpLowering { } /// Returns the number of active threads in the warp, not clamped to 32. - Value *getActiveWidth(Location loc, Value *threadIdx, Value *blockSize, - ConversionPatternRewriter &rewriter) const { - Value *threadIdxMask = rewriter.create( + ValuePtr getActiveWidth(Location loc, ValuePtr threadIdx, ValuePtr blockSize, + ConversionPatternRewriter &rewriter) const { + ValuePtr threadIdxMask = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(~(kWarpSize - 1))); - Value *numThreadsWithSmallerWarpId = + ValuePtr numThreadsWithSmallerWarpId = rewriter.create(loc, threadIdx, threadIdxMask); return rewriter.create(loc, blockSize, numThreadsWithSmallerWarpId); } /// Returns value divided by the warp size (i.e. 32). 
- Value *getDivideByWarpSize(Value *value, - ConversionPatternRewriter &rewriter) const { + ValuePtr getDivideByWarpSize(ValuePtr value, + ConversionPatternRewriter &rewriter) const { auto loc = value->getLoc(); auto warpSize = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize)); @@ -495,7 +496,7 @@ struct GPUShuffleOpLowering : public LLVMOpLowering { /// %shfl_pred = llvm.extractvalue %shfl[1 : index] : /// !llvm<"{ float, i1 }"> PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { Location loc = op->getLoc(); gpu::ShuffleOpOperandAdaptor adaptor(operands); @@ -506,24 +507,24 @@ struct GPUShuffleOpLowering : public LLVMOpLowering { auto predTy = LLVM::LLVMType::getInt1Ty(dialect); auto resultTy = LLVM::LLVMType::getStructTy(dialect, {valueTy, predTy}); - Value *one = rewriter.create( + ValuePtr one = rewriter.create( loc, int32Type, rewriter.getI32IntegerAttr(1)); // Bit mask of active lanes: `(1 << activeWidth) - 1`. - Value *activeMask = rewriter.create( + ValuePtr activeMask = rewriter.create( loc, int32Type, rewriter.create(loc, int32Type, one, adaptor.width()), one); // Clamp lane: `activeWidth - 1` - Value *maskAndClamp = + ValuePtr maskAndClamp = rewriter.create(loc, int32Type, adaptor.width(), one); auto returnValueAndIsValidAttr = rewriter.getUnitAttr(); - Value *shfl = rewriter.create( + ValuePtr shfl = rewriter.create( loc, resultTy, activeMask, adaptor.value(), adaptor.offset(), maskAndClamp, returnValueAndIsValidAttr); - Value *shflValue = rewriter.create( + ValuePtr shflValue = rewriter.create( loc, valueTy, shfl, rewriter.getIndexArrayAttr(0)); - Value *isActiveSrcLane = rewriter.create( + ValuePtr isActiveSrcLane = rewriter.create( loc, predTy, shfl, rewriter.getIndexArrayAttr(1)); rewriter.replaceOp(op, {shflValue, isActiveSrcLane}); @@ -538,7 +539,7 @@ struct GPUFuncOpLowering : LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { assert(operands.empty() && "func op is not expected to have operands"); auto gpuFuncOp = cast(op); @@ -547,7 +548,7 @@ struct GPUFuncOpLowering : LLVMOpLowering { SmallVector workgroupBuffers; workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions()); for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) { - Value *attribution = en.value(); + ValuePtr attribution = en.value(); auto type = attribution->getType().dyn_cast(); assert(type && type.hasStaticShape() && "unexpected type in attribution"); @@ -604,23 +605,23 @@ struct GPUFuncOpLowering : LLVMOpLowering { unsigned numProperArguments = gpuFuncOp.getNumArguments(); auto i32Type = LLVM::LLVMType::getInt32Ty(lowering.getDialect()); - Value *zero = nullptr; + ValuePtr zero = nullptr; if (!workgroupBuffers.empty()) zero = rewriter.create(loc, i32Type, rewriter.getI32IntegerAttr(0)); for (auto en : llvm::enumerate(workgroupBuffers)) { LLVM::GlobalOp global = en.value(); - Value *address = rewriter.create(loc, global); + ValuePtr address = rewriter.create(loc, global); auto elementType = global.getType().getArrayElementType(); - Value *memory = rewriter.create( + ValuePtr memory = rewriter.create( loc, elementType.getPointerTo(global.addr_space().getZExtValue()), - address, ArrayRef{zero, zero}); + address, ArrayRef{zero, zero}); // Build a memref descriptor pointing to the 
buffer to plug with the // existing memref infrastructure. This may use more registers than // otherwise necessary given that memref sizes are fixed, but we can try // and canonicalize that away later. - Value *attribution = gpuFuncOp.getWorkgroupAttributions()[en.index()]; + ValuePtr attribution = gpuFuncOp.getWorkgroupAttributions()[en.index()]; auto type = attribution->getType().cast(); auto descr = MemRefDescriptor::fromStaticShape(rewriter, loc, lowering, type, memory); @@ -632,7 +633,7 @@ struct GPUFuncOpLowering : LLVMOpLowering { gpuFuncOp.getNumWorkgroupAttributions(); auto int64Ty = LLVM::LLVMType::getInt64Ty(lowering.getDialect()); for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) { - Value *attribution = en.value(); + ValuePtr attribution = en.value(); auto type = attribution->getType().cast(); assert(type && type.hasStaticShape() && "unexpected type in attribution"); @@ -643,10 +644,10 @@ struct GPUFuncOpLowering : LLVMOpLowering { auto ptrType = lowering.convertType(type.getElementType()) .cast() .getPointerTo(); - Value *numElements = rewriter.create( + ValuePtr numElements = rewriter.create( gpuFuncOp.getLoc(), int64Ty, rewriter.getI64IntegerAttr(type.getNumElements())); - Value *allocated = rewriter.create( + ValuePtr allocated = rewriter.create( gpuFuncOp.getLoc(), ptrType, numElements, /*alignment=*/0); auto descr = MemRefDescriptor::fromStaticShape(rewriter, loc, lowering, type, allocated); @@ -674,8 +675,8 @@ struct GPUFuncOpLowering : LLVMOpLowering { !en.value().isa()) continue; - BlockArgument *arg = block.getArgument(en.index()); - Value *loaded = rewriter.create(loc, arg); + BlockArgumentPtr arg = block.getArgument(en.index()); + ValuePtr loaded = rewriter.create(loc, arg); rewriter.replaceUsesOfBlockArgument(arg, loaded); } } @@ -692,7 +693,7 @@ struct GPUReturnOpLowering : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, operands, ArrayRef()); diff --git a/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp b/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp index 42483a6e5dfa..0c34fc2b8e16 100644 --- a/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp +++ b/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp @@ -36,7 +36,7 @@ class ForOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(loop::ForOp forOp, ArrayRef operands, + matchAndRewrite(loop::ForOp forOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -48,7 +48,7 @@ class LaunchConfigConversion : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(SourceOp op, ArrayRef operands, + matchAndRewrite(SourceOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -65,7 +65,7 @@ class KernelFnConversion final : public SPIRVOpLowering { } PatternMatchResult - matchAndRewrite(gpu::GPUFuncOp funcOp, ArrayRef operands, + matchAndRewrite(gpu::GPUFuncOp funcOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; private: @@ -79,7 +79,7 @@ class KernelModuleConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(ModuleOp moduleOp, ArrayRef operands, + matchAndRewrite(ModuleOp moduleOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; 
}; @@ -92,7 +92,7 @@ class KernelModuleTerminatorConversion final using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(ModuleTerminatorOp terminatorOp, ArrayRef operands, + matchAndRewrite(ModuleTerminatorOp terminatorOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -103,7 +103,7 @@ class GPUReturnOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(gpu::ReturnOp returnOp, ArrayRef operands, + matchAndRewrite(gpu::ReturnOp returnOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -114,7 +114,7 @@ class GPUReturnOpConversion final : public SPIRVOpLowering { //===----------------------------------------------------------------------===// PatternMatchResult -ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef operands, +ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { // loop::ForOp can be lowered to the structured control flow represented by // spirv::LoopOp by making the continue block of the spirv::LoopOp the loop @@ -135,7 +135,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef operands, loopOp.body().getBlocks().insert(std::next(loopOp.body().begin(), 1), header); // Create the new induction variable to use. - BlockArgument *newIndVar = + BlockArgumentPtr newIndVar = header->addArgument(forOperands.lowerBound()->getType()); Block *body = forOp.getBody(); @@ -166,7 +166,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef operands, auto cmpOp = rewriter.create( loc, rewriter.getI1Type(), newIndVar, forOperands.upperBound()); rewriter.create( - loc, cmpOp, body, ArrayRef(), mergeBlock, ArrayRef()); + loc, cmpOp, body, ArrayRef(), mergeBlock, ArrayRef()); // Generate instructions to increment the step of the induction variable and // branch to the header. @@ -174,7 +174,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef operands, rewriter.setInsertionPointToEnd(continueBlock); // Add the step to the induction variable and branch to the header. 
- Value *updatedIndVar = rewriter.create( + ValuePtr updatedIndVar = rewriter.create( loc, newIndVar->getType(), newIndVar, forOperands.step()); rewriter.create(loc, header, updatedIndVar); @@ -188,7 +188,7 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef operands, template PatternMatchResult LaunchConfigConversion::matchAndRewrite( - SourceOp op, ArrayRef operands, + SourceOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const { auto dimAttr = op.getOperation()->template getAttrOfType("dimension"); @@ -267,7 +267,7 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, SPIRVTypeConverter &typeConverter, PatternMatchResult KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp, - ArrayRef operands, + ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!gpu::GPUDialect::isKernel(funcOp)) { return matchFailure(); @@ -297,7 +297,7 @@ KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp, //===----------------------------------------------------------------------===// PatternMatchResult KernelModuleConversion::matchAndRewrite( - ModuleOp moduleOp, ArrayRef operands, + ModuleOp moduleOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!moduleOp.getAttrOfType( gpu::GPUDialect::getKernelModuleAttrName())) { @@ -327,7 +327,7 @@ PatternMatchResult KernelModuleConversion::matchAndRewrite( //===----------------------------------------------------------------------===// PatternMatchResult KernelModuleTerminatorConversion::matchAndRewrite( - ModuleTerminatorOp terminatorOp, ArrayRef operands, + ModuleTerminatorOp terminatorOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { rewriter.replaceOpWithNewOp(terminatorOp); return matchSuccess(); @@ -338,7 +338,7 @@ PatternMatchResult KernelModuleTerminatorConversion::matchAndRewrite( //===----------------------------------------------------------------------===// PatternMatchResult GPUReturnOpConversion::matchAndRewrite( - gpu::ReturnOp returnOp, ArrayRef operands, + gpu::ReturnOp returnOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!operands.empty()) return matchFailure(); diff --git a/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp b/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp index 3eb23c19dc74..8b6b9fb79303 100644 --- a/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp +++ b/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp @@ -120,21 +120,23 @@ class BaseViewConversionHelper { BaseViewConversionHelper(Type type) : d(MemRefDescriptor::undef(rewriter(), loc(), type)) {} - BaseViewConversionHelper(Value *v) : d(v) {} + BaseViewConversionHelper(ValuePtr v) : d(v) {} /// Wrappers around MemRefDescriptor that use EDSC builder and location. 
- Value *allocatedPtr() { return d.allocatedPtr(rewriter(), loc()); } - void setAllocatedPtr(Value *v) { d.setAllocatedPtr(rewriter(), loc(), v); } - Value *alignedPtr() { return d.alignedPtr(rewriter(), loc()); } - void setAlignedPtr(Value *v) { d.setAlignedPtr(rewriter(), loc(), v); } - Value *offset() { return d.offset(rewriter(), loc()); } - void setOffset(Value *v) { d.setOffset(rewriter(), loc(), v); } - Value *size(unsigned i) { return d.size(rewriter(), loc(), i); } - void setSize(unsigned i, Value *v) { d.setSize(rewriter(), loc(), i, v); } - Value *stride(unsigned i) { return d.stride(rewriter(), loc(), i); } - void setStride(unsigned i, Value *v) { d.setStride(rewriter(), loc(), i, v); } - - operator Value *() { return d; } + ValuePtr allocatedPtr() { return d.allocatedPtr(rewriter(), loc()); } + void setAllocatedPtr(ValuePtr v) { d.setAllocatedPtr(rewriter(), loc(), v); } + ValuePtr alignedPtr() { return d.alignedPtr(rewriter(), loc()); } + void setAlignedPtr(ValuePtr v) { d.setAlignedPtr(rewriter(), loc(), v); } + ValuePtr offset() { return d.offset(rewriter(), loc()); } + void setOffset(ValuePtr v) { d.setOffset(rewriter(), loc(), v); } + ValuePtr size(unsigned i) { return d.size(rewriter(), loc(), i); } + void setSize(unsigned i, ValuePtr v) { d.setSize(rewriter(), loc(), i, v); } + ValuePtr stride(unsigned i) { return d.stride(rewriter(), loc(), i); } + void setStride(unsigned i, ValuePtr v) { + d.setStride(rewriter(), loc(), i, v); + } + + operator ValuePtr() { return d; } private: OpBuilder &rewriter() { return ScopedContext::getBuilder(); } @@ -151,7 +153,7 @@ class RangeOpConversion : public LLVMOpLowering { : LLVMOpLowering(RangeOp::getOperationName(), context, lowering_) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto rangeOp = cast(op); auto rangeDescriptorTy = @@ -161,7 +163,7 @@ class RangeOpConversion : public LLVMOpLowering { // Fill in an aggregate value of the descriptor. RangeOpOperandAdaptor adaptor(operands); - Value *desc = llvm_undef(rangeDescriptorTy); + ValuePtr desc = llvm_undef(rangeDescriptorTy); desc = insertvalue(desc, adaptor.min(), rewriter.getI64ArrayAttr(0)); desc = insertvalue(desc, adaptor.max(), rewriter.getI64ArrayAttr(1)); desc = insertvalue(desc, adaptor.step(), rewriter.getI64ArrayAttr(2)); @@ -184,7 +186,7 @@ class SliceOpConversion : public LLVMOpLowering { : LLVMOpLowering(SliceOp::getOperationName(), context, lowering_) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { edsc::ScopedContext context(rewriter, op->getLoc()); SliceOpOperandAdaptor adaptor(operands); @@ -198,7 +200,7 @@ class SliceOpConversion : public LLVMOpLowering { BaseViewConversionHelper desc(lowering.convertType(sliceOp.getViewType())); // TODO(ntv): extract sizes and emit asserts. - SmallVector strides(memRefType.getRank()); + SmallVector strides(memRefType.getRank()); for (int i = 0, e = memRefType.getRank(); i < e; ++i) strides[i] = baseDesc.stride(i); @@ -207,10 +209,10 @@ class SliceOpConversion : public LLVMOpLowering { }; // Compute base offset. 
- Value *baseOffset = baseDesc.offset(); + ValuePtr baseOffset = baseDesc.offset(); for (int i = 0, e = memRefType.getRank(); i < e; ++i) { - Value *indexing = adaptor.indexings()[i]; - Value *min = indexing; + ValuePtr indexing = adaptor.indexings()[i]; + ValuePtr min = indexing; if (sliceOp.indexing(i)->getType().isa()) min = extractvalue(int64Ty, indexing, pos(0)); baseOffset = add(baseOffset, mul(min, strides[i])); @@ -227,29 +229,29 @@ class SliceOpConversion : public LLVMOpLowering { if (sliceOp.getViewType().getRank() == 0) return rewriter.replaceOp(op, {desc}), matchSuccess(); - Value *zero = + ValuePtr zero = constant(int64Ty, rewriter.getIntegerAttr(rewriter.getIndexType(), 0)); // Compute and insert view sizes (max - min along the range) and strides. // Skip the non-range operands as they will be projected away from the view. int numNewDims = 0; for (auto en : llvm::enumerate(sliceOp.indexings())) { - Value *indexing = en.value(); + ValuePtr indexing = en.value(); if (indexing->getType().isa()) { int rank = en.index(); - Value *rangeDescriptor = adaptor.indexings()[rank]; - Value *min = extractvalue(int64Ty, rangeDescriptor, pos(0)); - Value *max = extractvalue(int64Ty, rangeDescriptor, pos(1)); - Value *step = extractvalue(int64Ty, rangeDescriptor, pos(2)); - Value *baseSize = baseDesc.size(rank); + ValuePtr rangeDescriptor = adaptor.indexings()[rank]; + ValuePtr min = extractvalue(int64Ty, rangeDescriptor, pos(0)); + ValuePtr max = extractvalue(int64Ty, rangeDescriptor, pos(1)); + ValuePtr step = extractvalue(int64Ty, rangeDescriptor, pos(2)); + ValuePtr baseSize = baseDesc.size(rank); // Bound upper by base view upper bound. max = llvm_select(llvm_icmp(ICmpPredicate::slt, max, baseSize), max, baseSize); - Value *size = sub(max, min); + ValuePtr size = sub(max, min); // Bound lower by zero. size = llvm_select(llvm_icmp(ICmpPredicate::slt, size, zero), zero, size); - Value *stride = mul(strides[rank], step); + ValuePtr stride = mul(strides[rank], step); desc.setSize(numNewDims, size); desc.setStride(numNewDims, stride); ++numNewDims; @@ -275,7 +277,7 @@ class TransposeOpConversion : public LLVMOpLowering { : LLVMOpLowering(TransposeOp::getOperationName(), context, lowering_) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { // Initialize the common boilerplate and alloca at the top of the FuncOp. 
edsc::ScopedContext context(rewriter, op->getLoc()); @@ -318,7 +320,7 @@ class YieldOpConversion : public LLVMOpLowering { : LLVMOpLowering(YieldOp::getOperationName(), context, lowering_) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, operands); return matchSuccess(); @@ -453,7 +455,7 @@ class LinalgOpConversion op.getLoc(), rewriter.getIntegerAttr(rewriter.getIndexType(), 0)); auto indexedGenericOp = cast(op); auto numLoops = indexedGenericOp.getNumLoops(); - SmallVector operands; + SmallVector operands; operands.reserve(numLoops + op.getNumOperands()); for (unsigned i = 0; i < numLoops; ++i) { operands.push_back(zero); @@ -477,7 +479,7 @@ class CopyTransposeConversion : public OpRewritePattern { PatternMatchResult matchAndRewrite(CopyOp op, PatternRewriter &rewriter) const override { - Value *in = op.input(), *out = op.output(); + ValuePtr in = op.input(), out = op.output(); // If either inputPerm or outputPerm are non-identities, insert transposes. auto inputPerm = op.inputPermutation(); diff --git a/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp b/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp index ff93ce58fd4f..d8df7487e712 100644 --- a/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp +++ b/lib/Conversion/LoopToStandard/ConvertLoopToStandard.cpp @@ -182,22 +182,22 @@ ForLowering::matchAndRewrite(ForOp forOp, PatternRewriter &rewriter) const { rewriter.splitBlock(conditionBlock, conditionBlock->begin()); auto *lastBodyBlock = &forOp.region().back(); rewriter.inlineRegionBefore(forOp.region(), endBlock); - auto *iv = conditionBlock->getArgument(0); + auto iv = conditionBlock->getArgument(0); // Append the induction variable stepping logic to the last body block and // branch back to the condition block. Construct an expression f : // (x -> x+step) and apply this expression to the induction variable. rewriter.setInsertionPointToEnd(lastBodyBlock); - auto *step = forOp.step(); - auto *stepped = rewriter.create(loc, iv, step).getResult(); + auto step = forOp.step(); + auto stepped = rewriter.create(loc, iv, step).getResult(); if (!stepped) return matchFailure(); rewriter.create(loc, conditionBlock, stepped); // Compute loop bounds before branching to the condition. rewriter.setInsertionPointToEnd(initBlock); - Value *lowerBound = forOp.lowerBound(); - Value *upperBound = forOp.upperBound(); + ValuePtr lowerBound = forOp.lowerBound(); + ValuePtr upperBound = forOp.upperBound(); if (!lowerBound || !upperBound) return matchFailure(); rewriter.create(loc, conditionBlock, lowerBound); @@ -208,8 +208,8 @@ ForLowering::matchAndRewrite(ForOp forOp, PatternRewriter &rewriter) const { rewriter.create(loc, CmpIPredicate::slt, iv, upperBound); rewriter.create(loc, comparison, firstBodyBlock, - ArrayRef(), endBlock, - ArrayRef()); + ArrayRef(), endBlock, + ArrayRef()); // Ok, we're done! rewriter.eraseOp(forOp); return matchSuccess(); @@ -248,8 +248,8 @@ IfLowering::matchAndRewrite(IfOp ifOp, PatternRewriter &rewriter) const { rewriter.setInsertionPointToEnd(condBlock); rewriter.create(loc, ifOp.condition(), thenBlock, - /*trueArgs=*/ArrayRef(), elseBlock, - /*falseArgs=*/ArrayRef()); + /*trueArgs=*/ArrayRef(), elseBlock, + /*falseArgs=*/ArrayRef()); // Ok, we're done! 
rewriter.eraseOp(ifOp); diff --git a/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp b/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp index d663ae105f25..3cbce7caa76f 100644 --- a/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp +++ b/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp @@ -43,7 +43,7 @@ using namespace mlir::loop; using llvm::seq; // Extract an indexed value from KernelDim3. -static Value *getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) { +static ValuePtr getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) { switch (pos) { case 0: return dim3.x; @@ -61,8 +61,8 @@ static Value *getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) { static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) { return forOp.getLowerBoundOperands(); } -static SmallVector getLowerBoundOperands(ForOp forOp) { - SmallVector bounds(1, forOp.lowerBound()); +static SmallVector getLowerBoundOperands(ForOp forOp) { + SmallVector bounds(1, forOp.lowerBound()); return bounds; } @@ -70,33 +70,35 @@ static SmallVector getLowerBoundOperands(ForOp forOp) { static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) { return forOp.getUpperBoundOperands(); } -static SmallVector getUpperBoundOperands(ForOp forOp) { - SmallVector bounds(1, forOp.upperBound()); +static SmallVector getUpperBoundOperands(ForOp forOp) { + SmallVector bounds(1, forOp.upperBound()); return bounds; } // Get a Value that corresponds to the loop step. If the step is an attribute, // materialize a corresponding constant using builder. -static Value *getOrCreateStep(AffineForOp forOp, OpBuilder &builder) { +static ValuePtr getOrCreateStep(AffineForOp forOp, OpBuilder &builder) { return builder.create(forOp.getLoc(), forOp.getStep()); } -static Value *getOrCreateStep(ForOp forOp, OpBuilder &) { return forOp.step(); } +static ValuePtr getOrCreateStep(ForOp forOp, OpBuilder &) { + return forOp.step(); +} // Get a Value for the loop lower bound. If the value requires computation, // materialize the instructions using builder. -static Value *getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) { +static ValuePtr getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) { return lowerAffineLowerBound(forOp, builder); } -static Value *getOrEmitLowerBound(ForOp forOp, OpBuilder &) { +static ValuePtr getOrEmitLowerBound(ForOp forOp, OpBuilder &) { return forOp.lowerBound(); } // Get a Value for the loop upper bound. If the value requires computation, // materialize the instructions using builder. -static Value *getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) { +static ValuePtr getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) { return lowerAffineUpperBound(forOp, builder); } -static Value *getOrEmitUpperBound(ForOp forOp, OpBuilder &) { +static ValuePtr getOrEmitUpperBound(ForOp forOp, OpBuilder &) { return forOp.upperBound(); } @@ -212,18 +214,18 @@ struct LoopToGpuConverter { unsigned numThreadDims); // Ranges of the loops mapped to blocks or threads. - SmallVector dims; + SmallVector dims; // Lower bounds of the loops mapped to blocks or threads. - SmallVector lbs; + SmallVector lbs; // Induction variables of the loops mapped to blocks or threads. - SmallVector ivs; + SmallVector ivs; // Steps of the loops mapped to blocks or threads. - SmallVector steps; + SmallVector steps; }; } // namespace // Return true if the value is obviously a constant "one". 
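// A step that is statically one lets the conversion skip normalizing the
// iteration range by the step (see collectBounds below).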
-static bool isConstantOne(Value *value) { +static bool isConstantOne(ValuePtr value) { if (auto def = dyn_cast_or_null(value->getDefiningOp())) return def.getValue() == 1; return false; @@ -244,15 +246,15 @@ Optional LoopToGpuConverter::collectBounds(OpTy forOp, steps.reserve(numLoops); OpTy currentLoop = forOp; for (unsigned i = 0; i < numLoops; ++i) { - Value *lowerBound = getOrEmitLowerBound(currentLoop, builder); - Value *upperBound = getOrEmitUpperBound(currentLoop, builder); + ValuePtr lowerBound = getOrEmitLowerBound(currentLoop, builder); + ValuePtr upperBound = getOrEmitUpperBound(currentLoop, builder); if (!lowerBound || !upperBound) { return llvm::None; } - Value *range = + ValuePtr range = builder.create(currentLoop.getLoc(), upperBound, lowerBound); - Value *step = getOrCreateStep(currentLoop, builder); + ValuePtr step = getOrCreateStep(currentLoop, builder); if (!isConstantOne(step)) range = builder.create(currentLoop.getLoc(), range, step); dims.push_back(range); @@ -274,8 +276,8 @@ Optional LoopToGpuConverter::collectBounds(OpTy forOp, /// `nids`. The innermost loop is mapped to the x-dimension, followed by the /// next innermost loop to y-dimension, followed by z-dimension. template -OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef ids, - ArrayRef nids) { +OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef ids, + ArrayRef nids) { auto nDims = ids.size(); assert(nDims == nids.size()); for (auto dim : llvm::seq(0, nDims)) { @@ -295,11 +297,11 @@ OpTy createGPULaunchLoops(OpTy rootForOp, ArrayRef ids, /// each workgroup/workitem and number of workgroup/workitems along a dimension /// of the launch into a container. void packIdAndNumId(gpu::KernelDim3 kernelIds, gpu::KernelDim3 kernelNids, - unsigned nDims, SmallVectorImpl &ids, - SmallVectorImpl &nids) { + unsigned nDims, SmallVectorImpl &ids, + SmallVectorImpl &nids) { assert(nDims <= 3 && "invalid number of launch dimensions"); - SmallVector allIds = {kernelIds.z, kernelIds.y, kernelIds.x}; - SmallVector allNids = {kernelNids.z, kernelNids.y, kernelNids.x}; + SmallVector allIds = {kernelIds.z, kernelIds.y, kernelIds.x}; + SmallVector allNids = {kernelNids.z, kernelNids.y, kernelNids.x}; ids.clear(); ids.append(std::next(allIds.begin(), allIds.size() - nDims), allIds.end()); nids.clear(); @@ -317,7 +319,7 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp, auto returnOp = builder.create(launchOp.getLoc()); rootForOp.getOperation()->moveBefore(returnOp); - SmallVector workgroupID, numWorkGroups; + SmallVector workgroupID, numWorkGroups; packIdAndNumId(launchOp.getBlockIds(), launchOp.getGridSize(), numBlockDims, workgroupID, numWorkGroups); @@ -333,7 +335,7 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp, } } - SmallVector workItemID, workGroupSize; + SmallVector workItemID, workGroupSize; packIdAndNumId(launchOp.getThreadIds(), launchOp.getBlockSize(), numThreadDims, workItemID, workGroupSize); for (auto &loopOp : threadRootForOps) { @@ -347,17 +349,17 @@ LogicalResult createLaunchBody(OpBuilder &builder, OpTy rootForOp, // given workgroup size and number of workgroups. 
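The isConstantOne helper in the hunk above lost its template argument in the diff text; a hedged reconstruction, assuming the elided operation is the standard dialect's ConstantIndexOp:

  static bool isConstantOne(ValuePtr value) {
    // A value is "obviously one" if it is produced by a constant of index 1.
    if (auto def = dyn_cast_or_null<ConstantIndexOp>(value->getDefiningOp()))
      return def.getValue() == 1;
    return false;
  }

collectBounds uses this to skip the extra range-normalization op when the step is the constant 1, emitting only upperBound - lowerBound for the common unit-step case.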
template LogicalResult createLaunchFromOp(OpTy rootForOp, - ArrayRef numWorkGroups, - ArrayRef workGroupSizes) { + ArrayRef numWorkGroups, + ArrayRef workGroupSizes) { OpBuilder builder(rootForOp.getOperation()); if (numWorkGroups.size() > 3) { return rootForOp.emitError("invalid ") << numWorkGroups.size() << "-D workgroup specification"; } auto loc = rootForOp.getLoc(); - Value *one = builder.create( + ValuePtr one = builder.create( loc, builder.getIntegerAttr(builder.getIndexType(), 1)); - SmallVector numWorkGroups3D(3, one), workGroupSize3D(3, one); + SmallVector numWorkGroups3D(3, one), workGroupSize3D(3, one); for (auto numWorkGroup : enumerate(numWorkGroups)) { numWorkGroups3D[numWorkGroup.index()] = numWorkGroup.value(); } @@ -367,7 +369,7 @@ LogicalResult createLaunchFromOp(OpTy rootForOp, // Get the values used within the region of the rootForOp but defined above // it. - llvm::SetVector valuesToForwardSet; + llvm::SetVector valuesToForwardSet; getUsedValuesDefinedAbove(rootForOp.region(), rootForOp.region(), valuesToForwardSet); // Also add the values used for the lb, ub, and step of the rootForOp. @@ -387,8 +389,8 @@ LogicalResult createLaunchFromOp(OpTy rootForOp, // defined outside. They all are replaced with kernel arguments. for (const auto &pair : llvm::zip_first(valuesToForward, launchOp.getKernelArguments())) { - Value *from = std::get<0>(pair); - Value *to = std::get<1>(pair); + ValuePtr from = std::get<0>(pair); + ValuePtr to = std::get<1>(pair); replaceAllUsesInRegionWith(from, to, launchOp.body()); } return success(); @@ -408,22 +410,23 @@ void LoopToGpuConverter::createLaunch(OpTy rootForOp, OpTy innermostForOp, OpBuilder builder(rootForOp.getOperation()); // Prepare the grid and block sizes for the launch operation. If there is // no loop mapped to a specific dimension, use constant "1" as its size. - Value *constOne = (numBlockDims < 3 || numThreadDims < 3) - ? builder.create(rootForOp.getLoc(), 1) - : nullptr; - Value *gridSizeX = dims[0]; - Value *gridSizeY = numBlockDims > 1 ? dims[1] : constOne; - Value *gridSizeZ = numBlockDims > 2 ? dims[2] : constOne; - Value *blockSizeX = dims[numBlockDims]; - Value *blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne; - Value *blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne; + ValuePtr constOne = + (numBlockDims < 3 || numThreadDims < 3) + ? builder.create(rootForOp.getLoc(), 1) + : nullptr; + ValuePtr gridSizeX = dims[0]; + ValuePtr gridSizeY = numBlockDims > 1 ? dims[1] : constOne; + ValuePtr gridSizeZ = numBlockDims > 2 ? dims[2] : constOne; + ValuePtr blockSizeX = dims[numBlockDims]; + ValuePtr blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne; + ValuePtr blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne; // Create a launch op and move the body region of the innermost loop to the // launch op. Pass the values defined outside the outermost loop and used // inside the innermost loop and loop lower bounds as kernel data arguments. // Still assuming perfect nesting so there are no values other than induction // variables that are defined in one loop and used in deeper loops. 
- llvm::SetVector valuesToForwardSet; + llvm::SetVector valuesToForwardSet; getUsedValuesDefinedAbove(innermostForOp.region(), rootForOp.region(), valuesToForwardSet); auto valuesToForward = valuesToForwardSet.takeVector(); @@ -457,15 +460,15 @@ void LoopToGpuConverter::createLaunch(OpTy rootForOp, OpTy innermostForOp, originallyForwardedValues); auto stepArgumentIt = std::next(lbArgumentIt, lbs.size()); for (auto en : llvm::enumerate(ivs)) { - Value *id = + ValuePtr id = en.index() < numBlockDims ? getDim3Value(launchOp.getBlockIds(), en.index()) : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims); - Value *step = steps[en.index()]; + ValuePtr step = steps[en.index()]; if (!isConstantOne(step)) id = builder.create(rootForOp.getLoc(), step, id); - Value *ivReplacement = + ValuePtr ivReplacement = builder.create(rootForOp.getLoc(), *lbArgumentIt, id); en.value()->replaceAllUsesWith(ivReplacement); replaceAllUsesInRegionWith(steps[en.index()], *stepArgumentIt, @@ -479,8 +482,8 @@ void LoopToGpuConverter::createLaunch(OpTy rootForOp, OpTy innermostForOp, // trailing positions, make sure we don't touch those. for (const auto &pair : llvm::zip_first(valuesToForward, launchOp.getKernelArguments())) { - Value *from = std::get<0>(pair); - Value *to = std::get<1>(pair); + ValuePtr from = std::get<0>(pair); + ValuePtr to = std::get<1>(pair); replaceAllUsesInRegionWith(from, to, launchOp.body()); } @@ -510,8 +513,8 @@ static LogicalResult convertLoopNestToGPULaunch(OpTy forOp, // nested. The workgroup size and num workgroups is provided as input template static LogicalResult convertLoopToGPULaunch(OpTy forOp, - ArrayRef numWorkGroups, - ArrayRef workGroupSize) { + ArrayRef numWorkGroups, + ArrayRef workGroupSize) { if (failed(checkLoopOpMappable(forOp, numWorkGroups.size(), workGroupSize.size()))) { return failure(); @@ -532,7 +535,7 @@ LogicalResult mlir::convertLoopNestToGPULaunch(ForOp forOp, } LogicalResult mlir::convertLoopToGPULaunch(loop::ForOp forOp, - ArrayRef numWorkGroups, - ArrayRef workGroupSizes) { + ArrayRef numWorkGroups, + ArrayRef workGroupSizes) { return ::convertLoopToGPULaunch(forOp, numWorkGroups, workGroupSizes); } diff --git a/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp b/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp index 21abc3cf99bf..63836883512b 100644 --- a/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp +++ b/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp @@ -98,7 +98,7 @@ struct ImperfectlyNestedForLoopMapper // pass is only used for testing. 
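The induction-variable rewiring in createLaunch above reduces to iv = lowerBound + id * step; a hedged sketch using the standard-dialect AddIOp/MulIOp, with the helper parameters standing in for the per-loop values the converter tracks:

  static ValuePtr rebuildInductionVar(OpBuilder &builder, Location loc,
                                      ValuePtr lowerBound, ValuePtr id,
                                      ValuePtr step) {
    // id is the block or thread id for this loop dimension; scale it by the
    // step unless the step is statically known to be one.
    if (!isConstantOne(step))
      id = builder.create<MulIOp>(loc, step, id);
    return builder.create<AddIOp>(loc, lowerBound, id);
  }

The result then replaces every use of the original induction variable inside the launch body via replaceAllUsesWith, while the lower bounds and steps themselves are remapped to the trailing kernel arguments.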
FuncOp funcOp = getFunction(); OpBuilder builder(funcOp.getOperation()->getRegion(0)); - SmallVector numWorkGroupsVal, workGroupSizeVal; + SmallVector numWorkGroupsVal, workGroupSizeVal; for (auto val : numWorkGroups) { auto constOp = builder.create( funcOp.getLoc(), builder.getIntegerAttr(builder.getIndexType(), val)); diff --git a/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp b/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp index fdc90851b642..67b545c4ec84 100644 --- a/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp +++ b/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp @@ -256,20 +256,20 @@ LLVMOpLowering::LLVMOpLowering(StringRef rootOpName, MLIRContext *context, /*============================================================================*/ /* StructBuilder implementation */ /*============================================================================*/ -StructBuilder::StructBuilder(Value *v) : value(v) { +StructBuilder::StructBuilder(ValuePtr v) : value(v) { assert(value != nullptr && "value cannot be null"); structType = value->getType().cast(); } -Value *StructBuilder::extractPtr(OpBuilder &builder, Location loc, - unsigned pos) { +ValuePtr StructBuilder::extractPtr(OpBuilder &builder, Location loc, + unsigned pos) { Type type = structType.cast().getStructElementType(pos); return builder.create(loc, type, value, builder.getI64ArrayAttr(pos)); } void StructBuilder::setPtr(OpBuilder &builder, Location loc, unsigned pos, - Value *ptr) { + ValuePtr ptr) { value = builder.create(loc, structType, value, ptr, builder.getI64ArrayAttr(pos)); } @@ -278,7 +278,7 @@ void StructBuilder::setPtr(OpBuilder &builder, Location loc, unsigned pos, /*============================================================================*/ /// Construct a helper for the given descriptor value. -MemRefDescriptor::MemRefDescriptor(Value *descriptor) +MemRefDescriptor::MemRefDescriptor(ValuePtr descriptor) : StructBuilder(descriptor) { assert(value != nullptr && "value cannot be null"); indexType = value->getType().cast().getStructElementType( @@ -289,7 +289,7 @@ MemRefDescriptor::MemRefDescriptor(Value *descriptor) MemRefDescriptor MemRefDescriptor::undef(OpBuilder &builder, Location loc, Type descriptorType) { - Value *descriptor = + ValuePtr descriptor = builder.create(loc, descriptorType.cast()); return MemRefDescriptor(descriptor); } @@ -300,7 +300,7 @@ MemRefDescriptor MemRefDescriptor::undef(OpBuilder &builder, Location loc, MemRefDescriptor MemRefDescriptor::fromStaticShape(OpBuilder &builder, Location loc, LLVMTypeConverter &typeConverter, - MemRefType type, Value *memory) { + MemRefType type, ValuePtr memory) { assert(type.hasStaticShape() && "unexpected dynamic shape"); assert(type.getAffineMaps().empty() && "unexpected layout map"); @@ -325,37 +325,37 @@ MemRefDescriptor::fromStaticShape(OpBuilder &builder, Location loc, } /// Builds IR extracting the allocated pointer from the descriptor. -Value *MemRefDescriptor::allocatedPtr(OpBuilder &builder, Location loc) { +ValuePtr MemRefDescriptor::allocatedPtr(OpBuilder &builder, Location loc) { return extractPtr(builder, loc, kAllocatedPtrPosInMemRefDescriptor); } /// Builds IR inserting the allocated pointer into the descriptor. void MemRefDescriptor::setAllocatedPtr(OpBuilder &builder, Location loc, - Value *ptr) { + ValuePtr ptr) { setPtr(builder, loc, kAllocatedPtrPosInMemRefDescriptor, ptr); } /// Builds IR extracting the aligned pointer from the descriptor. 
-Value *MemRefDescriptor::alignedPtr(OpBuilder &builder, Location loc) { +ValuePtr MemRefDescriptor::alignedPtr(OpBuilder &builder, Location loc) { return extractPtr(builder, loc, kAlignedPtrPosInMemRefDescriptor); } /// Builds IR inserting the aligned pointer into the descriptor. void MemRefDescriptor::setAlignedPtr(OpBuilder &builder, Location loc, - Value *ptr) { + ValuePtr ptr) { setPtr(builder, loc, kAlignedPtrPosInMemRefDescriptor, ptr); } // Creates a constant Op producing a value of `resultType` from an index-typed // integer attribute. -static Value *createIndexAttrConstant(OpBuilder &builder, Location loc, - Type resultType, int64_t value) { +static ValuePtr createIndexAttrConstant(OpBuilder &builder, Location loc, + Type resultType, int64_t value) { return builder.create( loc, resultType, builder.getIntegerAttr(builder.getIndexType(), value)); } /// Builds IR extracting the offset from the descriptor. -Value *MemRefDescriptor::offset(OpBuilder &builder, Location loc) { +ValuePtr MemRefDescriptor::offset(OpBuilder &builder, Location loc) { return builder.create( loc, indexType, value, builder.getI64ArrayAttr(kOffsetPosInMemRefDescriptor)); @@ -363,7 +363,7 @@ Value *MemRefDescriptor::offset(OpBuilder &builder, Location loc) { /// Builds IR inserting the offset into the descriptor. void MemRefDescriptor::setOffset(OpBuilder &builder, Location loc, - Value *offset) { + ValuePtr offset) { value = builder.create( loc, structType, value, offset, builder.getI64ArrayAttr(kOffsetPosInMemRefDescriptor)); @@ -377,7 +377,8 @@ void MemRefDescriptor::setConstantOffset(OpBuilder &builder, Location loc, } /// Builds IR extracting the pos-th size from the descriptor. -Value *MemRefDescriptor::size(OpBuilder &builder, Location loc, unsigned pos) { +ValuePtr MemRefDescriptor::size(OpBuilder &builder, Location loc, + unsigned pos) { return builder.create( loc, indexType, value, builder.getI64ArrayAttr({kSizePosInMemRefDescriptor, pos})); @@ -385,7 +386,7 @@ Value *MemRefDescriptor::size(OpBuilder &builder, Location loc, unsigned pos) { /// Builds IR inserting the pos-th size into the descriptor void MemRefDescriptor::setSize(OpBuilder &builder, Location loc, unsigned pos, - Value *size) { + ValuePtr size) { value = builder.create( loc, structType, value, size, builder.getI64ArrayAttr({kSizePosInMemRefDescriptor, pos})); @@ -399,8 +400,8 @@ void MemRefDescriptor::setConstantSize(OpBuilder &builder, Location loc, } /// Builds IR extracting the pos-th size from the descriptor. -Value *MemRefDescriptor::stride(OpBuilder &builder, Location loc, - unsigned pos) { +ValuePtr MemRefDescriptor::stride(OpBuilder &builder, Location loc, + unsigned pos) { return builder.create( loc, indexType, value, builder.getI64ArrayAttr({kStridePosInMemRefDescriptor, pos})); @@ -408,7 +409,7 @@ Value *MemRefDescriptor::stride(OpBuilder &builder, Location loc, /// Builds IR inserting the pos-th stride into the descriptor void MemRefDescriptor::setStride(OpBuilder &builder, Location loc, unsigned pos, - Value *stride) { + ValuePtr stride) { value = builder.create( loc, structType, value, stride, builder.getI64ArrayAttr({kStridePosInMemRefDescriptor, pos})); @@ -431,30 +432,30 @@ LLVM::LLVMType MemRefDescriptor::getElementType() { /*============================================================================*/ /// Construct a helper for the given descriptor value. 
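A hedged usage sketch of the MemRefDescriptor helper whose accessors are converted above, relying only on the setters shown in this hunk and on the conversion back to ValuePtr that StructBuilder provides; the concrete size is made up for illustration:

  static ValuePtr buildRank1Descriptor(OpBuilder &b, Location loc,
                                       Type llvmDescTy, ValuePtr allocatedPtr) {
    auto desc = MemRefDescriptor::undef(b, loc, llvmDescTy);
    desc.setAllocatedPtr(b, loc, allocatedPtr);
    desc.setAlignedPtr(b, loc, allocatedPtr);   // no extra alignment applied
    desc.setConstantOffset(b, loc, 0);
    desc.setConstantSize(b, loc, /*pos=*/0, /*size=*/16); // e.g. memref<16xf32>
    // Strides are filled in the same way, one entry per dimension.
    return ValuePtr(desc);
  }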
-UnrankedMemRefDescriptor::UnrankedMemRefDescriptor(Value *descriptor) +UnrankedMemRefDescriptor::UnrankedMemRefDescriptor(ValuePtr descriptor) : StructBuilder(descriptor) {} /// Builds IR creating an `undef` value of the descriptor type. UnrankedMemRefDescriptor UnrankedMemRefDescriptor::undef(OpBuilder &builder, Location loc, Type descriptorType) { - Value *descriptor = + ValuePtr descriptor = builder.create(loc, descriptorType.cast()); return UnrankedMemRefDescriptor(descriptor); } -Value *UnrankedMemRefDescriptor::rank(OpBuilder &builder, Location loc) { +ValuePtr UnrankedMemRefDescriptor::rank(OpBuilder &builder, Location loc) { return extractPtr(builder, loc, kRankInUnrankedMemRefDescriptor); } void UnrankedMemRefDescriptor::setRank(OpBuilder &builder, Location loc, - Value *v) { + ValuePtr v) { setPtr(builder, loc, kRankInUnrankedMemRefDescriptor, v); } -Value *UnrankedMemRefDescriptor::memRefDescPtr(OpBuilder &builder, - Location loc) { +ValuePtr UnrankedMemRefDescriptor::memRefDescPtr(OpBuilder &builder, + Location loc) { return extractPtr(builder, loc, kPtrInUnrankedMemRefDescriptor); } void UnrankedMemRefDescriptor::setMemRefDescPtr(OpBuilder &builder, - Location loc, Value *v) { + Location loc, ValuePtr v) { setPtr(builder, loc, kPtrInUnrankedMemRefDescriptor, v); } namespace { @@ -495,8 +496,8 @@ class LLVMLegalizationPattern : public LLVMOpLowering { } // Create an LLVM IR pseudo-operation defining the given index constant. - Value *createIndexConstant(ConversionPatternRewriter &builder, Location loc, - uint64_t value) const { + ValuePtr createIndexConstant(ConversionPatternRewriter &builder, Location loc, + uint64_t value) const { return createIndexAttrConstant(builder, loc, getIndexType(), value); } @@ -508,7 +509,7 @@ struct FuncOpConversion : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto funcOp = cast(op); FunctionType type = funcOp.getType(); @@ -556,8 +557,8 @@ struct FuncOpConversion : public LLVMLegalizationPattern { Block *firstBlock = &newFuncOp.getBody().front(); rewriter.setInsertionPoint(firstBlock, firstBlock->begin()); for (unsigned idx : promotedArgIndices) { - BlockArgument *arg = firstBlock->getArgument(idx); - Value *loaded = rewriter.create(funcOp.getLoc(), arg); + BlockArgumentPtr arg = firstBlock->getArgument(idx); + ValuePtr loaded = rewriter.create(funcOp.getLoc(), arg); rewriter.replaceUsesOfBlockArgument(arg, loaded); } } @@ -656,7 +657,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern { // Convert the type of the result to an LLVM type, pass operands as is, // preserve attributes. PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { unsigned numResults = op->getNumResults(); @@ -680,7 +681,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern { // Otherwise, it had been converted to an operation producing a structure. // Extract individual results from the structure and return them as list. 
- SmallVector results; + SmallVector results; results.reserve(numResults); for (unsigned i = 0; i < numResults; ++i) { auto type = this->lowering.convertType(op->getResult(i)->getType()); @@ -721,7 +722,7 @@ struct NaryOpLLVMOpLowering : public LLVMLegalizationPattern { // Convert the type of the result to an LLVM type, pass operands as is, // preserve attributes. PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { ValidateOpCount(); static_assert( @@ -732,7 +733,7 @@ struct NaryOpLLVMOpLowering : public LLVMLegalizationPattern { "expected same operands and result type"); // Cannot convert ops if their operands are not of LLVM type. - for (Value *operand : operands) { + for (ValuePtr operand : operands) { if (!operand || !operand->getType().isa()) return this->matchFailure(); } @@ -755,16 +756,16 @@ struct NaryOpLLVMOpLowering : public LLVMLegalizationPattern { if (!llvmVectorTy || llvmArrayTy != vectorTypeInfo.llvmArrayTy) return this->matchFailure(); - Value *desc = rewriter.create(loc, llvmArrayTy); + ValuePtr desc = rewriter.create(loc, llvmArrayTy); nDVectorIterate(vectorTypeInfo, rewriter, [&](ArrayAttr position) { // For this unrolled `position` corresponding to the `linearIndex`^th // element, extract operand vectors - SmallVector extractedOperands; + SmallVector extractedOperands; for (unsigned i = 0; i < OpCount; ++i) { extractedOperands.push_back(rewriter.create( loc, llvmVectorTy, operands[i], position)); } - Value *newVal = rewriter.create( + ValuePtr newVal = rewriter.create( loc, llvmVectorTy, extractedOperands, op->getAttrs()); desc = rewriter.create(loc, llvmArrayTy, desc, newVal, position); @@ -927,7 +928,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern { return matchSuccess(); } - void rewrite(Operation *op, ArrayRef operands, + void rewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto allocOp = cast(op); @@ -936,7 +937,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern { // Get actual sizes of the memref as values: static sizes are constant // values and dynamic sizes are passed to 'alloc' as operands. In case of // zero-dimensional memref, assume a scalar (size 1). - SmallVector sizes; + SmallVector sizes; sizes.reserve(type.getRank()); unsigned i = 0; for (int64_t s : type.getShape()) @@ -946,10 +947,10 @@ struct AllocOpLowering : public LLVMLegalizationPattern { sizes.push_back(createIndexConstant(rewriter, loc, 1)); // Compute the total number of memref elements. - Value *cumulativeSize = sizes.front(); + ValuePtr cumulativeSize = sizes.front(); for (unsigned i = 1, e = sizes.size(); i < e; ++i) cumulativeSize = rewriter.create( - loc, getIndexType(), ArrayRef{cumulativeSize, sizes[i]}); + loc, getIndexType(), ArrayRef{cumulativeSize, sizes[i]}); // Compute the size of an individual element. This emits the MLIR equivalent // of the following sizeof(...) 
implementation in LLVM IR: @@ -962,17 +963,17 @@ struct AllocOpLowering : public LLVMLegalizationPattern { auto nullPtr = rewriter.create(loc, convertedPtrType); auto one = createIndexConstant(rewriter, loc, 1); auto gep = rewriter.create(loc, convertedPtrType, - ArrayRef{nullPtr, one}); + ArrayRef{nullPtr, one}); auto elementSize = rewriter.create(loc, getIndexType(), gep); cumulativeSize = rewriter.create( - loc, getIndexType(), ArrayRef{cumulativeSize, elementSize}); + loc, getIndexType(), ArrayRef{cumulativeSize, elementSize}); // Allocate the underlying buffer and store a pointer to it in the MemRef // descriptor. - Value *allocated = nullptr; + ValuePtr allocated = nullptr; int alignment = 0; - Value *alignmentValue = nullptr; + ValuePtr alignmentValue = nullptr; if (auto alignAttr = allocOp.alignment()) alignment = alignAttr.getValue().getSExtValue(); @@ -1008,8 +1009,8 @@ struct AllocOpLowering : public LLVMLegalizationPattern { auto structElementType = lowering.convertType(elementType); auto elementPtrType = structElementType.cast().getPointerTo( type.getMemorySpace()); - Value *bitcastAllocated = rewriter.create( - loc, elementPtrType, ArrayRef(allocated)); + ValuePtr bitcastAllocated = rewriter.create( + loc, elementPtrType, ArrayRef(allocated)); int64_t offset; SmallVector strides; @@ -1031,22 +1032,22 @@ struct AllocOpLowering : public LLVMLegalizationPattern { memRefDescriptor.setAllocatedPtr(rewriter, loc, bitcastAllocated); // Field 2: Actual aligned pointer to payload. - Value *bitcastAligned = bitcastAllocated; + ValuePtr bitcastAligned = bitcastAllocated; if (!useAlloca && alignment != 0) { assert(alignmentValue); // offset = (align - (ptr % align))% align - Value *intVal = rewriter.create( + ValuePtr intVal = rewriter.create( loc, this->getIndexType(), allocated); - Value *ptrModAlign = + ValuePtr ptrModAlign = rewriter.create(loc, intVal, alignmentValue); - Value *subbed = + ValuePtr subbed = rewriter.create(loc, alignmentValue, ptrModAlign); - Value *offset = + ValuePtr offset = rewriter.create(loc, subbed, alignmentValue); - Value *aligned = rewriter.create(loc, allocated->getType(), - allocated, offset); + ValuePtr aligned = rewriter.create(loc, allocated->getType(), + allocated, offset); bitcastAligned = rewriter.create( - loc, elementPtrType, ArrayRef(aligned)); + loc, elementPtrType, ArrayRef(aligned)); } memRefDescriptor.setAlignedPtr(rewriter, loc, bitcastAligned); @@ -1061,10 +1062,10 @@ struct AllocOpLowering : public LLVMLegalizationPattern { // Fields 4 and 5: Sizes and strides of the strided MemRef. // Store all sizes in the descriptor. Only dynamic sizes are passed in as // operands to AllocOp. - Value *runningStride = nullptr; + ValuePtr runningStride = nullptr; // Iterate strides in reverse order, compute runningStride and strideValues. 
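The AllocOp lowering above obtains the element size with the classic null-GEP idiom; a hedged, self-contained sketch of that step, assuming LLVM::NullOp, LLVM::ConstantOp, LLVM::GEPOp, and LLVM::PtrToIntOp for the ops created here:

  // sizeof(elem) == ptrtoint(gep (elem*)null, 1): index one element past a
  // null pointer of the element type and measure the byte distance, so no
  // target-specific size constants are baked into the lowering.
  static ValuePtr emitElementSizeInBytes(OpBuilder &b, Location loc,
                                         LLVM::LLVMType elementPtrTy,
                                         LLVM::LLVMType indexTy) {
    ValuePtr nullPtr = b.create<LLVM::NullOp>(loc, elementPtrTy);
    ValuePtr one = b.create<LLVM::ConstantOp>(
        loc, indexTy, b.getIntegerAttr(b.getIndexType(), 1));
    ValuePtr gep = b.create<LLVM::GEPOp>(loc, elementPtrTy,
                                         ArrayRef<ValuePtr>{nullPtr, one});
    return b.create<LLVM::PtrToIntOp>(loc, indexTy, gep);
  }

The cumulative element count is then multiplied by this byte size to obtain the total allocation size handed to malloc (or used directly in the alloca variant).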
auto nStrides = strides.size(); - SmallVector strideValues(nStrides, nullptr); + SmallVector strideValues(nStrides, nullptr); for (auto indexedStride : llvm::enumerate(llvm::reverse(strides))) { int64_t index = nStrides - 1 - indexedStride.index(); if (strides[index] == MemRefType::getDynamicStrideOrOffset()) @@ -1101,7 +1102,7 @@ struct CallOpInterfaceLowering : public LLVMLegalizationPattern { using Base = LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { OperandAdaptor transformed(operands); auto callOp = cast(op); @@ -1139,7 +1140,7 @@ struct CallOpInterfaceLowering : public LLVMLegalizationPattern { // TODO(aminim, ntv, riverriddle, zinenko): this seems like patching around // a particular interaction between MemRefType and CallOp lowering. Find a // way to avoid special casing. - SmallVector results; + SmallVector results; results.reserve(numResults); for (unsigned i = 0; i < numResults; ++i) { auto type = this->lowering.convertType(op->getResult(i)->getType()); @@ -1173,7 +1174,7 @@ struct DeallocOpLowering : public LLVMLegalizationPattern { useAlloca(useAlloca) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { if (useAlloca) return rewriter.eraseOp(op), matchSuccess(); @@ -1193,7 +1194,7 @@ struct DeallocOpLowering : public LLVMLegalizationPattern { } MemRefDescriptor memref(transformed.memref()); - Value *casted = rewriter.create( + ValuePtr casted = rewriter.create( op->getLoc(), getVoidPtrType(), memref.allocatedPtr(rewriter, op->getLoc())); rewriter.replaceOpWithNewOp( @@ -1209,7 +1210,7 @@ struct TanhOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { using LLVMFuncOpT = LLVM::LLVMFuncOp; @@ -1283,7 +1284,7 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern { : matchFailure(); } - void rewrite(Operation *op, ArrayRef operands, + void rewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto memRefCastOp = cast(op); OperandAdaptor transformed(operands); @@ -1324,7 +1325,7 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern { memRefDesc.setRank(rewriter, loc, rankVal); // d2 = InsertValueOp d1, voidptr, 1 memRefDesc.setMemRefDescPtr(rewriter, loc, voidPtr); - rewriter.replaceOp(op, (Value *)memRefDesc); + rewriter.replaceOp(op, (ValuePtr)memRefDesc); } else if (srcType.isa() && dstType.isa()) { // Casting from unranked type to ranked. @@ -1355,7 +1356,7 @@ struct DimOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto dimOp = cast(op); OperandAdaptor transformed(operands); @@ -1397,43 +1398,45 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern { // by accumulating the running linearized value. // Note that `indices` and `allocSizes` are passed in the same order as they // appear in load/store operations and memref type declarations. 
- Value *linearizeSubscripts(ConversionPatternRewriter &builder, Location loc, - ArrayRef indices, - ArrayRef allocSizes) const { + ValuePtr linearizeSubscripts(ConversionPatternRewriter &builder, Location loc, + ArrayRef indices, + ArrayRef allocSizes) const { assert(indices.size() == allocSizes.size() && "mismatching number of indices and allocation sizes"); assert(!indices.empty() && "cannot linearize a 0-dimensional access"); - Value *linearized = indices.front(); + ValuePtr linearized = indices.front(); for (int i = 1, nSizes = allocSizes.size(); i < nSizes; ++i) { linearized = builder.create( loc, this->getIndexType(), - ArrayRef{linearized, allocSizes[i]}); + ArrayRef{linearized, allocSizes[i]}); linearized = builder.create( - loc, this->getIndexType(), ArrayRef{linearized, indices[i]}); + loc, this->getIndexType(), + ArrayRef{linearized, indices[i]}); } return linearized; } // This is a strided getElementPtr variant that linearizes subscripts as: // `base_offset + index_0 * stride_0 + ... + index_n * stride_n`. - Value *getStridedElementPtr(Location loc, Type elementTypePtr, - Value *descriptor, ArrayRef indices, - ArrayRef strides, int64_t offset, - ConversionPatternRewriter &rewriter) const { + ValuePtr getStridedElementPtr(Location loc, Type elementTypePtr, + ValuePtr descriptor, ArrayRef indices, + ArrayRef strides, int64_t offset, + ConversionPatternRewriter &rewriter) const { MemRefDescriptor memRefDescriptor(descriptor); - Value *base = memRefDescriptor.alignedPtr(rewriter, loc); - Value *offsetValue = offset == MemRefType::getDynamicStrideOrOffset() - ? memRefDescriptor.offset(rewriter, loc) - : this->createIndexConstant(rewriter, loc, offset); + ValuePtr base = memRefDescriptor.alignedPtr(rewriter, loc); + ValuePtr offsetValue = + offset == MemRefType::getDynamicStrideOrOffset() + ? memRefDescriptor.offset(rewriter, loc) + : this->createIndexConstant(rewriter, loc, offset); for (int i = 0, e = indices.size(); i < e; ++i) { - Value *stride = + ValuePtr stride = strides[i] == MemRefType::getDynamicStrideOrOffset() ? 
memRefDescriptor.stride(rewriter, loc, i) : this->createIndexConstant(rewriter, loc, strides[i]); - Value *additionalOffset = + ValuePtr additionalOffset = rewriter.create(loc, indices[i], stride); offsetValue = rewriter.create(loc, offsetValue, additionalOffset); @@ -1441,10 +1444,10 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern { return rewriter.create(loc, elementTypePtr, base, offsetValue); } - Value *getDataPtr(Location loc, MemRefType type, Value *memRefDesc, - ArrayRef indices, - ConversionPatternRewriter &rewriter, - llvm::Module &module) const { + ValuePtr getDataPtr(Location loc, MemRefType type, ValuePtr memRefDesc, + ArrayRef indices, + ConversionPatternRewriter &rewriter, + llvm::Module &module) const { LLVM::LLVMType ptrType = MemRefDescriptor(memRefDesc).getElementType(); int64_t offset; SmallVector strides; @@ -1462,14 +1465,14 @@ struct LoadOpLowering : public LoadStoreOpLowering { using Base::Base; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loadOp = cast(op); OperandAdaptor transformed(operands); auto type = loadOp.getMemRefType(); - Value *dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(), - transformed.indices(), rewriter, getModule()); + ValuePtr dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(), + transformed.indices(), rewriter, getModule()); rewriter.replaceOpWithNewOp(op, dataPtr); return matchSuccess(); } @@ -1481,13 +1484,13 @@ struct StoreOpLowering : public LoadStoreOpLowering { using Base::Base; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto type = cast(op).getMemRefType(); OperandAdaptor transformed(operands); - Value *dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(), - transformed.indices(), rewriter, getModule()); + ValuePtr dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(), + transformed.indices(), rewriter, getModule()); rewriter.replaceOpWithNewOp(op, transformed.value(), dataPtr); return matchSuccess(); @@ -1500,14 +1503,14 @@ struct PrefetchOpLowering : public LoadStoreOpLowering { using Base::Base; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto prefetchOp = cast(op); OperandAdaptor transformed(operands); auto type = prefetchOp.getMemRefType(); - Value *dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(), - transformed.indices(), rewriter, getModule()); + ValuePtr dataPtr = getDataPtr(op->getLoc(), type, transformed.memref(), + transformed.indices(), rewriter, getModule()); // Replace with llvm.prefetch. 
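A brief worked example of the addressing scheme shared by the load/store/prefetch lowerings above: getStridedElementPtr computes elementPtr = alignedPtr + offset + sum_i(index_i * stride_i). For a row-major memref<4x8xf32> the strides are [8, 1] and the offset is 0, so an access at indices (2, 3) becomes a GEP at linear element 2*8 + 3*1 = 19; when a stride or the offset is dynamic, the same expression is built from values read out of the descriptor instead of folded constants.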
auto llvmI32Type = lowering.convertType(rewriter.getIntegerType(32)); @@ -1535,7 +1538,7 @@ struct IndexCastOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { IndexCastOpOperandAdaptor transformed(operands); auto indexCastOp = cast(op); @@ -1570,7 +1573,7 @@ struct CmpIOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto cmpiOp = cast(op); CmpIOpOperandAdaptor transformed(operands); @@ -1589,7 +1592,7 @@ struct CmpFOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto cmpfOp = cast(op); CmpFOpOperandAdaptor transformed(operands); @@ -1641,9 +1644,9 @@ struct OneToOneLLVMTerminatorLowering using Super = OneToOneLLVMTerminatorLowering; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef properOperands, + matchAndRewrite(Operation *op, ArrayRef properOperands, ArrayRef destinations, - ArrayRef> operands, + ArrayRef> operands, ConversionPatternRewriter &rewriter) const override { SmallVector operandRanges(operands.begin(), operands.end()); rewriter.replaceOpWithNewOp(op, properOperands, destinations, @@ -1662,19 +1665,19 @@ struct ReturnOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { unsigned numArguments = op->getNumOperands(); // If ReturnOp has 0 or 1 operand, create it and return immediately. if (numArguments == 0) { rewriter.replaceOpWithNewOp( - op, ArrayRef(), ArrayRef(), op->getAttrs()); + op, ArrayRef(), ArrayRef(), op->getAttrs()); return matchSuccess(); } if (numArguments == 1) { rewriter.replaceOpWithNewOp( - op, ArrayRef(operands.front()), ArrayRef(), + op, ArrayRef(operands.front()), ArrayRef(), op->getAttrs()); return matchSuccess(); } @@ -1684,7 +1687,7 @@ struct ReturnOpLowering : public LLVMLegalizationPattern { auto packedType = lowering.packFunctionResults(llvm::to_vector<4>(op->getOperandTypes())); - Value *packed = rewriter.create(op->getLoc(), packedType); + ValuePtr packed = rewriter.create(op->getLoc(), packedType); for (unsigned i = 0; i < numArguments; ++i) { packed = rewriter.create( op->getLoc(), packedType, packed, operands[i], @@ -1712,7 +1715,7 @@ struct SplatOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto splatOp = cast(op); VectorType resultType = splatOp.getType().dyn_cast(); @@ -1721,7 +1724,7 @@ struct SplatOpLowering : public LLVMLegalizationPattern { // First insert it into an undef vector so we can shuffle it. 
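The multi-result return path above packs all operands into one LLVM struct before emitting the return; a hedged sketch of that packing loop, assuming LLVM::UndefOp and LLVM::InsertValueOp for the ops created here:

  static ValuePtr packReturnOperands(ConversionPatternRewriter &rewriter,
                                     Location loc, Type packedType,
                                     ArrayRef<ValuePtr> operands) {
    // Start from undef and insert each converted operand at position i.
    ValuePtr packed = rewriter.create<LLVM::UndefOp>(loc, packedType);
    for (unsigned i = 0, e = operands.size(); i < e; ++i)
      packed = rewriter.create<LLVM::InsertValueOp>(
          loc, packedType, packed, operands[i], rewriter.getI64ArrayAttr(i));
    return packed;
  }

The call lowering earlier in this file performs the mirror operation, re-extracting each result from the returned struct with llvm.extractvalue.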
auto vectorType = lowering.convertType(splatOp.getType()); - Value *undef = rewriter.create(op->getLoc(), vectorType); + ValuePtr undef = rewriter.create(op->getLoc(), vectorType); auto zero = rewriter.create( op->getLoc(), lowering.convertType(rewriter.getIntegerType(32)), rewriter.getZeroAttr(rewriter.getIntegerType(32))); @@ -1746,7 +1749,7 @@ struct SplatNdOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto splatOp = cast(op); OperandAdaptor adaptor(operands); @@ -1763,16 +1766,16 @@ struct SplatNdOpLowering : public LLVMLegalizationPattern { return matchFailure(); // Construct returned value. - Value *desc = rewriter.create(loc, llvmArrayTy); + ValuePtr desc = rewriter.create(loc, llvmArrayTy); // Construct a 1-D vector with the splatted value that we insert in all the // places within the returned descriptor. - Value *vdesc = rewriter.create(loc, llvmVectorTy); + ValuePtr vdesc = rewriter.create(loc, llvmVectorTy); auto zero = rewriter.create( loc, lowering.convertType(rewriter.getIntegerType(32)), rewriter.getZeroAttr(rewriter.getIntegerType(32))); - Value *v = rewriter.create(loc, llvmVectorTy, vdesc, - adaptor.input(), zero); + ValuePtr v = rewriter.create( + loc, llvmVectorTy, vdesc, adaptor.input(), zero); // Shuffle the value across the desired number of elements. int64_t width = resultType.getDimSize(resultType.getRank() - 1); @@ -1800,21 +1803,21 @@ struct SubViewOpLowering : public LLVMLegalizationPattern { using LLVMLegalizationPattern::LLVMLegalizationPattern; PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto viewOp = cast(op); // TODO(b/144779634, ravishankarm) : After Tblgen is adapted to support // having multiple variadic operands where each operand can have different // number of entries, clean all of this up. - SmallVector dynamicOffsets( + SmallVector dynamicOffsets( std::next(operands.begin()), std::next(operands.begin(), 1 + viewOp.getNumOffsets())); - SmallVector dynamicSizes( + SmallVector dynamicSizes( std::next(operands.begin(), 1 + viewOp.getNumOffsets()), std::next(operands.begin(), 1 + viewOp.getNumOffsets() + viewOp.getNumSizes())); - SmallVector dynamicStrides( + SmallVector dynamicStrides( std::next(operands.begin(), 1 + viewOp.getNumOffsets() + viewOp.getNumSizes()), operands.end()); @@ -1851,8 +1854,8 @@ struct SubViewOpLowering : public LLVMLegalizationPattern { auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy); // Copy the buffer pointer from the old descriptor to the new one. - Value *extracted = sourceMemRef.allocatedPtr(rewriter, loc); - Value *bitcastPtr = rewriter.create( + ValuePtr extracted = sourceMemRef.allocatedPtr(rewriter, loc); + ValuePtr bitcastPtr = rewriter.create( loc, targetElementTy.getPointerTo(), extracted); targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr); @@ -1862,7 +1865,7 @@ struct SubViewOpLowering : public LLVMLegalizationPattern { targetMemRef.setAlignedPtr(rewriter, loc, bitcastPtr); // Extract strides needed to compute offset. 
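A hedged sketch of the splat idiom the two patterns above rely on (scalar to vector): insert the scalar into lane 0 of an undef vector, then shufflevector with an all-zero mask so every lane copies lane 0. LLVM::UndefOp, LLVM::ConstantOp, LLVM::InsertElementOp, and LLVM::ShuffleVectorOp are assumed for the ops created here:

  static ValuePtr emitSplat(ConversionPatternRewriter &rewriter, Location loc,
                            Type llvmVectorTy, Type llvmI32Ty, ValuePtr scalar,
                            int64_t width) {
    ValuePtr undef = rewriter.create<LLVM::UndefOp>(loc, llvmVectorTy);
    ValuePtr zero = rewriter.create<LLVM::ConstantOp>(
        loc, llvmI32Ty, rewriter.getZeroAttr(rewriter.getIntegerType(32)));
    ValuePtr lane0 = rewriter.create<LLVM::InsertElementOp>(
        loc, llvmVectorTy, undef, scalar, zero);
    SmallVector<int32_t, 4> zeroMask(width, 0);
    return rewriter.create<LLVM::ShuffleVectorOp>(
        loc, lane0, undef, rewriter.getI32ArrayAttr(zeroMask));
  }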
- SmallVector strideValues; + SmallVector strideValues; strideValues.reserve(viewMemRefType.getRank()); for (int i = 0, e = viewMemRefType.getRank(); i < e; ++i) strideValues.push_back(sourceMemRef.stride(rewriter, loc, i)); @@ -1879,9 +1882,9 @@ struct SubViewOpLowering : public LLVMLegalizationPattern { } // Offset. - Value *baseOffset = sourceMemRef.offset(rewriter, loc); + ValuePtr baseOffset = sourceMemRef.offset(rewriter, loc); for (int i = 0, e = viewMemRefType.getRank(); i < e; ++i) { - Value *min = dynamicOffsets[i]; + ValuePtr min = dynamicOffsets[i]; baseOffset = rewriter.create( loc, baseOffset, rewriter.create(loc, min, strideValues[i])); @@ -1891,7 +1894,7 @@ struct SubViewOpLowering : public LLVMLegalizationPattern { // Update sizes and strides. for (int i = viewMemRefType.getRank() - 1; i >= 0; --i) { targetMemRef.setSize(rewriter, loc, i, dynamicSizes[i]); - Value *newStride; + ValuePtr newStride; if (dynamicStrides.empty()) newStride = rewriter.create( loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i])); @@ -1916,9 +1919,9 @@ struct ViewOpLowering : public LLVMLegalizationPattern { // Build and return the value for the idx^th shape dimension, either by // returning the constant shape dimension or counting the proper dynamic size. - Value *getSize(ConversionPatternRewriter &rewriter, Location loc, - ArrayRef shape, ArrayRef dynamicSizes, - unsigned idx) const { + ValuePtr getSize(ConversionPatternRewriter &rewriter, Location loc, + ArrayRef shape, ArrayRef dynamicSizes, + unsigned idx) const { assert(idx < shape.size()); if (!ShapedType::isDynamic(shape[idx])) return createIndexConstant(rewriter, loc, shape[idx]); @@ -1933,9 +1936,9 @@ struct ViewOpLowering : public LLVMLegalizationPattern { // or by computing the dynamic stride from the current `runningStride` and // `nextSize`. The caller should keep a running stride and update it with the // result returned by this function. - Value *getStride(ConversionPatternRewriter &rewriter, Location loc, - ArrayRef strides, Value *nextSize, - Value *runningStride, unsigned idx) const { + ValuePtr getStride(ConversionPatternRewriter &rewriter, Location loc, + ArrayRef strides, ValuePtr nextSize, + ValuePtr runningStride, unsigned idx) const { assert(idx < strides.size()); if (strides[idx] != MemRefType::getDynamicStrideOrOffset()) return createIndexConstant(rewriter, loc, strides[idx]); @@ -1948,7 +1951,7 @@ struct ViewOpLowering : public LLVMLegalizationPattern { } PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto viewOp = cast(op); @@ -1975,8 +1978,8 @@ struct ViewOpLowering : public LLVMLegalizationPattern { auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy); // Field 1: Copy the allocated pointer, used for malloc/free. - Value *extracted = sourceMemRef.allocatedPtr(rewriter, loc); - Value *bitcastPtr = rewriter.create( + ValuePtr extracted = sourceMemRef.allocatedPtr(rewriter, loc); + ValuePtr bitcastPtr = rewriter.create( loc, targetElementTy.getPointerTo(), extracted); targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr); @@ -1993,10 +1996,10 @@ struct ViewOpLowering : public LLVMLegalizationPattern { auto sizeAndOffsetOperands = adaptor.operands(); assert(llvm::size(sizeAndOffsetOperands) == numDynamicSizes + (hasDynamicOffset ? 1 : 0)); - Value *baseOffset = !hasDynamicOffset - ? 
createIndexConstant(rewriter, loc, offset) - // TODO(ntv): better adaptor. - : sizeAndOffsetOperands.front(); + ValuePtr baseOffset = !hasDynamicOffset + ? createIndexConstant(rewriter, loc, offset) + // TODO(ntv): better adaptor. + : sizeAndOffsetOperands.front(); targetMemRef.setOffset(rewriter, loc, baseOffset); // Early exit for 0-D corner case. @@ -2007,14 +2010,14 @@ struct ViewOpLowering : public LLVMLegalizationPattern { if (strides.back() != 1) return op->emitWarning("cannot cast to non-contiguous shape"), matchFailure(); - Value *stride = nullptr, *nextSize = nullptr; + ValuePtr stride = nullptr, nextSize = nullptr; // Drop the dynamic stride from the operand list, if present. - ArrayRef sizeOperands(sizeAndOffsetOperands); + ArrayRef sizeOperands(sizeAndOffsetOperands); if (hasDynamicOffset) sizeOperands = sizeOperands.drop_front(); for (int i = viewMemRefType.getRank() - 1; i >= 0; --i) { // Update size. - Value *size = + ValuePtr size = getSize(rewriter, loc, viewMemRefType.getShape(), sizeOperands, i); targetMemRef.setSize(rewriter, loc, i, size); // Update stride. @@ -2058,7 +2061,7 @@ static void ensureDistinctSuccessors(Block &bb) { auto *dummyBlock = new Block(); bb.getParent()->push_back(dummyBlock); auto builder = OpBuilder(dummyBlock); - SmallVector operands( + SmallVector operands( terminator->getSuccessorOperands(*position)); builder.create(terminator->getLoc(), successor.first, operands); terminator->setSuccessor(dummyBlock, *position); @@ -2179,33 +2182,33 @@ Type LLVMTypeConverter::packFunctionResults(ArrayRef types) { return LLVM::LLVMType::getStructTy(llvmDialect, resultTypes); } -Value *LLVMTypeConverter::promoteOneMemRefDescriptor(Location loc, - Value *operand, - OpBuilder &builder) { +ValuePtr LLVMTypeConverter::promoteOneMemRefDescriptor(Location loc, + ValuePtr operand, + OpBuilder &builder) { auto *context = builder.getContext(); auto int64Ty = LLVM::LLVMType::getInt64Ty(getDialect()); auto indexType = IndexType::get(context); // Alloca with proper alignment. We do not expect optimizations of this // alloca op and so we omit allocating at the entry block. auto ptrType = operand->getType().cast().getPointerTo(); - Value *one = builder.create(loc, int64Ty, - IntegerAttr::get(indexType, 1)); - Value *allocated = + ValuePtr one = builder.create( + loc, int64Ty, IntegerAttr::get(indexType, 1)); + ValuePtr allocated = builder.create(loc, ptrType, one, /*alignment=*/0); // Store into the alloca'ed descriptor. 
builder.create(loc, operand, allocated); return allocated; } -SmallVector +SmallVector LLVMTypeConverter::promoteMemRefDescriptors(Location loc, ValueRange opOperands, ValueRange operands, OpBuilder &builder) { - SmallVector promotedOperands; + SmallVector promotedOperands; promotedOperands.reserve(operands.size()); for (auto it : llvm::zip(opOperands, operands)) { - auto *operand = std::get<0>(it); - auto *llvmOperand = std::get<1>(it); + auto operand = std::get<0>(it); + auto llvmOperand = std::get<1>(it); if (!operand->getType().isa() && !operand->getType().isa()) { promotedOperands.push_back(operand); diff --git a/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp b/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp index a14271efbb62..f7b0c9cb9bc4 100644 --- a/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp +++ b/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp @@ -44,7 +44,7 @@ class ConstantIndexOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(ConstantOp constIndexOp, ArrayRef operands, + matchAndRewrite(ConstantOp constIndexOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -54,7 +54,7 @@ class CmpIOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(CmpIOp cmpIOp, ArrayRef operands, + matchAndRewrite(CmpIOp cmpIOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -70,7 +70,7 @@ class IntegerOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(StdOp operation, ArrayRef operands, + matchAndRewrite(StdOp operation, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto resultType = this->typeConverter.convertType(operation.getResult()->getType()); @@ -89,7 +89,7 @@ class LoadOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(LoadOp loadOp, ArrayRef operands, + matchAndRewrite(LoadOp loadOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -100,7 +100,7 @@ class ReturnOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(ReturnOp returnOp, ArrayRef operands, + matchAndRewrite(ReturnOp returnOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -110,7 +110,7 @@ class SelectOpConversion final : public SPIRVOpLowering { public: using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(SelectOp op, ArrayRef operands, + matchAndRewrite(SelectOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -123,7 +123,7 @@ class StoreOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(StoreOp storeOp, ArrayRef operands, + matchAndRewrite(StoreOp storeOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -141,7 +141,8 @@ class StoreOpConversion final : public SPIRVOpLowering { spirv::AccessChainOp getElementPtr(OpBuilder &builder, SPIRVTypeConverter &typeConverter, Location loc, MemRefType origBaseType, - Value *basePtr, ArrayRef indices) { + ValuePtr basePtr, + ArrayRef indices) { // Get base and offset of the MemRefType and verify they are static. 
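A brief worked example for the getElementPtr helper above (its body continues in the next hunk), assuming a memref<4x8xf32> lowered to a SPIR-V struct wrapping an array: for indices (i, j) and static strides [8, 1], ptrLoc accumulates i*8 + j*1, and the spv.AccessChain is built with indices [0, i*8 + j], where the leading constant 0 steps through the wrapping struct before the linearized index selects the array element.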
int64_t offset; SmallVector strides; @@ -152,18 +153,18 @@ spirv::AccessChainOp getElementPtr(OpBuilder &builder, auto indexType = typeConverter.getIndexType(builder.getContext()); - Value *ptrLoc = nullptr; + ValuePtr ptrLoc = nullptr; assert(indices.size() == strides.size()); for (auto index : enumerate(indices)) { - Value *strideVal = builder.create( + ValuePtr strideVal = builder.create( loc, indexType, IntegerAttr::get(indexType, strides[index.index()])); - Value *update = + ValuePtr update = builder.create(loc, strideVal, index.value()); ptrLoc = (ptrLoc ? builder.create(loc, ptrLoc, update).getResult() : update); } - SmallVector linearizedIndices; + SmallVector linearizedIndices; // Add a '0' at the start to index into the struct. linearizedIndices.push_back(builder.create( loc, indexType, IntegerAttr::get(indexType, 0))); @@ -176,7 +177,7 @@ spirv::AccessChainOp getElementPtr(OpBuilder &builder, //===----------------------------------------------------------------------===// PatternMatchResult ConstantIndexOpConversion::matchAndRewrite( - ConstantOp constIndexOp, ArrayRef operands, + ConstantOp constIndexOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!constIndexOp.getResult()->getType().isa()) { return matchFailure(); @@ -210,7 +211,7 @@ PatternMatchResult ConstantIndexOpConversion::matchAndRewrite( //===----------------------------------------------------------------------===// PatternMatchResult -CmpIOpConversion::matchAndRewrite(CmpIOp cmpIOp, ArrayRef operands, +CmpIOpConversion::matchAndRewrite(CmpIOp cmpIOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { CmpIOpOperandAdaptor cmpIOpOperands(operands); @@ -242,7 +243,7 @@ CmpIOpConversion::matchAndRewrite(CmpIOp cmpIOp, ArrayRef operands, //===----------------------------------------------------------------------===// PatternMatchResult -LoadOpConversion::matchAndRewrite(LoadOp loadOp, ArrayRef operands, +LoadOpConversion::matchAndRewrite(LoadOp loadOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { LoadOpOperandAdaptor loadOperands(operands); auto loadPtr = getElementPtr(rewriter, typeConverter, loadOp.getLoc(), @@ -260,7 +261,7 @@ LoadOpConversion::matchAndRewrite(LoadOp loadOp, ArrayRef operands, PatternMatchResult ReturnOpConversion::matchAndRewrite(ReturnOp returnOp, - ArrayRef operands, + ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (returnOp.getNumOperands()) { return matchFailure(); @@ -274,7 +275,7 @@ ReturnOpConversion::matchAndRewrite(ReturnOp returnOp, //===----------------------------------------------------------------------===// PatternMatchResult -SelectOpConversion::matchAndRewrite(SelectOp op, ArrayRef operands, +SelectOpConversion::matchAndRewrite(SelectOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const { SelectOpOperandAdaptor selectOperands(operands); rewriter.replaceOpWithNewOp(op, selectOperands.condition(), @@ -288,7 +289,7 @@ SelectOpConversion::matchAndRewrite(SelectOp op, ArrayRef operands, //===----------------------------------------------------------------------===// PatternMatchResult -StoreOpConversion::matchAndRewrite(StoreOp storeOp, ArrayRef operands, +StoreOpConversion::matchAndRewrite(StoreOp storeOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { StoreOpOperandAdaptor storeOperands(operands); auto storePtr = diff --git a/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp b/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp index c0c56a3b0b21..113789abe8ab 
100644 --- a/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp +++ b/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp @@ -37,7 +37,7 @@ class FuncOpConversion final : public SPIRVOpLowering { using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(FuncOp funcOp, ArrayRef operands, + matchAndRewrite(FuncOp funcOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -49,7 +49,7 @@ class ConvertStandardToSPIRVPass } // namespace PatternMatchResult -FuncOpConversion::matchAndRewrite(FuncOp funcOp, ArrayRef operands, +FuncOpConversion::matchAndRewrite(FuncOp funcOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { auto fnType = funcOp.getType(); if (fnType.getNumResults()) { diff --git a/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp b/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp index 4469c2802a85..2e1a7f09ff80 100644 --- a/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp +++ b/lib/Conversion/StandardToSPIRV/LegalizeStandardForSPIRV.cpp @@ -69,7 +69,7 @@ class StoreOpOfSubViewFolder final : public OpRewritePattern { static LogicalResult resolveSourceIndices(Location loc, PatternRewriter &rewriter, SubViewOp subViewOp, ValueRange indices, - SmallVectorImpl &sourceIndices) { + SmallVectorImpl &sourceIndices) { // TODO: Aborting when the offsets are static. There might be a way to fold // the subview op with load even if the offsets have been canonicalized // away. @@ -77,7 +77,7 @@ resolveSourceIndices(Location loc, PatternRewriter &rewriter, return failure(); ValueRange opOffsets = subViewOp.offsets(); - SmallVector opStrides; + SmallVector opStrides; if (subViewOp.getNumStrides()) { // If the strides are dynamic, get the stride operands. opStrides = llvm::to_vector<2>(subViewOp.strides()); @@ -124,7 +124,7 @@ LoadOpOfSubViewFolder::matchAndRewrite(LoadOp loadOp, if (!subViewOp) { return matchFailure(); } - SmallVector sourceIndices; + SmallVector sourceIndices; if (failed(resolveSourceIndices(loadOp.getLoc(), rewriter, subViewOp, loadOp.indices(), sourceIndices))) return matchFailure(); @@ -146,7 +146,7 @@ StoreOpOfSubViewFolder::matchAndRewrite(StoreOp storeOp, if (!subViewOp) { return matchFailure(); } - SmallVector sourceIndices; + SmallVector sourceIndices; if (failed(resolveSourceIndices(storeOp.getLoc(), rewriter, subViewOp, storeOp.indices(), sourceIndices))) return matchFailure(); diff --git a/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index 9ec8ec6f88df..5099cb01bbc4 100644 --- a/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -62,9 +62,10 @@ static VectorType reducedVectorTypeBack(VectorType tp) { } // Helper that picks the proper sequence for inserting. -static Value *insertOne(ConversionPatternRewriter &rewriter, - LLVMTypeConverter &lowering, Location loc, Value *val1, - Value *val2, Type llvmType, int64_t rank, int64_t pos) { +static ValuePtr insertOne(ConversionPatternRewriter &rewriter, + LLVMTypeConverter &lowering, Location loc, + ValuePtr val1, ValuePtr val2, Type llvmType, + int64_t rank, int64_t pos) { if (rank == 1) { auto idxType = rewriter.getIndexType(); auto constant = rewriter.create( @@ -78,9 +79,10 @@ static Value *insertOne(ConversionPatternRewriter &rewriter, } // Helper that picks the proper sequence for extracting. 
-static Value *extractOne(ConversionPatternRewriter &rewriter, - LLVMTypeConverter &lowering, Location loc, Value *val, - Type llvmType, int64_t rank, int64_t pos) { +static ValuePtr extractOne(ConversionPatternRewriter &rewriter, + LLVMTypeConverter &lowering, Location loc, + ValuePtr val, Type llvmType, int64_t rank, + int64_t pos) { if (rank == 1) { auto idxType = rewriter.getIndexType(); auto constant = rewriter.create( @@ -101,7 +103,7 @@ class VectorBroadcastOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto broadcastOp = cast(op); VectorType dstVectorType = broadcastOp.getVectorType(); @@ -129,9 +131,9 @@ class VectorBroadcastOpConversion : public LLVMOpLowering { // ops once all insert/extract/shuffle operations // are available with lowering implemention. // - Value *expandRanks(Value *value, Location loc, VectorType srcVectorType, - VectorType dstVectorType, - ConversionPatternRewriter &rewriter) const { + ValuePtr expandRanks(ValuePtr value, Location loc, VectorType srcVectorType, + VectorType dstVectorType, + ConversionPatternRewriter &rewriter) const { assert((dstVectorType != nullptr) && "invalid result type in broadcast"); // Determine rank of source and destination. int64_t srcRank = srcVectorType ? srcVectorType.getRank() : 0; @@ -168,23 +170,24 @@ class VectorBroadcastOpConversion : public LLVMOpLowering { // becomes: // x = [s,s] // v = [x,x,x,x] - Value *duplicateOneRank(Value *value, Location loc, VectorType srcVectorType, - VectorType dstVectorType, int64_t rank, int64_t dim, - ConversionPatternRewriter &rewriter) const { + ValuePtr duplicateOneRank(ValuePtr value, Location loc, + VectorType srcVectorType, VectorType dstVectorType, + int64_t rank, int64_t dim, + ConversionPatternRewriter &rewriter) const { Type llvmType = lowering.convertType(dstVectorType); assert((llvmType != nullptr) && "unlowerable vector type"); if (rank == 1) { - Value *undef = rewriter.create(loc, llvmType); - Value *expand = + ValuePtr undef = rewriter.create(loc, llvmType); + ValuePtr expand = insertOne(rewriter, lowering, loc, undef, value, llvmType, rank, 0); SmallVector zeroValues(dim, 0); return rewriter.create( loc, expand, undef, rewriter.getI32ArrayAttr(zeroValues)); } - Value *expand = + ValuePtr expand = expandRanks(value, loc, srcVectorType, reducedVectorTypeFront(dstVectorType), rewriter); - Value *result = rewriter.create(loc, llvmType); + ValuePtr result = rewriter.create(loc, llvmType); for (int64_t d = 0; d < dim; ++d) { result = insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d); @@ -209,19 +212,20 @@ class VectorBroadcastOpConversion : public LLVMOpLowering { // y = broadcast w[1][0] : vector<2xf32> to vector <2x2xf32> // a = [x, y] // etc. 
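The insertOne/extractOne helpers above also lost their template arguments; a hedged reconstruction of the insertion side, assuming LLVM::ConstantOp, LLVM::InsertElementOp, and LLVM::InsertValueOp for the ops created here:

  static ValuePtr insertOneSketch(ConversionPatternRewriter &rewriter,
                                  LLVMTypeConverter &lowering, Location loc,
                                  ValuePtr val1, ValuePtr val2, Type llvmType,
                                  int64_t rank, int64_t pos) {
    if (rank == 1) {
      // Rank-1 values are LLVM vectors: use insertelement with a constant lane.
      auto idxType = rewriter.getIndexType();
      auto constant = rewriter.create<LLVM::ConstantOp>(
          loc, lowering.convertType(idxType),
          rewriter.getIntegerAttr(idxType, pos));
      return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                    constant);
    }
    // Higher ranks are arrays (of arrays) of vectors: use insertvalue.
    return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                                rewriter.getI64ArrayAttr(pos));
  }

extractOne mirrors this with llvm.extractelement / llvm.extractvalue, which is how the broadcast, shuffle, insert, and extract patterns below walk in and out of nested vector types.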
- Value *stretchOneRank(Value *value, Location loc, VectorType srcVectorType, - VectorType dstVectorType, int64_t rank, int64_t dim, - ConversionPatternRewriter &rewriter) const { + ValuePtr stretchOneRank(ValuePtr value, Location loc, + VectorType srcVectorType, VectorType dstVectorType, + int64_t rank, int64_t dim, + ConversionPatternRewriter &rewriter) const { Type llvmType = lowering.convertType(dstVectorType); assert((llvmType != nullptr) && "unlowerable vector type"); - Value *result = rewriter.create(loc, llvmType); + ValuePtr result = rewriter.create(loc, llvmType); bool atStretch = dim != srcVectorType.getDimSize(0); if (rank == 1) { assert(atStretch); Type redLlvmType = lowering.convertType(dstVectorType.getElementType()); - Value *one = + ValuePtr one = extractOne(rewriter, lowering, loc, value, redLlvmType, rank, 0); - Value *expand = + ValuePtr expand = insertOne(rewriter, lowering, loc, result, one, llvmType, rank, 0); SmallVector zeroValues(dim, 0); return rewriter.create( @@ -232,9 +236,9 @@ class VectorBroadcastOpConversion : public LLVMOpLowering { Type redLlvmType = lowering.convertType(redSrcType); for (int64_t d = 0; d < dim; ++d) { int64_t pos = atStretch ? 0 : d; - Value *one = + ValuePtr one = extractOne(rewriter, lowering, loc, value, redLlvmType, rank, pos); - Value *expand = expandRanks(one, loc, redSrcType, redDstType, rewriter); + ValuePtr expand = expandRanks(one, loc, redSrcType, redDstType, rewriter); result = insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d); } @@ -250,7 +254,7 @@ class VectorShuffleOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto adaptor = vector::ShuffleOpOperandAdaptor(operands); @@ -274,23 +278,23 @@ class VectorShuffleOpConversion : public LLVMOpLowering { // For rank 1, where both operands have *exactly* the same vector type, // there is direct shuffle support in LLVM. Use it! if (rank == 1 && v1Type == v2Type) { - Value *shuffle = rewriter.create( + ValuePtr shuffle = rewriter.create( loc, adaptor.v1(), adaptor.v2(), maskArrayAttr); rewriter.replaceOp(op, shuffle); return matchSuccess(); } // For all other cases, insert the individual values individually. 
- Value *insert = rewriter.create(loc, llvmType); + ValuePtr insert = rewriter.create(loc, llvmType); int64_t insPos = 0; for (auto en : llvm::enumerate(maskArrayAttr)) { int64_t extPos = en.value().cast().getInt(); - Value *value = adaptor.v1(); + ValuePtr value = adaptor.v1(); if (extPos >= v1Dim) { extPos -= v1Dim; value = adaptor.v2(); } - Value *extract = + ValuePtr extract = extractOne(rewriter, lowering, loc, value, llvmType, rank, extPos); insert = insertOne(rewriter, lowering, loc, insert, extract, llvmType, rank, insPos++); @@ -308,7 +312,7 @@ class VectorExtractElementOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto adaptor = vector::ExtractElementOpOperandAdaptor(operands); auto extractEltOp = cast(op); @@ -333,7 +337,7 @@ class VectorExtractOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto adaptor = vector::ExtractOpOperandAdaptor(operands); @@ -349,7 +353,7 @@ class VectorExtractOpConversion : public LLVMOpLowering { // One-shot extraction of vector from array (only requires extractvalue). if (resultType.isa()) { - Value *extracted = rewriter.create( + ValuePtr extracted = rewriter.create( loc, llvmResultType, adaptor.vector(), positionArrayAttr); rewriter.replaceOp(op, extracted); return matchSuccess(); @@ -357,7 +361,7 @@ class VectorExtractOpConversion : public LLVMOpLowering { // Potential extraction of 1-D vector from array. auto *context = op->getContext(); - Value *extracted = adaptor.vector(); + ValuePtr extracted = adaptor.vector(); auto positionAttrs = positionArrayAttr.getValue(); if (positionAttrs.size() > 1) { auto oneDVectorType = reducedVectorTypeBack(vectorType); @@ -388,7 +392,7 @@ class VectorInsertElementOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto adaptor = vector::InsertElementOpOperandAdaptor(operands); auto insertEltOp = cast(op); @@ -413,7 +417,7 @@ class VectorInsertOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto adaptor = vector::InsertOpOperandAdaptor(operands); @@ -429,7 +433,7 @@ class VectorInsertOpConversion : public LLVMOpLowering { // One-shot insertion of a vector into an array (only requires insertvalue). if (sourceType.isa()) { - Value *inserted = rewriter.create( + ValuePtr inserted = rewriter.create( loc, llvmResultType, adaptor.dest(), adaptor.source(), positionArrayAttr); rewriter.replaceOp(op, inserted); @@ -438,7 +442,7 @@ class VectorInsertOpConversion : public LLVMOpLowering { // Potential extraction of 1-D vector from array. 
auto *context = op->getContext(); - Value *extracted = adaptor.dest(); + ValuePtr extracted = adaptor.dest(); auto positionAttrs = positionArrayAttr.getValue(); auto position = positionAttrs.back().cast(); auto oneDVectorType = destVectorType; @@ -454,7 +458,7 @@ class VectorInsertOpConversion : public LLVMOpLowering { // Insertion of an element into a 1-D LLVM vector. auto i64Type = LLVM::LLVMType::getInt64Ty(lowering.getDialect()); auto constant = rewriter.create(loc, i64Type, position); - Value *inserted = rewriter.create( + ValuePtr inserted = rewriter.create( loc, lowering.convertType(oneDVectorType), extracted, adaptor.source(), constant); @@ -480,7 +484,7 @@ class VectorOuterProductOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto adaptor = vector::OuterProductOpOperandAdaptor(operands); @@ -491,10 +495,10 @@ class VectorOuterProductOpConversion : public LLVMOpLowering { auto rankRHS = vRHS.getUnderlyingType()->getVectorNumElements(); auto llvmArrayOfVectType = lowering.convertType( cast(op).getResult()->getType()); - Value *desc = rewriter.create(loc, llvmArrayOfVectType); - Value *a = adaptor.lhs(), *b = adaptor.rhs(); - Value *acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front(); - SmallVector lhs, accs; + ValuePtr desc = rewriter.create(loc, llvmArrayOfVectType); + ValuePtr a = adaptor.lhs(), b = adaptor.rhs(); + ValuePtr acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front(); + SmallVector lhs, accs; lhs.reserve(rankLHS); accs.reserve(rankLHS); for (unsigned d = 0, e = rankLHS; d < e; ++d) { @@ -502,7 +506,7 @@ class VectorOuterProductOpConversion : public LLVMOpLowering { auto attr = rewriter.getI32IntegerAttr(d); SmallVector bcastAttr(rankRHS, attr); auto bcastArrayAttr = ArrayAttr::get(bcastAttr, ctx); - Value *aD = nullptr, *accD = nullptr; + ValuePtr aD = nullptr, accD = nullptr; // 1. Broadcast the element a[d] into vector aD. aD = rewriter.create(loc, a, a, bcastArrayAttr); // 2. If acc is present, extract 1-d vector acc[d] into accD. @@ -510,7 +514,7 @@ class VectorOuterProductOpConversion : public LLVMOpLowering { accD = rewriter.create( loc, vRHS, acc, rewriter.getI64ArrayAttr(d)); // 3. Compute aD outer b (plus accD, if relevant). - Value *aOuterbD = + ValuePtr aOuterbD = accD ? rewriter.create(loc, vRHS, aD, b, accD) .getResult() : rewriter.create(loc, aD, b).getResult(); @@ -532,7 +536,7 @@ class VectorTypeCastOpConversion : public LLVMOpLowering { typeConverter) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); vector::TypeCastOp castOp = cast(op); @@ -581,12 +585,12 @@ class VectorTypeCastOpConversion : public LLVMOpLowering { auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy); Type llvmTargetElementTy = desc.getElementType(); // Set allocated ptr. - Value *allocated = sourceMemRef.allocatedPtr(rewriter, loc); + ValuePtr allocated = sourceMemRef.allocatedPtr(rewriter, loc); allocated = rewriter.create(loc, llvmTargetElementTy, allocated); desc.setAllocatedPtr(rewriter, loc, allocated); // Set aligned ptr. 
- Value *ptr = sourceMemRef.alignedPtr(rewriter, loc); + ValuePtr ptr = sourceMemRef.alignedPtr(rewriter, loc); ptr = rewriter.create(loc, llvmTargetElementTy, ptr); desc.setAlignedPtr(rewriter, loc, ptr); // Fill offset 0. @@ -632,7 +636,7 @@ class VectorPrintOpConversion : public LLVMOpLowering { // TODO(ajcbik): rely solely on libc in future? something else? // PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto printOp = cast(op); auto adaptor = vector::PrintOpOperandAdaptor(operands); @@ -662,7 +666,7 @@ class VectorPrintOpConversion : public LLVMOpLowering { private: void emitRanks(ConversionPatternRewriter &rewriter, Operation *op, - Value *value, VectorType vectorType, Operation *printer, + ValuePtr value, VectorType vectorType, Operation *printer, int64_t rank) const { Location loc = op->getLoc(); if (rank == 0) { @@ -678,7 +682,7 @@ class VectorPrintOpConversion : public LLVMOpLowering { rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr; auto llvmType = lowering.convertType( rank > 1 ? reducedType : vectorType.getElementType()); - Value *nestedVal = + ValuePtr nestedVal = extractOne(rewriter, lowering, loc, value, llvmType, rank, d); emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1); if (d != dim - 1) diff --git a/lib/Dialect/AffineOps/AffineOps.cpp b/lib/Dialect/AffineOps/AffineOps.cpp index ef4060d4302f..3a21de389c7d 100644 --- a/lib/Dialect/AffineOps/AffineOps.cpp +++ b/lib/Dialect/AffineOps/AffineOps.cpp @@ -115,8 +115,8 @@ static bool isFunctionRegion(Region *region) { /// A utility function to check if a value is defined at the top level of a /// function. A value of index type defined at the top level is always a valid /// symbol. -bool mlir::isTopLevelValue(Value *value) { - if (auto *arg = dyn_cast(value)) +bool mlir::isTopLevelValue(ValuePtr value) { + if (auto arg = dyn_cast(value)) return isFunctionRegion(arg->getOwner()->getParent()); return isFunctionRegion(value->getDefiningOp()->getParentRegion()); } @@ -124,7 +124,7 @@ bool mlir::isTopLevelValue(Value *value) { // Value can be used as a dimension id if it is valid as a symbol, or // it is an induction variable, or it is a result of affine apply operation // with dimension id arguments. -bool mlir::isValidDim(Value *value) { +bool mlir::isValidDim(ValuePtr value) { // The value must be an index type. if (!value->getType().isIndex()) return false; @@ -184,7 +184,7 @@ static bool isDimOpValidSymbol(DimOp dimOp) { // the top level, or it is a result of affine apply operation with symbol // arguments, or a result of the dim op on a memref satisfying certain // constraints. -bool mlir::isValidSymbol(Value *value) { +bool mlir::isValidSymbol(ValuePtr value) { // The value must be an index type. if (!value->getType().isIndex()) return false; @@ -207,7 +207,7 @@ bool mlir::isValidSymbol(Value *value) { // Returns true if 'value' is a valid index to an affine operation (e.g. // affine.load, affine.store, affine.dma_start, affine.dma_wait). // Returns false otherwise. 
-static bool isValidAffineIndexOperand(Value *value) { +static bool isValidAffineIndexOperand(ValuePtr value) { return isValidDim(value) || isValidSymbol(value); } @@ -221,7 +221,7 @@ static LogicalResult verifyDimAndSymbolIdentifiers(OpTy &op, Operation::operand_range operands, unsigned numDims) { unsigned opIt = 0; - for (auto *operand : operands) { + for (auto operand : operands) { if (opIt++ < numDims) { if (!isValidDim(operand)) return op.emitOpError("operand cannot be used as a dimension id"); @@ -306,14 +306,14 @@ LogicalResult AffineApplyOp::verify() { // its operands are valid dimension ids. bool AffineApplyOp::isValidDim() { return llvm::all_of(getOperands(), - [](Value *op) { return mlir::isValidDim(op); }); + [](ValuePtr op) { return mlir::isValidDim(op); }); } // The result of the affine apply operation can be used as a symbol if all its // operands are symbols. bool AffineApplyOp::isValidSymbol() { return llvm::all_of(getOperands(), - [](Value *op) { return mlir::isValidSymbol(op); }); + [](ValuePtr op) { return mlir::isValidSymbol(op); }); } OpFoldResult AffineApplyOp::fold(ArrayRef operands) { @@ -333,8 +333,8 @@ OpFoldResult AffineApplyOp::fold(ArrayRef operands) { return result[0]; } -AffineDimExpr AffineApplyNormalizer::renumberOneDim(Value *v) { - DenseMap::iterator iterPos; +AffineDimExpr AffineApplyNormalizer::renumberOneDim(ValuePtr v) { + DenseMap::iterator iterPos; bool inserted = false; std::tie(iterPos, inserted) = dimValueToPosition.insert(std::make_pair(v, dimValueToPosition.size())); @@ -347,7 +347,7 @@ AffineDimExpr AffineApplyNormalizer::renumberOneDim(Value *v) { AffineMap AffineApplyNormalizer::renumber(const AffineApplyNormalizer &other) { SmallVector dimRemapping; - for (auto *v : other.reorderedDims) { + for (auto v : other.reorderedDims) { auto kvp = other.dimValueToPosition.find(v); if (dimRemapping.size() <= kvp->second) dimRemapping.resize(kvp->second + 1); @@ -371,7 +371,7 @@ AffineMap AffineApplyNormalizer::renumber(const AffineApplyNormalizer &other) { // Gather the positions of the operands that are produced by an AffineApplyOp. static llvm::SetVector -indicesFromAffineApplyOp(ArrayRef operands) { +indicesFromAffineApplyOp(ArrayRef operands) { llvm::SetVector res; for (auto en : llvm::enumerate(operands)) if (isa_and_nonnull(en.value()->getDefiningOp())) @@ -393,13 +393,13 @@ indicesFromAffineApplyOp(ArrayRef operands) { // results in better simplifications and foldings. But we should evaluate // whether this behavior is what we really want after using more. static AffineMap promoteComposedSymbolsAsDims(AffineMap map, - ArrayRef symbols) { + ArrayRef symbols) { if (symbols.empty()) { return map; } // Sanity check on symbols. - for (auto *sym : symbols) { + for (auto sym : symbols) { assert(isValidSymbol(sym) && "Expected only valid symbols"); (void)sym; } @@ -446,7 +446,7 @@ static AffineMap promoteComposedSymbolsAsDims(AffineMap map, /// `(d0)[s0, s1] -> (d0 + s0 + s1)`. /// /// The result is only equivalent to `(d0)[s0] -> (d0 + 2 * s0)` when -/// applied to the same mlir::Value* for both s0 and s1. +/// applied to the same mlir::Value for both s0 and s1. /// As a consequence mathematical composition of AffineMap always concatenates /// symbols. /// @@ -462,7 +462,7 @@ static AffineMap promoteComposedSymbolsAsDims(AffineMap map, /// benefit potentially big: simpler and more maintainable code for a /// non-trivial, recursive, procedure. 
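/// Illustration (assumed, for exposition only; not taken from the patch): if
/// %i is defined by an affine.apply of (d0) -> (d0 + 1) on %j, normalizing a
/// user map (d0) -> (2 * d0) against operand %i composes the two into
/// (d0) -> (2 * d0 + 2) applied directly to %j, and %i is dropped from the
/// normalized operand list.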
AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map, - ArrayRef operands) + ArrayRef operands) : AffineApplyNormalizer() { static_assert(kMaxAffineApplyDepth > 0, "kMaxAffineApplyDepth must be > 0"); assert(map.getNumInputs() == operands.size() && @@ -495,7 +495,7 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map, if (!furtherCompose) { // 1. Only dispatch dims or symbols. for (auto en : llvm::enumerate(operands)) { - auto *t = en.value(); + auto t = en.value(); assert(t->getType().isIndex()); bool isDim = (en.index() < map.getNumDims()); if (isDim) { @@ -511,14 +511,14 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map, assert(numDimsBeforeRewrite <= operands.size()); // 2. Compose AffineApplyOps and dispatch dims or symbols. for (unsigned i = 0, e = operands.size(); i < e; ++i) { - auto *t = operands[i]; + auto t = operands[i]; auto affineApply = dyn_cast_or_null(t->getDefiningOp()); if (affineApply) { // a. Compose affine.apply operations. LLVM_DEBUG(affineApply.getOperation()->print( dbgs() << "\nCompose AffineApplyOp recursively: ")); AffineMap affineApplyMap = affineApply.getAffineMap(); - SmallVector affineApplyOperands( + SmallVector affineApplyOperands( affineApply.getOperands().begin(), affineApply.getOperands().end()); AffineApplyNormalizer normalizer(affineApplyMap, affineApplyOperands); @@ -569,8 +569,8 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map, LLVM_DEBUG(dbgs() << "\n"); } -void AffineApplyNormalizer::normalize(AffineMap *otherMap, - SmallVectorImpl *otherOperands) { +void AffineApplyNormalizer::normalize( + AffineMap *otherMap, SmallVectorImpl *otherOperands) { AffineApplyNormalizer other(*otherMap, *otherOperands); *otherMap = renumber(other); @@ -584,7 +584,7 @@ void AffineApplyNormalizer::normalize(AffineMap *otherMap, /// on `map` and `operands` without creating an AffineApplyOp that needs to be /// immediately deleted. static void composeAffineMapAndOperands(AffineMap *map, - SmallVectorImpl *operands) { + SmallVectorImpl *operands) { AffineApplyNormalizer normalizer(*map, *operands); auto normalizedMap = normalizer.getAffineMap(); auto normalizedOperands = normalizer.getOperands(); @@ -595,8 +595,8 @@ static void composeAffineMapAndOperands(AffineMap *map, } void mlir::fullyComposeAffineMapAndOperands( - AffineMap *map, SmallVectorImpl *operands) { - while (llvm::any_of(*operands, [](Value *v) { + AffineMap *map, SmallVectorImpl *operands) { + while (llvm::any_of(*operands, [](ValuePtr v) { return isa_and_nonnull(v->getDefiningOp()); })) { composeAffineMapAndOperands(map, operands); @@ -605,9 +605,9 @@ void mlir::fullyComposeAffineMapAndOperands( AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map, - ArrayRef operands) { + ArrayRef operands) { AffineMap normalizedMap = map; - SmallVector normalizedOperands(operands.begin(), operands.end()); + SmallVector normalizedOperands(operands.begin(), operands.end()); composeAffineMapAndOperands(&normalizedMap, &normalizedOperands); assert(normalizedMap); return b.create(loc, normalizedMap, normalizedOperands); @@ -617,7 +617,7 @@ AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc, // canonicalizes dims that are valid symbols into actual symbols. 
template static void canonicalizePromotedSymbols(MapOrSet *mapOrSet, - SmallVectorImpl *operands) { + SmallVectorImpl *operands) { if (!mapOrSet || operands->empty()) return; @@ -625,9 +625,9 @@ static void canonicalizePromotedSymbols(MapOrSet *mapOrSet, "map/set inputs must match number of operands"); auto *context = mapOrSet->getContext(); - SmallVector resultOperands; + SmallVector resultOperands; resultOperands.reserve(operands->size()); - SmallVector remappedSymbols; + SmallVector remappedSymbols; remappedSymbols.reserve(operands->size()); unsigned nextDim = 0; unsigned nextSym = 0; @@ -661,7 +661,7 @@ static void canonicalizePromotedSymbols(MapOrSet *mapOrSet, template static void canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet, - SmallVectorImpl *operands) { + SmallVectorImpl *operands) { static_assert(std::is_same::value || std::is_same::value, "Argument must be either of AffineMap or IntegerSet type"); @@ -686,10 +686,10 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet, auto *context = mapOrSet->getContext(); - SmallVector resultOperands; + SmallVector resultOperands; resultOperands.reserve(operands->size()); - llvm::SmallDenseMap seenDims; + llvm::SmallDenseMap seenDims; SmallVector dimRemapping(mapOrSet->getNumDims()); unsigned nextDim = 0; for (unsigned i = 0, e = mapOrSet->getNumDims(); i != e; ++i) { @@ -705,7 +705,7 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet, } } } - llvm::SmallDenseMap seenSymbols; + llvm::SmallDenseMap seenSymbols; SmallVector symRemapping(mapOrSet->getNumSymbols()); unsigned nextSym = 0; for (unsigned i = 0, e = mapOrSet->getNumSymbols(); i != e; ++i) { @@ -738,12 +738,12 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet, } void mlir::canonicalizeMapAndOperands(AffineMap *map, - SmallVectorImpl *operands) { + SmallVectorImpl *operands) { canonicalizeMapOrSetAndOperands(map, operands); } void mlir::canonicalizeSetAndOperands(IntegerSet *set, - SmallVectorImpl *operands) { + SmallVectorImpl *operands) { canonicalizeMapOrSetAndOperands(set, operands); } @@ -758,7 +758,7 @@ struct SimplifyAffineOp : public OpRewritePattern { /// Replace the affine op with another instance of it with the supplied /// map and mapOperands. 
void replaceAffineOp(PatternRewriter &rewriter, AffineOpTy affineOp, - AffineMap map, ArrayRef mapOperands) const; + AffineMap map, ArrayRef mapOperands) const; PatternMatchResult matchAndRewrite(AffineOpTy affineOp, PatternRewriter &rewriter) const override { @@ -770,7 +770,7 @@ struct SimplifyAffineOp : public OpRewritePattern { auto map = affineOp.getAffineMap(); AffineMap oldMap = map; auto oldOperands = affineOp.getMapOperands(); - SmallVector resultOperands(oldOperands); + SmallVector resultOperands(oldOperands); composeAffineMapAndOperands(&map, &resultOperands); if (map == oldMap && std::equal(oldOperands.begin(), oldOperands.end(), resultOperands.begin())) @@ -786,14 +786,14 @@ struct SimplifyAffineOp : public OpRewritePattern { template <> void SimplifyAffineOp::replaceAffineOp( PatternRewriter &rewriter, AffineLoadOp load, AffineMap map, - ArrayRef mapOperands) const { + ArrayRef mapOperands) const { rewriter.replaceOpWithNewOp(load, load.getMemRef(), map, mapOperands); } template <> void SimplifyAffineOp::replaceAffineOp( PatternRewriter &rewriter, AffinePrefetchOp prefetch, AffineMap map, - ArrayRef mapOperands) const { + ArrayRef mapOperands) const { rewriter.replaceOpWithNewOp( prefetch, prefetch.memref(), map, mapOperands, prefetch.localityHint().getZExtValue(), prefetch.isWrite(), @@ -802,14 +802,14 @@ void SimplifyAffineOp::replaceAffineOp( template <> void SimplifyAffineOp::replaceAffineOp( PatternRewriter &rewriter, AffineStoreOp store, AffineMap map, - ArrayRef mapOperands) const { + ArrayRef mapOperands) const { rewriter.replaceOpWithNewOp( store, store.getValueToStore(), store.getMemRef(), map, mapOperands); } template <> void SimplifyAffineOp::replaceAffineOp( PatternRewriter &rewriter, AffineApplyOp apply, AffineMap map, - ArrayRef mapOperands) const { + ArrayRef mapOperands) const { rewriter.replaceOpWithNewOp(apply, map, mapOperands); } } // end anonymous namespace. @@ -844,12 +844,12 @@ static LogicalResult foldMemRefCast(Operation *op) { // TODO(b/133776335) Check that map operands are loop IVs or symbols. 
void AffineDmaStartOp::build(Builder *builder, OperationState &result, - Value *srcMemRef, AffineMap srcMap, - ValueRange srcIndices, Value *destMemRef, + ValuePtr srcMemRef, AffineMap srcMap, + ValueRange srcIndices, ValuePtr destMemRef, AffineMap dstMap, ValueRange destIndices, - Value *tagMemRef, AffineMap tagMap, - ValueRange tagIndices, Value *numElements, - Value *stride, Value *elementsPerStride) { + ValuePtr tagMemRef, AffineMap tagMap, + ValueRange tagIndices, ValuePtr numElements, + ValuePtr stride, ValuePtr elementsPerStride) { result.addOperands(srcMemRef); result.addAttribute(getSrcMapAttrName(), AffineMapAttr::get(srcMap)); result.addOperands(srcIndices); @@ -980,19 +980,19 @@ LogicalResult AffineDmaStartOp::verify() { return emitOpError("incorrect number of operands"); } - for (auto *idx : getSrcIndices()) { + for (auto idx : getSrcIndices()) { if (!idx->getType().isIndex()) return emitOpError("src index to dma_start must have 'index' type"); if (!isValidAffineIndexOperand(idx)) return emitOpError("src index must be a dimension or symbol identifier"); } - for (auto *idx : getDstIndices()) { + for (auto idx : getDstIndices()) { if (!idx->getType().isIndex()) return emitOpError("dst index to dma_start must have 'index' type"); if (!isValidAffineIndexOperand(idx)) return emitOpError("dst index must be a dimension or symbol identifier"); } - for (auto *idx : getTagIndices()) { + for (auto idx : getTagIndices()) { if (!idx->getType().isIndex()) return emitOpError("tag index to dma_start must have 'index' type"); if (!isValidAffineIndexOperand(idx)) @@ -1013,8 +1013,8 @@ LogicalResult AffineDmaStartOp::fold(ArrayRef cstOperands, // TODO(b/133776335) Check that map operands are loop IVs or symbols. void AffineDmaWaitOp::build(Builder *builder, OperationState &result, - Value *tagMemRef, AffineMap tagMap, - ValueRange tagIndices, Value *numElements) { + ValuePtr tagMemRef, AffineMap tagMap, + ValueRange tagIndices, ValuePtr numElements) { result.addOperands(tagMemRef); result.addAttribute(getTagMapAttrName(), AffineMapAttr::get(tagMap)); result.addOperands(tagIndices); @@ -1023,7 +1023,7 @@ void AffineDmaWaitOp::build(Builder *builder, OperationState &result, void AffineDmaWaitOp::print(OpAsmPrinter &p) { p << "affine.dma_wait " << *getTagMemRef() << '['; - SmallVector operands(getTagIndices()); + SmallVector operands(getTagIndices()); p.printAffineMapOfSSAIds(getTagMapAttr(), operands); p << "], "; p.printOperand(getNumElements()); @@ -1068,7 +1068,7 @@ ParseResult AffineDmaWaitOp::parse(OpAsmParser &parser, LogicalResult AffineDmaWaitOp::verify() { if (!getOperand(0)->getType().isa()) return emitOpError("expected DMA tag to be of memref type"); - for (auto *idx : getTagIndices()) { + for (auto idx : getTagIndices()) { if (!idx->getType().isIndex()) return emitOpError("index to dma_wait must have 'index' type"); if (!isValidAffineIndexOperand(idx)) @@ -1368,7 +1368,7 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) { SmallVector operandConstants; auto boundOperands = lower ? forOp.getLowerBoundOperands() : forOp.getUpperBoundOperands(); - for (auto *operand : boundOperands) { + for (auto operand : boundOperands) { Attribute operandCst; matchPattern(operand, m_Constant(&operandCst)); operandConstants.push_back(operandCst); @@ -1408,8 +1408,8 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) { /// Canonicalize the bounds of the given loop. 
static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) { - SmallVector lbOperands(forOp.getLowerBoundOperands()); - SmallVector ubOperands(forOp.getUpperBoundOperands()); + SmallVector lbOperands(forOp.getLowerBoundOperands()); + SmallVector ubOperands(forOp.getUpperBoundOperands()); auto lbMap = forOp.getLowerBoundMap(); auto ubMap = forOp.getUpperBoundMap(); @@ -1474,7 +1474,7 @@ void AffineForOp::setLowerBound(ValueRange lbOperands, AffineMap map) { assert(lbOperands.size() == map.getNumInputs()); assert(map.getNumResults() >= 1 && "bound map has at least one result"); - SmallVector newOperands(lbOperands.begin(), lbOperands.end()); + SmallVector newOperands(lbOperands.begin(), lbOperands.end()); auto ubOperands = getUpperBoundOperands(); newOperands.append(ubOperands.begin(), ubOperands.end()); @@ -1487,7 +1487,7 @@ void AffineForOp::setUpperBound(ValueRange ubOperands, AffineMap map) { assert(ubOperands.size() == map.getNumInputs()); assert(map.getNumResults() >= 1 && "bound map has at least one result"); - SmallVector newOperands(getLowerBoundOperands()); + SmallVector newOperands(getLowerBoundOperands()); newOperands.append(ubOperands.begin(), ubOperands.end()); getOperation()->setOperands(newOperands); @@ -1553,7 +1553,7 @@ bool AffineForOp::matchingBoundOperandList() { unsigned numOperands = lbMap.getNumInputs(); for (unsigned i = 0, e = lbMap.getNumInputs(); i < e; i++) { - // Compare Value *'s. + // Compare ValuePtr 's. if (getOperand(i) != getOperand(numOperands + i)) return false; } @@ -1562,7 +1562,7 @@ bool AffineForOp::matchingBoundOperandList() { Region &AffineForOp::getLoopBody() { return region(); } -bool AffineForOp::isDefinedOutsideOfLoop(Value *value) { +bool AffineForOp::isDefinedOutsideOfLoop(ValuePtr value) { return !region().isAncestor(value->getParentRegion()); } @@ -1573,14 +1573,14 @@ LogicalResult AffineForOp::moveOutOfLoop(ArrayRef ops) { } /// Returns if the provided value is the induction variable of a AffineForOp. -bool mlir::isForInductionVar(Value *val) { +bool mlir::isForInductionVar(ValuePtr val) { return getForInductionVarOwner(val) != AffineForOp(); } /// Returns the loop parent of an induction variable. If the provided value is /// not an induction variable, then return nullptr. -AffineForOp mlir::getForInductionVarOwner(Value *val) { - auto *ivArg = dyn_cast(val); +AffineForOp mlir::getForInductionVarOwner(ValuePtr val) { + auto ivArg = dyn_cast(val); if (!ivArg || !ivArg->getOwner()) return AffineForOp(); auto *containingInst = ivArg->getOwner()->getParent()->getParentOp(); @@ -1590,7 +1590,7 @@ AffineForOp mlir::getForInductionVarOwner(Value *val) { /// Extracts the induction variables from a list of AffineForOps and returns /// them. 
void mlir::extractForInductionVars(ArrayRef forInsts, - SmallVectorImpl *ivs) { + SmallVectorImpl *ivs) { ivs->reserve(forInsts.size()); for (auto forInst : forInsts) ivs->push_back(forInst.getInductionVar()); @@ -1729,7 +1729,7 @@ void AffineIfOp::build(Builder *builder, OperationState &result, IntegerSet set, LogicalResult AffineIfOp::fold(ArrayRef, SmallVectorImpl &) { auto set = getIntegerSet(); - SmallVector operands(getOperands()); + SmallVector operands(getOperands()); canonicalizeSetAndOperands(&set, &operands); // Any canonicalization change always leads to either a reduction in the @@ -1759,7 +1759,8 @@ void AffineLoadOp::build(Builder *builder, OperationState &result, } void AffineLoadOp::build(Builder *builder, OperationState &result, - Value *memref, AffineMap map, ValueRange mapOperands) { + ValuePtr memref, AffineMap map, + ValueRange mapOperands) { assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info"); result.addOperands(memref); result.addOperands(mapOperands); @@ -1769,7 +1770,7 @@ void AffineLoadOp::build(Builder *builder, OperationState &result, } void AffineLoadOp::build(Builder *builder, OperationState &result, - Value *memref, ValueRange indices) { + ValuePtr memref, ValueRange indices) { auto memrefType = memref->getType().cast(); auto rank = memrefType.getRank(); // Create identity map for memrefs with at least one dimension or () -> () @@ -1825,7 +1826,7 @@ LogicalResult AffineLoadOp::verify() { "expects the number of subscripts to be equal to memref rank"); } - for (auto *idx : getMapOperands()) { + for (auto idx : getMapOperands()) { if (!idx->getType().isIndex()) return emitOpError("index to load must have 'index' type"); if (!isValidAffineIndexOperand(idx)) @@ -1851,7 +1852,7 @@ OpFoldResult AffineLoadOp::fold(ArrayRef cstOperands) { //===----------------------------------------------------------------------===// void AffineStoreOp::build(Builder *builder, OperationState &result, - Value *valueToStore, Value *memref, AffineMap map, + ValuePtr valueToStore, ValuePtr memref, AffineMap map, ValueRange mapOperands) { assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info"); result.addOperands(valueToStore); @@ -1862,7 +1863,7 @@ void AffineStoreOp::build(Builder *builder, OperationState &result, // Use identity map. void AffineStoreOp::build(Builder *builder, OperationState &result, - Value *valueToStore, Value *memref, + ValuePtr valueToStore, ValuePtr memref, ValueRange indices) { auto memrefType = memref->getType().cast(); auto rank = memrefType.getRank(); @@ -1923,7 +1924,7 @@ LogicalResult AffineStoreOp::verify() { "expects the number of subscripts to be equal to memref rank"); } - for (auto *idx : getMapOperands()) { + for (auto idx : getMapOperands()) { if (!idx->getType().isIndex()) return emitOpError("index to store must have 'index' type"); if (!isValidAffineIndexOperand(idx)) @@ -2072,7 +2073,7 @@ void print(OpAsmPrinter &p, AffinePrefetchOp op) { p << AffinePrefetchOp::getOperationName() << " " << *op.memref() << '['; AffineMapAttr mapAttr = op.getAttrOfType(op.getMapAttrName()); if (mapAttr) { - SmallVector operands(op.getMapOperands()); + SmallVector operands(op.getMapOperands()); p.printAffineMapOfSSAIds(mapAttr, operands); } p << ']' << ", " << (op.isWrite() ? 
"write" : "read") << ", " @@ -2099,7 +2100,7 @@ LogicalResult verify(AffinePrefetchOp op) { return op.emitOpError("too few operands"); } - for (auto *idx : op.getMapOperands()) { + for (auto idx : op.getMapOperands()) { if (!isValidAffineIndexOperand(idx)) return op.emitOpError("index must be a dimension or symbol identifier"); } diff --git a/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp b/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp index 3982a6a47133..e1951ff900b9 100644 --- a/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp +++ b/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp @@ -46,9 +46,9 @@ struct LowerUniformCastsPass : public FunctionPass { // Dequantize //===----------------------------------------------------------------------===// -static Value *emitUniformPerLayerDequantize(Location loc, Value *input, - UniformQuantizedType elementType, - PatternRewriter &rewriter) { +static ValuePtr emitUniformPerLayerDequantize(Location loc, ValuePtr input, + UniformQuantizedType elementType, + PatternRewriter &rewriter) { // Pre-conditions. if (!elementType.isSigned()) { // TODO: Support unsigned storage type. @@ -71,7 +71,7 @@ static Value *emitUniformPerLayerDequantize(Location loc, Value *input, // Apply zero-point offset. if (elementType.getZeroPoint() != 0) { - Value *negZeroPointConst = rewriter.create( + ValuePtr negZeroPointConst = rewriter.create( loc, broadcastScalarConstIntValue(intermediateType, -elementType.getZeroPoint())); input = rewriter.create(loc, input, negZeroPointConst); @@ -81,14 +81,14 @@ static Value *emitUniformPerLayerDequantize(Location loc, Value *input, input = rewriter.create(loc, realType, input); // Mul by scale. - Value *scaleConst = rewriter.create( + ValuePtr scaleConst = rewriter.create( loc, broadcastScalarConstFloatValue(realType, APFloat(elementType.getScale()))); return rewriter.create(loc, input, scaleConst); } -static Value * -emitUniformPerAxisDequantize(Location loc, Value *input, +static ValuePtr +emitUniformPerAxisDequantize(Location loc, ValuePtr input, UniformQuantizedPerAxisType elementType, PatternRewriter &rewriter) { // TODO: Support per-axis dequantize. @@ -97,8 +97,8 @@ emitUniformPerAxisDequantize(Location loc, Value *input, return nullptr; } -static Value *emitDequantize(Location loc, Value *input, - PatternRewriter &rewriter) { +static ValuePtr emitDequantize(Location loc, ValuePtr input, + PatternRewriter &rewriter) { Type inputType = input->getType(); QuantizedType qElementType = QuantizedType::getQuantizedElementType(inputType); @@ -133,7 +133,7 @@ struct UniformDequantizePattern : public OpRewritePattern { return matchFailure(); } - Value *dequantizedValue = emitDequantize(op.getLoc(), op.arg(), rewriter); + ValuePtr dequantizedValue = emitDequantize(op.getLoc(), op.arg(), rewriter); if (!dequantizedValue) { return matchFailure(); } @@ -170,14 +170,14 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info, castElementType(info.resultStorageType, intermediateElementType); // Cast operands to storage type. 
- Value *lhsValue = rewriter - .create(info.op->getLoc(), - info.lhsStorageType, info.lhs) - .getResult(); - Value *rhsValue = rewriter - .create(info.op->getLoc(), - info.rhsStorageType, info.rhs) - .getResult(); + ValuePtr lhsValue = rewriter + .create(info.op->getLoc(), + info.lhsStorageType, info.lhs) + .getResult(); + ValuePtr rhsValue = rewriter + .create(info.op->getLoc(), + info.rhsStorageType, info.rhs) + .getResult(); // Cast to the intermediate sized type. lhsValue = rewriter.create(info.op->getLoc(), intermediateType, @@ -186,7 +186,7 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info, rhsValue); // Add. - Value *resultValue = + ValuePtr resultValue = rewriter.create(info.op->getLoc(), lhsValue, rhsValue); // Zero point offset adjustment. @@ -194,7 +194,7 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info, // zpOffset = -zp int zpOffset = -1 * info.resultType.getZeroPoint(); if (zpOffset != 0) { - Value *zpOffsetConst = rewriter.create( + ValuePtr zpOffsetConst = rewriter.create( info.op->getLoc(), broadcastScalarConstIntValue(intermediateType, zpOffset)); resultValue = @@ -246,14 +246,14 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info, castElementType(info.resultStorageType, intermediateElementType); // Cast operands to storage type. - Value *lhsValue = rewriter - .create(info.op->getLoc(), - info.lhsStorageType, info.lhs) - .getResult(); - Value *rhsValue = rewriter - .create(info.op->getLoc(), - info.rhsStorageType, info.rhs) - .getResult(); + ValuePtr lhsValue = rewriter + .create(info.op->getLoc(), + info.lhsStorageType, info.lhs) + .getResult(); + ValuePtr rhsValue = rewriter + .create(info.op->getLoc(), + info.rhsStorageType, info.rhs) + .getResult(); // Cast to the intermediate sized type. lhsValue = rewriter.create(info.op->getLoc(), intermediateType, @@ -263,7 +263,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info, // Apply argument zeroPoints. if (info.lhsType.getZeroPoint() != 0) { - Value *zpOffsetConst = rewriter.create( + ValuePtr zpOffsetConst = rewriter.create( info.op->getLoc(), broadcastScalarConstIntValue( intermediateType, -info.lhsType.getZeroPoint())); lhsValue = @@ -271,7 +271,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info, } if (info.rhsType.getZeroPoint() != 0) { - Value *zpOffsetConst = rewriter.create( + ValuePtr zpOffsetConst = rewriter.create( info.op->getLoc(), broadcastScalarConstIntValue( intermediateType, -info.rhsType.getZeroPoint())); rhsValue = @@ -279,7 +279,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info, } // Mul. - Value *resultValue = + ValuePtr resultValue = rewriter.create(info.op->getLoc(), lhsValue, rhsValue); // Scale output. @@ -293,7 +293,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info, // Zero point offset adjustment. 
if (info.resultType.getZeroPoint() != 0) { - Value *zpOffsetConst = rewriter.create( + ValuePtr zpOffsetConst = rewriter.create( info.op->getLoc(), broadcastScalarConstIntValue(intermediateType, info.resultType.getZeroPoint())); diff --git a/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h b/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h index 955e2ecc88ce..57a8422b3623 100644 --- a/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h +++ b/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h @@ -59,7 +59,7 @@ template bool integralLog2(F x, int &log2Result) { /// Helper class for operating on binary operations where all operands /// and the result are a UniformQuantizedType. struct UniformBinaryOpInfo { - UniformBinaryOpInfo(Operation *op, Value *lhs, Value *rhs, + UniformBinaryOpInfo(Operation *op, ValuePtr lhs, ValuePtr rhs, Optional clampMin, Optional clampMax) : op(op), lhs(lhs), rhs(rhs), clampMin(clampMin), clampMax(clampMax), lhsType(getUniformElementType(lhs->getType())), @@ -128,8 +128,8 @@ struct UniformBinaryOpInfo { } Operation *op; - Value *lhs; - Value *rhs; + ValuePtr lhs; + ValuePtr rhs; Optional clampMin; Optional clampMax; diff --git a/lib/Dialect/GPU/IR/GPUDialect.cpp b/lib/Dialect/GPU/IR/GPUDialect.cpp index 9c0183eb90f5..349c1fa4644f 100644 --- a/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -145,7 +145,7 @@ static LogicalResult verifyAllReduce(gpu::AllReduceOp allReduce) { if (!allReduce.body().empty()) { if (allReduce.body().front().getNumArguments() != 2) return allReduce.emitError("expected two region arguments"); - for (auto *argument : allReduce.body().front().getArguments()) { + for (auto argument : allReduce.body().front().getArguments()) { if (argument->getType() != allReduce.getType()) return allReduce.emitError("incorrect region argument type"); } @@ -213,15 +213,15 @@ static ParseResult parseShuffleOp(OpAsmParser &parser, OperationState &state) { static SmallVector getValueTypes(ValueRange values) { SmallVector types; types.reserve(values.size()); - for (Value *v : values) + for (ValuePtr v : values) types.push_back(v->getType()); return types; } -void LaunchOp::build(Builder *builder, OperationState &result, Value *gridSizeX, - Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX, - Value *blockSizeY, Value *blockSizeZ, - ValueRange operands) { +void LaunchOp::build(Builder *builder, OperationState &result, + ValuePtr gridSizeX, ValuePtr gridSizeY, ValuePtr gridSizeZ, + ValuePtr blockSizeX, ValuePtr blockSizeY, + ValuePtr blockSizeZ, ValueRange operands) { // Add grid and block sizes as op operands, followed by the data operands. result.addOperands( {gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ}); @@ -489,22 +489,22 @@ class PropagateConstantBounds : public OpRewritePattern { // and use it instead of passing the value from the parent region. Perform // the traversal in the inverse order to simplify index arithmetics when // dropping arguments. 
- SmallVector operands(launchOp.getKernelOperandValues().begin(), - launchOp.getKernelOperandValues().end()); - SmallVector kernelArgs(launchOp.getKernelArguments().begin(), - launchOp.getKernelArguments().end()); + SmallVector operands(launchOp.getKernelOperandValues().begin(), + launchOp.getKernelOperandValues().end()); + SmallVector kernelArgs(launchOp.getKernelArguments().begin(), + launchOp.getKernelArguments().end()); bool found = false; for (unsigned i = operands.size(); i > 0; --i) { unsigned index = i - 1; - Value *operand = operands[index]; + ValuePtr operand = operands[index]; if (!isa_and_nonnull(operand->getDefiningOp())) { continue; } found = true; - Value *internalConstant = + ValuePtr internalConstant = rewriter.clone(*operand->getDefiningOp())->getResult(0); - Value *kernelArg = kernelArgs[index]; + ValuePtr kernelArg = kernelArgs[index]; kernelArg->replaceAllUsesWith(internalConstant); launchOp.eraseKernelArgument(index); } @@ -529,10 +529,10 @@ void LaunchOp::getCanonicalizationPatterns(OwningRewritePatternList &results, //===----------------------------------------------------------------------===// void LaunchFuncOp::build(Builder *builder, OperationState &result, - GPUFuncOp kernelFunc, Value *gridSizeX, - Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX, - Value *blockSizeY, Value *blockSizeZ, - ValueRange kernelOperands) { + GPUFuncOp kernelFunc, ValuePtr gridSizeX, + ValuePtr gridSizeY, ValuePtr gridSizeZ, + ValuePtr blockSizeX, ValuePtr blockSizeY, + ValuePtr blockSizeZ, ValueRange kernelOperands) { // Add grid and block sizes as op operands, followed by the data operands. result.addOperands( {gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ}); @@ -565,7 +565,7 @@ StringRef LaunchFuncOp::getKernelModuleName() { .getRootReference(); } -Value *LaunchFuncOp::getKernelOperand(unsigned i) { +ValuePtr LaunchFuncOp::getKernelOperand(unsigned i) { return getOperation()->getOperand(i + kNumConfigOperands); } @@ -728,13 +728,14 @@ static ParseResult parseGPUFuncOp(OpAsmParser &parser, OperationState &result) { } static void printAttributions(OpAsmPrinter &p, StringRef keyword, - ArrayRef values) { + ArrayRef values) { if (values.empty()) return; p << ' ' << keyword << '('; - interleaveComma(values, p, - [&p](BlockArgument *v) { p << *v << " : " << v->getType(); }); + interleaveComma(values, p, [&p](BlockArgumentPtr v) { + p << *v << " : " << v->getType(); + }); p << ')'; } @@ -781,9 +782,9 @@ LogicalResult GPUFuncOp::verifyType() { } static LogicalResult verifyAttributions(Operation *op, - ArrayRef attributions, + ArrayRef attributions, unsigned memorySpace) { - for (Value *v : attributions) { + for (ValuePtr v : attributions) { auto type = v->getType().dyn_cast(); if (!type) return op->emitOpError() << "expected memref type in attribution"; diff --git a/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/lib/Dialect/GPU/Transforms/KernelOutlining.cpp index 0a6a59156338..8f5f50e49099 100644 --- a/lib/Dialect/GPU/Transforms/KernelOutlining.cpp +++ b/lib/Dialect/GPU/Transforms/KernelOutlining.cpp @@ -31,10 +31,10 @@ using namespace mlir; template static void createForAllDimensions(OpBuilder &builder, Location loc, - SmallVectorImpl &values) { + SmallVectorImpl &values) { for (StringRef dim : {"x", "y", "z"}) { - Value *v = builder.create(loc, builder.getIndexType(), - builder.getStringAttr(dim)); + ValuePtr v = builder.create(loc, builder.getIndexType(), + builder.getStringAttr(dim)); values.push_back(v); } } @@ -46,7 +46,7 @@ static void 
injectGpuIndexOperations(Location loc, Region &body) { OpBuilder builder(loc->getContext()); Block &firstBlock = body.front(); builder.setInsertionPointToStart(&firstBlock); - SmallVector indexOps; + SmallVector indexOps; createForAllDimensions(builder, loc, indexOps); createForAllDimensions(builder, loc, indexOps); createForAllDimensions(builder, loc, indexOps); @@ -69,7 +69,7 @@ static gpu::LaunchFuncOp inlineBeneficiaryOps(gpu::GPUFuncOp kernelFunc, gpu::LaunchFuncOp launch) { OpBuilder kernelBuilder(kernelFunc.getBody()); auto &firstBlock = kernelFunc.getBody().front(); - SmallVector newLaunchArgs; + SmallVector newLaunchArgs; BlockAndValueMapping map; for (int i = 0, e = launch.getNumKernelOperands(); i < e; ++i) { map.map(launch.getKernelOperand(i), kernelFunc.getArgument(i)); @@ -82,7 +82,7 @@ static gpu::LaunchFuncOp inlineBeneficiaryOps(gpu::GPUFuncOp kernelFunc, } // Only inline operations that do not create new arguments. if (!llvm::all_of(operandOp->getOperands(), - [map](Value *value) { return map.contains(value); })) { + [map](ValuePtr value) { return map.contains(value); })) { continue; } auto clone = kernelBuilder.clone(*operandOp, map); diff --git a/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index 1813b30165fc..b94ee335bd2c 100644 --- a/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -415,7 +415,7 @@ static ParseResult parseCallOp(OpAsmParser &parser, OperationState &result) { // Expects vector to be of wrapped LLVM vector type and position to be of // wrapped LLVM i32 type. void LLVM::ExtractElementOp::build(Builder *b, OperationState &result, - Value *vector, Value *position, + ValuePtr vector, ValuePtr position, ArrayRef attrs) { auto wrappedVectorType = vector->getType().cast(); auto llvmType = wrappedVectorType.getVectorElementType(); @@ -681,7 +681,7 @@ static void printBrOp(OpAsmPrinter &p, BrOp &op) { // attribute-dict? static ParseResult parseBrOp(OpAsmParser &parser, OperationState &result) { Block *dest; - SmallVector operands; + SmallVector operands; if (parser.parseSuccessorAndUseList(dest, operands) || parser.parseOptionalAttrDict(result.attributes)) return failure(); @@ -708,8 +708,8 @@ static void printCondBrOp(OpAsmPrinter &p, CondBrOp &op) { static ParseResult parseCondBrOp(OpAsmParser &parser, OperationState &result) { Block *trueDest; Block *falseDest; - SmallVector trueOperands; - SmallVector falseOperands; + SmallVector trueOperands; + SmallVector falseOperands; OpAsmParser::OperandType condition; Builder &builder = parser.getBuilder(); @@ -1066,8 +1066,8 @@ static LogicalResult verify(GlobalOp op) { //===----------------------------------------------------------------------===// // Expects vector to be of wrapped LLVM vector type and position to be of // wrapped LLVM i32 type. -void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result, Value *v1, - Value *v2, ArrayAttr mask, +void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result, + ValuePtr v1, ValuePtr v2, ArrayAttr mask, ArrayRef attrs) { auto wrappedContainerType1 = v1->getType().cast(); auto vType = LLVMType::getVectorTy( @@ -1664,10 +1664,10 @@ LLVMType LLVMType::getVoidTy(LLVMDialect *dialect) { // Utility functions. 
//===----------------------------------------------------------------------===// -Value *mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder, - StringRef name, StringRef value, - LLVM::Linkage linkage, - LLVM::LLVMDialect *llvmDialect) { +ValuePtr mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder, + StringRef name, StringRef value, + LLVM::Linkage linkage, + LLVM::LLVMDialect *llvmDialect) { assert(builder.getInsertionBlock() && builder.getInsertionBlock()->getParentOp() && "expected builder to point to a block constrained in an op"); @@ -1684,13 +1684,13 @@ Value *mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder, builder.getStringAttr(value)); // Get the pointer to the first character in the global string. - Value *globalPtr = builder.create(loc, global); - Value *cst0 = builder.create( + ValuePtr globalPtr = builder.create(loc, global); + ValuePtr cst0 = builder.create( loc, LLVM::LLVMType::getInt64Ty(llvmDialect), builder.getIntegerAttr(builder.getIndexType(), 0)); return builder.create( loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr, - ArrayRef({cst0, cst0})); + ArrayRef({cst0, cst0})); } bool mlir::LLVM::satisfiesLLVMModule(Operation *op) { diff --git a/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp index d7e4d08527d3..ee122e160372 100644 --- a/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp +++ b/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp @@ -49,7 +49,7 @@ static StringRef toStringRef(LinalgDependenceGraph::DependenceType dt) { llvm_unreachable("Unexpected DependenceType"); } -Value *Aliases::find(Value *v) { +ValuePtr Aliases::find(ValuePtr v) { if (isa(v)) return v; @@ -147,9 +147,9 @@ LinalgDependenceGraph::getDependencesInto( } void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) { - for (auto *srcView : src.getOutputs()) { // W + for (auto srcView : src.getOutputs()) { // W // RAW graph - for (auto *dstView : dst.getInputs()) { // R + for (auto dstView : dst.getInputs()) { // R if (aliases.alias(srcView, dstView)) { // if alias, fill RAW addDependenceElem(DependenceType::RAW, LinalgOpView{src.getOperation(), srcView}, @@ -157,7 +157,7 @@ void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) { } } // WAW graph - for (auto *dstView : dst.getOutputs()) { // W + for (auto dstView : dst.getOutputs()) { // W if (aliases.alias(srcView, dstView)) { // if alias, fill WAW addDependenceElem(DependenceType::WAW, LinalgOpView{src.getOperation(), srcView}, @@ -165,9 +165,9 @@ void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) { } } } - for (auto *srcView : src.getInputs()) { // R + for (auto srcView : src.getInputs()) { // R // RAR graph - for (auto *dstView : dst.getInputs()) { // R + for (auto dstView : dst.getInputs()) { // R if (aliases.alias(srcView, dstView)) { // if alias, fill RAR addDependenceElem(DependenceType::RAR, LinalgOpView{src.getOperation(), srcView}, @@ -175,7 +175,7 @@ void LinalgDependenceGraph::addDependencesBetween(LinalgOp src, LinalgOp dst) { } } // WAR graph - for (auto *dstView : dst.getOutputs()) { // W + for (auto dstView : dst.getOutputs()) { // W if (aliases.alias(srcView, dstView)) { // if alias, fill WAR addDependenceElem(DependenceType::WAR, LinalgOpView{src.getOperation(), srcView}, @@ -194,14 +194,14 @@ LinalgDependenceGraph::findCoveringDependences(LinalgOp srcLinalgOp, } SmallVector LinalgDependenceGraph::findCoveringWrites( - LinalgOp srcLinalgOp, LinalgOp 
dstLinalgOp, Value *view) const { + LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view) const { return findOperationsWithCoveringDependences( srcLinalgOp, dstLinalgOp, view, {DependenceType::WAW, DependenceType::WAR}); } SmallVector LinalgDependenceGraph::findCoveringReads( - LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value *view) const { + LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view) const { return findOperationsWithCoveringDependences( srcLinalgOp, dstLinalgOp, view, {DependenceType::RAR, DependenceType::RAW}); @@ -209,7 +209,7 @@ SmallVector LinalgDependenceGraph::findCoveringReads( SmallVector LinalgDependenceGraph::findOperationsWithCoveringDependences( - LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value *view, + LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view, ArrayRef types) const { auto *src = srcLinalgOp.getOperation(); auto *dst = dstLinalgOp.getOperation(); diff --git a/lib/Dialect/Linalg/EDSC/Builders.cpp b/lib/Dialect/Linalg/EDSC/Builders.cpp index ba96186da384..7b530d7f0df0 100644 --- a/lib/Dialect/Linalg/EDSC/Builders.cpp +++ b/lib/Dialect/Linalg/EDSC/Builders.cpp @@ -44,8 +44,8 @@ static void getMaxDimIndex(ArrayRef structuredIndices, Operation *mlir::edsc::makeLinalgGenericOp( ArrayRef iteratorTypes, ArrayRef inputs, ArrayRef outputs, - function_ref)> regionBuilder, - ArrayRef otherValues, ArrayRef otherAttributes) { + function_ref)> regionBuilder, + ArrayRef otherValues, ArrayRef otherAttributes) { auto &builder = edsc::ScopedContext::getBuilder(); auto *ctx = builder.getContext(); unsigned nInputs = inputs.size(); @@ -66,7 +66,7 @@ Operation *mlir::edsc::makeLinalgGenericOp( AffineMap::get(/*dimCount=*/nDims, /*symbolCount=*/0, out.getExprs())); unsigned nViews = nInputs + nOutputs; - SmallVector values; + SmallVector values; values.reserve(nViews); values.append(inputs.begin(), inputs.end()); values.append(outputs.begin(), outputs.end()); @@ -109,7 +109,7 @@ Operation *mlir::edsc::makeLinalgGenericOp( return op; } -void mlir::edsc::ops::macRegionBuilder(ArrayRef args) { +void mlir::edsc::ops::macRegionBuilder(ArrayRef args) { using edsc::op::operator+; using edsc::op::operator*; assert(args.size() == 3 && "expected 3 block arguments"); @@ -122,7 +122,7 @@ Operation *mlir::edsc::ops::linalg_pointwise(UnaryPointwiseOpBuilder unaryOp, StructuredIndexed O) { SmallVector iterTypes(O.getExprs().size(), edsc::IterType::Parallel); - auto fun = [&unaryOp](ArrayRef args) { + auto fun = [&unaryOp](ArrayRef args) { assert(args.size() == 2 && "expected 2 block arguments"); ValueHandle a(args[0]); linalg_yield(unaryOp(a)); @@ -135,7 +135,7 @@ Operation *mlir::edsc::ops::linalg_pointwise_tanh(StructuredIndexed I, ; using edsc::intrinsics::tanh; UnaryPointwiseOpBuilder unOp( - [](ValueHandle a) -> Value * { return tanh(a); }); + [](ValueHandle a) -> ValuePtr { return tanh(a); }); return linalg_pointwise(unOp, I, O); } @@ -146,7 +146,7 @@ Operation *mlir::edsc::ops::linalg_pointwise(BinaryPointwiseOpBuilder binaryOp, StructuredIndexed O) { SmallVector iterTypes(O.getExprs().size(), edsc::IterType::Parallel); - auto fun = [&binaryOp](ArrayRef args) { + auto fun = [&binaryOp](ArrayRef args) { assert(args.size() == 3 && "expected 3 block arguments"); ValueHandle a(args[0]), b(args[1]); linalg_yield(binaryOp(a, b)); @@ -159,14 +159,14 @@ Operation *mlir::edsc::ops::linalg_pointwise_add(StructuredIndexed I1, StructuredIndexed O) { using edsc::op::operator+; BinaryPointwiseOpBuilder binOp( - [](ValueHandle a, ValueHandle b) -> Value * { return a + b; }); + 
[](ValueHandle a, ValueHandle b) -> ValuePtr { return a + b; }); return linalg_pointwise(binOp, I1, I2, O); } Operation *mlir::edsc::ops::linalg_pointwise_max(StructuredIndexed I1, StructuredIndexed I2, StructuredIndexed O) { - BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> Value * { + BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> ValuePtr { using edsc::intrinsics::select; using edsc::op::operator>; return select(a > b, a, b).getValue(); diff --git a/lib/Dialect/Linalg/IR/LinalgOps.cpp b/lib/Dialect/Linalg/IR/LinalgOps.cpp index 6eca181e9b46..c5f30b7e10b9 100644 --- a/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -318,7 +318,7 @@ static ParseResult parseRangeOp(OpAsmParser &parser, OperationState &result) { // SliceOp //===----------------------------------------------------------------------===// void mlir::linalg::SliceOp::build(Builder *b, OperationState &result, - Value *base, ValueRange indexings) { + ValuePtr base, ValueRange indexings) { result.addOperands(base); result.addOperands(indexings); @@ -394,7 +394,7 @@ static LogicalResult verify(SliceOp op) { // TransposeOp //===----------------------------------------------------------------------===// void mlir::linalg::TransposeOp::build(Builder *b, OperationState &result, - Value *view, AffineMapAttr permutation, + ValuePtr view, AffineMapAttr permutation, ArrayRef attrs) { auto permutationMap = permutation.getValue(); assert(permutationMap); diff --git a/lib/Dialect/Linalg/Transforms/Fusion.cpp b/lib/Dialect/Linalg/Transforms/Fusion.cpp index 453daba204cc..49cea7e41707 100644 --- a/lib/Dialect/Linalg/Transforms/Fusion.cpp +++ b/lib/Dialect/Linalg/Transforms/Fusion.cpp @@ -77,16 +77,16 @@ static llvm::cl::list clTileSizes( static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op, ArrayRef loopRanges) { auto maps = loopToOperandRangesMaps(op); - SmallVector clonedViews; + SmallVector clonedViews; clonedViews.reserve(op.getNumInputsAndOutputs()); // Iterate over the inputs and outputs in order. // Extract the subranges from the linearized ranges. - SmallVector ios(op.getInputsAndOutputs()); + SmallVector ios(op.getInputsAndOutputs()); for (auto en : llvm::enumerate(ios)) { unsigned idx = en.index(); auto map = maps[idx]; LLVM_DEBUG(dbgs() << "map: " << map << "\n"); - Value *view = en.value(); + ValuePtr view = en.value(); SmallVector viewRanges(map.getNumResults()); for (auto en2 : llvm::enumerate(map.getResults())) { unsigned d = en2.index(); @@ -99,7 +99,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op, } // Construct a new subview for the tile. unsigned rank = viewRanges.size(); - SmallVector offsets, sizes, strides; + SmallVector offsets, sizes, strides; offsets.reserve(rank); sizes.reserve(rank); strides.reserve(rank); @@ -117,7 +117,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op, } struct ViewDimension { - Value *view; + ValuePtr view; unsigned dimension; }; @@ -130,14 +130,14 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) { auto maps = loopToOperandRangesMaps(op); // Iterate over the inputs and outputs in order. // Extract the subranges from the linearized ranges. 
- SmallVector ios(op.getInputsAndOutputs()); + SmallVector ios(op.getInputsAndOutputs()); for (auto en : llvm::enumerate(ios)) { unsigned idx = en.index(); auto map = maps[idx]; LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange I/O idx: " << idx << "\n"); LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange map: " << map << "\n"); - Value *view = en.value(); - SmallVector viewRanges(map.getNumResults(), nullptr); + ValuePtr view = en.value(); + SmallVector viewRanges(map.getNumResults(), nullptr); for (auto en2 : llvm::enumerate(map.getResults())) { if (loopDepth == en2.value().cast().getPosition()) { LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth @@ -151,9 +151,9 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) { llvm_unreachable("Expect to be able to extract a view defining loop range"); } -static LinalgOp fuse(Value *producedView, LinalgOp producer, LinalgOp consumer, - unsigned consumerIdx, unsigned producerIdx, - OperationFolder *folder) { +static LinalgOp fuse(ValuePtr producedView, LinalgOp producer, + LinalgOp consumer, unsigned consumerIdx, + unsigned producerIdx, OperationFolder *folder) { auto subView = dyn_cast_or_null( consumer.getInput(consumerIdx)->getDefiningOp()); auto slice = dyn_cast_or_null( @@ -206,7 +206,7 @@ static LinalgOp fuse(Value *producedView, LinalgOp producer, LinalgOp consumer, // Encode structural fusion safety preconditions. // Some of these will be lifted in the future with better analysis. static bool isStructurallyFusableProducer(LinalgOp producer, - Value *consumedView, + ValuePtr consumedView, LinalgOp consumer) { if (producer.getNumOutputs() != 1) { LLVM_DEBUG(dbgs() << "\nNot structurally fusable (multi-output)"); @@ -226,7 +226,7 @@ static bool isStructurallyFusableProducer(LinalgOp producer, bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph, LinalgOp consumer, - Value *consumedView, + ValuePtr consumedView, LinalgOp producer) { // Make some simple structural checks that alleviate the need for more // complex analyses. @@ -245,7 +245,7 @@ bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph, } bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph, - LinalgOp consumer, Value *consumedView, + LinalgOp consumer, ValuePtr consumedView, LinalgOp producer) { if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer)) return false; @@ -272,13 +272,13 @@ Optional mlir::linalg::fuseProducerOf( auto producer = cast(dependence.dependentOpView.op); // Check that the dependence is indeed on the input `consumerIdx` view. - auto *consumedView = dependence.indexingView; + auto consumedView = dependence.indexingView; if (consumer.getInput(consumerIdx) != consumedView) continue; // Consumer consumes this view, `isStructurallyFusableProducer` also checks // whether it is a strict subview of the producer view. - auto *producedView = dependence.dependentOpView.view; + auto producedView = dependence.dependentOpView.view; auto producerIdx = producer.getIndexOfOutput(producedView).getValue(); // `consumerIdx` and `producerIdx` exist by construction. 
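The hunks above follow the single mechanical pattern of this patch: parameters and locals spelled `Value *` become the `ValuePtr` typedef, with call sites left untouched. Below is a minimal, self-contained sketch of that pattern, assuming a mock `Value` struct as a stand-in; only the `ValuePtr` spelling comes from this patch, the helper name and everything else is illustrative.

    // Illustrative sketch only: this mock `Value` is a stand-in, not MLIR's class.
    #include <cassert>
    #include <vector>

    struct Value {
      int id;
    };

    // The transitional typedef: still spelled as a pointer, so existing
    // pointer-style call sites keep compiling while signatures migrate.
    using ValuePtr = Value *;

    // was: static bool contains(ArrayRef<Value *> views, Value *producedView);
    // now: the same helper written against the typedef.
    static bool contains(const std::vector<ValuePtr> &views, ValuePtr producedView) {
      for (ValuePtr v : views) // was: for (auto *v : views)
        if (v == producedView)
          return true;
      return false;
    }

    int main() {
      Value a{0}, b{1};
      std::vector<ValuePtr> views = {&a, &b};
      assert(contains(views, &b));
      return 0;
    }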
LLVM_DEBUG(dbgs() << "\nRAW producer: " << *producer.getOperation() diff --git a/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp b/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp index c50c495750ff..e468c19a0b4f 100644 --- a/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp +++ b/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp @@ -49,7 +49,7 @@ using edsc::op::operator==; static SmallVector makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map, - ArrayRef vals) { + ArrayRef vals) { assert(map.getNumSymbols() == 0); assert(map.getNumInputs() == vals.size()); SmallVector res; @@ -57,35 +57,35 @@ makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map, auto dims = map.getNumDims(); for (auto e : map.getResults()) { auto exprMap = AffineMap::get(dims, 0, e); - SmallVector operands(vals.begin(), vals.end()); + SmallVector operands(vals.begin(), vals.end()); canonicalizeMapAndOperands(&exprMap, &operands); res.push_back(affine_apply(exprMap, operands)); } return res; } -static SmallVector permuteIvs(ArrayRef ivs, - Optional permutation) { +static SmallVector permuteIvs(ArrayRef ivs, + Optional permutation) { return permutation ? applyMapToValues(ScopedContext::getBuilder(), ScopedContext::getLocation(), permutation.getValue(), ivs) - : SmallVector(ivs.begin(), ivs.end()); + : SmallVector(ivs.begin(), ivs.end()); } // Creates a number of ranges equal to the number of results in `map`. // The returned ranges correspond to the loop ranges, in the proper order, for // which new loops will be created. -static SmallVector emitLoopRanges(OpBuilder &b, Location loc, - AffineMap map, - ArrayRef allViewSizes); -SmallVector emitLoopRanges(OpBuilder &b, Location loc, - AffineMap map, - ArrayRef allViewSizes) { +static SmallVector emitLoopRanges(OpBuilder &b, Location loc, + AffineMap map, + ArrayRef allViewSizes); +SmallVector emitLoopRanges(OpBuilder &b, Location loc, + AffineMap map, + ArrayRef allViewSizes) { // Apply `map` to get view sizes in loop order. auto sizes = applyMapToValues(b, loc, map, allViewSizes); // Create a new range with the applied tile sizes. 
ScopedContext scope(b, loc); - SmallVector res; + SmallVector res; for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) { res.push_back(range(constant_index(0), sizes[idx], constant_index(1))); } @@ -98,7 +98,7 @@ class LinalgScopedEmitter {}; template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, CopyOp copyOp) { auto nPar = copyOp.getNumParallelLoops(); assert(nPar == allIvs.size()); @@ -121,7 +121,7 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, FillOp fillOp) { auto nPar = fillOp.getNumParallelLoops(); assert(nPar == allIvs.size()); @@ -138,7 +138,7 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, DotOp dotOp) { + static void emitScalarImplementation(ArrayRef allIvs, DotOp dotOp) { assert(allIvs.size() == 1); IndexHandle r_i(allIvs[0]); IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)), @@ -151,7 +151,7 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, MatvecOp matvecOp) { assert(allIvs.size() == 2); IndexHandle i(allIvs[0]), r_j(allIvs[1]); @@ -165,7 +165,7 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, MatmulOp matmulOp) { assert(allIvs.size() == 3); IndexHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]); @@ -179,7 +179,7 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, ConvOp convOp) { auto b = ScopedContext::getBuilder(); auto loc = ScopedContext::getLocation(); @@ -229,14 +229,14 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, GenericOp genericOp) { auto b = ScopedContext::getBuilder(); auto loc = ScopedContext::getLocation(); using edsc::intrinsics::detail::ValueHandleArray; unsigned nInputs = genericOp.getNumInputs(); unsigned nOutputs = genericOp.getNumOutputs(); - SmallVector indexedValues(nInputs + nOutputs); + SmallVector indexedValues(nInputs + nOutputs); // 1.a. Emit std_load from input views. 
for (unsigned i = 0; i < nInputs; ++i) { @@ -324,7 +324,7 @@ class LinalgScopedEmitter { template class LinalgScopedEmitter { public: - static void emitScalarImplementation(ArrayRef allIvs, + static void emitScalarImplementation(ArrayRef allIvs, IndexedGenericOp indexedGenericOp) { auto b = ScopedContext::getBuilder(); auto loc = ScopedContext::getLocation(); @@ -332,7 +332,7 @@ class LinalgScopedEmitter { unsigned nInputs = indexedGenericOp.getNumInputs(); unsigned nOutputs = indexedGenericOp.getNumOutputs(); unsigned nLoops = allIvs.size(); - SmallVector indexedValues(nLoops + nInputs + nOutputs); + SmallVector indexedValues(nLoops + nInputs + nOutputs); for (unsigned i = 0; i < nLoops; ++i) { indexedValues[i] = allIvs[i]; diff --git a/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp b/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp index f4364928af8f..999406e05cf1 100644 --- a/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp +++ b/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp @@ -99,7 +99,7 @@ LogicalResult mlir::linalg::tileAndFuseLinalgOpAndSetMarker( } bool mlir::linalg::detail::isProducedByOpOfTypeImpl( - Operation *consumerOp, Value *consumedView, + Operation *consumerOp, ValuePtr consumedView, function_ref isaOpType) { LinalgOp consumer = dyn_cast(consumerOp); if (!consumer) @@ -175,7 +175,7 @@ LogicalResult mlir::linalg::vectorizeGenericOp(PatternRewriter &rewriter, return failure(); // TODO(ntv): non-identity layout. - auto isStaticMemRefWithIdentityLayout = [](Value *v) { + auto isStaticMemRefWithIdentityLayout = [](ValuePtr v) { auto m = v->getType().dyn_cast(); if (!m || !m.hasStaticShape() || !m.getAffineMaps().empty()) return false; @@ -235,7 +235,7 @@ mlir::linalg::permuteGenericLinalgOp(PatternRewriter &rewriter, Operation *op, LogicalResult mlir::linalg::linalgOpPromoteSubviews(PatternRewriter &rewriter, Operation *op) { LinalgOp linOp = dyn_cast(op); - SetVector subViews; + SetVector subViews; for (auto it : linOp.getInputsAndOutputs()) if (auto sv = dyn_cast_or_null(it->getDefiningOp())) subViews.insert(sv); diff --git a/lib/Dialect/Linalg/Transforms/Promotion.cpp b/lib/Dialect/Linalg/Transforms/Promotion.cpp index c7fbebce3830..b1dae455194e 100644 --- a/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -55,14 +55,15 @@ static llvm::cl::opt clPromoteDynamic( llvm::cl::desc("Test generation of dynamic promoted buffers"), llvm::cl::cat(clOptionsCategory), llvm::cl::init(false)); -static Value *allocBuffer(Type elementType, Value *size, bool dynamicBuffers) { +static ValuePtr allocBuffer(Type elementType, ValuePtr size, + bool dynamicBuffers) { auto *ctx = size->getContext(); auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8); if (!dynamicBuffers) if (auto cst = dyn_cast_or_null(size->getDefiningOp())) return alloc( MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx))); - Value *mul = muli(constant_index(width), size); + ValuePtr mul = muli(constant_index(width), size); return alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul); } @@ -92,20 +93,20 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc, auto viewType = subView.getType(); auto rank = viewType.getRank(); - Value *allocSize = one; - SmallVector fullRanges, partialRanges; + ValuePtr allocSize = one; + SmallVector fullRanges, partialRanges; fullRanges.reserve(rank); partialRanges.reserve(rank); for (auto en : llvm::enumerate(subView.getRanges())) { auto rank = en.index(); auto rangeValue = 
en.value(); - Value *d = rangeValue.size; + ValuePtr d = rangeValue.size; allocSize = muli(folder, allocSize, d).getValue(); fullRanges.push_back(d); partialRanges.push_back(range(folder, zero, dim(subView, rank), one)); } SmallVector dynSizes(fullRanges.size(), -1); - auto *buffer = + auto buffer = allocBuffer(viewType.getElementType(), allocSize, dynamicBuffers); auto fullLocalView = view( MemRefType::get(dynSizes, viewType.getElementType()), buffer, fullRanges); @@ -115,7 +116,7 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc, SmallVector mlir::linalg::promoteSubViews(OpBuilder &b, Location loc, - ArrayRef subViews, bool dynamicBuffers, + ArrayRef subViews, bool dynamicBuffers, OperationFolder *folder) { if (subViews.empty()) return {}; @@ -123,8 +124,8 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc, ScopedContext scope(b, loc); SmallVector res; res.reserve(subViews.size()); - DenseMap promotionInfoMap; - for (auto *v : subViews) { + DenseMap promotionInfoMap; + for (auto v : subViews) { SubViewOp subView = cast(v->getDefiningOp()); auto viewType = subView.getType(); // TODO(ntv): support more cases than just float. @@ -136,7 +137,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc, res.push_back(promotionInfo); } - for (auto *v : subViews) { + for (auto v : subViews) { SubViewOp subView = cast(v->getDefiningOp()); auto info = promotionInfoMap.find(v); if (info == promotionInfoMap.end()) @@ -144,14 +145,14 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc, // TODO(ntv): value to fill with should be related to the operation. // For now, just use APFloat(0.0f). auto t = subView.getType().getElementType().cast(); - Value *fillVal = constant_float(folder, APFloat(0.0f), t); + ValuePtr fillVal = constant_float(folder, APFloat(0.0f), t); // TODO(ntv): fill is only necessary if `promotionInfo` has a full local // view that is different from the partial local view and we are on the // boundary. fill(info->second.fullLocalView, fillVal); } - for (auto *v : subViews) { + for (auto v : subViews) { auto info = promotionInfoMap.find(v); if (info == promotionInfoMap.end()) continue; @@ -161,19 +162,19 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc, } LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op, - SetVector subViews, + SetVector subViews, bool dynamicBuffers, OperationFolder *folder) { // 1. Promote the specified views and use them in the new op. ScopedContext scope(b, op.getLoc()); auto promotedBufferAndViews = promoteSubViews( b, op.getLoc(), subViews.getArrayRef(), dynamicBuffers, folder); - SmallVector opViews; + SmallVector opViews; opViews.reserve(op.getNumInputsAndOutputs()); - SmallVector, 8> writebackViews; + SmallVector, 8> writebackViews; writebackViews.reserve(subViews.size()); unsigned promotedIdx = 0; - for (auto *view : op.getInputsAndOutputs()) { + for (auto view : op.getInputsAndOutputs()) { if (subViews.count(view) != 0) { opViews.push_back(promotedBufferAndViews[promotedIdx].fullLocalView); writebackViews.emplace_back(std::make_pair( @@ -214,7 +215,7 @@ static void promoteSubViews(FuncOp f, bool dynamicBuffers) { f.walk([dynamicBuffers, &folder, &toErase](LinalgOp op) { // TODO(ntv) some heuristic here to decide what to promote. Atm it is all or // nothing. 
- SetVector subViews; + SetVector subViews; OpBuilder b(op); for (auto it : op.getInputsAndOutputs()) if (auto sv = dyn_cast_or_null(it->getDefiningOp())) diff --git a/lib/Dialect/Linalg/Transforms/Tiling.cpp b/lib/Dialect/Linalg/Transforms/Tiling.cpp index 4d8a24cb6cb5..07d559918cf3 100644 --- a/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -53,7 +53,7 @@ static llvm::cl::list llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated, llvm::cl::cat(clOptionsCategory)); -static bool isZero(Value *v) { +static bool isZero(ValuePtr v) { return isa_and_nonnull(v->getDefiningOp()) && cast(v->getDefiningOp()).getValue() == 0; } @@ -71,12 +71,12 @@ using LoopIndexToRangeIndexMap = DenseMap; // indices of newly created loops. static std::tuple, LoopIndexToRangeIndexMap> makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map, - ArrayRef allViewSizes, - ArrayRef allTileSizes, OperationFolder *folder) { + ArrayRef allViewSizes, + ArrayRef allTileSizes, OperationFolder *folder) { assert(allTileSizes.size() == map.getNumResults()); // Apply `map` to get view sizes in loop order. auto viewSizes = applyMapToValues(b, loc, map, allViewSizes, folder); - SmallVector tileSizes(allTileSizes.begin(), allTileSizes.end()); + SmallVector tileSizes(allTileSizes.begin(), allTileSizes.end()); // Traverse the tile sizes, which are in loop order, erase zeros everywhere. LoopIndexToRangeIndexMap loopIndexToRangeIndex; @@ -110,7 +110,7 @@ namespace { // `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0] // struct TileCheck : public AffineExprVisitor { - TileCheck(ArrayRef tileSizes) + TileCheck(ArrayRef tileSizes) : isTiled(false), tileSizes(tileSizes) {} void visitDimExpr(AffineDimExpr expr) { @@ -124,7 +124,7 @@ struct TileCheck : public AffineExprVisitor { "nonpositive multiplying coefficient"); } bool isTiled; - ArrayRef tileSizes; + ArrayRef tileSizes; }; } // namespace @@ -206,11 +206,11 @@ void transformIndexedGenericOpIndices( auto rangeIndex = loopIndexToRangeIndex.find(i); if (rangeIndex == loopIndexToRangeIndex.end()) continue; - Value *oldIndex = block.getArgument(i); + ValuePtr oldIndex = block.getArgument(i); // Offset the index argument `i` by the value of the corresponding induction // variable and replace all uses of the previous value. - Value *newIndex = b.create(indexedGenericOp.getLoc(), oldIndex, - pivs[rangeIndex->second]->getValue()); + ValuePtr newIndex = b.create(indexedGenericOp.getLoc(), oldIndex, + pivs[rangeIndex->second]->getValue()); for (auto &use : oldIndex->getUses()) { if (use.getOwner() == newIndex->getDefiningOp()) continue; @@ -219,7 +219,7 @@ void transformIndexedGenericOpIndices( } } -static bool isTiled(AffineExpr expr, ArrayRef tileSizes) { +static bool isTiled(AffineExpr expr, ArrayRef tileSizes) { if (!expr) return false; TileCheck t(tileSizes); @@ -229,7 +229,7 @@ static bool isTiled(AffineExpr expr, ArrayRef tileSizes) { // Checks whether the view with index `viewIndex` within `linalgOp` varies with // respect to a non-zero `tileSize`. 
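Container declarations migrate the same way: only the element type of SmallVector/SetVector/DenseMap changes, and `auto *` bindings drop the `*` because `ValuePtr` already is a pointer type. A hedged stand-alone sketch using std containers in place of the LLVM ones; the variable names and the inline size `N` mentioned in the comments are assumptions for illustration.

    // Illustrative only: std containers stand in for llvm::SmallVector/SetVector.
    #include <cassert>
    #include <set>
    #include <vector>

    struct Value { int id; };
    using ValuePtr = Value *;

    int main() {
      Value v0{0}, v1{1};

      // was: SmallVector<Value *, N> subViews;   now: SmallVector<ValuePtr, N>
      std::vector<ValuePtr> subViews = {&v0, &v1, &v1};

      // was: SetVector<Value *> unique;          now: SetVector<ValuePtr>
      std::set<ValuePtr> uniqueViews(subViews.begin(), subViews.end());
      assert(uniqueViews.size() == 2);

      // was: for (auto *v : subViews)            now: for (auto v : subViews)
      for (auto v : uniqueViews)
        assert(v != nullptr);
      return 0;
    }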
-static bool isTiled(AffineMap map, ArrayRef tileSizes) { +static bool isTiled(AffineMap map, ArrayRef tileSizes) { if (!map) return false; for (unsigned r = 0; r < map.getNumResults(); ++r) @@ -238,13 +238,13 @@ static bool isTiled(AffineMap map, ArrayRef tileSizes) { return false; } -static SmallVector +static SmallVector makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp, - ArrayRef ivs, ArrayRef tileSizes, - ArrayRef viewSizes, OperationFolder *folder) { + ArrayRef ivs, ArrayRef tileSizes, + ArrayRef viewSizes, OperationFolder *folder) { assert(ivs.size() == static_cast(llvm::count_if( llvm::make_range(tileSizes.begin(), tileSizes.end()), - [](Value *v) { return !isZero(v); })) && + [](ValuePtr v) { return !isZero(v); })) && "expected as many ivs as non-zero sizes"); using edsc::intrinsics::select; @@ -253,21 +253,22 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp, // Construct (potentially temporary) mins and maxes on which to apply maps // that define tile subviews. - SmallVector lbs, subViewSizes; + SmallVector lbs, subViewSizes; for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) { bool isTiled = !isZero(tileSizes[idx]); - lbs.push_back(isTiled ? ivs[idxIvs++] : (Value *)constant_index(folder, 0)); + lbs.push_back(isTiled ? ivs[idxIvs++] + : (ValuePtr)constant_index(folder, 0)); subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]); } auto *op = linalgOp.getOperation(); - SmallVector res; + SmallVector res; res.reserve(op->getNumOperands()); auto viewIteratorBegin = linalgOp.getInputsAndOutputs().begin(); for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs(); ++viewIndex) { - Value *view = *(viewIteratorBegin + viewIndex); + ValuePtr view = *(viewIteratorBegin + viewIndex); unsigned rank = view->getType().cast().getRank(); auto map = loopToOperandRangesMaps(linalgOp)[viewIndex]; // If the view is not tiled, we can use it as is. @@ -277,7 +278,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp, } // Construct a new subview for the tile. - SmallVector offsets, sizes, strides; + SmallVector offsets, sizes, strides; offsets.reserve(rank); sizes.reserve(rank); strides.reserve(rank); @@ -292,9 +293,9 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp, // Tiling creates a new slice at the proper index, the slice step is 1 // (i.e. the slice view does not subsample, stepping occurs in the loop). auto m = map.getSubMap({r}); - auto *offset = applyMapToValues(b, loc, m, lbs, folder).front(); + auto offset = applyMapToValues(b, loc, m, lbs, folder).front(); offsets.push_back(offset); - auto *size = applyMapToValues(b, loc, m, subViewSizes, folder).front(); + auto size = applyMapToValues(b, loc, m, subViewSizes, folder).front(); sizes.push_back(size); strides.push_back(constant_index(folder, 1)); } @@ -308,7 +309,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp, // This is a special type of folding that we only apply when `folder` is // defined. if (folder) - for (auto *v : llvm::concat(lbs, subViewSizes)) + for (auto v : llvm::concat(lbs, subViewSizes)) if (v->use_empty()) v->getDefiningOp()->erase(); @@ -316,7 +317,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp, } Optional mlir::linalg::tileLinalgOp( - OpBuilder &b, LinalgOp op, ArrayRef tileSizes, + OpBuilder &b, LinalgOp op, ArrayRef tileSizes, ArrayRef permutation, OperationFolder *folder) { // 1. Enforce the convention that "tiling by zero" skips tiling a particular // dimension. 
This convention is significantly simpler to handle instead of @@ -360,7 +361,7 @@ Optional mlir::linalg::tileLinalgOp( LoopNestRangeBuilder(pivs, loopRanges)([&] { auto b = ScopedContext::getBuilder(); auto loc = ScopedContext::getLocation(); - SmallVector ivValues(ivs.begin(), ivs.end()); + SmallVector ivValues(ivs.begin(), ivs.end()); // If we have to apply a permutation to the tiled loop nest, we have to // reorder the induction variables This permutation is the right one @@ -411,7 +412,7 @@ Optional mlir::linalg::tileLinalgOp( ScopedContext scope(b, op.getLoc()); // Materialize concrete tile size values to pass the generic tiling function. - SmallVector tileSizeValues; + SmallVector tileSizeValues; tileSizeValues.reserve(tileSizes.size()); for (auto ts : tileSizes) tileSizeValues.push_back(constant_index(folder, ts)); diff --git a/lib/Dialect/Linalg/Utils/Utils.cpp b/lib/Dialect/Linalg/Utils/Utils.cpp index eb501f9b5b57..125937807f4a 100644 --- a/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/lib/Dialect/Linalg/Utils/Utils.cpp @@ -92,7 +92,7 @@ mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder( } mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder( - ArrayRef ivs, ArrayRef ranges) + ArrayRef ivs, ArrayRef ranges) : LoopNestRangeBuilder( ivs, SmallVector(ranges.begin(), ranges.end())) {} @@ -106,26 +106,26 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()( return ValueHandle::null(); } -static Value *emitOrFoldComposedAffineApply(OpBuilder &b, Location loc, - AffineMap map, - ArrayRef operandsRef, - OperationFolder *folder) { - SmallVector operands(operandsRef.begin(), operandsRef.end()); +static ValuePtr emitOrFoldComposedAffineApply(OpBuilder &b, Location loc, + AffineMap map, + ArrayRef operandsRef, + OperationFolder *folder) { + SmallVector operands(operandsRef.begin(), operandsRef.end()); fullyComposeAffineMapAndOperands(&map, &operands); canonicalizeMapAndOperands(&map, &operands); return folder ? folder->create(b, loc, map, operands) : b.create(loc, map, operands); } -SmallVector +SmallVector mlir::linalg::applyMapToValues(OpBuilder &b, Location loc, AffineMap map, - ArrayRef values, + ArrayRef values, OperationFolder *folder) { - SmallVector res; + SmallVector res; res.reserve(map.getNumResults()); unsigned numDims = map.getNumDims(); // For each `expr` in `map`, applies the `expr` to the values extracted from - // ranges. If the resulting application can be folded into a Value*, the + // ranges. If the resulting application can be folded into a Value, the // folding occurs eagerly. Otherwise, an affine.apply operation is emitted. for (auto expr : map.getResults()) { AffineMap map = AffineMap::get(numDims, 0, expr); @@ -137,12 +137,12 @@ mlir::linalg::applyMapToValues(OpBuilder &b, Location loc, AffineMap map, /// Returns all the operands of `linalgOp` that are not views. /// Asserts that these operands are value types to allow transformations like /// tiling to just use the values when cloning `linalgOp`. 
-SmallVector +SmallVector mlir::linalg::getAssumedNonViewOperands(LinalgOp linalgOp) { auto *op = linalgOp.getOperation(); unsigned numViews = linalgOp.getNumInputsAndOutputs(); unsigned nOperands = op->getNumOperands() - numViews; - SmallVector res; + SmallVector res; res.reserve(nOperands); for (unsigned i = 0; i < nOperands; ++i) { res.push_back(op->getOperand(numViews + i)); diff --git a/lib/Dialect/LoopOps/LoopOps.cpp b/lib/Dialect/LoopOps/LoopOps.cpp index fc8832e9a462..9610a1ac270e 100644 --- a/lib/Dialect/LoopOps/LoopOps.cpp +++ b/lib/Dialect/LoopOps/LoopOps.cpp @@ -69,8 +69,8 @@ LoopOpsDialect::LoopOpsDialect(MLIRContext *context) // ForOp //===----------------------------------------------------------------------===// -void ForOp::build(Builder *builder, OperationState &result, Value *lb, - Value *ub, Value *step) { +void ForOp::build(Builder *builder, OperationState &result, ValuePtr lb, + ValuePtr ub, ValuePtr step) { result.addOperands({lb, ub, step}); Region *bodyRegion = result.addRegion(); ForOp::ensureTerminator(*bodyRegion, *builder, result.location); @@ -134,7 +134,7 @@ static ParseResult parseForOp(OpAsmParser &parser, OperationState &result) { Region &ForOp::getLoopBody() { return region(); } -bool ForOp::isDefinedOutsideOfLoop(Value *value) { +bool ForOp::isDefinedOutsideOfLoop(ValuePtr value) { return !region().isAncestor(value->getParentRegion()); } @@ -144,8 +144,8 @@ LogicalResult ForOp::moveOutOfLoop(ArrayRef ops) { return success(); } -ForOp mlir::loop::getForInductionVarOwner(Value *val) { - auto *ivArg = dyn_cast(val); +ForOp mlir::loop::getForInductionVarOwner(ValuePtr val) { + auto ivArg = dyn_cast(val); if (!ivArg) return ForOp(); assert(ivArg->getOwner() && "unlinked block argument"); @@ -157,7 +157,7 @@ ForOp mlir::loop::getForInductionVarOwner(Value *val) { // IfOp //===----------------------------------------------------------------------===// -void IfOp::build(Builder *builder, OperationState &result, Value *cond, +void IfOp::build(Builder *builder, OperationState &result, ValuePtr cond, bool withElseRegion) { result.addOperands(cond); Region *thenRegion = result.addRegion(); diff --git a/lib/Dialect/SPIRV/SPIRVDialect.cpp b/lib/Dialect/SPIRV/SPIRVDialect.cpp index def8ee810fe4..4416e1e6b040 100644 --- a/lib/Dialect/SPIRV/SPIRVDialect.cpp +++ b/lib/Dialect/SPIRV/SPIRVDialect.cpp @@ -94,7 +94,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator by replacing it with a new operation /// as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only spv.ReturnValue needs to be handled here. auto retValOp = dyn_cast(op); if (!retValOp) diff --git a/lib/Dialect/SPIRV/SPIRVLowering.cpp b/lib/Dialect/SPIRV/SPIRVLowering.cpp index 284fe915029b..ca9b883a703e 100644 --- a/lib/Dialect/SPIRV/SPIRVLowering.cpp +++ b/lib/Dialect/SPIRV/SPIRVLowering.cpp @@ -229,9 +229,9 @@ getOrInsertBuiltinVariable(spirv::ModuleOp &moduleOp, Location loc, /// Gets the global variable associated with a builtin and add /// it if it doesn't exist. 
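Return types migrate along with parameters: helpers that used to return `Value *` or a vector of `Value *` now spell the element type as `ValuePtr`. A small illustrative sketch under the same mock-type assumption; the helper names here are invented for the example.

    // Illustrative only: mock types; only the `ValuePtr` spelling comes from the patch.
    #include <cassert>
    #include <vector>

    struct Value { int id; };
    using ValuePtr = Value *;

    // was: static Value *pickFirst(ArrayRef<Value *> vals);
    // now: the return type is written against the typedef as well.
    static ValuePtr pickFirst(const std::vector<ValuePtr> &vals) {
      return vals.empty() ? nullptr : vals.front();
    }

    // was: SmallVector<Value *, N> duplicate(...); the element type becomes ValuePtr.
    static std::vector<ValuePtr> duplicate(ValuePtr v, unsigned n) {
      return std::vector<ValuePtr>(n, v);
    }

    int main() {
      Value a{7};
      std::vector<ValuePtr> vals = {&a};
      assert(pickFirst(vals) == &a);
      assert(duplicate(&a, 3).size() == 3);
      return 0;
    }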
-Value *mlir::spirv::getBuiltinVariableValue(Operation *op, - spirv::BuiltIn builtin, - OpBuilder &builder) { +ValuePtr mlir::spirv::getBuiltinVariableValue(Operation *op, + spirv::BuiltIn builtin, + OpBuilder &builder) { auto moduleOp = op->getParentOfType(); if (!moduleOp) { op->emitError("expected operation to be within a SPIR-V module"); @@ -239,7 +239,7 @@ Value *mlir::spirv::getBuiltinVariableValue(Operation *op, } spirv::GlobalVariableOp varOp = getOrInsertBuiltinVariable(moduleOp, op->getLoc(), builtin, builder); - Value *ptr = builder.create(op->getLoc(), varOp); + ValuePtr ptr = builder.create(op->getLoc(), varOp); return builder.create(op->getLoc(), ptr, /*memory_access =*/nullptr, /*alignment =*/nullptr); diff --git a/lib/Dialect/SPIRV/SPIRVOps.cpp b/lib/Dialect/SPIRV/SPIRVOps.cpp index 0df4525bac6c..a20c18056e15 100644 --- a/lib/Dialect/SPIRV/SPIRVOps.cpp +++ b/lib/Dialect/SPIRV/SPIRVOps.cpp @@ -273,8 +273,8 @@ static LogicalResult verifyMemorySemantics(BarrierOp op) { } template -static LogicalResult verifyLoadStorePtrAndValTypes(LoadStoreOpTy op, Value *ptr, - Value *val) { +static LogicalResult verifyLoadStorePtrAndValTypes(LoadStoreOpTy op, + ValuePtr ptr, ValuePtr val) { // ODS already checks ptr is spirv::PointerType. Just check that the pointee // type of the pointer and the type of the value are the same // @@ -664,8 +664,8 @@ static ParseResult parseShiftOp(OpAsmParser &parser, OperationState &state) { } static void printShiftOp(Operation *op, OpAsmPrinter &printer) { - Value *base = op->getOperand(0); - Value *shift = op->getOperand(1); + ValuePtr base = op->getOperand(0); + ValuePtr shift = op->getOperand(1); printer << op->getName() << ' ' << *base << ", " << *shift << " : " << base->getType() << ", " << shift->getType(); } @@ -742,7 +742,7 @@ static Type getElementPtrType(Type type, ValueRange indices, Location baseLoc) { } void spirv::AccessChainOp::build(Builder *builder, OperationState &state, - Value *basePtr, ValueRange indices) { + ValuePtr basePtr, ValueRange indices) { auto type = getElementPtrType(basePtr->getType(), indices, state.location); assert(type && "Unable to deduce return type based on basePtr and indices"); build(builder, state, type, basePtr, indices); @@ -782,8 +782,8 @@ static void print(spirv::AccessChainOp op, OpAsmPrinter &printer) { } static LogicalResult verify(spirv::AccessChainOp accessChainOp) { - SmallVector indices(accessChainOp.indices().begin(), - accessChainOp.indices().end()); + SmallVector indices(accessChainOp.indices().begin(), + accessChainOp.indices().end()); auto resultType = getElementPtrType(accessChainOp.base_ptr()->getType(), indices, accessChainOp.getLoc()); if (!resultType) { @@ -824,7 +824,7 @@ struct CombineChainedAccessChain } // Combine indices. 
- SmallVector indices(parentAccessChainOp.indices()); + SmallVector indices(parentAccessChainOp.indices()); indices.append(accessChainOp.indices().begin(), accessChainOp.indices().end()); @@ -1060,7 +1060,7 @@ static LogicalResult verify(spirv::BitFieldInsertOp bitFieldOp) { static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &state) { Block *dest; - SmallVector destOperands; + SmallVector destOperands; if (parser.parseSuccessorAndUseList(dest, destOperands)) return failure(); state.addSuccessor(dest, destOperands); @@ -1089,7 +1089,7 @@ static ParseResult parseBranchConditionalOp(OpAsmParser &parser, auto &builder = parser.getBuilder(); OpAsmParser::OperandType condInfo; Block *dest; - SmallVector destOperands; + SmallVector destOperands; // Parse the condition. Type boolTy = builder.getI1Type(); @@ -1214,7 +1214,7 @@ static void print(spirv::CompositeConstructOp compositeConstructOp, static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) { auto cType = compositeConstructOp.getType().cast(); - SmallVector constituents(compositeConstructOp.constituents()); + SmallVector constituents(compositeConstructOp.constituents()); if (constituents.size() != cType.getNumElements()) { return compositeConstructOp.emitError( "has incorrect number of operands: expected ") @@ -1239,7 +1239,7 @@ static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) { //===----------------------------------------------------------------------===// void spirv::CompositeExtractOp::build(Builder *builder, OperationState &state, - Value *composite, + ValuePtr composite, ArrayRef indices) { auto indexAttr = builder->getI32ArrayAttr(indices); auto elementType = @@ -1963,7 +1963,7 @@ OpFoldResult spirv::ISubOp::fold(ArrayRef operands) { //===----------------------------------------------------------------------===// void spirv::LoadOp::build(Builder *builder, OperationState &state, - Value *basePtr, IntegerAttr memory_access, + ValuePtr basePtr, IntegerAttr memory_access, IntegerAttr alignment) { auto ptrType = basePtr->getType().cast(); build(builder, state, ptrType.getPointeeType(), basePtr, memory_access, @@ -2497,7 +2497,8 @@ static LogicalResult verify(spirv::ReturnValueOp retValOp) { //===----------------------------------------------------------------------===// void spirv::SelectOp::build(Builder *builder, OperationState &state, - Value *cond, Value *trueValue, Value *falseValue) { + ValuePtr cond, ValuePtr trueValue, + ValuePtr falseValue) { build(builder, state, trueValue->getType(), cond, trueValue, falseValue); } @@ -2698,9 +2699,9 @@ struct ConvertSelectionOpToSelect return matchFailure(); } - auto *trueValue = getSrcValue(trueBlock); - auto *falseValue = getSrcValue(falseBlock); - auto *ptrValue = getDstPtr(trueBlock); + auto trueValue = getSrcValue(trueBlock); + auto falseValue = getSrcValue(falseBlock); + auto ptrValue = getDstPtr(trueBlock); auto storeOpAttributes = cast(trueBlock->front()).getOperation()->getAttrs(); @@ -2747,13 +2748,13 @@ struct ConvertSelectionOpToSelect } // Returns a soruce value for the given block. - Value *getSrcValue(Block *block) const { + ValuePtr getSrcValue(Block *block) const { auto storeOp = cast(block->front()); return storeOp.value(); } // Returns a destination value for the given block. 
- Value *getDstPtr(Block *block) const { + ValuePtr getDstPtr(Block *block) const { auto storeOp = cast(block->front()); return storeOp.ptr(); } diff --git a/lib/Dialect/SPIRV/Serialization/Deserializer.cpp b/lib/Dialect/SPIRV/Serialization/Deserializer.cpp index df9cb47a5622..799828cb6290 100644 --- a/lib/Dialect/SPIRV/Serialization/Deserializer.cpp +++ b/lib/Dialect/SPIRV/Serialization/Deserializer.cpp @@ -327,7 +327,7 @@ class Deserializer { /// This method materializes normal constants and inserts "casting" ops /// (`spv._address_of` and `spv._reference_of`) to turn an symbol into a SSA /// value for handling uses of module scope constants/variables in functions. - Value *getValue(uint32_t id); + ValuePtr getValue(uint32_t id); /// Slices the first instruction out of `binary` and returns its opcode and /// operands via `opcode` and `operands` respectively. Returns failure if @@ -446,7 +446,7 @@ class Deserializer { DenseMap blockPhiInfo; // Result to value mapping. - DenseMap valueMap; + DenseMap valueMap; // Mapping from result to undef value of a type. DenseMap undefMap; @@ -1520,7 +1520,7 @@ Deserializer::processBranchConditional(ArrayRef operands) { "false label, and optionally two branch weights"); } - auto *condition = getValue(operands[0]); + auto condition = getValue(operands[0]); auto *trueBlock = getOrCreateBlock(operands[1]); auto *falseBlock = getOrCreateBlock(operands[2]); @@ -1531,8 +1531,8 @@ Deserializer::processBranchConditional(ArrayRef operands) { opBuilder.create( unknownLoc, condition, trueBlock, - /*trueArguments=*/ArrayRef(), falseBlock, - /*falseArguments=*/ArrayRef(), weights); + /*trueArguments=*/ArrayRef(), falseBlock, + /*falseArguments=*/ArrayRef(), weights); return success(); } @@ -1626,7 +1626,7 @@ LogicalResult Deserializer::processPhi(ArrayRef operands) { // Create a block argument for this OpPhi instruction. Type blockArgType = getType(operands[0]); - BlockArgument *blockArg = curBlock->addArgument(blockArgType); + BlockArgumentPtr blockArg = curBlock->addArgument(blockArgType); valueMap[operands[1]] = blockArg; LLVM_DEBUG(llvm::dbgs() << "[phi] created block argument " << blockArg << " id = " << operands[1] << " of type " @@ -1783,8 +1783,8 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() { LLVM_DEBUG(llvm::dbgs() << "[cf] cloned block " << newBlock << " from block " << block << "\n"); if (!isFnEntryBlock(block)) { - for (BlockArgument *blockArg : block->getArguments()) { - auto *newArg = newBlock->addArgument(blockArg->getType()); + for (BlockArgumentPtr blockArg : block->getArguments()) { + auto newArg = newBlock->addArgument(blockArg->getType()); mapper.map(blockArg, newArg); LLVM_DEBUG(llvm::dbgs() << "[cf] remapped block argument " << blockArg << " to " << newArg << '\n'); @@ -1801,10 +1801,10 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() { // Go through all ops and remap the operands. 
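The deserializer applies the same convention to block arguments and to its id-to-value tables: `BlockArgument *` becomes `BlockArgumentPtr`, and the lookup maps are keyed or valued on the typedefs. A stand-alone sketch with mock types; only the `*Ptr` spellings come from the patch, and std::map stands in for DenseMap.

    // Illustrative only: mock types; the `BlockArgumentPtr` spelling comes from
    // the patch, everything else here is a stand-in.
    #include <cassert>
    #include <cstdint>
    #include <map>

    struct BlockArgument { unsigned index; };
    using BlockArgumentPtr = BlockArgument *;

    int main() {
      BlockArgument arg{0};

      // was: BlockArgument *blockArg = curBlock->addArgument(type);
      BlockArgumentPtr blockArg = &arg;

      // was: DenseMap<uint32_t, Value *> valueMap; a std::map stands in here,
      // keyed by SPIR-V result id and mapping to the typedef.
      std::map<uint32_t, BlockArgumentPtr> valueMap;
      valueMap[1] = blockArg;
      assert(valueMap.at(1)->index == 0);
      return 0;
    }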
auto remapOperands = [&](Operation *op) { for (auto &operand : op->getOpOperands()) - if (auto *mappedOp = mapper.lookupOrNull(operand.get())) + if (auto mappedOp = mapper.lookupOrNull(operand.get())) operand.set(mappedOp); for (auto &succOp : op->getBlockOperands()) - if (auto *mappedOp = mapper.lookupOrNull(succOp.get())) + if (auto mappedOp = mapper.lookupOrNull(succOp.get())) succOp.set(mappedOp); }; for (auto &block : body) { @@ -1824,13 +1824,13 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() { // we place the selection/loop op inside the old merge block, we need to // make sure the old merge block has the same block argument list. assert(mergeBlock->args_empty() && "OpPhi in loop merge block unsupported"); - for (BlockArgument *blockArg : headerBlock->getArguments()) { + for (BlockArgumentPtr blockArg : headerBlock->getArguments()) { mergeBlock->addArgument(blockArg->getType()); } // If the loop header block has block arguments, make sure the spv.branch op // matches. - SmallVector blockArgs; + SmallVector blockArgs; if (!headerBlock->args_empty()) blockArgs = {mergeBlock->args_begin(), mergeBlock->args_end()}; @@ -1838,7 +1838,7 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() { // loop header block. builder.setInsertionPointToEnd(&body.front()); builder.create(location, mapper.lookupOrNull(headerBlock), - ArrayRef(blockArgs)); + ArrayRef(blockArgs)); } // All the blocks cloned into the SelectionOp/LoopOp's region can now be @@ -1924,10 +1924,10 @@ LogicalResult Deserializer::wireUpBlockArgument() { auto *op = block->getTerminator(); opBuilder.setInsertionPoint(op); - SmallVector blockArgs; + SmallVector blockArgs; blockArgs.reserve(phiInfo.size()); for (uint32_t valueId : phiInfo) { - if (Value *value = getValue(valueId)) { + if (ValuePtr value = getValue(valueId)) { blockArgs.push_back(value); LLVM_DEBUG(llvm::dbgs() << "[phi] block argument " << value << " id = " << valueId << '\n'); @@ -1996,7 +1996,7 @@ LogicalResult Deserializer::structurizeControlFlow() { // Instruction //===----------------------------------------------------------------------===// -Value *Deserializer::getValue(uint32_t id) { +ValuePtr Deserializer::getValue(uint32_t id) { if (auto constInfo = getConstant(id)) { // Materialize a `spv.constant` op at every use site. 
return opBuilder.create(unknownLoc, constInfo->second, @@ -2192,7 +2192,7 @@ LogicalResult Deserializer::processBitcast(ArrayRef words) { } } valueID = words[wordIndex++]; - SmallVector operands; + SmallVector operands; SmallVector attributes; if (wordIndex < words.size()) { auto arg = getValue(words[wordIndex]); @@ -2366,9 +2366,9 @@ Deserializer::processOp(ArrayRef operands) { auto functionName = getFunctionSymbol(functionID); - SmallVector arguments; + SmallVector arguments; for (auto operand : llvm::drop_begin(operands, 3)) { - auto *value = getValue(operand); + auto value = getValue(operand); if (!value) { return emitError(unknownLoc, "unknown ") << operand << " used by OpFunctionCall"; diff --git a/lib/Dialect/SPIRV/Serialization/Serializer.cpp b/lib/Dialect/SPIRV/Serialization/Serializer.cpp index 4baac53b89ff..9b47045ea61a 100644 --- a/lib/Dialect/SPIRV/Serialization/Serializer.cpp +++ b/lib/Dialect/SPIRV/Serialization/Serializer.cpp @@ -323,7 +323,7 @@ class Serializer { uint32_t opcode, ArrayRef operands); - uint32_t getValueID(Value *val) const { return valueIDMap.lookup(val); } + uint32_t getValueID(ValuePtr val) const { return valueIDMap.lookup(val); } LogicalResult processAddressOfOp(spirv::AddressOfOp addressOfOp); @@ -414,7 +414,7 @@ class Serializer { DenseMap undefValIDMap; /// Map from results of normal operations to their s. - DenseMap valueIDMap; + DenseMap valueIDMap; /// Map from extended instruction set name to s. llvm::StringMap extendedInstSetIDMap; @@ -457,7 +457,7 @@ class Serializer { /// placed inside `functions`) here. And then after emitting all blocks, we /// replace the dummy 0 with the real result by overwriting /// `functions[offset]`. - DenseMap> deferredPhiValues; + DenseMap> deferredPhiValues; }; } // namespace @@ -513,12 +513,12 @@ void Serializer::collect(SmallVectorImpl &binary) { void Serializer::printValueIDMap(raw_ostream &os) { os << "\n= Value Map =\n\n"; for (auto valueIDPair : valueIDMap) { - Value *val = valueIDPair.first; + ValuePtr val = valueIDPair.first; os << " " << val << " " << "id = " << valueIDPair.second << ' '; if (auto *op = val->getDefiningOp()) { os << "from op '" << op->getName() << "'"; - } else if (auto *arg = dyn_cast(val)) { + } else if (auto arg = dyn_cast(val)) { Block *block = arg->getOwner(); os << "from argument of block " << block << ' '; os << " in op '" << block->getParentOp()->getName() << "'"; @@ -752,7 +752,7 @@ LogicalResult Serializer::processFuncOp(FuncOp op) { // There might be OpPhi instructions who have value references needing to fix. for (auto deferredValue : deferredPhiValues) { - Value *value = deferredValue.first; + ValuePtr value = deferredValue.first; uint32_t id = getValueID(value); LLVM_DEBUG(llvm::dbgs() << "[phi] fix reference of value " << value << " to id = " << id << '\n'); @@ -1402,7 +1402,7 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) { // Then create OpPhi instruction for each of the block argument. for (auto argIndex : llvm::seq(0, block->getNumArguments())) { - BlockArgument *arg = block->getArgument(argIndex); + BlockArgumentPtr arg = block->getArgument(argIndex); // Get the type and result for this OpPhi instruction. 
uint32_t phiTypeID = 0; @@ -1418,7 +1418,7 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) { phiArgs.push_back(phiID); for (auto predIndex : llvm::seq(0, predecessors.size())) { - Value *value = *(predecessors[predIndex].second + argIndex); + ValuePtr value = *(predecessors[predIndex].second + argIndex); uint32_t predBlockId = getOrCreateBlockID(predecessors[predIndex].first); LLVM_DEBUG(llvm::dbgs() << "[phi] use predecessor (id = " << predBlockId << ") value " << value << ' '); @@ -1784,7 +1784,7 @@ Serializer::processOp(spirv::FunctionCallOp op) { auto funcCallID = getNextID(); SmallVector operands{resTypeID, funcCallID, funcID}; - for (auto *value : op.arguments()) { + for (auto value : op.arguments()) { auto valueID = getValueID(value); assert(valueID && "cannot find a value for spv.FunctionCall"); operands.push_back(valueID); diff --git a/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp index d48b31fe4917..93ce2c0a0d5c 100644 --- a/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp +++ b/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp @@ -140,7 +140,7 @@ class FuncOpLowering final : public SPIRVOpLowering { public: using SPIRVOpLowering::SPIRVOpLowering; PatternMatchResult - matchAndRewrite(FuncOp funcOp, ArrayRef operands, + matchAndRewrite(FuncOp funcOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override; }; @@ -153,7 +153,7 @@ class LowerABIAttributesPass final } // namespace PatternMatchResult -FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef operands, +FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const { if (!funcOp.getAttrOfType( spirv::getEntryPointABIAttrName())) { @@ -183,7 +183,7 @@ FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef operands, OpBuilder::InsertionGuard funcInsertionGuard(rewriter); rewriter.setInsertionPointToStart(&funcOp.front()); // Insert spirv::AddressOf and spirv::AccessChain operations. - Value *replacement = + ValuePtr replacement = rewriter.create(funcOp.getLoc(), var); // Check if the arg is a scalar or vector type. In that case, the value // needs to be loaded into registers. diff --git a/lib/Dialect/StandardOps/Ops.cpp b/lib/Dialect/StandardOps/Ops.cpp index 4116f6f14ae9..94166b5a7dd1 100644 --- a/lib/Dialect/StandardOps/Ops.cpp +++ b/lib/Dialect/StandardOps/Ops.cpp @@ -81,7 +81,7 @@ struct StdInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator by replacing it with a new operation /// as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only "std.return" needs to be handled here. auto returnOp = cast(op); @@ -184,7 +184,7 @@ void mlir::printDimAndSymbolList(Operation::operand_iterator begin, // dimension operands parsed. // Returns 'false' on success and 'true' on error. ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser, - SmallVectorImpl &operands, + SmallVectorImpl &operands, unsigned &numDims) { SmallVector opInfos; if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren)) @@ -325,7 +325,7 @@ struct SimplifyAllocConst : public OpRewritePattern { PatternRewriter &rewriter) const override { // Check to see if any dimensions operands are constants. If so, we can // substitute and drop them. 
- if (llvm::none_of(alloc.getOperands(), [](Value *operand) { + if (llvm::none_of(alloc.getOperands(), [](ValuePtr operand) { return matchPattern(operand, m_ConstantIndex()); })) return matchFailure(); @@ -336,8 +336,8 @@ struct SimplifyAllocConst : public OpRewritePattern { // and keep track of the resultant memref type to build. SmallVector newShapeConstants; newShapeConstants.reserve(memrefType.getRank()); - SmallVector newOperands; - SmallVector droppedOperands; + SmallVector newOperands; + SmallVector droppedOperands; unsigned dynamicDimPos = 0; for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) { @@ -429,7 +429,7 @@ struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern { static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &result) { Block *dest; - SmallVector destOperands; + SmallVector destOperands; if (parser.parseSuccessorAndUseList(dest, destOperands)) return failure(); result.addSuccessor(dest, destOperands); @@ -623,7 +623,7 @@ static Type getI1SameShape(Builder *build, Type type) { //===----------------------------------------------------------------------===// static void buildCmpIOp(Builder *build, OperationState &result, - CmpIPredicate predicate, Value *lhs, Value *rhs) { + CmpIPredicate predicate, ValuePtr lhs, ValuePtr rhs) { result.addOperands({lhs, rhs}); result.types.push_back(getI1SameShape(build, lhs->getType())); result.addAttribute( @@ -777,7 +777,7 @@ CmpFPredicate CmpFOp::getPredicateByName(StringRef name) { } static void buildCmpFOp(Builder *build, OperationState &result, - CmpFPredicate predicate, Value *lhs, Value *rhs) { + CmpFPredicate predicate, ValuePtr lhs, ValuePtr rhs) { result.addOperands({lhs, rhs}); result.types.push_back(getI1SameShape(build, lhs->getType())); result.addAttribute( @@ -946,7 +946,7 @@ struct SimplifyConstCondBranchPred : public OpRewritePattern { static ParseResult parseCondBranchOp(OpAsmParser &parser, OperationState &result) { - SmallVector destOperands; + SmallVector destOperands; Block *dest; OpAsmParser::OperandType condInfo; @@ -1088,7 +1088,7 @@ OpFoldResult ConstantOp::fold(ArrayRef operands) { } void ConstantOp::getAsmResultNames( - function_ref setNameFn) { + function_ref setNameFn) { Type type = getType(); if (auto intCst = getValue().dyn_cast()) { IntegerType intTy = type.dyn_cast(); @@ -1183,7 +1183,7 @@ struct SimplifyDeadDealloc : public OpRewritePattern { PatternMatchResult matchAndRewrite(DeallocOp dealloc, PatternRewriter &rewriter) const override { // Check that the memref operand's defining operation is an AllocOp. 
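Predicate lambdas handed to llvm::none_of/any_of only change their parameter spelling from `Value *operand` to `ValuePtr operand`; the bodies are unchanged since member access still goes through `->`. A runnable sketch with std::none_of standing in for the LLVM helper, using a mock `Value` and an invented member name.

    // Illustrative only: std::none_of stands in for llvm::none_of; mock Value type.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Value {
      bool constantIndex;
      bool isConstantIndex() const { return constantIndex; }
    };
    using ValuePtr = Value *;

    int main() {
      Value a{false}, b{true};
      std::vector<ValuePtr> operands = {&a, &b};

      // was: llvm::none_of(operands, [](Value *operand) { ... })
      // now: only the lambda parameter spelling changes; `operand->` is untouched.
      bool noneConstant = std::none_of(
          operands.begin(), operands.end(),
          [](ValuePtr operand) { return operand->isConstantIndex(); });
      assert(!noneConstant);
      return 0;
    }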
- Value *memref = dealloc.memref(); + ValuePtr memref = dealloc.memref(); if (!isa_and_nonnull(memref->getDefiningOp())) return matchFailure(); @@ -1362,11 +1362,11 @@ OpFoldResult UnsignedDivIOp::fold(ArrayRef operands) { // --------------------------------------------------------------------------- void DmaStartOp::build(Builder *builder, OperationState &result, - Value *srcMemRef, ValueRange srcIndices, - Value *destMemRef, ValueRange destIndices, - Value *numElements, Value *tagMemRef, - ValueRange tagIndices, Value *stride, - Value *elementsPerStride) { + ValuePtr srcMemRef, ValueRange srcIndices, + ValuePtr destMemRef, ValueRange destIndices, + ValuePtr numElements, ValuePtr tagMemRef, + ValueRange tagIndices, ValuePtr stride, + ValuePtr elementsPerStride) { result.addOperands(srcMemRef); result.addOperands(srcIndices); result.addOperands(destMemRef); @@ -1507,8 +1507,8 @@ LogicalResult DmaStartOp::fold(ArrayRef cstOperands, // --------------------------------------------------------------------------- void DmaWaitOp::build(Builder *builder, OperationState &result, - Value *tagMemRef, ValueRange tagIndices, - Value *numElements) { + ValuePtr tagMemRef, ValueRange tagIndices, + ValuePtr numElements) { result.addOperands(tagMemRef); result.addOperands(tagIndices); result.addOperands(numElements); @@ -2025,7 +2025,7 @@ static LogicalResult verify(SelectOp op) { } OpFoldResult SelectOp::fold(ArrayRef operands) { - auto *condition = getCondition(); + auto condition = getCondition(); // select true, %0, %1 => %0 if (matchPattern(condition, m_One())) @@ -2357,7 +2357,7 @@ static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) { static void print(OpAsmPrinter &p, ViewOp op) { p << op.getOperationName() << ' ' << *op.getOperand(0) << '['; - auto *dynamicOffset = op.getDynamicOffset(); + auto dynamicOffset = op.getDynamicOffset(); if (dynamicOffset != nullptr) p.printOperand(dynamicOffset); p << "][" << op.getDynamicSizes() << ']'; @@ -2365,7 +2365,7 @@ static void print(OpAsmPrinter &p, ViewOp op) { p << " : " << op.getOperand(0)->getType() << " to " << op.getType(); } -Value *ViewOp::getDynamicOffset() { +ValuePtr ViewOp::getDynamicOffset() { int64_t offset; SmallVector strides; auto result = @@ -2440,7 +2440,7 @@ struct ViewOpShapeFolder : public OpRewritePattern { PatternMatchResult matchAndRewrite(ViewOp viewOp, PatternRewriter &rewriter) const override { // Return if none of the operands are constants. - if (llvm::none_of(viewOp.getOperands(), [](Value *operand) { + if (llvm::none_of(viewOp.getOperands(), [](ValuePtr operand) { return matchPattern(operand, m_ConstantIndex()); })) return matchFailure(); @@ -2457,11 +2457,11 @@ struct ViewOpShapeFolder : public OpRewritePattern { if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset))) return matchFailure(); - SmallVector newOperands; - SmallVector droppedOperands; + SmallVector newOperands; + SmallVector droppedOperands; // Fold dynamic offset operand if it is produced by a constant. 
- auto *dynamicOffset = viewOp.getDynamicOffset(); + auto dynamicOffset = viewOp.getDynamicOffset(); int64_t newOffset = oldOffset; unsigned dynamicOffsetOperandCount = 0; if (dynamicOffset != nullptr) { @@ -2576,7 +2576,7 @@ static Type inferSubViewResultType(MemRefType memRefType) { memRefType.getMemorySpace()); } -void mlir::SubViewOp::build(Builder *b, OperationState &result, Value *source, +void mlir::SubViewOp::build(Builder *b, OperationState &result, ValuePtr source, ValueRange offsets, ValueRange sizes, ValueRange strides, Type resultType, ArrayRef attrs) { @@ -2590,7 +2590,7 @@ void mlir::SubViewOp::build(Builder *b, OperationState &result, Value *source, } void mlir::SubViewOp::build(Builder *b, OperationState &result, Type resultType, - Value *source) { + ValuePtr source) { build(b, result, source, /*offsets=*/{}, /*sizes=*/{}, /*strides=*/{}, resultType); } @@ -2826,7 +2826,7 @@ class SubViewOpShapeFolder final : public OpRewritePattern { // Follow all or nothing approach for shapes for now. If all the operands // for sizes are constants then fold it into the type of the result memref. if (subViewType.hasStaticShape() || - llvm::any_of(subViewOp.sizes(), [](Value *operand) { + llvm::any_of(subViewOp.sizes(), [](ValuePtr operand) { return !matchPattern(operand, m_ConstantIndex()); })) { return matchFailure(); @@ -2842,7 +2842,7 @@ class SubViewOpShapeFolder final : public OpRewritePattern { subViewType.getMemorySpace()); auto newSubViewOp = rewriter.create( subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(), - ArrayRef(), subViewOp.strides(), newMemRefType); + ArrayRef(), subViewOp.strides(), newMemRefType); // Insert a memref_cast for compatibility of the uses of the op. rewriter.replaceOpWithNewOp( subViewOp.sizes(), subViewOp, newSubViewOp, subViewOp.getType()); @@ -2871,7 +2871,7 @@ class SubViewOpStrideFolder final : public OpRewritePattern { failed(getStridesAndOffset(subViewType, resultStrides, resultOffset)) || llvm::is_contained(baseStrides, MemRefType::getDynamicStrideOrOffset()) || - llvm::any_of(subViewOp.strides(), [](Value *stride) { + llvm::any_of(subViewOp.strides(), [](ValuePtr stride) { return !matchPattern(stride, m_ConstantIndex()); })) { return matchFailure(); @@ -2892,7 +2892,7 @@ class SubViewOpStrideFolder final : public OpRewritePattern { layoutMap, subViewType.getMemorySpace()); auto newSubViewOp = rewriter.create( subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(), - subViewOp.sizes(), ArrayRef(), newMemRefType); + subViewOp.sizes(), ArrayRef(), newMemRefType); // Insert a memref_cast for compatibility of the uses of the op. 
rewriter.replaceOpWithNewOp( subViewOp.strides(), subViewOp, newSubViewOp, subViewOp.getType()); @@ -2922,7 +2922,7 @@ class SubViewOpOffsetFolder final : public OpRewritePattern { llvm::is_contained(baseStrides, MemRefType::getDynamicStrideOrOffset()) || baseOffset == MemRefType::getDynamicStrideOrOffset() || - llvm::any_of(subViewOp.offsets(), [](Value *stride) { + llvm::any_of(subViewOp.offsets(), [](ValuePtr stride) { return !matchPattern(stride, m_ConstantIndex()); })) { return matchFailure(); @@ -2943,7 +2943,7 @@ class SubViewOpOffsetFolder final : public OpRewritePattern { MemRefType::get(subViewType.getShape(), subViewType.getElementType(), layoutMap, subViewType.getMemorySpace()); auto newSubViewOp = rewriter.create( - subViewOp.getLoc(), subViewOp.source(), ArrayRef(), + subViewOp.getLoc(), subViewOp.source(), ArrayRef(), subViewOp.sizes(), subViewOp.strides(), newMemRefType); // Insert a memref_cast for compatibility of the uses of the op. rewriter.replaceOpWithNewOp( diff --git a/lib/Dialect/VectorOps/VectorOps.cpp b/lib/Dialect/VectorOps/VectorOps.cpp index 6a3ff74afcdc..18c1714f403c 100644 --- a/lib/Dialect/VectorOps/VectorOps.cpp +++ b/lib/Dialect/VectorOps/VectorOps.cpp @@ -72,7 +72,7 @@ ArrayAttr vector::getVectorSubscriptAttr(Builder &builder, //===----------------------------------------------------------------------===// void vector::ContractionOp::build(Builder *builder, OperationState &result, - Value *lhs, Value *rhs, Value *acc, + ValuePtr lhs, ValuePtr rhs, ValuePtr acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes) { result.addOperands({lhs, rhs, acc}); @@ -404,7 +404,7 @@ static Type inferExtractOpResultType(VectorType vectorType, } void vector::ExtractOp::build(Builder *builder, OperationState &result, - Value *source, ArrayRef position) { + ValuePtr source, ArrayRef position) { result.addOperands(source); auto positionAttr = getVectorSubscriptAttr(*builder, position); result.addTypes(inferExtractOpResultType(source->getType().cast(), @@ -471,7 +471,7 @@ static LogicalResult verify(vector::ExtractOp op) { //===----------------------------------------------------------------------===// void ExtractSlicesOp::build(Builder *builder, OperationState &result, - TupleType tupleType, Value *vector, + TupleType tupleType, ValuePtr vector, ArrayRef sizes, ArrayRef strides) { result.addOperands(vector); @@ -647,8 +647,8 @@ static ParseResult parseBroadcastOp(OpAsmParser &parser, // ShuffleOp //===----------------------------------------------------------------------===// -void ShuffleOp::build(Builder *builder, OperationState &result, Value *v1, - Value *v2, ArrayRef mask) { +void ShuffleOp::build(Builder *builder, OperationState &result, ValuePtr v1, + ValuePtr v2, ArrayRef mask) { result.addOperands({v1, v2}); auto maskAttr = getVectorSubscriptAttr(*builder, mask); result.addTypes(v1->getType()); @@ -771,8 +771,8 @@ static LogicalResult verify(InsertElementOp op) { // InsertOp //===----------------------------------------------------------------------===// -void InsertOp::build(Builder *builder, OperationState &result, Value *source, - Value *dest, ArrayRef position) { +void InsertOp::build(Builder *builder, OperationState &result, ValuePtr source, + ValuePtr dest, ArrayRef position) { result.addOperands({source, dest}); auto positionAttr = getVectorSubscriptAttr(*builder, position); result.addTypes(dest->getType()); @@ -893,7 +893,7 @@ void InsertSlicesOp::getStrides(SmallVectorImpl &results) { 
//===----------------------------------------------------------------------===// void InsertStridedSliceOp::build(Builder *builder, OperationState &result, - Value *source, Value *dest, + ValuePtr source, ValuePtr dest, ArrayRef offsets, ArrayRef strides) { result.addOperands({source, dest}); @@ -1201,17 +1201,17 @@ static LogicalResult verify(ReshapeOp op) { // If all shape operands are produced by constant ops, verify that product // of dimensions for input/output shape match. - auto isDefByConstant = [](Value *operand) { + auto isDefByConstant = [](ValuePtr operand) { return isa_and_nonnull(operand->getDefiningOp()); }; if (llvm::all_of(op.input_shape(), isDefByConstant) && llvm::all_of(op.output_shape(), isDefByConstant)) { int64_t numInputElements = 1; - for (auto *operand : op.input_shape()) + for (auto operand : op.input_shape()) numInputElements *= cast(operand->getDefiningOp()).getValue(); int64_t numOutputElements = 1; - for (auto *operand : op.output_shape()) + for (auto operand : op.output_shape()) numOutputElements *= cast(operand->getDefiningOp()).getValue(); if (numInputElements != numOutputElements) @@ -1247,7 +1247,7 @@ static Type inferStridedSliceOpResultType(VectorType vectorType, } void StridedSliceOp::build(Builder *builder, OperationState &result, - Value *source, ArrayRef offsets, + ValuePtr source, ArrayRef offsets, ArrayRef sizes, ArrayRef strides) { result.addOperands(source); auto offsetsAttr = getVectorSubscriptAttr(*builder, offsets); @@ -1603,7 +1603,7 @@ static MemRefType inferVectorTypeCastResultType(MemRefType t) { } void TypeCastOp::build(Builder *builder, OperationState &result, - Value *source) { + ValuePtr source) { result.addOperands(source); result.addTypes( inferVectorTypeCastResultType(source->getType().cast())); @@ -1793,14 +1793,14 @@ class CreateMaskFolder final : public OpRewritePattern { PatternMatchResult matchAndRewrite(CreateMaskOp createMaskOp, PatternRewriter &rewriter) const override { // Return if any of 'createMaskOp' operands are not defined by a constant. - auto is_not_def_by_constant = [](Value *operand) { + auto is_not_def_by_constant = [](ValuePtr operand) { return !isa_and_nonnull(operand->getDefiningOp()); }; if (llvm::any_of(createMaskOp.operands(), is_not_def_by_constant)) return matchFailure(); // Gather constant mask dimension sizes. SmallVector maskDimSizes; - for (auto *operand : createMaskOp.operands()) { + for (auto operand : createMaskOp.operands()) { auto defOp = operand->getDefiningOp(); maskDimSizes.push_back(cast(defOp).getValue()); } diff --git a/lib/Dialect/VectorOps/VectorTransforms.cpp b/lib/Dialect/VectorOps/VectorTransforms.cpp index 64cacb287207..e5c281cbf643 100644 --- a/lib/Dialect/VectorOps/VectorTransforms.cpp +++ b/lib/Dialect/VectorOps/VectorTransforms.cpp @@ -106,17 +106,17 @@ static SmallVector delinearize(int64_t linearIndex, // `resultTypes`. 
static Operation *cloneOpWithOperandsAndTypes(PatternRewriter &builder, Location loc, Operation *op, - ArrayRef operands, + ArrayRef operands, ArrayRef resultTypes) { OperationState res(loc, op->getName().getStringRef(), operands, resultTypes, op->getAttrs()); return builder.createOperation(res); } -static Value *makeSplatZero(Location loc, PatternRewriter &rewriter, - VectorType vt) { +static ValuePtr makeSplatZero(Location loc, PatternRewriter &rewriter, + VectorType vt) { auto t = vt.getElementType(); - Value *f = nullptr; + ValuePtr f = nullptr; if (t.isBF16() || t.isF16()) f = rewriter.create(loc, t, rewriter.getF64FloatAttr(0.0f)); else if (t.isF32()) @@ -190,12 +190,12 @@ struct UnrolledVectorState { SmallVector unrollFactors; SmallVector basis; int64_t numInstances; - Value *slicesTuple; + ValuePtr slicesTuple; }; // Populates 'state' with unrolled shape, unroll factors, basis and // num unrolled instances for 'vectorType'. -static void initUnrolledVectorState(VectorType vectorType, Value *initValue, +static void initUnrolledVectorState(VectorType vectorType, ValuePtr initValue, const DenseMap &indexMap, ArrayRef targetShape, UnrolledVectorState &state, @@ -239,10 +239,10 @@ getUnrolledVectorLinearIndex(UnrolledVectorState &state, // Returns an unrolled vector at 'vectorOffsets' within the vector // represented by 'state'. The vector is created from a slice of 'initValue' // if not present in 'cache'. -static Value *getOrCreateUnrolledVectorSlice( +static ValuePtr getOrCreateUnrolledVectorSlice( Location loc, UnrolledVectorState &state, ArrayRef vectorOffsets, ArrayRef offsets, DenseMap &indexMap, - Value *initValue, SmallVectorImpl &cache, + ValuePtr initValue, SmallVectorImpl &cache, PatternRewriter &builder) { // Compute slice offsets. SmallVector sliceOffsets(state.unrolledShape.size()); @@ -253,7 +253,7 @@ static Value *getOrCreateUnrolledVectorSlice( int64_t sliceLinearIndex = getUnrolledVectorLinearIndex(state, vectorOffsets, indexMap); assert(sliceLinearIndex < static_cast(cache.size())); - auto *valueSlice = cache[sliceLinearIndex]; + auto valueSlice = cache[sliceLinearIndex]; if (valueSlice == nullptr) { // Return tuple element at 'sliceLinearIndex'. auto tupleIndex = builder.getI64IntegerAttr(sliceLinearIndex); @@ -330,12 +330,10 @@ struct VectorState { // TODO(andydavis) Generalize this to support structured ops beyond // vector ContractionOp, and merge it with 'unrollSingleResultOpMatchingType' -static Value *unrollSingleResultStructuredOp(Operation *op, - ArrayRef iterationBounds, - std::vector &vectors, - unsigned resultIndex, - ArrayRef targetShape, - PatternRewriter &builder) { +static ValuePtr unrollSingleResultStructuredOp( + Operation *op, ArrayRef iterationBounds, + std::vector &vectors, unsigned resultIndex, + ArrayRef targetShape, PatternRewriter &builder) { auto shapedType = op->getResult(0)->getType().dyn_cast_or_null(); if (!shapedType || !shapedType.hasStaticShape()) assert(false && "Expected a statically shaped result type"); @@ -351,7 +349,7 @@ static Value *unrollSingleResultStructuredOp(Operation *op, SmallVector unrolledVectorState(numVectors); for (unsigned i = 0; i < numVectors; ++i) { int64_t operandIndex = vectors[i].operandIndex; - auto *operand = operandIndex >= 0 ? op->getOperand(operandIndex) : nullptr; + auto operand = operandIndex >= 0 ? 
op->getOperand(operandIndex) : nullptr; initUnrolledVectorState(vectors[i].type, operand, vectors[i].indexMap, targetShape, unrolledVectorState[i], builder); } @@ -364,7 +362,7 @@ static Value *unrollSingleResultStructuredOp(Operation *op, shapedType.getElementType()); // Initialize caches for intermediate vector results. - std::vector> caches(numVectors); + std::vector> caches(numVectors); for (unsigned i = 0; i < numVectors; ++i) caches[i].resize(unrolledVectorState[i].numInstances); @@ -376,13 +374,13 @@ static Value *unrollSingleResultStructuredOp(Operation *op, auto offsets = zipMap([](int64_t v1, int64_t v2) { return v1 * v2; }, vectorOffsets, targetShape); // Get cached slice (or create slice) for each operand at 'offsets'. - SmallVector operands; + SmallVector operands; operands.resize(op->getNumOperands()); for (unsigned i = 0; i < numVectors; ++i) { int64_t operandIndex = vectors[i].operandIndex; if (operandIndex < 0) continue; // Output - auto *operand = op->getOperand(operandIndex); + auto operand = op->getOperand(operandIndex); operands[operandIndex] = getOrCreateUnrolledVectorSlice( op->getLoc(), unrolledVectorState[i], vectorOffsets, offsets, vectors[i].indexMap, operand, caches[i], builder); @@ -402,21 +400,21 @@ static Value *unrollSingleResultStructuredOp(Operation *op, // Create TupleOp of unrolled result vectors. SmallVector vectorTupleTypes(resultValueState.numInstances); - SmallVector vectorTupleValues(resultValueState.numInstances); + SmallVector vectorTupleValues(resultValueState.numInstances); for (unsigned i = 0; i < resultValueState.numInstances; ++i) { vectorTupleTypes[i] = caches[resultIndex][i]->getType().cast(); vectorTupleValues[i] = caches[resultIndex][i]; } TupleType tupleType = builder.getTupleType(vectorTupleTypes); - Value *tupleOp = builder.create(op->getLoc(), tupleType, - vectorTupleValues); + ValuePtr tupleOp = builder.create(op->getLoc(), tupleType, + vectorTupleValues); // Create InsertSlicesOp(Tuple(result_vectors)). auto resultVectorType = op->getResult(0)->getType().cast(); SmallVector sizes(resultValueState.unrolledShape); SmallVector strides(resultValueState.unrollFactors.size(), 1); - Value *insertSlicesOp = builder.create( + ValuePtr insertSlicesOp = builder.create( op->getLoc(), resultVectorType, tupleOp, builder.getI64ArrayAttr(sizes), builder.getI64ArrayAttr(strides)); return insertSlicesOp; @@ -487,7 +485,7 @@ getVectorElementwiseOpUnrollState(Operation *op, ArrayRef targetShape, } // Entry point for unrolling declarative pattern rewrites. -Value *mlir::vector::unrollSingleResultOpMatchingType( +ValuePtr mlir::vector::unrollSingleResultOpMatchingType( PatternRewriter &builder, Operation *op, ArrayRef targetShape) { assert(op->getNumResults() == 1 && "Expected single result operation"); @@ -516,8 +514,8 @@ Value *mlir::vector::unrollSingleResultOpMatchingType( static void generateTransferOpSlices(VectorType vectorType, TupleType tupleType, ArrayRef sizes, ArrayRef strides, - ArrayRef indices, PatternRewriter &rewriter, - function_ref)> fn) { + ArrayRef indices, PatternRewriter &rewriter, + function_ref)> fn) { // Compute strides w.r.t. to slice counts in each dimension. auto maybeDimSliceCounts = shapeRatio(vectorType.getShape(), sizes); assert(maybeDimSliceCounts.hasValue()); @@ -534,13 +532,13 @@ generateTransferOpSlices(VectorType vectorType, TupleType tupleType, auto offsets = zipMap([](int64_t v1, int64_t v2) { return v1 * v2; }, vectorOffsets, sizes); // Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'. 
- SmallVector sliceIndices(numSliceIndices); + SmallVector sliceIndices(numSliceIndices); for (auto it : llvm::enumerate(indices)) { auto expr = getAffineDimExpr(0, ctx) + getAffineConstantExpr(offsets[it.index()], ctx); auto map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, expr); sliceIndices[it.index()] = rewriter.create( - it.value()->getLoc(), map, ArrayRef(it.value())); + it.value()->getLoc(), map, ArrayRef(it.value())); } // Call 'fn' to generate slice 'i' at 'sliceIndices'. fn(i, sliceIndices); @@ -559,7 +557,7 @@ struct SplitTransferReadOp : public OpRewritePattern { if (!xferReadOp.permutation_map().isIdentity()) return matchFailure(); // Return unless the unique 'xferReadOp' user is an ExtractSlicesOp. - Value *xferReadResult = xferReadOp.getResult(); + ValuePtr xferReadResult = xferReadOp.getResult(); auto extractSlicesOp = dyn_cast(*xferReadResult->getUsers().begin()); if (!xferReadResult->hasOneUse() || !extractSlicesOp) @@ -576,10 +574,10 @@ struct SplitTransferReadOp : public OpRewritePattern { Location loc = xferReadOp.getLoc(); int64_t numSlices = resultTupleType.size(); - SmallVector vectorTupleValues(numSlices); - SmallVector indices(xferReadOp.indices().begin(), - xferReadOp.indices().end()); - auto createSlice = [&](unsigned index, ArrayRef sliceIndices) { + SmallVector vectorTupleValues(numSlices); + SmallVector indices(xferReadOp.indices().begin(), + xferReadOp.indices().end()); + auto createSlice = [&](unsigned index, ArrayRef sliceIndices) { // Get VectorType for slice 'i'. auto sliceVectorType = resultTupleType.getType(index); // Create split TransferReadOp for 'sliceUser'. @@ -591,8 +589,8 @@ struct SplitTransferReadOp : public OpRewritePattern { indices, rewriter, createSlice); // Create tuple of splice xfer read operations. - Value *tupleOp = rewriter.create(loc, resultTupleType, - vectorTupleValues); + ValuePtr tupleOp = rewriter.create(loc, resultTupleType, + vectorTupleValues); // Replace 'xferReadOp' with result 'insertSlicesResult'. rewriter.replaceOpWithNewOp( xferReadOp, sourceVectorType, tupleOp, extractSlicesOp.sizes(), @@ -632,9 +630,9 @@ struct SplitTransferWriteOp : public OpRewritePattern { insertSlicesOp.getStrides(strides); Location loc = xferWriteOp.getLoc(); - SmallVector indices(xferWriteOp.indices().begin(), - xferWriteOp.indices().end()); - auto createSlice = [&](unsigned index, ArrayRef sliceIndices) { + SmallVector indices(xferWriteOp.indices().begin(), + xferWriteOp.indices().end()); + auto createSlice = [&](unsigned index, ArrayRef sliceIndices) { // Create split TransferWriteOp for source vector 'tupleOp.operand[i]'. rewriter.create( loc, tupleOp.getOperand(index), xferWriteOp.memref(), sliceIndices, @@ -676,7 +674,7 @@ struct TupleGetFolderOp : public OpRewritePattern { return matchFailure(); // Forward Value from 'tupleOp' at 'tupleGetOp.index'. 
- Value *tupleValue = tupleOp.getOperand(tupleGetOp.getIndex()); + ValuePtr tupleValue = tupleOp.getOperand(tupleGetOp.getIndex()); rewriter.replaceOp(tupleGetOp, tupleValue); return matchSuccess(); } diff --git a/lib/EDSC/Builders.cpp b/lib/EDSC/Builders.cpp index 47e2dfed55eb..35108ed56667 100644 --- a/lib/EDSC/Builders.cpp +++ b/lib/EDSC/Builders.cpp @@ -88,9 +88,8 @@ ValueHandle &mlir::edsc::ValueHandle::operator=(const ValueHandle &other) { return *this; } -ValueHandle -mlir::edsc::ValueHandle::createComposedAffineApply(AffineMap map, - ArrayRef operands) { +ValueHandle mlir::edsc::ValueHandle::createComposedAffineApply( + AffineMap map, ArrayRef operands) { Operation *op = makeComposedAffineApply(ScopedContext::getBuilder(), ScopedContext::getLocation(), map, operands) @@ -118,7 +117,7 @@ OperationHandle OperationHandle::create(StringRef name, ArrayRef resultTypes, ArrayRef attributes) { OperationState state(ScopedContext::getLocation(), name); - SmallVector ops(operands.begin(), operands.end()); + SmallVector ops(operands.begin(), operands.end()); state.addOperands(ops); state.addTypes(resultTypes); for (const auto &attr : attributes) { @@ -169,8 +168,8 @@ mlir::edsc::LoopBuilder mlir::edsc::LoopBuilder::makeAffine( if (auto staticFor = emitStaticFor(lbHandles, ubHandles, step)) { *iv = staticFor.getValue(); } else { - SmallVector lbs(lbHandles.begin(), lbHandles.end()); - SmallVector ubs(ubHandles.begin(), ubHandles.end()); + SmallVector lbs(lbHandles.begin(), lbHandles.end()); + SmallVector ubs(ubHandles.begin(), ubHandles.end()); *iv = ValueHandle::create( lbs, ScopedContext::getBuilder().getMultiDimIdentityMap(lbs.size()), ubs, ScopedContext::getBuilder().getMultiDimIdentityMap(ubs.size()), @@ -309,11 +308,11 @@ static ValueHandle createBinaryHandle(ValueHandle lhs, ValueHandle rhs) { return ValueHandle::create(lhs.getValue(), rhs.getValue()); } -static std::pair -categorizeValueByAffineType(MLIRContext *context, Value *val, unsigned &numDims, - unsigned &numSymbols) { +static std::pair +categorizeValueByAffineType(MLIRContext *context, ValuePtr val, + unsigned &numDims, unsigned &numSymbols) { AffineExpr d; - Value *resultVal = nullptr; + ValuePtr resultVal = nullptr; if (auto constant = dyn_cast_or_null(val->getDefiningOp())) { d = getAffineConstantExpr(constant.getValue(), context); } else if (isValidSymbol(val) && !isValidDim(val)) { @@ -332,12 +331,12 @@ static ValueHandle createBinaryIndexHandle( MLIRContext *context = ScopedContext::getContext(); unsigned numDims = 0, numSymbols = 0; AffineExpr d0, d1; - Value *v0, *v1; + ValuePtr v0, v1; std::tie(d0, v0) = categorizeValueByAffineType(context, lhs.getValue(), numDims, numSymbols); std::tie(d1, v1) = categorizeValueByAffineType(context, rhs.getValue(), numDims, numSymbols); - SmallVector operands; + SmallVector operands; if (v0) { operands.push_back(v0); } diff --git a/lib/EDSC/Helpers.cpp b/lib/EDSC/Helpers.cpp index eeb28668a349..1771eb0a427a 100644 --- a/lib/EDSC/Helpers.cpp +++ b/lib/EDSC/Helpers.cpp @@ -22,7 +22,7 @@ using namespace mlir; using namespace mlir::edsc; -static SmallVector getMemRefSizes(Value *memRef) { +static SmallVector getMemRefSizes(ValuePtr memRef) { MemRefType memRefType = memRef->getType().cast(); assert(isStrided(memRefType) && "Expected strided MemRef type"); @@ -39,7 +39,7 @@ static SmallVector getMemRefSizes(Value *memRef) { return res; } -mlir::edsc::MemRefView::MemRefView(Value *v) : base(v) { +mlir::edsc::MemRefView::MemRefView(ValuePtr v) : base(v) { assert(v->getType().isa() && 
"MemRefType expected"); auto memrefSizeValues = getMemRefSizes(v); @@ -50,7 +50,7 @@ mlir::edsc::MemRefView::MemRefView(Value *v) : base(v) { } } -mlir::edsc::VectorView::VectorView(Value *v) : base(v) { +mlir::edsc::VectorView::VectorView(ValuePtr v) : base(v) { auto vectorType = v->getType().cast(); for (auto s : vectorType.getShape()) { diff --git a/lib/EDSC/Intrinsics.cpp b/lib/EDSC/Intrinsics.cpp index 1b19f9aa0bf8..c6738c429937 100644 --- a/lib/EDSC/Intrinsics.cpp +++ b/lib/EDSC/Intrinsics.cpp @@ -29,7 +29,7 @@ OperationHandle mlir::edsc::intrinsics::br(BlockHandle bh, (void)o; assert(o && "Expected already captured ValueHandle"); } - SmallVector ops(operands.begin(), operands.end()); + SmallVector ops(operands.begin(), operands.end()); return OperationHandle::create(bh.getBlock(), ops); } static void enforceEmptyCapturesMatchOperands(ArrayRef captures, @@ -52,7 +52,7 @@ OperationHandle mlir::edsc::intrinsics::br(BlockHandle *bh, assert(!*bh && "Unexpected already captured BlockHandle"); enforceEmptyCapturesMatchOperands(captures, operands); BlockBuilder(bh, captures)(/* no body */); - SmallVector ops(operands.begin(), operands.end()); + SmallVector ops(operands.begin(), operands.end()); return OperationHandle::create(bh->getBlock(), ops); } @@ -61,8 +61,8 @@ mlir::edsc::intrinsics::cond_br(ValueHandle cond, BlockHandle trueBranch, ArrayRef trueOperands, BlockHandle falseBranch, ArrayRef falseOperands) { - SmallVector trueOps(trueOperands.begin(), trueOperands.end()); - SmallVector falseOps(falseOperands.begin(), falseOperands.end()); + SmallVector trueOps(trueOperands.begin(), trueOperands.end()); + SmallVector falseOps(falseOperands.begin(), falseOperands.end()); return OperationHandle::create( cond, trueBranch.getBlock(), trueOps, falseBranch.getBlock(), falseOps); } @@ -78,8 +78,8 @@ OperationHandle mlir::edsc::intrinsics::cond_br( enforceEmptyCapturesMatchOperands(falseCaptures, falseOperands); BlockBuilder(trueBranch, trueCaptures)(/* no body */); BlockBuilder(falseBranch, falseCaptures)(/* no body */); - SmallVector trueOps(trueOperands.begin(), trueOperands.end()); - SmallVector falseOps(falseOperands.begin(), falseOperands.end()); + SmallVector trueOps(trueOperands.begin(), trueOperands.end()); + SmallVector falseOps(falseOperands.begin(), falseOperands.end()); return OperationHandle::create( cond, trueBranch->getBlock(), trueOps, falseBranch->getBlock(), falseOps); } diff --git a/lib/IR/AsmPrinter.cpp b/lib/IR/AsmPrinter.cpp index f3c92ada0a06..177d8a5ef05d 100644 --- a/lib/IR/AsmPrinter.cpp +++ b/lib/IR/AsmPrinter.cpp @@ -319,7 +319,7 @@ void ModuleState::visitOperation(Operation *op) { visitType(type); for (auto ®ion : op->getRegions()) for (auto &block : region) - for (auto *arg : block.getArguments()) + for (auto arg : block.getArguments()) visitType(arg->getType()); // Visit each of the attributes. 
@@ -1437,7 +1437,7 @@ class OperationPrinter : public ModulePrinter, private OpAsmPrinter { void printAttribute(Attribute attr) override { ModulePrinter::printAttribute(attr); } - void printOperand(Value *value) override { printValueID(value); } + void printOperand(ValuePtr value) override { printValueID(value); } void printOptionalAttrDict(ArrayRef attrs, ArrayRef elidedAttrs = {}) override { @@ -1519,7 +1519,7 @@ class OperationPrinter : public ModulePrinter, private OpAsmPrinter { void numberValuesInRegion(Region ®ion); void numberValuesInBlock(Block &block); void numberValuesInOp(Operation &op); - void printValueID(Value *value, bool printResultNo = true) const { + void printValueID(ValuePtr value, bool printResultNo = true) const { printValueIDImpl(value, printResultNo, os); } @@ -1528,13 +1528,13 @@ class OperationPrinter : public ModulePrinter, private OpAsmPrinter { /// 'lookupValue' and the result of 'result' within that group in /// 'lookupResultNo'. 'lookupResultNo' is only filled in if the result group /// has more than 1 result. - void getResultIDAndNumber(OpResult *result, Value *&lookupValue, + void getResultIDAndNumber(OpResultPtr result, ValuePtr &lookupValue, int &lookupResultNo) const; - void printValueIDImpl(Value *value, bool printResultNo, + void printValueIDImpl(ValuePtr value, bool printResultNo, raw_ostream &stream) const; /// Set a special value name for the given value. - void setValueName(Value *value, StringRef name); + void setValueName(ValuePtr value, StringRef name); /// Uniques the given value name within the printer. If the given name /// conflicts, it is automatically renamed. @@ -1542,8 +1542,8 @@ class OperationPrinter : public ModulePrinter, private OpAsmPrinter { /// This is the value ID for each SSA value. If this returns ~0, then the /// valueID has an entry in valueNames. - DenseMap valueIDs; - DenseMap valueNames; + DenseMap valueIDs; + DenseMap valueNames; /// This is a map of operations that contain multiple named result groups, /// i.e. there may be multiple names for the results of the operation. The key @@ -1619,7 +1619,7 @@ void OperationPrinter::numberValuesInRegion(Region ®ion) { } void OperationPrinter::numberValuesInBlock(Block &block) { - auto setArgNameFn = [&](Value *arg, StringRef name) { + auto setArgNameFn = [&](ValuePtr arg, StringRef name) { assert(!valueIDs.count(arg) && "arg numbered multiple times"); assert(cast(arg)->getOwner() == &block && "arg not defined in 'block'"); @@ -1638,7 +1638,7 @@ void OperationPrinter::numberValuesInBlock(Block &block) { // 'arg'. SmallString<32> specialNameBuffer(isEntryBlock ? "arg" : ""); llvm::raw_svector_ostream specialName(specialNameBuffer); - for (auto *arg : block.getArguments()) { + for (auto arg : block.getArguments()) { if (valueIDs.count(arg)) continue; if (isEntryBlock) { @@ -1657,11 +1657,11 @@ void OperationPrinter::numberValuesInOp(Operation &op) { unsigned numResults = op.getNumResults(); if (numResults == 0) return; - Value *resultBegin = op.getResult(0); + ValuePtr resultBegin = op.getResult(0); // Function used to set the special result names for the operation. 
SmallVector resultGroups(/*Size=*/1, /*Value=*/0); - auto setResultNameFn = [&](Value *result, StringRef name) { + auto setResultNameFn = [&](ValuePtr result, StringRef name) { assert(!valueIDs.count(result) && "result numbered multiple times"); assert(result->getDefiningOp() == &op && "result not defined by 'op'"); setValueName(result, name); @@ -1690,7 +1690,7 @@ void OperationPrinter::numberValuesInOp(Operation &op) { } /// Set a special value name for the given value. -void OperationPrinter::setValueName(Value *value, StringRef name) { +void OperationPrinter::setValueName(ValuePtr value, StringRef name) { // If the name is empty, the value uses the default numbering. if (name.empty()) { valueIDs[value] = nextValueID++; @@ -1737,7 +1737,7 @@ void OperationPrinter::print(Block *block, bool printBlockArgs, // Print the argument list if non-empty. if (!block->args_empty()) { os << '('; - interleaveComma(block->getArguments(), [&](BlockArgument *arg) { + interleaveComma(block->getArguments(), [&](BlockArgumentPtr arg) { printValueID(arg); os << ": "; printType(arg->getType()); @@ -1788,8 +1788,8 @@ void OperationPrinter::print(Operation *op) { printTrailingLocation(op->getLoc()); } -void OperationPrinter::getResultIDAndNumber(OpResult *result, - Value *&lookupValue, +void OperationPrinter::getResultIDAndNumber(OpResultPtr result, + ValuePtr &lookupValue, int &lookupResultNo) const { Operation *owner = result->getOwner(); if (owner->getNumResults() == 1) @@ -1827,7 +1827,7 @@ void OperationPrinter::getResultIDAndNumber(OpResult *result, lookupValue = owner->getResult(groupResultNo); } -void OperationPrinter::printValueIDImpl(Value *value, bool printResultNo, +void OperationPrinter::printValueIDImpl(ValuePtr value, bool printResultNo, raw_ostream &stream) const { if (!value) { stream << "<>"; @@ -1840,7 +1840,7 @@ void OperationPrinter::printValueIDImpl(Value *value, bool printResultNo, // If this is a reference to the result of a multi-result operation or // operation, print out the # identifier and make sure to map our lookup // to the first result of the operation. 
- if (OpResult *result = dyn_cast(value)) + if (OpResultPtr result = dyn_cast(value)) getResultIDAndNumber(result, lookupValue, resultNo); auto it = valueIDs.find(lookupValue); @@ -1875,11 +1875,11 @@ void OperationPrinter::shadowRegionArgs(Region ®ion, ValueRange namesToUse) { SmallVector nameStr; for (unsigned i = 0, e = namesToUse.size(); i != e; ++i) { - auto *nameToUse = namesToUse[i]; + auto nameToUse = namesToUse[i]; if (nameToUse == nullptr) continue; - auto *nameToReplace = region.front().getArgument(i); + auto nameToReplace = region.front().getArgument(i); nameStr.clear(); llvm::raw_svector_ostream nameStream(nameStr); @@ -1951,10 +1951,10 @@ void OperationPrinter::printGenericOp(Operation *op) { for (unsigned i = 0; i < numSuccessors; ++i) totalNumSuccessorOperands += op->getNumSuccessorOperands(i); unsigned numProperOperands = op->getNumOperands() - totalNumSuccessorOperands; - SmallVector properOperands( + SmallVector properOperands( op->operand_begin(), std::next(op->operand_begin(), numProperOperands)); - interleaveComma(properOperands, [&](Value *value) { printValueID(value); }); + interleaveComma(properOperands, [&](ValuePtr value) { printValueID(value); }); os << ')'; @@ -1997,10 +1997,10 @@ void OperationPrinter::printSuccessorAndUseList(Operation *term, os << '('; interleaveComma(succOperands, - [this](Value *operand) { printValueID(operand); }); + [this](ValuePtr operand) { printValueID(operand); }); os << " : "; interleaveComma(succOperands, - [this](Value *operand) { printType(operand->getType()); }); + [this](ValuePtr operand) { printType(operand->getType()); }); os << ')'; } @@ -2072,7 +2072,7 @@ void Value::print(raw_ostream &os) { if (auto *op = getDefiningOp()) return op->print(os); // TODO: Improve this. - assert(isa(*this)); + assert(isa()); os << "\n"; } diff --git a/lib/IR/Block.cpp b/lib/IR/Block.cpp index 4dac32ae0c0c..894f9ba38d0d 100644 --- a/lib/IR/Block.cpp +++ b/lib/IR/Block.cpp @@ -98,7 +98,7 @@ void Block::dropAllReferences() { } void Block::dropAllDefinedValueUses() { - for (auto *arg : getArguments()) + for (auto arg : getArguments()) arg->dropAllUses(); for (auto &op : *this) op.dropAllDefinedValueUses(); @@ -151,7 +151,7 @@ void Block::recomputeOpOrder() { // Argument list management. //===----------------------------------------------------------------------===// -BlockArgument *Block::addArgument(Type type) { +BlockArgumentPtr Block::addArgument(Type type) { auto *arg = new BlockArgument(type, this); arguments.push_back(arg); return arg; diff --git a/lib/IR/Builders.cpp b/lib/IR/Builders.cpp index 691b2ad99c49..733fcd139947 100644 --- a/lib/IR/Builders.cpp +++ b/lib/IR/Builders.cpp @@ -343,7 +343,7 @@ Operation *OpBuilder::createOperation(const OperationState &state) { /// 'results'. Returns success if the operation was folded, failure otherwise. /// Note: This function does not erase the operation on a successful fold. LogicalResult OpBuilder::tryFold(Operation *op, - SmallVectorImpl &results) { + SmallVectorImpl &results) { results.reserve(op->getNumResults()); auto cleanupFailure = [&] { results.assign(op->result_begin(), op->result_end()); @@ -374,7 +374,7 @@ LogicalResult OpBuilder::tryFold(Operation *op, Dialect *dialect = op->getDialect(); for (auto &it : llvm::enumerate(foldResults)) { // Normal values get pushed back directly. 
- if (auto *value = it.value().dyn_cast()) { + if (auto value = it.value().dyn_cast()) { results.push_back(value); continue; } diff --git a/lib/IR/Operation.cpp b/lib/IR/Operation.cpp index 9df10791046a..53399ce00a38 100644 --- a/lib/IR/Operation.cpp +++ b/lib/IR/Operation.cpp @@ -114,7 +114,7 @@ template <> unsigned BlockOperand::getOperandNumber() { /// Create a new Operation with the specific fields. Operation *Operation::create(Location location, OperationName name, ArrayRef resultTypes, - ArrayRef operands, + ArrayRef operands, ArrayRef attributes, ArrayRef successors, unsigned numRegions, bool resizableOperandList) { @@ -134,7 +134,7 @@ Operation *Operation::create(const OperationState &state) { /// Create a new Operation with the specific fields. Operation *Operation::create(Location location, OperationName name, ArrayRef resultTypes, - ArrayRef operands, + ArrayRef operands, NamedAttributeList attributes, ArrayRef successors, RegionRange regions, bool resizableOperandList) { @@ -151,7 +151,7 @@ Operation *Operation::create(Location location, OperationName name, /// unnecessarily uniquing a list of attributes. Operation *Operation::create(Location location, OperationName name, ArrayRef resultTypes, - ArrayRef operands, + ArrayRef operands, NamedAttributeList attributes, ArrayRef successors, unsigned numRegions, bool resizableOperandList) { @@ -314,7 +314,7 @@ bool Operation::isProperAncestor(Operation *other) { } /// Replace any uses of 'from' with 'to' within this operation. -void Operation::replaceUsesOfWith(Value *from, Value *to) { +void Operation::replaceUsesOfWith(ValuePtr from, ValuePtr to) { if (from == to) return; for (auto &operand : getOpOperands()) @@ -585,7 +585,7 @@ void Operation::dropAllDefinedValueUses() { /// Return true if there are no users of any results of this operation. bool Operation::use_empty() { - for (auto *result : getResults()) + for (auto result : getResults()) if (!result->use_empty()) return false; return true; @@ -672,14 +672,14 @@ InFlightDiagnostic Operation::emitOpError(const Twine &message) { /// Operands are remapped using `mapper` (if present), and `mapper` is updated /// to contain the results. Operation *Operation::cloneWithoutRegions(BlockAndValueMapping &mapper) { - SmallVector operands; + SmallVector operands; SmallVector successors; operands.reserve(getNumOperands() + getNumSuccessors()); if (getNumSuccessors() == 0) { // Non-branching operations can just add all the operands. - for (auto *opValue : getOperands()) + for (auto opValue : getOperands()) operands.push_back(mapper.lookupOrDefault(opValue)); } else { // We add the operands separated by nullptr's for each successor. @@ -699,7 +699,7 @@ Operation *Operation::cloneWithoutRegions(BlockAndValueMapping &mapper) { operands.push_back(nullptr); // Remap the successors operands. - for (auto *operand : getSuccessorOperands(succ)) + for (auto operand : getSuccessorOperands(succ)) operands.push_back(mapper.lookupOrDefault(operand)); } } @@ -1092,8 +1092,8 @@ LogicalResult OpTrait::impl::verifyResultSizeAttr(Operation *op, // These functions are out-of-line implementations of the methods in BinaryOp, // which avoids them being template instantiated/duplicated. 
-void impl::buildBinaryOp(Builder *builder, OperationState &result, Value *lhs, - Value *rhs) { +void impl::buildBinaryOp(Builder *builder, OperationState &result, ValuePtr lhs, + ValuePtr rhs) { assert(lhs->getType() == rhs->getType()); result.addOperands({lhs, rhs}); result.types.push_back(lhs->getType()); @@ -1133,8 +1133,8 @@ void impl::printOneResultOp(Operation *op, OpAsmPrinter &p) { // CastOp implementation //===----------------------------------------------------------------------===// -void impl::buildCastOp(Builder *builder, OperationState &result, Value *source, - Type destType) { +void impl::buildCastOp(Builder *builder, OperationState &result, + ValuePtr source, Type destType) { result.addOperands(source); result.addTypes(destType); } @@ -1157,7 +1157,7 @@ void impl::printCastOp(Operation *op, OpAsmPrinter &p) { << op->getResult(0)->getType(); } -Value *impl::foldCastOp(Operation *op) { +ValuePtr impl::foldCastOp(Operation *op) { // Identity cast if (op->getOperand(0)->getType() == op->getResult(0)->getType()) return op->getOperand(0); diff --git a/lib/IR/OperationSupport.cpp b/lib/IR/OperationSupport.cpp index 256a261acd8e..333685a16fde 100644 --- a/lib/IR/OperationSupport.cpp +++ b/lib/IR/OperationSupport.cpp @@ -164,7 +164,7 @@ ResultRange::ResultRange(Operation *op) //===----------------------------------------------------------------------===// // ValueRange -ValueRange::ValueRange(ArrayRef values) +ValueRange::ValueRange(ArrayRef values) : ValueRange(values.data(), values.size()) {} ValueRange::ValueRange(OperandRange values) : ValueRange(values.begin().getBase(), values.size()) {} @@ -176,18 +176,19 @@ ValueRange::OwnerT ValueRange::offset_base(const OwnerT &owner, ptrdiff_t index) { if (OpOperand *operand = owner.dyn_cast()) return operand + index; - if (OpResult *result = owner.dyn_cast()) + if (OpResultPtr result = owner.dyn_cast()) return result + index; - return owner.get() + index; + return owner.get() + index; } /// See `detail::indexed_accessor_range_base` for details. -Value *ValueRange::dereference_iterator(const OwnerT &owner, ptrdiff_t index) { +ValuePtr ValueRange::dereference_iterator(const OwnerT &owner, + ptrdiff_t index) { // Operands access the held value via 'get'. if (OpOperand *operand = owner.dyn_cast()) return operand[index].get(); // An OpResult is a value, so we can return it directly. - if (OpResult *result = owner.dyn_cast()) + if (OpResultPtr result = owner.dyn_cast()) return &result[index]; // Otherwise, this is a raw value array so just index directly. - return owner.get()[index]; + return owner.get()[index]; } diff --git a/lib/IR/Region.cpp b/lib/IR/Region.cpp index 6cec021b6a1a..26f14c43424b 100644 --- a/lib/IR/Region.cpp +++ b/lib/IR/Region.cpp @@ -91,7 +91,7 @@ void Region::cloneInto(Region *dest, Region::iterator destPos, // Clone the block arguments. The user might be deleting arguments to the // block by specifying them in the mapper. If so, we don't add the // argument to the cloned block. - for (auto *arg : block.getArguments()) + for (auto arg : block.getArguments()) if (!mapper.contains(arg)) mapper.map(arg, newBlock->addArgument(arg->getType())); @@ -106,7 +106,7 @@ void Region::cloneInto(Region *dest, Region::iterator destPos, // operands of each of the operations. 
auto remapOperands = [&](Operation *op) { for (auto &operand : op->getOpOperands()) - if (auto *mappedOp = mapper.lookupOrNull(operand.get())) + if (auto mappedOp = mapper.lookupOrNull(operand.get())) operand.set(mappedOp); for (auto &succOp : op->getBlockOperands()) if (auto *mappedOp = mapper.lookupOrNull(succOp.get())) @@ -143,7 +143,7 @@ static bool isIsolatedAbove(Region ®ion, Region &limit, while (!pendingRegions.empty()) { for (Block &block : *pendingRegions.pop_back_val()) { for (Operation &op : block) { - for (Value *operand : op.getOperands()) { + for (ValuePtr operand : op.getOperands()) { // operand should be non-null here if the IR is well-formed. But // we don't assert here as this function is called from the verifier // and so could be called on invalid IR. diff --git a/lib/IR/TypeUtilities.cpp b/lib/IR/TypeUtilities.cpp index 54b1bf6329bc..8200e3a3bc68 100644 --- a/lib/IR/TypeUtilities.cpp +++ b/lib/IR/TypeUtilities.cpp @@ -33,11 +33,11 @@ Type mlir::getElementTypeOrSelf(Type type) { return type; } -Type mlir::getElementTypeOrSelf(Value *val) { +Type mlir::getElementTypeOrSelf(ValuePtr val) { return getElementTypeOrSelf(val->getType()); } -Type mlir::getElementTypeOrSelf(Value &val) { +Type mlir::getElementTypeOrSelf(ValueRef val) { return getElementTypeOrSelf(val.getType()); } @@ -101,18 +101,18 @@ LogicalResult mlir::verifyCompatibleShape(Type type1, Type type2) { OperandElementTypeIterator::OperandElementTypeIterator( Operation::operand_iterator it) - : llvm::mapped_iterator( + : llvm::mapped_iterator( it, &unwrap) {} -Type OperandElementTypeIterator::unwrap(Value *value) { +Type OperandElementTypeIterator::unwrap(ValuePtr value) { return value->getType().cast().getElementType(); } ResultElementTypeIterator::ResultElementTypeIterator( Operation::result_iterator it) - : llvm::mapped_iterator( + : llvm::mapped_iterator( it, &unwrap) {} -Type ResultElementTypeIterator::unwrap(Value *value) { +Type ResultElementTypeIterator::unwrap(ValuePtr value) { return value->getType().cast().getElementType(); } diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp index 4c2ea5ac69cb..660d8ae32485 100644 --- a/lib/IR/Value.cpp +++ b/lib/IR/Value.cpp @@ -23,7 +23,7 @@ using namespace mlir; /// If this value is the result of an Operation, return the operation that /// defines it. Operation *Value::getDefiningOp() { - if (auto *result = dyn_cast(this)) + if (auto *result = dyn_cast()) return result->getOwner(); return nullptr; } @@ -38,7 +38,7 @@ Location Value::getLoc() { Region *Value::getParentRegion() { if (auto *op = getDefiningOp()) return op->getParentRegion(); - return cast(this)->getOwner()->getParent(); + return cast()->getOwner()->getParent(); } //===----------------------------------------------------------------------===// diff --git a/lib/Parser/Parser.cpp b/lib/Parser/Parser.cpp index 498a64d70c26..f78704842fe9 100644 --- a/lib/Parser/Parser.cpp +++ b/lib/Parser/Parser.cpp @@ -3093,7 +3093,7 @@ class OperationParser : public Parser { ParseResult popSSANameScope(); /// Register a definition of a value with the symbol table. - ParseResult addDefinition(SSAUseInfo useInfo, Value *value); + ParseResult addDefinition(SSAUseInfo useInfo, ValuePtr value); /// Parse an optional list of SSA uses into 'results'. ParseResult parseOptionalSSAUseList(SmallVectorImpl &results); @@ -3103,12 +3103,13 @@ class OperationParser : public Parser { /// Given a reference to an SSA value and its type, return a reference. This /// returns null on failure. 
- Value *resolveSSAUse(SSAUseInfo useInfo, Type type); + ValuePtr resolveSSAUse(SSAUseInfo useInfo, Type type); ParseResult parseSSADefOrUseAndType( const std::function &action); - ParseResult parseOptionalSSAUseAndTypeList(SmallVectorImpl &results); + ParseResult + parseOptionalSSAUseAndTypeList(SmallVectorImpl &results); /// Return the location of the value identified by its name and number if it /// has been already reference. @@ -3130,12 +3131,12 @@ class OperationParser : public Parser { /// Parse a single operation successor and its operand list. ParseResult parseSuccessorAndUseList(Block *&dest, - SmallVectorImpl &operands); + SmallVectorImpl &operands); /// Parse a comma-separated list of operation successors in brackets. ParseResult parseSuccessors(SmallVectorImpl &destinations, - SmallVectorImpl> &operands); + SmallVectorImpl> &operands); /// Parse an operation instance that is in the generic form. Operation *parseGenericOperation(); @@ -3174,7 +3175,7 @@ class OperationParser : public Parser { /// Parse a (possibly empty) list of block arguments. ParseResult - parseOptionalBlockArgList(SmallVectorImpl &results, + parseOptionalBlockArgList(SmallVectorImpl &results, Block *owner); /// Get the block with the specified name, creating it if it doesn't @@ -3204,14 +3205,14 @@ class OperationParser : public Parser { void recordDefinition(StringRef def); /// Get the value entry for the given SSA name. - SmallVectorImpl> &getSSAValueEntry(StringRef name); + SmallVectorImpl> &getSSAValueEntry(StringRef name); /// Create a forward reference placeholder value with the given location and /// result type. - Value *createForwardRefPlaceholder(SMLoc loc, Type type); + ValuePtr createForwardRefPlaceholder(SMLoc loc, Type type); /// Return true if this is a forward reference. - bool isForwardRefPlaceholder(Value *value) { + bool isForwardRefPlaceholder(ValuePtr value) { return forwardRefPlaceholders.count(value); } @@ -3236,7 +3237,7 @@ class OperationParser : public Parser { /// This keeps track of all of the SSA values we are tracking for each name /// scope, indexed by their name. This has one entry per result number. - llvm::StringMap, 1>> values; + llvm::StringMap, 1>> values; /// This keeps track of all of the values defined by a specific name scope. SmallVector, 2> definitionsPerScope; @@ -3253,7 +3254,7 @@ class OperationParser : public Parser { /// These are all of the placeholders we've made along with the location of /// their first reference, to allow checking for use of undefined values. - DenseMap forwardRefPlaceholders; + DenseMap forwardRefPlaceholders; /// The builder used when creating parsed operation instances. OpBuilder opBuilder; @@ -3278,7 +3279,7 @@ ParseResult OperationParser::finalize() { // Check for any forward references that are left. If we find any, error // out. if (!forwardRefPlaceholders.empty()) { - SmallVector, 4> errors; + SmallVector, 4> errors; // Iteration over the map isn't deterministic, so sort by source location. for (auto entry : forwardRefPlaceholders) errors.push_back({entry.second.getPointer(), entry.first}); @@ -3342,7 +3343,7 @@ ParseResult OperationParser::popSSANameScope() { } /// Register a definition of a value with the symbol table. -ParseResult OperationParser::addDefinition(SSAUseInfo useInfo, Value *value) { +ParseResult OperationParser::addDefinition(SSAUseInfo useInfo, ValuePtr value) { auto &entries = getSSAValueEntry(useInfo.name); // Make sure there is a slot for this value. 
@@ -3351,7 +3352,7 @@ ParseResult OperationParser::addDefinition(SSAUseInfo useInfo, Value *value) { // If we already have an entry for this, check to see if it was a definition // or a forward reference. - if (auto *existing = entries[useInfo.number].first) { + if (auto existing = entries[useInfo.number].first) { if (!isForwardRefPlaceholder(existing)) { return emitError(useInfo.loc) .append("redefinition of SSA value '", useInfo.name, "'") @@ -3416,12 +3417,12 @@ ParseResult OperationParser::parseSSAUse(SSAUseInfo &result) { /// Given an unbound reference to an SSA value and its type, return the value /// it specifies. This returns null on failure. -Value *OperationParser::resolveSSAUse(SSAUseInfo useInfo, Type type) { +ValuePtr OperationParser::resolveSSAUse(SSAUseInfo useInfo, Type type) { auto &entries = getSSAValueEntry(useInfo.name); // If we have already seen a value of this name, return it. if (useInfo.number < entries.size() && entries[useInfo.number].first) { - auto *result = entries[useInfo.number].first; + auto result = entries[useInfo.number].first; // Check that the type matches the other uses. if (result->getType() == type) return result; @@ -3447,7 +3448,7 @@ Value *OperationParser::resolveSSAUse(SSAUseInfo useInfo, Type type) { // Otherwise, this is a forward reference. Create a placeholder and remember // that we did so. - auto *result = createForwardRefPlaceholder(useInfo.loc, type); + auto result = createForwardRefPlaceholder(useInfo.loc, type); entries[useInfo.number].first = result; entries[useInfo.number].second = useInfo.loc; return result; @@ -3477,7 +3478,7 @@ ParseResult OperationParser::parseSSADefOrUseAndType( /// ::= ssa-use-list ':' type-list-no-parens /// ParseResult OperationParser::parseOptionalSSAUseAndTypeList( - SmallVectorImpl &results) { + SmallVectorImpl &results) { SmallVector valueIDs; if (parseOptionalSSAUseList(valueIDs)) return failure(); @@ -3497,7 +3498,7 @@ ParseResult OperationParser::parseOptionalSSAUseAndTypeList( results.reserve(valueIDs.size()); for (unsigned i = 0, e = valueIDs.size(); i != e; ++i) { - if (auto *value = resolveSSAUse(valueIDs[i], types[i])) + if (auto value = resolveSSAUse(valueIDs[i], types[i])) results.push_back(value); else return failure(); @@ -3512,13 +3513,13 @@ void OperationParser::recordDefinition(StringRef def) { } /// Get the value entry for the given SSA name. -SmallVectorImpl> & +SmallVectorImpl> & OperationParser::getSSAValueEntry(StringRef name) { return isolatedNameScopes.back().values[name]; } /// Create and remember a new placeholder for a forward reference. -Value *OperationParser::createForwardRefPlaceholder(SMLoc loc, Type type) { +ValuePtr OperationParser::createForwardRefPlaceholder(SMLoc loc, Type type) { // Forward references are always created as operations, because we just need // something with a def/use chain. // @@ -3632,7 +3633,7 @@ ParseResult OperationParser::parseOperation() { /// ParseResult OperationParser::parseSuccessorAndUseList(Block *&dest, - SmallVectorImpl &operands) { + SmallVectorImpl &operands) { // Verify branch is identifier and get the matching block. 
if (!getToken().is(Token::caret_identifier)) return emitError("expected block name"); @@ -3655,13 +3656,13 @@ OperationParser::parseSuccessorAndUseList(Block *&dest, /// ParseResult OperationParser::parseSuccessors( SmallVectorImpl &destinations, - SmallVectorImpl> &operands) { + SmallVectorImpl> &operands) { if (parseToken(Token::l_square, "expected '['")) return failure(); auto parseElt = [this, &destinations, &operands]() { Block *dest; - SmallVector destOperands; + SmallVector destOperands; auto res = parseSuccessorAndUseList(dest, destOperands); destinations.push_back(dest); operands.push_back(destOperands); @@ -3718,7 +3719,7 @@ Operation *OperationParser::parseGenericOperation() { // Parse the successor list but don't add successors to the result yet to // avoid messing up with the argument order. SmallVector successors; - SmallVector, 2> successorOperands; + SmallVector, 2> successorOperands; if (getToken().is(Token::l_square)) { // Check if the operation is a known terminator. const AbstractOperation *abstractOp = result.name.getAbstractOperation(); @@ -3779,7 +3780,7 @@ Operation *OperationParser::parseGenericOperation() { // Add the successors, and their operands after the proper operands. for (const auto &succ : llvm::zip(successors, successorOperands)) { Block *successor = std::get<0>(succ); - const SmallVector &operands = std::get<1>(succ); + const SmallVector &operands = std::get<1>(succ); result.addSuccessor(successor, operands); } @@ -4129,10 +4130,10 @@ class CustomOpAsmParser : public OpAsmParser { /// Resolve an operand to an SSA value, emitting an error on failure. ParseResult resolveOperand(const OperandType &operand, Type type, - SmallVectorImpl &result) override { + SmallVectorImpl &result) override { OperationParser::SSAUseInfo operandInfo = {operand.name, operand.number, operand.location}; - if (auto *value = parser.resolveSSAUse(operandInfo, type)) { + if (auto value = parser.resolveSSAUse(operandInfo, type)) { result.push_back(value); return success(); } @@ -4242,7 +4243,7 @@ class CustomOpAsmParser : public OpAsmParser { /// Parse a single operation successor and its operand list. ParseResult parseSuccessorAndUseList(Block *&dest, - SmallVectorImpl &operands) override { + SmallVectorImpl &operands) override { return parser.parseSuccessorAndUseList(dest, operands); } @@ -4470,7 +4471,7 @@ ParseResult OperationParser::parseBlock(Block *&block) { // If an argument list is present, parse it. if (consumeIf(Token::l_paren)) { - SmallVector bbArgs; + SmallVector bbArgs; if (parseOptionalBlockArgList(bbArgs, block) || parseToken(Token::r_paren, "expected ')' to end argument list")) return failure(); @@ -4534,7 +4535,7 @@ Block *OperationParser::defineBlockNamed(StringRef name, SMLoc loc, /// ssa-id-and-type-list ::= ssa-id-and-type (`,` ssa-id-and-type)* /// ParseResult OperationParser::parseOptionalBlockArgList( - SmallVectorImpl &results, Block *owner) { + SmallVectorImpl &results, Block *owner) { if (getToken().is(Token::r_brace)) return success(); @@ -4555,7 +4556,7 @@ ParseResult OperationParser::parseOptionalBlockArgList( return emitError("too many arguments specified in argument list"); // Finally, make sure the existing argument has the correct type. 
- auto *arg = owner->getArgument(nextArgument++); + auto arg = owner->getArgument(nextArgument++); if (arg->getType() != type) return emitError("argument and block argument type mismatch"); return addDefinition(useInfo, arg); diff --git a/lib/Pass/IRPrinting.cpp b/lib/Pass/IRPrinting.cpp index 8e172156f056..9d1c1f0d391a 100644 --- a/lib/Pass/IRPrinting.cpp +++ b/lib/Pass/IRPrinting.cpp @@ -48,14 +48,14 @@ class OperationFingerPrint { for (Region ®ion : op->getRegions()) { for (Block &block : region) { addDataToHash(hasher, &block); - for (BlockArgument *arg : block.getArguments()) + for (BlockArgumentPtr arg : block.getArguments()) addDataToHash(hasher, arg); } } // - Location addDataToHash(hasher, op->getLoc().getAsOpaquePointer()); // - Operands - for (Value *operand : op->getOperands()) + for (ValuePtr operand : op->getOperands()) addDataToHash(hasher, operand); // - Successors for (unsigned i = 0, e = op->getNumSuccessors(); i != e; ++i) diff --git a/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp b/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp index d38c76255f03..13fed0f9b1cc 100644 --- a/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp +++ b/lib/Quantizer/Support/ConstraintAnalysisGraph.cpp @@ -102,7 +102,7 @@ void CAGSlice::enumerateImpliedConnections( std::vector> impliedPairs; for (auto &resultAnchorPair : resultAnchors) { CAGResultAnchor *resultAnchor = resultAnchorPair.second; - Value *resultValue = resultAnchor->getValue(); + ValuePtr resultValue = resultAnchor->getValue(); for (auto &use : resultValue->getUses()) { Operation *operandOp = use.getOwner(); unsigned operandIdx = use.getOperandNumber(); diff --git a/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp b/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp index a32bb2c9b3c3..a3cbe214040f 100644 --- a/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp +++ b/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp @@ -74,7 +74,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext, auto func = getFunction(); // Insert stats for each argument. - for (auto *arg : func.getArguments()) { + for (auto arg : func.getArguments()) { if (!config.isHandledType(arg->getType())) continue; OpBuilder b(func.getBody()); diff --git a/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp b/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp index 511df0a463f1..68c263bc4238 100644 --- a/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp +++ b/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp @@ -181,17 +181,17 @@ void InferQuantizedTypesPass::runWithConfig(SolverContext &solverContext, void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor, Type newType) { - Value *inputValue = anchor->getValue(); + ValuePtr inputValue = anchor->getValue(); Operation *op = anchor->getOp(); OpBuilder b(op->getBlock(), Block::iterator(op)); - SmallVector removeValuesIfDead; + SmallVector removeValuesIfDead; // Because we've already run the result transforms at this phase, it is // very likely that inputValue points to a dcast op whose input matches // our type. We detect that situation and route around just to save some // bulk in the IR. 
- Value *newTypedInputValue = inputValue; + ValuePtr newTypedInputValue = inputValue; auto inputDcastOp = dyn_cast_or_null(inputValue->getDefiningOp()); if (inputDcastOp && inputDcastOp.arg()->getType() == newType) { @@ -228,7 +228,7 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor, break; } - for (Value *removeValueIfDead : removeValuesIfDead) { + for (ValuePtr removeValueIfDead : removeValuesIfDead) { if (removeValueIfDead->use_empty()) { removeValueIfDead->getDefiningOp()->erase(); } @@ -237,12 +237,12 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor, void InferQuantizedTypesPass::transformResultType(CAGResultAnchor *anchor, Type newType) { - Value *origResultValue = anchor->getValue(); + ValuePtr origResultValue = anchor->getValue(); Operation *op = origResultValue->getDefiningOp(); OpBuilder b(op->getBlock(), ++Block::iterator(op)); - Value *replacedResultValue = nullptr; - Value *newResultValue = nullptr; + ValuePtr replacedResultValue = nullptr; + ValuePtr newResultValue = nullptr; switch (anchor->getTypeTransformRule()) { case CAGAnchorNode::TypeTransformRule::Direct: origResultValue->setType(newType); diff --git a/lib/TableGen/Pattern.cpp b/lib/TableGen/Pattern.cpp index 098dba3ae6e9..e8f44087b85c 100644 --- a/lib/TableGen/Pattern.cpp +++ b/lib/TableGen/Pattern.cpp @@ -224,7 +224,7 @@ tblgen::SymbolInfoMap::SymbolInfo::getVarDecl(StringRef name) const { return formatv("Operation::operand_range {0}(op0->getOperands());\n", name); } case Kind::Value: { - return formatv("ArrayRef {0};\n", name); + return formatv("ArrayRef {0};\n", name); } case Kind::Result: { // Use the op itself for captured results. diff --git a/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp index 6cf975bcce21..7273d3dfd7ba 100644 --- a/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp +++ b/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp @@ -76,7 +76,7 @@ class Importer { /// `value` is an SSA-use. Return the remapped version of `value` or a /// placeholder that will be remapped later if this is an instruction that /// has not yet been visited. - Value *processValue(llvm::Value *value); + ValuePtr processValue(llvm::Value *value); /// Create the most accurate Location possible using a llvm::DebugLoc and /// possibly an llvm::Instruction to narrow the Location if debug information /// is unavailable. @@ -85,14 +85,14 @@ class Importer { /// `br` branches to `target`. Return the block arguments to attach to the /// generated branch op. These should be in the same order as the PHIs in /// `target`. - SmallVector processBranchArgs(llvm::BranchInst *br, - llvm::BasicBlock *target); + SmallVector processBranchArgs(llvm::BranchInst *br, + llvm::BasicBlock *target); /// Return `value` as an attribute to attach to a GlobalOp. Attribute getConstantAsAttr(llvm::Constant *value); /// Return `c` as an MLIR Value. This could either be a ConstantOp, or /// an expanded sequence of ops in the current function's entry block (for /// ConstantExprs or ConstantGEPs). - Value *processConstant(llvm::Constant *c); + ValuePtr processConstant(llvm::Constant *c); /// The current builder, pointing at where the next Instruction should be /// generated. @@ -120,7 +120,7 @@ class Importer { /// Remapped blocks, for the current function. DenseMap blocks; /// Remapped values. These are function-local. - DenseMap instMap; + DenseMap instMap; /// Instructions that had not been defined when first encountered as a use. 
/// Maps to the dummy Operation that was created in processValue(). DenseMap unknownInstMap; @@ -263,13 +263,13 @@ GlobalOp Importer::processGlobal(llvm::GlobalVariable *GV) { Region &r = op.getInitializerRegion(); currentEntryBlock = b.createBlock(&r); b.setInsertionPoint(currentEntryBlock, currentEntryBlock->begin()); - Value *v = processConstant(GV->getInitializer()); - b.create(op.getLoc(), ArrayRef({v})); + ValuePtr v = processConstant(GV->getInitializer()); + b.create(op.getLoc(), ArrayRef({v})); } return globals[GV] = op; } -Value *Importer::processConstant(llvm::Constant *c) { +ValuePtr Importer::processConstant(llvm::Constant *c) { if (Attribute attr = getConstantAsAttr(c)) { // These constants can be represented as attributes. OpBuilder b(currentEntryBlock, currentEntryBlock->begin()); @@ -298,7 +298,7 @@ Value *Importer::processConstant(llvm::Constant *c) { return nullptr; } -Value *Importer::processValue(llvm::Value *value) { +ValuePtr Importer::processValue(llvm::Value *value) { auto it = instMap.find(value); if (it != instMap.end()) return it->second; @@ -407,9 +407,9 @@ static ICmpPredicate getICmpPredicate(llvm::CmpInst::Predicate p) { // `br` branches to `target`. Return the branch arguments to `br`, in the // same order of the PHIs in `target`. -SmallVector Importer::processBranchArgs(llvm::BranchInst *br, - llvm::BasicBlock *target) { - SmallVector v; +SmallVector Importer::processBranchArgs(llvm::BranchInst *br, + llvm::BasicBlock *target) { + SmallVector v; for (auto inst = target->begin(); isa(inst); ++inst) { auto *PN = cast(&*inst); v.push_back(processValue(PN->getIncomingValueForBlock(br->getParent()))); @@ -421,7 +421,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) { // FIXME: Support uses of SubtargetData. Currently inbounds GEPs, fast-math // flags and call / operand attributes are not supported. Location loc = processDebugLoc(inst->getDebugLoc(), inst); - Value *&v = instMap[inst]; + ValuePtr &v = instMap[inst]; assert(!v && "processInstruction must be called only once per instruction!"); switch (inst->getOpcode()) { default: @@ -462,7 +462,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) { case llvm::Instruction::AddrSpaceCast: case llvm::Instruction::BitCast: { OperationState state(loc, opcMap.lookup(inst->getOpcode())); - SmallVector ops; + SmallVector ops; ops.reserve(inst->getNumOperands()); for (auto *op : inst->operand_values()) ops.push_back(processValue(op)); @@ -484,7 +484,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) { auto *brInst = cast(inst); OperationState state(loc, brInst->isConditional() ? "llvm.cond_br" : "llvm.br"); - SmallVector ops; + SmallVector ops; if (brInst->isConditional()) ops.push_back(processValue(brInst->getCondition())); state.addOperands(ops); @@ -500,7 +500,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) { } case llvm::Instruction::Call: { llvm::CallInst *ci = cast(inst); - SmallVector ops; + SmallVector ops; ops.reserve(inst->getNumOperands()); for (auto &op : ci->arg_operands()) ops.push_back(processValue(op.get())); @@ -523,7 +523,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) { case llvm::Instruction::GetElementPtr: { // FIXME: Support inbounds GEPs. 
llvm::GetElementPtrInst *gep = cast(inst); - SmallVector ops; + SmallVector ops; for (auto *op : gep->operand_values()) ops.push_back(processValue(op)); v = b.create(loc, processType(inst->getType()), ops, @@ -565,8 +565,8 @@ LogicalResult Importer::processFunction(llvm::Function *f) { // any unknown uses we encountered are remapped. for (auto &llvmAndUnknown : unknownInstMap) { assert(instMap.count(llvmAndUnknown.first)); - Value *newValue = instMap[llvmAndUnknown.first]; - Value *oldValue = llvmAndUnknown.second->getResult(0); + ValuePtr newValue = instMap[llvmAndUnknown.first]; + ValuePtr oldValue = llvmAndUnknown.second->getResult(0); oldValue->replaceAllUsesWith(newValue); llvmAndUnknown.second->erase(); } diff --git a/lib/Target/LLVMIR/ModuleTranslation.cpp b/lib/Target/LLVMIR/ModuleTranslation.cpp index e59c69aa25b2..ec28434b8237 100644 --- a/lib/Target/LLVMIR/ModuleTranslation.cpp +++ b/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -248,7 +248,7 @@ LogicalResult ModuleTranslation::convertBlock(Block &bb, bool ignoreArguments) { auto predecessors = bb.getPredecessors(); unsigned numPredecessors = std::distance(predecessors.begin(), predecessors.end()); - for (auto *arg : bb.getArguments()) { + for (auto arg : bb.getArguments()) { auto wrappedType = arg->getType().dyn_cast(); if (!wrappedType) return emitError(bb.front().getLoc(), @@ -342,8 +342,8 @@ void ModuleTranslation::convertGlobals() { /// Get the SSA value passed to the current block from the terminator operation /// of its predecessor. -static Value *getPHISourceValue(Block *current, Block *pred, - unsigned numArguments, unsigned index) { +static ValuePtr getPHISourceValue(Block *current, Block *pred, + unsigned numArguments, unsigned index) { auto &terminator = *pred->getTerminator(); if (isa(terminator)) { return terminator.getOperand(index); @@ -420,7 +420,7 @@ LogicalResult ModuleTranslation::convertOneFunction(LLVMFuncOp func) { unsigned int argIdx = 0; for (const auto &kvp : llvm::zip(func.getArguments(), llvmFunc->args())) { llvm::Argument &llvmArg = std::get<1>(kvp); - BlockArgument *mlirArg = std::get<0>(kvp); + BlockArgumentPtr mlirArg = std::get<0>(kvp); if (auto attr = func.getArgAttrOfType(argIdx, "llvm.noalias")) { // NB: Attribute already verified to be boolean, so check if we can indeed @@ -497,7 +497,7 @@ SmallVector ModuleTranslation::lookupValues(ValueRange values) { SmallVector remapped; remapped.reserve(values.size()); - for (Value *v : values) + for (ValuePtr v : values) remapped.push_back(valueMapping.lookup(v)); return remapped; } diff --git a/lib/Transforms/AffineDataCopyGeneration.cpp b/lib/Transforms/AffineDataCopyGeneration.cpp index 7fb356f3ad21..5bc33943e508 100644 --- a/lib/Transforms/AffineDataCopyGeneration.cpp +++ b/lib/Transforms/AffineDataCopyGeneration.cpp @@ -130,7 +130,7 @@ struct AffineDataCopyGeneration bool skipNonUnitStrideLoops; // Constant zero index to avoid too many duplicates. 
- Value *zeroIndex = nullptr; + ValuePtr zeroIndex = nullptr; }; } // end anonymous namespace diff --git a/lib/Transforms/AffineLoopInvariantCodeMotion.cpp b/lib/Transforms/AffineLoopInvariantCodeMotion.cpp index f384f6d3fb18..23199dd8a390 100644 --- a/lib/Transforms/AffineLoopInvariantCodeMotion.cpp +++ b/lib/Transforms/AffineLoopInvariantCodeMotion.cpp @@ -58,15 +58,15 @@ struct LoopInvariantCodeMotion : public FunctionPass { } // end anonymous namespace static bool -checkInvarianceOfNestedIfOps(Operation *op, Value *indVar, +checkInvarianceOfNestedIfOps(Operation *op, ValuePtr indVar, SmallPtrSetImpl &definedOps, SmallPtrSetImpl &opsToHoist); -static bool isOpLoopInvariant(Operation &op, Value *indVar, +static bool isOpLoopInvariant(Operation &op, ValuePtr indVar, SmallPtrSetImpl &definedOps, SmallPtrSetImpl &opsToHoist); static bool -areAllOpsInTheBlockListInvariant(Region &blockList, Value *indVar, +areAllOpsInTheBlockListInvariant(Region &blockList, ValuePtr indVar, SmallPtrSetImpl &definedOps, SmallPtrSetImpl &opsToHoist); @@ -79,7 +79,7 @@ static bool isMemRefDereferencingOp(Operation &op) { } // Returns true if the individual op is loop invariant. -bool isOpLoopInvariant(Operation &op, Value *indVar, +bool isOpLoopInvariant(Operation &op, ValuePtr indVar, SmallPtrSetImpl &definedOps, SmallPtrSetImpl &opsToHoist) { LLVM_DEBUG(llvm::dbgs() << "iterating on op: " << op;); @@ -97,9 +97,9 @@ bool isOpLoopInvariant(Operation &op, Value *indVar, return false; } else if (!isa(op)) { if (isMemRefDereferencingOp(op)) { - Value *memref = isa(op) - ? cast(op).getMemRef() - : cast(op).getMemRef(); + ValuePtr memref = isa(op) + ? cast(op).getMemRef() + : cast(op).getMemRef(); for (auto *user : memref->getUsers()) { // If this memref has a user that is a DMA, give up because these // operations write to this memref. @@ -163,7 +163,8 @@ bool isOpLoopInvariant(Operation &op, Value *indVar, // Checks if all ops in a region (i.e. list of blocks) are loop invariant. bool areAllOpsInTheBlockListInvariant( - Region &blockList, Value *indVar, SmallPtrSetImpl &definedOps, + Region &blockList, ValuePtr indVar, + SmallPtrSetImpl &definedOps, SmallPtrSetImpl &opsToHoist) { for (auto &b : blockList) { @@ -178,7 +179,7 @@ bool areAllOpsInTheBlockListInvariant( } // Returns true if the affine.if op can be hoisted. -bool checkInvarianceOfNestedIfOps(Operation *op, Value *indVar, +bool checkInvarianceOfNestedIfOps(Operation *op, ValuePtr indVar, SmallPtrSetImpl &definedOps, SmallPtrSetImpl &opsToHoist) { assert(isa(op)); @@ -199,7 +200,7 @@ bool checkInvarianceOfNestedIfOps(Operation *op, Value *indVar, void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) { auto *loopBody = forOp.getBody(); - auto *indVar = forOp.getInductionVar(); + auto indVar = forOp.getInductionVar(); SmallPtrSet definedOps; // This is the place where hoisted instructions would reside. diff --git a/lib/Transforms/DialectConversion.cpp b/lib/Transforms/DialectConversion.cpp index 37c918fe9be9..05066ef599c3 100644 --- a/lib/Transforms/DialectConversion.cpp +++ b/lib/Transforms/DialectConversion.cpp @@ -86,13 +86,13 @@ namespace { struct ConversionValueMapping { /// Lookup a mapped value within the map. If a mapping for the provided value /// does not exist then return the provided value. - Value *lookupOrDefault(Value *from) const; + ValuePtr lookupOrDefault(ValuePtr from) const; /// Map a value to the one provided. 
- void map(Value *oldVal, Value *newVal) { mapping.map(oldVal, newVal); } + void map(ValuePtr oldVal, ValuePtr newVal) { mapping.map(oldVal, newVal); } /// Drop the last mapping for the given value. - void erase(Value *value) { mapping.erase(value); } + void erase(ValuePtr value) { mapping.erase(value); } private: /// Current value mappings. @@ -102,10 +102,10 @@ struct ConversionValueMapping { /// Lookup a mapped value within the map. If a mapping for the provided value /// does not exist then return the provided value. -Value *ConversionValueMapping::lookupOrDefault(Value *from) const { +ValuePtr ConversionValueMapping::lookupOrDefault(ValuePtr from) const { // If this value had a valid mapping, unmap that value as well in the case // that it was also replaced. - while (auto *mappedValue = mapping.lookupOrNull(from)) + while (auto mappedValue = mapping.lookupOrNull(from)) from = mappedValue; return from; } @@ -127,7 +127,7 @@ struct ArgConverter { /// been converted. struct ConvertedArgInfo { ConvertedArgInfo(unsigned newArgIdx, unsigned newArgSize, - Value *castValue = nullptr) + ValuePtr castValue = nullptr) : newArgIdx(newArgIdx), newArgSize(newArgSize), castValue(castValue) {} /// The start index of in the new argument list that contains arguments that @@ -139,7 +139,7 @@ struct ArgConverter { /// The cast value that was created to cast from the new arguments to the /// old. This only used if 'newArgSize' > 1. - Value *castValue; + ValuePtr castValue; }; /// This structure contains information pertaining to a block that has had its @@ -235,7 +235,7 @@ void ArgConverter::notifyOpRemoved(Operation *op) { // Drop all uses of the original arguments and delete the original block. Block *origBlock = it->second.origBlock; - for (BlockArgument *arg : origBlock->getArguments()) + for (BlockArgumentPtr arg : origBlock->getArguments()) arg->dropAllUses(); conversionInfo.erase(it); } @@ -270,7 +270,7 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) { // Process the remapping for each of the original arguments. for (unsigned i = 0, e = origBlock->getNumArguments(); i != e; ++i) { Optional &argInfo = blockInfo.argInfo[i]; - BlockArgument *origArg = origBlock->getArgument(i); + BlockArgumentPtr origArg = origBlock->getArgument(i); // Handle the case of a 1->0 value mapping. if (!argInfo) { @@ -305,7 +305,7 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) { } // Otherwise this is a 1->N value mapping. - Value *castValue = argInfo->castValue; + ValuePtr castValue = argInfo->castValue; assert(argInfo->newArgSize > 1 && castValue && "expected 1->N mapping"); // If the argument is still used, replace it with the generated cast. @@ -344,8 +344,8 @@ Block *ArgConverter::applySignatureConversion( Block *newBlock = block->splitBlock(block->begin()); block->replaceAllUsesWith(newBlock); - SmallVector newArgRange(newBlock->addArguments(convertedTypes)); - ArrayRef newArgs(newArgRange); + SmallVector newArgRange(newBlock->addArguments(convertedTypes)); + ArrayRef newArgs(newArgRange); // Remap each of the original arguments as determined by the signature // conversion. @@ -358,7 +358,7 @@ Block *ArgConverter::applySignatureConversion( auto inputMap = signatureConversion.getInputMapping(i); if (!inputMap) continue; - BlockArgument *origArg = block->getArgument(i); + BlockArgumentPtr origArg = block->getArgument(i); // If inputMap->replacementValue is not nullptr, then the argument is // dropped and a replacement value is provided to be the remappedValue. 
@@ -445,7 +445,7 @@ struct ConversionPatternRewriterImpl { : op(op), newValues(newValues.begin(), newValues.end()) {} Operation *op; - SmallVector newValues; + SmallVector newValues; }; /// The kind of the block action performed during the rewrite. Actions can be @@ -542,7 +542,7 @@ struct ConversionPatternRewriterImpl { /// Remap the given operands to those with potentially different types. void remapValues(Operation::operand_range operands, - SmallVectorImpl &remapped); + SmallVectorImpl &remapped); /// Returns true if the given operation is ignored, and does not need to be /// converted. @@ -591,7 +591,7 @@ void ConversionPatternRewriterImpl::resetState(RewriterState state) { // Reset any replaced operations and undo any saved mappings. for (auto &repl : llvm::drop_begin(replacements, state.numReplacements)) - for (auto *result : repl.op->getResults()) + for (auto result : repl.op->getResults()) mapping.erase(result); replacements.resize(state.numReplacements); @@ -660,7 +660,7 @@ void ConversionPatternRewriterImpl::applyRewrites() { // Apply all of the rewrites replacements requested during conversion. for (auto &repl : replacements) { for (unsigned i = 0, e = repl.newValues.size(); i != e; ++i) { - if (auto *newValue = repl.newValues[i]) + if (auto newValue = repl.newValues[i]) repl.op->getResult(i)->replaceAllUsesWith( mapping.lookupOrDefault(newValue)); } @@ -715,7 +715,7 @@ void ConversionPatternRewriterImpl::replaceOp(Operation *op, // Create mappings for each of the new result values. for (unsigned i = 0, e = newValues.size(); i < e; ++i) - if (auto *repl = newValues[i]) + if (auto repl = newValues[i]) mapping.map(op->getResult(i), repl); // Record the requested operation replacement. @@ -755,9 +755,9 @@ void ConversionPatternRewriterImpl::notifyRegionWasClonedBefore( } void ConversionPatternRewriterImpl::remapValues( - Operation::operand_range operands, SmallVectorImpl &remapped) { + Operation::operand_range operands, SmallVectorImpl &remapped) { remapped.reserve(llvm::size(operands)); - for (Value *operand : operands) + for (ValuePtr operand : operands) remapped.push_back(mapping.lookupOrDefault(operand)); } @@ -803,7 +803,7 @@ void ConversionPatternRewriter::replaceOp(Operation *op, ValueRange newValues, void ConversionPatternRewriter::eraseOp(Operation *op) { LLVM_DEBUG(llvm::dbgs() << "** Erasing operation : " << op->getName() << "\n"); - SmallVector nullRepls(op->getNumResults(), nullptr); + SmallVector nullRepls(op->getNumResults(), nullptr); impl->replaceOp(op, nullRepls, /*valuesToRemoveIfDead=*/llvm::None); } @@ -813,8 +813,8 @@ Block *ConversionPatternRewriter::applySignatureConversion( return impl->applySignatureConversion(region, conversion); } -void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument *from, - Value *to) { +void ConversionPatternRewriter::replaceUsesOfBlockArgument( + BlockArgumentPtr from, ValuePtr to) { for (auto &u : from->getUses()) { if (u.getOwner() == to->getDefiningOp()) continue; @@ -825,7 +825,7 @@ void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument *from, /// Return the converted value that replaces 'key'. Return 'key' if there is /// no such a converted value. 
-Value *ConversionPatternRewriter::getRemappedValue(Value *key) { +ValuePtr ConversionPatternRewriter::getRemappedValue(ValuePtr key) { return impl->mapping.lookupOrDefault(key); } @@ -896,7 +896,7 @@ detail::ConversionPatternRewriterImpl &ConversionPatternRewriter::getImpl() { PatternMatchResult ConversionPattern::matchAndRewrite(Operation *op, PatternRewriter &rewriter) const { - SmallVector operands; + SmallVector operands; auto &dialectRewriter = static_cast(rewriter); dialectRewriter.getImpl().remapValues(op->getOperands(), operands); @@ -908,7 +908,7 @@ ConversionPattern::matchAndRewrite(Operation *op, SmallVector destinations; destinations.reserve(op->getNumSuccessors()); - SmallVector, 2> operandsPerDestination; + SmallVector, 2> operandsPerDestination; unsigned firstSuccessorOperand = op->getSuccessorOperandIndex(0); for (unsigned i = 0, seen = 0, e = op->getNumSuccessors(); i < e; ++i) { destinations.push_back(op->getSuccessor(i)); @@ -1059,7 +1059,7 @@ OperationLegalizer::legalizeWithFold(Operation *op, RewriterState curState = rewriterImpl.getCurrentState(); // Try to fold the operation. - SmallVector replacementValues; + SmallVector replacementValues; rewriter.setInsertionPoint(op); if (failed(rewriter.tryFold(op, replacementValues))) return failure(); @@ -1459,7 +1459,7 @@ void TypeConverter::SignatureConversion::remapInput(unsigned origInputNo, /// Remap an input of the original signature to another `replacementValue` /// value. This would make the signature converter drop this argument. void TypeConverter::SignatureConversion::remapInput(unsigned origInputNo, - Value *replacementValue) { + ValuePtr replacementValue) { assert(!remappedInputs[origInputNo] && "input has already been remapped"); remappedInputs[origInputNo] = InputMapping{origInputNo, /*size=*/0, replacementValue}; @@ -1528,7 +1528,7 @@ struct FuncOpSignatureConversion : public OpConversionPattern { /// Hook for derived classes to implement combined matching and rewriting. PatternMatchResult - matchAndRewrite(FuncOp funcOp, ArrayRef operands, + matchAndRewrite(FuncOp funcOp, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { FunctionType type = funcOp.getType(); diff --git a/lib/Transforms/LoopFusion.cpp b/lib/Transforms/LoopFusion.cpp index 5694c990b9b9..60f0264eb35d 100644 --- a/lib/Transforms/LoopFusion.cpp +++ b/lib/Transforms/LoopFusion.cpp @@ -172,7 +172,7 @@ struct MemRefDependenceGraph { Node(unsigned id, Operation *op) : id(id), op(op) {} // Returns the load op count for 'memref'. - unsigned getLoadOpCount(Value *memref) { + unsigned getLoadOpCount(ValuePtr memref) { unsigned loadOpCount = 0; for (auto *loadOpInst : loads) { if (memref == cast(loadOpInst).getMemRef()) @@ -182,7 +182,7 @@ struct MemRefDependenceGraph { } // Returns the store op count for 'memref'. - unsigned getStoreOpCount(Value *memref) { + unsigned getStoreOpCount(ValuePtr memref) { unsigned storeOpCount = 0; for (auto *storeOpInst : stores) { if (memref == cast(storeOpInst).getMemRef()) @@ -192,7 +192,7 @@ struct MemRefDependenceGraph { } // Returns all store ops in 'storeOps' which access 'memref'. - void getStoreOpsForMemref(Value *memref, + void getStoreOpsForMemref(ValuePtr memref, SmallVectorImpl *storeOps) { for (auto *storeOpInst : stores) { if (memref == cast(storeOpInst).getMemRef()) @@ -201,7 +201,7 @@ struct MemRefDependenceGraph { } // Returns all load ops in 'loadOps' which access 'memref'. 
- void getLoadOpsForMemref(Value *memref, + void getLoadOpsForMemref(ValuePtr memref, SmallVectorImpl *loadOps) { for (auto *loadOpInst : loads) { if (memref == cast(loadOpInst).getMemRef()) @@ -211,13 +211,13 @@ struct MemRefDependenceGraph { // Returns all memrefs in 'loadAndStoreMemrefSet' for which this node // has at least one load and store operation. - void getLoadAndStoreMemrefSet(DenseSet *loadAndStoreMemrefSet) { - llvm::SmallDenseSet loadMemrefs; + void getLoadAndStoreMemrefSet(DenseSet *loadAndStoreMemrefSet) { + llvm::SmallDenseSet loadMemrefs; for (auto *loadOpInst : loads) { loadMemrefs.insert(cast(loadOpInst).getMemRef()); } for (auto *storeOpInst : stores) { - auto *memref = cast(storeOpInst).getMemRef(); + auto memref = cast(storeOpInst).getMemRef(); if (loadMemrefs.count(memref) > 0) loadAndStoreMemrefSet->insert(memref); } @@ -239,7 +239,7 @@ struct MemRefDependenceGraph { // defines an SSA value and another graph node which uses the SSA value // (e.g. a constant operation defining a value which is used inside a loop // nest). - Value *value; + ValuePtr value; }; // Map from node id to Node. @@ -250,7 +250,7 @@ struct MemRefDependenceGraph { DenseMap> outEdges; // Map from memref to a count on the dependence edges associated with that // memref. - DenseMap memrefEdgeCount; + DenseMap memrefEdgeCount; // The next unique identifier to use for newly created graph nodes. unsigned nextNodeId = 0; @@ -309,7 +309,7 @@ struct MemRefDependenceGraph { bool writesToLiveInOrEscapingMemrefs(unsigned id) { Node *node = getNode(id); for (auto *storeOpInst : node->stores) { - auto *memref = cast(storeOpInst).getMemRef(); + auto memref = cast(storeOpInst).getMemRef(); auto *op = memref->getDefiningOp(); // Return true if 'memref' is a block argument. if (!op) @@ -338,7 +338,7 @@ struct MemRefDependenceGraph { const auto &nodeOutEdges = outEdgeIt->second; for (auto *op : node->stores) { auto storeOp = cast(op); - auto *memref = storeOp.getMemRef(); + auto memref = storeOp.getMemRef(); // Skip this store if there are no dependences on its memref. This means // that store either: // *) writes to a memref that is only read within the same loop nest @@ -381,7 +381,7 @@ struct MemRefDependenceGraph { // Returns true iff there is an edge from node 'srcId' to node 'dstId' which // is for 'value' if non-null, or for any value otherwise. Returns false // otherwise. - bool hasEdge(unsigned srcId, unsigned dstId, Value *value = nullptr) { + bool hasEdge(unsigned srcId, unsigned dstId, ValuePtr value = nullptr) { if (outEdges.count(srcId) == 0 || inEdges.count(dstId) == 0) { return false; } @@ -395,7 +395,7 @@ struct MemRefDependenceGraph { } // Adds an edge from node 'srcId' to node 'dstId' for 'value'. - void addEdge(unsigned srcId, unsigned dstId, Value *value) { + void addEdge(unsigned srcId, unsigned dstId, ValuePtr value) { if (!hasEdge(srcId, dstId, value)) { outEdges[srcId].push_back({dstId, value}); inEdges[dstId].push_back({srcId, value}); @@ -405,7 +405,7 @@ struct MemRefDependenceGraph { } // Removes an edge from node 'srcId' to node 'dstId' for 'value'. - void removeEdge(unsigned srcId, unsigned dstId, Value *value) { + void removeEdge(unsigned srcId, unsigned dstId, ValuePtr value) { assert(inEdges.count(dstId) > 0); assert(outEdges.count(srcId) > 0); if (value->getType().isa()) { @@ -459,7 +459,7 @@ struct MemRefDependenceGraph { // Returns the input edge count for node 'id' and 'memref' from src nodes // which access 'memref' with a store operation. 
- unsigned getIncomingMemRefAccesses(unsigned id, Value *memref) { + unsigned getIncomingMemRefAccesses(unsigned id, ValuePtr memref) { unsigned inEdgeCount = 0; if (inEdges.count(id) > 0) for (auto &inEdge : inEdges[id]) @@ -474,7 +474,7 @@ struct MemRefDependenceGraph { // Returns the output edge count for node 'id' and 'memref' (if non-null), // otherwise returns the total output edge count from node 'id'. - unsigned getOutEdgeCount(unsigned id, Value *memref = nullptr) { + unsigned getOutEdgeCount(unsigned id, ValuePtr memref = nullptr) { unsigned outEdgeCount = 0; if (outEdges.count(id) > 0) for (auto &outEdge : outEdges[id]) @@ -548,7 +548,7 @@ struct MemRefDependenceGraph { // Updates edge mappings from node 'srcId' to node 'dstId' after 'oldMemRef' // has been replaced in node at 'dstId' by a private memref depending // on the value of 'createPrivateMemRef'. - void updateEdges(unsigned srcId, unsigned dstId, Value *oldMemRef, + void updateEdges(unsigned srcId, unsigned dstId, ValuePtr oldMemRef, bool createPrivateMemRef) { // For each edge in 'inEdges[srcId]': add new edge remaping to 'dstId'. if (inEdges.count(srcId) > 0) { @@ -681,7 +681,7 @@ struct MemRefDependenceGraph { // TODO(andydavis) Add support for taking a Block arg to construct the // dependence graph at a different depth. bool MemRefDependenceGraph::init(FuncOp f) { - DenseMap> memrefAccesses; + DenseMap> memrefAccesses; // TODO: support multi-block functions. if (f.getBlocks().size() != 1) @@ -701,12 +701,12 @@ bool MemRefDependenceGraph::init(FuncOp f) { Node node(nextNodeId++, &op); for (auto *opInst : collector.loadOpInsts) { node.loads.push_back(opInst); - auto *memref = cast(opInst).getMemRef(); + auto memref = cast(opInst).getMemRef(); memrefAccesses[memref].insert(node.id); } for (auto *opInst : collector.storeOpInsts) { node.stores.push_back(opInst); - auto *memref = cast(opInst).getMemRef(); + auto memref = cast(opInst).getMemRef(); memrefAccesses[memref].insert(node.id); } forToNodeMap[&op] = node.id; @@ -715,14 +715,14 @@ bool MemRefDependenceGraph::init(FuncOp f) { // Create graph node for top-level load op. Node node(nextNodeId++, &op); node.loads.push_back(&op); - auto *memref = cast(op).getMemRef(); + auto memref = cast(op).getMemRef(); memrefAccesses[memref].insert(node.id); nodes.insert({node.id, node}); } else if (auto storeOp = dyn_cast(op)) { // Create graph node for top-level store op. Node node(nextNodeId++, &op); node.stores.push_back(&op); - auto *memref = cast(op).getMemRef(); + auto memref = cast(op).getMemRef(); memrefAccesses[memref].insert(node.id); nodes.insert({node.id, node}); } else if (op.getNumRegions() != 0) { @@ -743,7 +743,7 @@ bool MemRefDependenceGraph::init(FuncOp f) { if (!node.loads.empty() || !node.stores.empty()) continue; auto *opInst = node.op; - for (auto *value : opInst->getResults()) { + for (auto value : opInst->getResults()) { for (auto *user : value->getUsers()) { SmallVector loops; getLoopIVs(*user, &loops); @@ -777,7 +777,7 @@ bool MemRefDependenceGraph::init(FuncOp f) { // Removes load operations from 'srcLoads' which operate on 'memref', and // adds them to 'dstLoads'. -static void moveLoadsAccessingMemrefTo(Value *memref, +static void moveLoadsAccessingMemrefTo(ValuePtr memref, SmallVectorImpl *srcLoads, SmallVectorImpl *dstLoads) { dstLoads->clear(); @@ -893,10 +893,11 @@ static unsigned getMemRefEltSizeInBytes(MemRefType memRefType) { // MemRefRegion written to by 'srcStoreOpInst' at depth 'dstLoopDepth'. 
// TODO(bondhugula): consider refactoring the common code from generateDma and // this one. -static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, - unsigned dstLoopDepth, - Optional fastMemorySpace, - uint64_t localBufSizeThreshold) { +static ValuePtr createPrivateMemRef(AffineForOp forOp, + Operation *srcStoreOpInst, + unsigned dstLoopDepth, + Optional fastMemorySpace, + uint64_t localBufSizeThreshold) { auto *forInst = forOp.getOperation(); // Create builder to insert alloc op just before 'forOp'. @@ -904,7 +905,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, // Builder to create constants at the top level. OpBuilder top(forInst->getParentOfType().getBody()); // Create new memref type based on slice bounds. - auto *oldMemRef = cast(srcStoreOpInst).getMemRef(); + auto oldMemRef = cast(srcStoreOpInst).getMemRef(); auto oldMemRefType = oldMemRef->getType().cast(); unsigned rank = oldMemRefType.getRank(); @@ -928,7 +929,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, // 'outerIVs' holds the values that this memory region is symbolic/parametric // on; this would correspond to loop IVs surrounding the level at which the // slice is being materialized. - SmallVector outerIVs; + SmallVector outerIVs; cst->getIdValues(rank, cst->getNumIds(), &outerIVs); // Build 'rank' AffineExprs from MemRefRegion 'lbs' @@ -960,7 +961,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, auto newMemRefType = MemRefType::get(newShape, oldMemRefType.getElementType(), {}, newMemSpace); // Gather alloc operands for the dynamic dimensions of the memref. - SmallVector allocOperands; + SmallVector allocOperands; unsigned dynamicDimCount = 0; for (auto dimSize : oldMemRefType.getShape()) { if (dimSize == -1) @@ -973,7 +974,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, // consumer loop nests to reduce their live range. Currently they are added // at the beginning of the function, because loop nests can be reordered // during the fusion pass. - Value *newMemRef = + ValuePtr newMemRef = top.create(forOp.getLoc(), newMemRefType, allocOperands); // Build an AffineMap to remap access functions based on lower bound offsets. @@ -1016,7 +1017,7 @@ static bool canFuseSrcWhichWritesToLiveOut(unsigned srcId, unsigned dstId, MemRefDependenceGraph *mdg) { assert(srcLiveOutStoreOp && "Expected a valid store op"); auto *dstNode = mdg->getNode(dstId); - Value *memref = srcLiveOutStoreOp.getMemRef(); + ValuePtr memref = srcLiveOutStoreOp.getMemRef(); // Return false if 'srcNode' has more than one output edge on 'memref'. if (mdg->getOutEdgeCount(srcId, memref) > 1) return false; @@ -1495,10 +1496,10 @@ struct GreedyFusion { SmallVector loads = dstNode->loads; SmallVector dstLoadOpInsts; - DenseSet visitedMemrefs; + DenseSet visitedMemrefs; while (!loads.empty()) { // Get memref of load on top of the stack. - auto *memref = cast(loads.back()).getMemRef(); + auto memref = cast(loads.back()).getMemRef(); if (visitedMemrefs.count(memref) > 0) continue; visitedMemrefs.insert(memref); @@ -1653,7 +1654,7 @@ struct GreedyFusion { } // TODO(andydavis) Use union of memref write regions to compute // private memref footprint. 
- auto *newMemRef = createPrivateMemRef( + auto newMemRef = createPrivateMemRef( dstAffineForOp, storesForMemref[0], bestDstLoopDepth, fastMemorySpace, localBufSizeThreshold); visitedMemrefs.insert(newMemRef); @@ -1671,7 +1672,7 @@ struct GreedyFusion { // Add new load ops to current Node load op list 'loads' to // continue fusing based on new operands. for (auto *loadOpInst : dstLoopCollector.loadOpInsts) { - auto *loadMemRef = cast(loadOpInst).getMemRef(); + auto loadMemRef = cast(loadOpInst).getMemRef(); if (visitedMemrefs.count(loadMemRef) == 0) loads.push_back(loadOpInst); } @@ -1737,10 +1738,10 @@ struct GreedyFusion { // Attempt to fuse 'dstNode' with sibling nodes in the graph. void fuseWithSiblingNodes(Node *dstNode) { DenseSet visitedSibNodeIds; - std::pair idAndMemref; + std::pair idAndMemref; while (findSiblingNodeToFuse(dstNode, &visitedSibNodeIds, &idAndMemref)) { unsigned sibId = idAndMemref.first; - Value *memref = idAndMemref.second; + ValuePtr memref = idAndMemref.second; // TODO(andydavis) Check that 'sibStoreOpInst' post-dominates all other // stores to the same memref in 'sibNode' loop nest. auto *sibNode = mdg->getNode(sibId); @@ -1804,10 +1805,10 @@ struct GreedyFusion { // 'idAndMemrefToFuse' on success. Returns false otherwise. bool findSiblingNodeToFuse(Node *dstNode, DenseSet *visitedSibNodeIds, - std::pair *idAndMemrefToFuse) { + std::pair *idAndMemrefToFuse) { // Returns true if 'sibNode' can be fused with 'dstNode' for input reuse // on 'memref'. - auto canFuseWithSibNode = [&](Node *sibNode, Value *memref) { + auto canFuseWithSibNode = [&](Node *sibNode, ValuePtr memref) { // Skip if 'outEdge' is not a read-after-write dependence. // TODO(andydavis) Remove restrict to single load op restriction. if (sibNode->getLoadOpCount(memref) != 1) @@ -1819,15 +1820,15 @@ struct GreedyFusion { return false; // Skip sib node if it loads to (and stores from) the same memref on // which it also has an input dependence edge. - DenseSet loadAndStoreMemrefSet; + DenseSet loadAndStoreMemrefSet; sibNode->getLoadAndStoreMemrefSet(&loadAndStoreMemrefSet); - if (llvm::any_of(loadAndStoreMemrefSet, [=](Value *memref) { + if (llvm::any_of(loadAndStoreMemrefSet, [=](ValuePtr memref) { return mdg->getIncomingMemRefAccesses(sibNode->id, memref) > 0; })) return false; // Check that all stores are to the same memref. - DenseSet storeMemrefs; + DenseSet storeMemrefs; for (auto *storeOpInst : sibNode->stores) { storeMemrefs.insert(cast(storeOpInst).getMemRef()); } @@ -1856,7 +1857,7 @@ struct GreedyFusion { if (visitedSibNodeIds->count(sibNode->id) > 0) continue; // Skip 'use' if it does not load from the same memref as 'dstNode'. - auto *memref = loadOp.getMemRef(); + auto memref = loadOp.getMemRef(); if (dstNode->getLoadOpCount(memref) == 0) continue; // Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'. @@ -1950,7 +1951,7 @@ struct GreedyFusion { for (auto &pair : mdg->memrefEdgeCount) { if (pair.second > 0) continue; - auto *memref = pair.first; + auto memref = pair.first; // Skip if there exist other uses (return operation or function calls). if (!memref->use_empty()) continue; diff --git a/lib/Transforms/LoopInvariantCodeMotion.cpp b/lib/Transforms/LoopInvariantCodeMotion.cpp index 4932494a04bd..bd58827d0015 100644 --- a/lib/Transforms/LoopInvariantCodeMotion.cpp +++ b/lib/Transforms/LoopInvariantCodeMotion.cpp @@ -50,7 +50,7 @@ struct LoopInvariantCodeMotion : public OperationPass { // - the op has no side-effects. 
If sideEffecting is Never, sideeffects of this // op and its nested ops are ignored. static bool canBeHoisted(Operation *op, - function_ref definedOutside, + function_ref definedOutside, SideEffecting sideEffecting, SideEffectsInterface &interface) { // Check that dependencies are defined outside of loop. @@ -92,7 +92,7 @@ static LogicalResult moveLoopInvariantCode(LoopLikeOpInterface looplike, SmallVector opsToMove; // Helper to check whether an operation is loop invariant wrt. SSA properties. - auto isDefinedOutsideOfBody = [&](Value *value) { + auto isDefinedOutsideOfBody = [&](ValuePtr value) { auto definingOp = value->getDefiningOp(); return (definingOp && !!willBeMovedSet.count(definingOp)) || looplike.isDefinedOutsideOfLoop(value); diff --git a/lib/Transforms/LoopTiling.cpp b/lib/Transforms/LoopTiling.cpp index 10654783aa97..361a4d8ecb9a 100644 --- a/lib/Transforms/LoopTiling.cpp +++ b/lib/Transforms/LoopTiling.cpp @@ -120,8 +120,8 @@ constructTiledIndexSetHyperRect(MutableArrayRef origLoops, for (unsigned i = 0; i < width; i++) { auto lbOperands = origLoops[i].getLowerBoundOperands(); auto ubOperands = origLoops[i].getUpperBoundOperands(); - SmallVector newLbOperands(lbOperands); - SmallVector newUbOperands(ubOperands); + SmallVector newLbOperands(lbOperands); + SmallVector newUbOperands(ubOperands); newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap()); newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap()); newLoops[i].setStep(tileSizes[i]); @@ -147,7 +147,7 @@ constructTiledIndexSetHyperRect(MutableArrayRef origLoops, // with 'i' (tile-space loop) appended to it. The new upper bound map is // the original one with an additional expression i + tileSize appended. auto ub = origLoops[i].getUpperBound(); - SmallVector ubOperands; + SmallVector ubOperands; ubOperands.reserve(ub.getNumOperands() + 1); auto origUbMap = ub.getMap(); // Add dim operands from original upper bound. @@ -235,9 +235,10 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef band, // Move the loop body of the original nest to the new one. moveLoopBody(origLoops[origLoops.size() - 1], innermostPointLoop); - SmallVector origLoopIVs; + SmallVector origLoopIVs; extractForInductionVars(band, &origLoopIVs); - SmallVector, 6> ids(origLoopIVs.begin(), origLoopIVs.end()); + SmallVector, 6> ids(origLoopIVs.begin(), + origLoopIVs.end()); FlatAffineConstraints cst; getIndexSet(band, &cst); diff --git a/lib/Transforms/LoopUnrollAndJam.cpp b/lib/Transforms/LoopUnrollAndJam.cpp index 230869abcd56..a857b8ec95a6 100644 --- a/lib/Transforms/LoopUnrollAndJam.cpp +++ b/lib/Transforms/LoopUnrollAndJam.cpp @@ -191,7 +191,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp, // Adjust the lower bound of the cleanup loop; its upper bound is the same // as the original loop's upper bound. AffineMap cleanupMap; - SmallVector cleanupOperands; + SmallVector cleanupOperands; getCleanupLoopLowerBound(forOp, unrollJamFactor, &cleanupMap, &cleanupOperands, builder); cleanupAffineForOp.setLowerBound(cleanupOperands, cleanupMap); @@ -208,7 +208,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp, int64_t step = forOp.getStep(); forOp.setStep(step * unrollJamFactor); - auto *forOpIV = forOp.getInductionVar(); + auto forOpIV = forOp.getInductionVar(); // Unroll and jam (appends unrollJamFactor - 1 additional copies). for (unsigned i = unrollJamFactor - 1; i >= 1; --i) { // Operand map persists across all sub-blocks. 
diff --git a/lib/Transforms/MemRefDataFlowOpt.cpp b/lib/Transforms/MemRefDataFlowOpt.cpp index c531ca551b46..0695aafe171b 100644 --- a/lib/Transforms/MemRefDataFlowOpt.cpp +++ b/lib/Transforms/MemRefDataFlowOpt.cpp @@ -76,7 +76,7 @@ struct MemRefDataFlowOpt : public FunctionPass { void forwardStoreToLoad(AffineLoadOp loadOp); // A list of memref's that are potentially dead / could be eliminated. - SmallPtrSet memrefsToErase; + SmallPtrSet memrefsToErase; // Load op's whose results were replaced by those forwarded from stores. SmallVector loadOpsToErase; @@ -180,7 +180,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) { return; // Perform the actual store to load forwarding. - Value *storeVal = cast(lastWriteStoreOp).getValueToStore(); + ValuePtr storeVal = cast(lastWriteStoreOp).getValueToStore(); loadOp.replaceAllUsesWith(storeVal); // Record the memref for a later sweep to optimize away. memrefsToErase.insert(loadOp.getMemRef()); @@ -213,7 +213,7 @@ void MemRefDataFlowOpt::runOnFunction() { // Check if the store fwd'ed memrefs are now left with only stores and can // thus be completely deleted. Note: the canonicalize pass should be able // to do this as well, but we'll do it here since we collected these anyway. - for (auto *memref : memrefsToErase) { + for (auto memref : memrefsToErase) { // If the memref hasn't been alloc'ed in this function, skip. Operation *defInst = memref->getDefiningOp(); if (!defInst || !isa(defInst)) diff --git a/lib/Transforms/PipelineDataTransfer.cpp b/lib/Transforms/PipelineDataTransfer.cpp index fdf01351549f..4162936ea2d0 100644 --- a/lib/Transforms/PipelineDataTransfer.cpp +++ b/lib/Transforms/PipelineDataTransfer.cpp @@ -70,7 +70,7 @@ static unsigned getTagMemRefPos(Operation &dmaInst) { /// Replaces all uses of the old memref by the new one while indexing the newly /// added dimension by the loop IV of the specified 'affine.for' operation /// modulo 2. Returns false if such a replacement cannot be performed. -static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) { +static bool doubleBuffer(ValuePtr oldMemRef, AffineForOp forOp) { auto *forBody = forOp.getBody(); OpBuilder bInner(forBody, forBody->begin()); @@ -94,7 +94,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) { auto *forInst = forOp.getOperation(); OpBuilder bOuter(forInst); // Put together alloc operands for any dynamic dimensions of the memref. - SmallVector allocOperands; + SmallVector allocOperands; unsigned dynamicDimCount = 0; for (auto dimSize : oldMemRefType.getShape()) { if (dimSize == -1) @@ -103,7 +103,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) { } // Create and place the alloc right before the 'affine.for' operation. - Value *newMemRef = + ValuePtr newMemRef = bOuter.create(forInst->getLoc(), newMemRefType, allocOperands); // Create 'iv mod 2' value to index the leading dimension. @@ -212,7 +212,7 @@ static void findMatchingStartFinishInsts( continue; // We only double buffer if the buffer is not live out of loop. - auto *memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos()); + auto memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos()); bool escapingUses = false; for (auto *user : memref->getUsers()) { // We can double buffer regardless of dealloc's outside the loop. @@ -270,7 +270,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) { // dimension. 
for (auto &pair : startWaitPairs) { auto *dmaStartInst = pair.first; - Value *oldMemRef = dmaStartInst->getOperand( + ValuePtr oldMemRef = dmaStartInst->getOperand( cast(dmaStartInst).getFasterMemPos()); if (!doubleBuffer(oldMemRef, forOp)) { // Normally, double buffering should not fail because we already checked @@ -301,7 +301,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) { // Double the buffers for tag memrefs. for (auto &pair : startWaitPairs) { auto *dmaFinishInst = pair.second; - Value *oldTagMemRef = + ValuePtr oldTagMemRef = dmaFinishInst->getOperand(getTagMemRefPos(*dmaFinishInst)); if (!doubleBuffer(oldTagMemRef, forOp)) { LLVM_DEBUG(llvm::dbgs() << "tag double buffering failed\n";); @@ -342,7 +342,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) { // If a slice wasn't created, the reachable affine.apply op's from its // operands are the ones that go with it. SmallVector affineApplyInsts; - SmallVector operands(dmaStartInst->getOperands()); + SmallVector operands(dmaStartInst->getOperands()); getReachableAffineApplyOps(operands, affineApplyInsts); for (auto *op : affineApplyInsts) { instShiftMap[op] = 0; diff --git a/lib/Transforms/Utils/FoldUtils.cpp b/lib/Transforms/Utils/FoldUtils.cpp index d4b7caae5275..85d1f21305e7 100644 --- a/lib/Transforms/Utils/FoldUtils.cpp +++ b/lib/Transforms/Utils/FoldUtils.cpp @@ -90,7 +90,7 @@ LogicalResult OperationFolder::tryToFold( return failure(); // Try to fold the operation. - SmallVector results; + SmallVector results; if (failed(tryToFold(op, results, processGeneratedConstants))) return failure(); @@ -138,7 +138,7 @@ void OperationFolder::notifyRemoval(Operation *op) { /// Tries to perform folding on the given `op`. If successful, populates /// `results` with the results of the folding. LogicalResult OperationFolder::tryToFold( - Operation *op, SmallVectorImpl &results, + Operation *op, SmallVectorImpl &results, function_ref processGeneratedConstants) { SmallVector operandConstants; SmallVector foldResults; @@ -181,13 +181,13 @@ LogicalResult OperationFolder::tryToFold( assert(!foldResults[i].isNull() && "expected valid OpFoldResult"); // Check if the result was an SSA value. - if (auto *repl = foldResults[i].dyn_cast()) { + if (auto repl = foldResults[i].dyn_cast()) { results.emplace_back(repl); continue; } // Check to see if there is a canonicalized version of this constant. - auto *res = op->getResult(i); + auto res = op->getResult(i); Attribute attrRepl = foldResults[i].get(); if (auto *constOp = tryGetOrCreateConstant(uniquedConstants, dialect, builder, attrRepl, diff --git a/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp index e2ca3f8fc5e2..fe4a6f9f9e0b 100644 --- a/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp +++ b/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp @@ -107,7 +107,7 @@ class GreedyPatternRewriteDriver : public PatternRewriter { // simplifications to its users - make sure to add them to the worklist // before the root is changed. void notifyRootReplaced(Operation *op) override { - for (auto *result : op->getResults()) + for (auto result : op->getResults()) for (auto *user : result->getUsers()) addToWorklist(user); } @@ -118,7 +118,7 @@ class GreedyPatternRewriteDriver : public PatternRewriter { // operation is modified or removed, as it may trigger further // simplifications. 
template void addToWorklist(Operands &&operands) { - for (Value *operand : operands) { + for (ValuePtr operand : operands) { // If the use count of this operand is now < 2, we re-add the defining // operation to the worklist. // TODO(riverriddle) This is based on the fact that zero use operations @@ -160,7 +160,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef regions, region.walk(collectOps); // These are scratch vectors used in the folding loop below. - SmallVector originalOperands, resultValues; + SmallVector originalOperands, resultValues; changed = false; while (!worklist.empty()) { @@ -189,7 +189,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef regions, // Add all the users of the result to the worklist so we make sure // to revisit them. - for (auto *result : op->getResults()) + for (auto result : op->getResults()) for (auto *operand : result->getUsers()) addToWorklist(operand); diff --git a/lib/Transforms/Utils/InliningUtils.cpp b/lib/Transforms/Utils/InliningUtils.cpp index e8466aa3fd6c..048130c0d3ab 100644 --- a/lib/Transforms/Utils/InliningUtils.cpp +++ b/lib/Transforms/Utils/InliningUtils.cpp @@ -55,7 +55,7 @@ static void remapInlinedOperands(iterator_range inlinedBlocks, BlockAndValueMapping &mapper) { auto remapOperands = [&](Operation *op) { for (auto &operand : op->getOpOperands()) - if (auto *mappedOp = mapper.lookupOrNull(operand.get())) + if (auto mappedOp = mapper.lookupOrNull(operand.get())) operand.set(mappedOp); }; for (auto &block : inlinedBlocks) @@ -98,7 +98,7 @@ void InlinerInterface::handleTerminator(Operation *op, Block *newDest) const { /// Handle the given inlined terminator by replacing it with a new operation /// as necessary. void InlinerInterface::handleTerminator(Operation *op, - ArrayRef valuesToRepl) const { + ArrayRef valuesToRepl) const { auto *handler = getInterfaceFor(op); assert(handler && "expected valid dialect handler"); handler->handleTerminator(op, valuesToRepl); @@ -137,7 +137,7 @@ static bool isLegalToInline(InlinerInterface &interface, Region *src, LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, Operation *inlinePoint, BlockAndValueMapping &mapper, - ArrayRef resultsToReplace, + ArrayRef resultsToReplace, Optional inlineLoc, bool shouldCloneInlinedRegion) { // We expect the region to have at least one block. @@ -147,7 +147,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, // Check that all of the region arguments have been mapped. auto *srcEntryBlock = &src->front(); if (llvm::any_of(srcEntryBlock->getArguments(), - [&](BlockArgument *arg) { return !mapper.contains(arg); })) + [&](BlockArgumentPtr arg) { return !mapper.contains(arg); })) return failure(); // The insertion point must be within a block. @@ -207,7 +207,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, } else { // Otherwise, there were multiple blocks inlined. Add arguments to the post // insertion block to represent the results to replace. - for (Value *resultToRepl : resultsToReplace) { + for (ValuePtr resultToRepl : resultsToReplace) { resultToRepl->replaceAllUsesWith( postInsertBlock->addArgument(resultToRepl->getType())); } @@ -229,8 +229,8 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, /// in-favor of the region arguments when inlining. 
LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, Operation *inlinePoint, - ArrayRef inlinedOperands, - ArrayRef resultsToReplace, + ArrayRef inlinedOperands, + ArrayRef resultsToReplace, Optional inlineLoc, bool shouldCloneInlinedRegion) { // We expect the region to have at least one block. @@ -246,7 +246,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, for (unsigned i = 0, e = inlinedOperands.size(); i != e; ++i) { // Verify that the types of the provided values match the function argument // types. - BlockArgument *regionArg = entryBlock->getArgument(i); + BlockArgumentPtr regionArg = entryBlock->getArgument(i); if (inlinedOperands[i]->getType() != regionArg->getType()) return failure(); mapper.map(regionArg, inlinedOperands[i]); @@ -259,10 +259,10 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, /// Utility function used to generate a cast operation from the given interface, /// or return nullptr if a cast could not be generated. -static Value *materializeConversion(const DialectInlinerInterface *interface, - SmallVectorImpl &castOps, - OpBuilder &castBuilder, Value *arg, - Type type, Location conversionLoc) { +static ValuePtr materializeConversion(const DialectInlinerInterface *interface, + SmallVectorImpl &castOps, + OpBuilder &castBuilder, ValuePtr arg, + Type type, Location conversionLoc) { if (!interface) return nullptr; @@ -297,8 +297,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, // Make sure that the number of arguments and results matchup between the call // and the region. - SmallVector callOperands(call.getArgOperands()); - SmallVector callResults(call.getOperation()->getResults()); + SmallVector callOperands(call.getArgOperands()); + SmallVector callResults(call.getOperation()->getResults()); if (callOperands.size() != entryBlock->getNumArguments() || callResults.size() != callableResultTypes.size()) return failure(); @@ -325,8 +325,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, // Map the provided call operands to the arguments of the region. BlockAndValueMapping mapper; for (unsigned i = 0, e = callOperands.size(); i != e; ++i) { - BlockArgument *regionArg = entryBlock->getArgument(i); - Value *operand = callOperands[i]; + BlockArgumentPtr regionArg = entryBlock->getArgument(i); + ValuePtr operand = callOperands[i]; // If the call operand doesn't match the expected region argument, try to // generate a cast. @@ -342,13 +342,13 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, // Ensure that the resultant values of the call, match the callable. castBuilder.setInsertionPointAfter(call); for (unsigned i = 0, e = callResults.size(); i != e; ++i) { - Value *callResult = callResults[i]; + ValuePtr callResult = callResults[i]; if (callResult->getType() == callableResultTypes[i]) continue; // Generate a conversion that will produce the original type, so that the IR // is still valid after the original call gets replaced. 
- Value *castResult = + ValuePtr castResult = materializeConversion(callInterface, castOps, castBuilder, callResult, callResult->getType(), castLoc); if (!castResult) diff --git a/lib/Transforms/Utils/LoopFusionUtils.cpp b/lib/Transforms/Utils/LoopFusionUtils.cpp index fd803390ce76..d5cda3265de5 100644 --- a/lib/Transforms/Utils/LoopFusionUtils.cpp +++ b/lib/Transforms/Utils/LoopFusionUtils.cpp @@ -45,7 +45,7 @@ using namespace mlir; // Gathers all load and store memref accesses in 'opA' into 'values', where // 'values[memref] == true' for each store operation. static void getLoadAndStoreMemRefAccesses(Operation *opA, - DenseMap<Value *, bool> &values) { + DenseMap<ValuePtr, bool> &values) { opA->walk([&](Operation *op) { if (auto loadOp = dyn_cast<AffineLoadOp>(op)) { if (values.count(loadOp.getMemRef()) == 0) @@ -60,7 +60,7 @@ static void getLoadAndStoreMemRefAccesses(Operation *opA, // accessed 'values' and at least one of the access is a store operation. // Returns false otherwise. static bool isDependentLoadOrStoreOp(Operation *op, - DenseMap<Value *, bool> &values) { + DenseMap<ValuePtr, bool> &values) { if (auto loadOp = dyn_cast<AffineLoadOp>(op)) { return values.count(loadOp.getMemRef()) > 0 && values[loadOp.getMemRef()] == true; @@ -75,7 +75,7 @@ static bool isDependentLoadOrStoreOp(Operation *op, static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) { // Record memref values from all loads/store in loop nest rooted at 'opA'. // Map from memref value to bool which is true if store, false otherwise. - DenseMap<Value *, bool> values; + DenseMap<ValuePtr, bool> values; getLoadAndStoreMemRefAccesses(opA, values); // For each 'opX' in block in range ('opA', 'opB'), check if there is a data @@ -101,7 +101,7 @@ static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) { static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) { // Record memref values from all loads/store in loop nest rooted at 'opB'. // Map from memref value to bool which is true if store, false otherwise. - DenseMap<Value *, bool> values; + DenseMap<ValuePtr, bool> values; getLoadAndStoreMemRefAccesses(opB, values); // For each 'opX' in block in range ('opA', 'opB') in reverse order, @@ -121,8 +121,8 @@ static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) { } return WalkResult::advance(); } - for (auto *value : op->getResults()) { - for (auto *user : value->getUsers()) { + for (auto value : op->getResults()) { + for (auto user : value->getUsers()) { SmallVector loops; // Check if any loop in loop nest surrounding 'user' is 'opB'. getLoopIVs(*user, &loops); @@ -443,7 +443,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats, // Subtract from operation count the loads/store we expect load/store // forwarding to remove. unsigned storeCount = 0; - llvm::SmallDenseSet storeMemrefs; + llvm::SmallDenseSet storeMemrefs; srcForOp.walk([&](Operation *op) { if (auto storeOp = dyn_cast<AffineStoreOp>(op)) { storeMemrefs.insert(storeOp.getMemRef()); @@ -455,7 +455,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats, computeCostMap[insertPointParent] = -storeCount; // Subtract out any load users of 'storeMemrefs' nested below // 'insertPointParent'.
- for (auto *value : storeMemrefs) { + for (auto value : storeMemrefs) { for (auto *user : value->getUsers()) { if (auto loadOp = dyn_cast(user)) { SmallVector loops; diff --git a/lib/Transforms/Utils/LoopUtils.cpp b/lib/Transforms/Utils/LoopUtils.cpp index 3691aee4870b..bc1ced408a92 100644 --- a/lib/Transforms/Utils/LoopUtils.cpp +++ b/lib/Transforms/Utils/LoopUtils.cpp @@ -52,7 +52,7 @@ using llvm::SmallMapVector; /// expression. void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, AffineMap *map, - SmallVectorImpl *operands, + SmallVectorImpl *operands, OpBuilder &b) { auto lbMap = forOp.getLowerBoundMap(); @@ -63,7 +63,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, } AffineMap tripCountMap; - SmallVector tripCountOperands; + SmallVector tripCountOperands; buildTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands); // Sometimes the trip count cannot be expressed as an affine expression. @@ -82,7 +82,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, // lb + tr1 - tr1 % ufactor, lb + tr2 - tr2 % ufactor; the results of all // these affine.apply's make up the cleanup loop lower bound. SmallVector bumpExprs(tripCountMap.getNumResults()); - SmallVector bumpValues(tripCountMap.getNumResults()); + SmallVector bumpValues(tripCountMap.getNumResults()); for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) { auto tripCountExpr = tripCountMap.getResult(i); bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step; @@ -105,7 +105,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, *map = simplifyAffineMap(*map); canonicalizeMapAndOperands(map, operands); // Remove any affine.apply's that became dead from the simplification above. - for (auto *v : bumpValues) { + for (auto v : bumpValues) { if (v->use_empty()) { v->getDefiningOp()->erase(); } @@ -127,7 +127,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) { return failure(); // Replaces all IV uses to its single iteration value. - auto *iv = forOp.getInductionVar(); + auto iv = forOp.getInductionVar(); Operation *op = forOp.getOperation(); if (!iv->use_empty()) { if (forOp.hasConstantLowerBound()) { @@ -137,7 +137,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) { iv->replaceAllUsesWith(constOp); } else { AffineBound lb = forOp.getLowerBound(); - SmallVector lbOperands(lb.operand_begin(), lb.operand_end()); + SmallVector lbOperands(lb.operand_begin(), lb.operand_end()); OpBuilder builder(op->getBlock(), Block::iterator(op)); if (lb.getMap() == builder.getDimIdentityMap()) { // No need of generating an affine.apply. 
@@ -178,8 +178,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap, const std::vector>> &instGroupQueue, unsigned offset, AffineForOp srcForInst, OpBuilder b) { - SmallVector lbOperands(srcForInst.getLowerBoundOperands()); - SmallVector ubOperands(srcForInst.getUpperBoundOperands()); + SmallVector lbOperands(srcForInst.getLowerBoundOperands()); + SmallVector ubOperands(srcForInst.getUpperBoundOperands()); assert(lbMap.getNumInputs() == lbOperands.size()); assert(ubMap.getNumInputs() == ubOperands.size()); @@ -187,8 +187,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap, auto loopChunk = b.create(srcForInst.getLoc(), lbOperands, lbMap, ubOperands, ubMap, srcForInst.getStep()); - auto *loopChunkIV = loopChunk.getInductionVar(); - auto *srcIV = srcForInst.getInductionVar(); + auto loopChunkIV = loopChunk.getInductionVar(); + auto srcIV = srcForInst.getInductionVar(); BlockAndValueMapping operandMap; @@ -449,7 +449,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp, OpBuilder builder(op->getBlock(), ++Block::iterator(op)); auto cleanupForInst = cast(builder.clone(*op)); AffineMap cleanupMap; - SmallVector cleanupOperands; + SmallVector cleanupOperands; getCleanupLoopLowerBound(forOp, unrollFactor, &cleanupMap, &cleanupOperands, builder); assert(cleanupMap && @@ -477,7 +477,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp, Block::iterator srcBlockEnd = std::prev(forOp.getBody()->end(), 2); // Unroll the contents of 'forOp' (append unrollFactor-1 additional copies). - auto *forOpIV = forOp.getInductionVar(); + auto forOpIV = forOp.getInductionVar(); for (unsigned i = 1; i < unrollFactor; i++) { BlockAndValueMapping operandMap; @@ -669,8 +669,8 @@ void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) { // ... // } // ``` -static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map, - SmallVector *operands, +static void augmentMapAndBounds(OpBuilder &b, ValuePtr iv, AffineMap *map, + SmallVector *operands, int64_t offset = 0) { auto bounds = llvm::to_vector<4>(map->getResults()); bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset); @@ -699,16 +699,16 @@ stripmineSink(AffineForOp forOp, uint64_t factor, // Lower-bound map creation. auto lbMap = forOp.getLowerBoundMap(); - SmallVector lbOperands(forOp.getLowerBoundOperands()); + SmallVector lbOperands(forOp.getLowerBoundOperands()); augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands); // Upper-bound map creation. auto ubMap = forOp.getUpperBoundMap(); - SmallVector ubOperands(forOp.getUpperBoundOperands()); + SmallVector ubOperands(forOp.getUpperBoundOperands()); augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands, /*offset=*/scaledStep); - auto *iv = forOp.getInductionVar(); + auto iv = forOp.getInductionVar(); SmallVector innerLoops; for (auto t : targets) { // Insert newForOp before the terminator of `t`. @@ -729,10 +729,10 @@ stripmineSink(AffineForOp forOp, uint64_t factor, return innerLoops; } -static Loops stripmineSink(loop::ForOp forOp, Value *factor, +static Loops stripmineSink(loop::ForOp forOp, ValuePtr factor, ArrayRef targets) { - auto *originalStep = forOp.step(); - auto *iv = forOp.getInductionVar(); + auto originalStep = forOp.step(); + auto iv = forOp.getInductionVar(); OpBuilder b(forOp); forOp.setStep(b.create(forOp.getLoc(), originalStep, factor)); @@ -745,10 +745,10 @@ static Loops stripmineSink(loop::ForOp forOp, Value *factor, // Insert newForOp before the terminator of `t`. 
OpBuilder b(t.getBodyBuilder()); - Value *stepped = b.create(t.getLoc(), iv, forOp.step()); - Value *less = b.create(t.getLoc(), CmpIPredicate::slt, - forOp.upperBound(), stepped); - Value *ub = + ValuePtr stepped = b.create(t.getLoc(), iv, forOp.step()); + ValuePtr less = b.create(t.getLoc(), CmpIPredicate::slt, + forOp.upperBound(), stepped); + ValuePtr ub = b.create(t.getLoc(), less, forOp.upperBound(), stepped); // Splice [begin, begin + nOps - 1) into `newForOp` and replace uses. @@ -799,7 +799,7 @@ mlir::tile(ArrayRef forOps, ArrayRef sizes, } SmallVector mlir::tile(ArrayRef forOps, - ArrayRef sizes, + ArrayRef sizes, ArrayRef targets) { return tileImpl(forOps, sizes, targets); } @@ -821,13 +821,13 @@ SmallVector mlir::tile(ArrayRef forOps, return tileImpl(forOps, sizes, target); } -Loops mlir::tile(ArrayRef forOps, ArrayRef sizes, +Loops mlir::tile(ArrayRef forOps, ArrayRef sizes, loop::ForOp target) { return tileImpl(forOps, sizes, target); } Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp, - ArrayRef sizes) { + ArrayRef sizes) { // Collect perfectly nested loops. If more size values provided than nested // loops available, truncate `sizes`. SmallVector forOps; @@ -842,14 +842,15 @@ Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp, // Build the IR that performs ceil division of a positive value by a constant: // ceildiv(a, B) = divis(a + (B-1), B) // where divis is rounding-to-zero division. -static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend, - int64_t divisor) { +static ValuePtr ceilDivPositive(OpBuilder &builder, Location loc, + ValuePtr dividend, int64_t divisor) { assert(divisor > 0 && "expected positive divisor"); assert(dividend->getType().isIndex() && "expected index-typed value"); - Value *divisorMinusOneCst = builder.create(loc, divisor - 1); - Value *divisorCst = builder.create(loc, divisor); - Value *sum = builder.create(loc, dividend, divisorMinusOneCst); + ValuePtr divisorMinusOneCst = + builder.create(loc, divisor - 1); + ValuePtr divisorCst = builder.create(loc, divisor); + ValuePtr sum = builder.create(loc, dividend, divisorMinusOneCst); return builder.create(loc, sum, divisorCst); } @@ -857,13 +858,13 @@ static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend, // positive value: // ceildiv(a, b) = divis(a + (b - 1), b) // where divis is rounding-to-zero division. -static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend, - Value *divisor) { +static ValuePtr ceilDivPositive(OpBuilder &builder, Location loc, + ValuePtr dividend, ValuePtr divisor) { assert(dividend->getType().isIndex() && "expected index-typed value"); - Value *cstOne = builder.create(loc, 1); - Value *divisorMinusOne = builder.create(loc, divisor, cstOne); - Value *sum = builder.create(loc, dividend, divisorMinusOne); + ValuePtr cstOne = builder.create(loc, 1); + ValuePtr divisorMinusOne = builder.create(loc, divisor, cstOne); + ValuePtr sum = builder.create(loc, dividend, divisorMinusOne); return builder.create(loc, sum, divisor); } @@ -945,7 +946,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp, // iterations. Given that the loop current executes // numIterations = ceildiv((upperBound - lowerBound), step) // iterations, we need to tile with size ceildiv(numIterations, size[i]). 
- SmallVector tileSizes; + SmallVector tileSizes; tileSizes.reserve(sizes.size()); for (unsigned i = 0, e = sizes.size(); i < e; ++i) { assert(sizes[i] > 0 && "expected strictly positive size for strip-mining"); @@ -953,10 +954,10 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp, auto forOp = forOps[i]; OpBuilder builder(forOp); auto loc = forOp.getLoc(); - Value *diff = + ValuePtr diff = builder.create(loc, forOp.upperBound(), forOp.lowerBound()); - Value *numIterations = ceilDivPositive(builder, loc, diff, forOp.step()); - Value *iterationsPerBlock = + ValuePtr numIterations = ceilDivPositive(builder, loc, diff, forOp.step()); + ValuePtr iterationsPerBlock = ceilDivPositive(builder, loc, numIterations, sizes[i]); tileSizes.push_back(iterationsPerBlock); } @@ -976,7 +977,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp, // Replaces all uses of `orig` with `replacement` except if the user is listed // in `exceptions`. static void -replaceAllUsesExcept(Value *orig, Value *replacement, +replaceAllUsesExcept(ValuePtr orig, ValuePtr replacement, const SmallPtrSetImpl &exceptions) { for (auto &use : llvm::make_early_inc_range(orig->getUses())) { if (exceptions.count(use.getOwner()) == 0) @@ -1018,30 +1019,30 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer, // of the loop to go from 0 to the number of iterations, if necessary. // TODO(zinenko): introduce support for negative steps or emit dynamic asserts // on step positivity, whatever gets implemented first. - Value *diff = + ValuePtr diff = builder.create(loc, loop.upperBound(), loop.lowerBound()); - Value *numIterations = ceilDivPositive(builder, loc, diff, loop.step()); + ValuePtr numIterations = ceilDivPositive(builder, loc, diff, loop.step()); loop.setUpperBound(numIterations); - Value *lb = loop.lowerBound(); + ValuePtr lb = loop.lowerBound(); if (!isZeroBased) { - Value *cst0 = builder.create(loc, 0); + ValuePtr cst0 = builder.create(loc, 0); loop.setLowerBound(cst0); } - Value *step = loop.step(); + ValuePtr step = loop.step(); if (!isStepOne) { - Value *cst1 = builder.create(loc, 1); + ValuePtr cst1 = builder.create(loc, 1); loop.setStep(cst1); } // Insert code computing the value of the original loop induction variable // from the "normalized" one. builder.setInsertionPointToStart(inner.getBody()); - Value *scaled = + ValuePtr scaled = isStepOne ? loop.getInductionVar() : builder.create(loc, loop.getInductionVar(), step); - Value *shifted = + ValuePtr shifted = isZeroBased ? scaled : builder.create(loc, scaled, lb); SmallPtrSet preserve{scaled->getDefiningOp(), @@ -1065,7 +1066,7 @@ void mlir::coalesceLoops(MutableArrayRef loops) { // of the number of iterations of all loops. OpBuilder builder(outermost); Location loc = outermost.getLoc(); - Value *upperBound = outermost.upperBound(); + ValuePtr upperBound = outermost.upperBound(); for (auto loop : loops.drop_front()) upperBound = builder.create(loc, upperBound, loop.upperBound()); outermost.setUpperBound(upperBound); @@ -1080,16 +1081,16 @@ void mlir::coalesceLoops(MutableArrayRef loops) { // iv_i = floordiv(iv_linear, product-of-loop-ranges-until-i) mod range_i. // Compute these iteratively from the innermost loop by creating a "running // quotient" of division by the range. 
- Value *previous = outermost.getInductionVar();
+ ValuePtr previous = outermost.getInductionVar();
for (unsigned i = 0, e = loops.size(); i < e; ++i) {
unsigned idx = loops.size() - i - 1;
if (i != 0)
previous = builder.create(loc, previous,
loops[idx + 1].upperBound());
- Value *iv = (i == e - 1) ? previous
- : builder.create(
- loc, previous, loops[idx].upperBound());
+ ValuePtr iv = (i == e - 1) ? previous
+ : builder.create(
+ loc, previous, loops[idx].upperBound());
replaceAllUsesInRegionWith(loops[idx].getInductionVar(), iv,
loops.back().region());
}
@@ -1105,24 +1106,24 @@ void mlir::coalesceLoops(MutableArrayRef loops) {
}
void mlir::mapLoopToProcessorIds(loop::ForOp forOp,
- ArrayRef processorId,
- ArrayRef numProcessors) {
+ ArrayRef processorId,
+ ArrayRef numProcessors) {
assert(processorId.size() == numProcessors.size());
if (processorId.empty())
return;
OpBuilder b(forOp);
Location loc(forOp.getLoc());
- Value *mul = processorId.front();
+ ValuePtr mul = processorId.front();
for (unsigned i = 1, e = processorId.size(); i < e; ++i)
mul = b.create(loc, b.create(loc, mul, numProcessors[i]),
processorId[i]);
- Value *lb = b.create(loc, forOp.lowerBound(),
- b.create(loc, forOp.step(), mul));
+ ValuePtr lb = b.create(loc, forOp.lowerBound(),
+ b.create(loc, forOp.step(), mul));
forOp.setLowerBound(lb);
- Value *step = forOp.step();
- for (auto *numProcs : numProcessors)
+ ValuePtr step = forOp.step();
+ for (auto numProcs : numProcessors)
step = b.create(loc, step, numProcs);
forOp.setStep(step);
}
@@ -1139,7 +1140,7 @@ findHighestBlockForPlacement(const MemRefRegion &region, Block &block,
Block::iterator *copyInPlacementStart,
Block::iterator *copyOutPlacementStart) {
const auto *cst = region.getConstraints();
- SmallVector symbols;
+ SmallVector symbols;
cst->getIdValues(cst->getNumDimIds(), cst->getNumDimAndSymbolIds(), &symbols);
SmallVector enclosingFors;
@@ -1202,10 +1203,10 @@ static void getMultiLevelStrides(const MemRefRegion &region,
/// returns the outermost AffineForOp of the copy loop nest. `memIndicesStart'
/// holds the lower coordinates of the region in the original memref to copy
/// in/out. If `copyOut' is true, generates a copy-out; otherwise a copy-in.
-static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
- Value *fastMemRef,
+static AffineForOp generatePointWiseCopy(Location loc, ValuePtr memref,
+ ValuePtr fastMemRef,
AffineMap memAffineMap,
- ArrayRef memIndicesStart,
+ ArrayRef memIndicesStart,
ArrayRef fastBufferShape,
bool isCopyOut, OpBuilder b) {
assert(!memIndicesStart.empty() && "only 1-d or more memrefs");
@@ -1215,7 +1216,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
// for y = ...
// fast_buf[x][y] = buf[mem_x + x][mem_y + y]
- SmallVector fastBufIndices, memIndices;
+ SmallVector fastBufIndices, memIndices;
AffineForOp copyNestRoot;
for (unsigned d = 0, e = fastBufferShape.size(); d < e; ++d) {
auto forOp = b.create(loc, 0, fastBufferShape[d]);
@@ -1224,7 +1225,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
b = forOp.getBodyBuilder();
fastBufIndices.push_back(forOp.getInductionVar());
- Value *memBase =
+ ValuePtr memBase =
(memAffineMap == b.getMultiDimIdentityMap(memAffineMap.getNumDims())) ?
memIndicesStart[d]
: b.create(
@@ -1277,7 +1278,7 @@ static LogicalResult generateCopy(
const MemRefRegion &region, Block *block, Block::iterator begin,
Block::iterator end, Block *copyPlacementBlock,
Block::iterator copyInPlacementStart, Block::iterator copyOutPlacementStart,
- AffineCopyOptions copyOptions, DenseMap &fastBufferMap,
+ AffineCopyOptions copyOptions, DenseMap &fastBufferMap,
DenseSet &copyNests, uint64_t *sizeInBytes, Block::iterator *nBegin,
Block::iterator *nEnd) {
*nBegin = begin;
@@ -1285,7 +1286,7 @@ static LogicalResult generateCopy(
FuncOp f = begin->getParentOfType();
OpBuilder topBuilder(f.getBody());
- Value *zeroIndex = topBuilder.create(f.getLoc(), 0);
+ ValuePtr zeroIndex = topBuilder.create(f.getLoc(), 0);
if (begin == end)
return success();
@@ -1305,7 +1306,7 @@ static LogicalResult generateCopy(
OpBuilder top(func.getBody());
auto loc = region.loc;
- auto *memref = region.memref;
+ auto memref = region.memref;
auto memRefType = memref->getType().cast();
auto layoutMaps = memRefType.getAffineMaps();
@@ -1317,9 +1318,9 @@ static LogicalResult generateCopy(
// Indices to use for the copying.
// Indices for the original memref being copied from/to.
- SmallVector memIndices;
+ SmallVector memIndices;
// Indices for the faster buffer being copied into/from.
- SmallVector bufIndices;
+ SmallVector bufIndices;
unsigned rank = memRefType.getRank();
SmallVector fastBufferShape;
@@ -1345,7 +1346,7 @@ static LogicalResult generateCopy(
// 'regionSymbols' hold values that this memory region is symbolic/parametric
// on; these typically include loop IVs surrounding the level at which the
// copy generation is being done or other valid symbols in MLIR.
- SmallVector regionSymbols;
+ SmallVector regionSymbols;
cst->getIdValues(rank, cst->getNumIds(), &regionSymbols);
// Construct the index expressions for the fast memory buffer. The index
@@ -1393,7 +1394,7 @@ static LogicalResult generateCopy(
}
// The faster memory space buffer.
- Value *fastMemRef;
+ ValuePtr fastMemRef;
// Check if a buffer was already created.
bool existingBuf = fastBufferMap.count(memref) > 0;
@@ -1433,8 +1434,8 @@ static LogicalResult generateCopy(
return failure();
}
- Value *stride = nullptr;
- Value *numEltPerStride = nullptr;
+ ValuePtr stride = nullptr;
+ ValuePtr numEltPerStride = nullptr;
if (!strideInfos.empty()) {
stride = top.create(loc, strideInfos[0].stride);
numEltPerStride =
@@ -1473,7 +1474,7 @@ static LogicalResult generateCopy(
copyOptions.tagMemorySpace);
auto tagMemRef = prologue.create(loc, tagMemRefType);
- SmallVector tagIndices({zeroIndex});
+ SmallVector tagIndices({zeroIndex});
auto tagAffineMap = b.getMultiDimIdentityMap(tagIndices.size());
fullyComposeAffineMapAndOperands(&tagAffineMap, &tagIndices);
if (!region.isWrite()) {
@@ -1582,7 +1583,7 @@ static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
SmallVector ivs;
getLoopIVs(*opInst, &ivs);
ivs.resize(numParamLoopIVs);
- SmallVector symbols;
+ SmallVector symbols;
extractForInductionVars(ivs, &symbols);
regionCst->reset(rank, numParamLoopIVs, 0);
regionCst->setIdValues(rank, rank + numParamLoopIVs, symbols);
@@ -1629,12 +1630,12 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
// List of memory regions to copy for. We need a map vector to have a
// guaranteed iteration order to write test cases. CHECK-DAG doesn't help here
// since the alloc's for example are identical except for the SSA id.
- SmallMapVector, 4> readRegions;
- SmallMapVector, 4> writeRegions;
+ SmallMapVector, 4> readRegions;
+ SmallMapVector, 4> writeRegions;
// Map from original memref's to the fast buffers that their accesses are
// replaced with.
- DenseMap fastBufferMap;
+ DenseMap fastBufferMap;
// To check for errors when walking the block.
bool error = false;
@@ -1684,7 +1685,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
// Attempts to update; returns true if 'region' exists in targetRegions.
auto updateRegion =
- [&](const SmallMapVector, 4>
+ [&](const SmallMapVector, 4>
&targetRegions) {
auto it = targetRegions.find(region->memref);
if (it == targetRegions.end())
@@ -1736,7 +1737,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
uint64_t totalCopyBuffersSizeInBytes = 0;
bool ret = true;
auto processRegions =
- [&](const SmallMapVector, 4>
+ [&](const SmallMapVector, 4>
&regions) {
for (const auto &regionEntry : regions) {
// For each region, hoist copy in/out past all hoistable
diff --git a/lib/Transforms/Utils/RegionUtils.cpp b/lib/Transforms/Utils/RegionUtils.cpp
index b91b189b3814..749d5bf1dd07 100644
--- a/lib/Transforms/Utils/RegionUtils.cpp
+++ b/lib/Transforms/Utils/RegionUtils.cpp
@@ -27,9 +27,9 @@ using namespace mlir;
-void mlir::replaceAllUsesInRegionWith(Value *orig, Value *replacement,
+void mlir::replaceAllUsesInRegionWith(ValuePtr orig, ValuePtr replacement,
Region &region) {
- for (IROperand &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
if (region.isAncestor(use.getOwner()->getParentRegion()))
use.set(replacement);
}
}
@@ -63,14 +63,14 @@ void mlir::visitUsedValuesDefinedAbove(
}
void mlir::getUsedValuesDefinedAbove(Region &region, Region &limit,
- llvm::SetVector &values) {
+ llvm::SetVector &values) {
visitUsedValuesDefinedAbove(region, limit, [&](OpOperand *operand) {
values.insert(operand->get());
});
}
void mlir::getUsedValuesDefinedAbove(MutableArrayRef regions,
- llvm::SetVector &values) {
+ llvm::SetVector &values) {
for (Region &region : regions)
getUsedValuesDefinedAbove(region, region, values);
}
@@ -146,8 +146,8 @@ namespace {
class LiveMap {
public:
/// Value methods.
- bool wasProvenLive(Value *value) { return liveValues.count(value); } - void setProvedLive(Value *value) { + bool wasProvenLive(ValuePtr value) { return liveValues.count(value); } + void setProvedLive(ValuePtr value) { changed |= liveValues.insert(value).second; } @@ -161,7 +161,7 @@ class LiveMap { private: bool changed = false; - DenseSet liveValues; + DenseSet liveValues; DenseSet liveOps; }; } // namespace @@ -188,7 +188,7 @@ static bool isUseSpeciallyKnownDead(OpOperand &use, LiveMap &liveMap) { return false; } -static void processValue(Value *value, LiveMap &liveMap) { +static void processValue(ValuePtr value, LiveMap &liveMap) { bool provedLive = llvm::any_of(value->getUses(), [&](OpOperand &use) { if (isUseSpeciallyKnownDead(use, liveMap)) return false; @@ -222,9 +222,9 @@ static void propagateLiveness(Operation *op, LiveMap &liveMap) { liveMap.setProvedLive(op); return; } - for (Value *value : op->getResults()) + for (ValuePtr value : op->getResults()) processValue(value, liveMap); - bool provedLive = llvm::any_of(op->getResults(), [&](Value *value) { + bool provedLive = llvm::any_of(op->getResults(), [&](ValuePtr value) { return liveMap.wasProvenLive(value); }); if (provedLive) @@ -240,7 +240,7 @@ static void propagateLiveness(Region ®ion, LiveMap &liveMap) { // faster convergence to a fixed point (we try to visit uses before defs). for (Operation &op : llvm::reverse(block->getOperations())) propagateLiveness(&op, liveMap); - for (Value *value : block->getArguments()) + for (ValuePtr value : block->getArguments()) processValue(value, liveMap); } } @@ -259,7 +259,7 @@ static void eraseTerminatorSuccessorOperands(Operation *terminator, // Iterating args in reverse is needed for correctness, to avoid // shifting later args when earlier args are erased. unsigned arg = argE - argI - 1; - Value *value = terminator->getSuccessor(succ)->getArgument(arg); + ValuePtr value = terminator->getSuccessor(succ)->getArgument(arg); if (!liveMap.wasProvenLive(value)) { terminator->eraseSuccessorOperand(succ, arg); } diff --git a/lib/Transforms/Utils/Utils.cpp b/lib/Transforms/Utils/Utils.cpp index 57a92531163e..96a6cdc544f2 100644 --- a/lib/Transforms/Utils/Utils.cpp +++ b/lib/Transforms/Utils/Utils.cpp @@ -47,7 +47,8 @@ static bool isMemRefDereferencingOp(Operation &op) { } /// Return the AffineMapAttr associated with memory 'op' on 'memref'. -static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) { +static NamedAttribute getAffineMapAttrForMemRef(Operation *op, + ValuePtr memref) { return TypeSwitch(op) .Case( @@ -55,12 +56,10 @@ static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) { } // Perform the replacement in `op`. 
-LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, - Operation *op, - ArrayRef extraIndices, - AffineMap indexRemap, - ArrayRef extraOperands, - ArrayRef symbolOperands) { +LogicalResult mlir::replaceAllMemRefUsesWith( + ValuePtr oldMemRef, ValuePtr newMemRef, Operation *op, + ArrayRef extraIndices, AffineMap indexRemap, + ArrayRef extraOperands, ArrayRef symbolOperands) { unsigned newMemRefRank = newMemRef->getType().cast().getRank(); (void)newMemRefRank; // unused in opt mode unsigned oldMemRefRank = oldMemRef->getType().cast().getRank(); @@ -106,13 +105,13 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, NamedAttribute oldMapAttrPair = getAffineMapAttrForMemRef(op, oldMemRef); AffineMap oldMap = oldMapAttrPair.second.cast().getValue(); unsigned oldMapNumInputs = oldMap.getNumInputs(); - SmallVector oldMapOperands( + SmallVector oldMapOperands( op->operand_begin() + memRefOperandPos + 1, op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs); // Apply 'oldMemRefOperands = oldMap(oldMapOperands)'. - SmallVector oldMemRefOperands; - SmallVector affineApplyOps; + SmallVector oldMemRefOperands; + SmallVector affineApplyOps; oldMemRefOperands.reserve(oldMemRefRank); if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) { for (auto resultExpr : oldMap.getResults()) { @@ -130,14 +129,14 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, // Construct new indices as a remap of the old ones if a remapping has been // provided. The indices of a memref come right after it, i.e., // at position memRefOperandPos + 1. - SmallVector remapOperands; + SmallVector remapOperands; remapOperands.reserve(extraOperands.size() + oldMemRefRank + symbolOperands.size()); remapOperands.append(extraOperands.begin(), extraOperands.end()); remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end()); remapOperands.append(symbolOperands.begin(), symbolOperands.end()); - SmallVector remapOutputs; + SmallVector remapOutputs; remapOutputs.reserve(oldMemRefRank); if (indexRemap && @@ -156,11 +155,11 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, remapOutputs.append(remapOperands.begin(), remapOperands.end()); } - SmallVector newMapOperands; + SmallVector newMapOperands; newMapOperands.reserve(newMemRefRank); // Prepend 'extraIndices' in 'newMapOperands'. - for (auto *extraIndex : extraIndices) { + for (auto extraIndex : extraIndices) { assert(extraIndex->getDefiningOp()->getNumResults() == 1 && "single result op's expected to generate these indices"); assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) && @@ -179,7 +178,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, newMap = simplifyAffineMap(newMap); canonicalizeMapAndOperands(&newMap, &newMapOperands); // Remove any affine.apply's that became dead as a result of composition. - for (auto *value : affineApplyOps) + for (auto value : affineApplyOps) if (value->use_empty()) value->getDefiningOp()->erase(); @@ -203,7 +202,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, // Result types don't change. Both memref's are of the same elemental type. state.types.reserve(op->getNumResults()); - for (auto *result : op->getResults()) + for (auto result : op->getResults()) state.types.push_back(result->getType()); // Add attribute for 'newMap', other Attributes do not change. 
@@ -224,13 +223,11 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, return success(); } -LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, - ArrayRef extraIndices, - AffineMap indexRemap, - ArrayRef extraOperands, - ArrayRef symbolOperands, - Operation *domInstFilter, - Operation *postDomInstFilter) { +LogicalResult mlir::replaceAllMemRefUsesWith( + ValuePtr oldMemRef, ValuePtr newMemRef, ArrayRef extraIndices, + AffineMap indexRemap, ArrayRef extraOperands, + ArrayRef symbolOperands, Operation *domInstFilter, + Operation *postDomInstFilter) { unsigned newMemRefRank = newMemRef->getType().cast().getRank(); (void)newMemRefRank; // unused in opt mode unsigned oldMemRefRank = oldMemRef->getType().cast().getRank(); @@ -331,9 +328,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef, void mlir::createAffineComputationSlice( Operation *opInst, SmallVectorImpl *sliceOps) { // Collect all operands that are results of affine apply ops. - SmallVector subOperands; + SmallVector subOperands; subOperands.reserve(opInst->getNumOperands()); - for (auto *operand : opInst->getOperands()) + for (auto operand : opInst->getOperands()) if (isa_and_nonnull(operand->getDefiningOp())) subOperands.push_back(operand); @@ -348,7 +345,7 @@ void mlir::createAffineComputationSlice( // which case there would be nothing to do. bool localized = true; for (auto *op : affineApplyOps) { - for (auto *result : op->getResults()) { + for (auto result : op->getResults()) { for (auto *user : result->getUsers()) { if (user != opInst) { localized = false; @@ -361,7 +358,7 @@ void mlir::createAffineComputationSlice( return; OpBuilder builder(opInst); - SmallVector composedOpOperands(subOperands); + SmallVector composedOpOperands(subOperands); auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size()); fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands); @@ -378,7 +375,7 @@ void mlir::createAffineComputationSlice( // affine apply op above instead of existing ones (subOperands). So, they // differ from opInst's operands only for those operands in 'subOperands', for // which they will be replaced by the corresponding one from 'sliceOps'. - SmallVector newOperands(opInst->getOperands()); + SmallVector newOperands(opInst->getOperands()); for (unsigned i = 0, e = newOperands.size(); i < e; i++) { // Replace the subOperands from among the new operands. unsigned j, f; @@ -451,8 +448,8 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) { newShape[d] = ubConst.getValue() + 1; } - auto *oldMemRef = allocOp.getResult(); - SmallVector symbolOperands(allocOp.getSymbolicOperands()); + auto oldMemRef = allocOp.getResult(); + SmallVector symbolOperands(allocOp.getSymbolicOperands()); auto newMemRefType = MemRefType::get(newShape, memrefType.getElementType(), b.getMultiDimIdentityMap(newRank)); diff --git a/lib/Transforms/Vectorize.cpp b/lib/Transforms/Vectorize.cpp index e3212d54e42c..d8f5b1dc0e44 100644 --- a/lib/Transforms/Vectorize.cpp +++ b/lib/Transforms/Vectorize.cpp @@ -705,7 +705,7 @@ struct VectorizationState { // Map of old scalar Operation to new vectorized Operation. DenseMap vectorizationMap; // Map of old scalar Value to new vectorized Value. - DenseMap replacementMap; + DenseMap replacementMap; // The strategy drives which loop to vectorize by which amount. const VectorizationStrategy *strategy; // Use-def roots. 
These represent the starting points for the worklist in the @@ -728,7 +728,7 @@ struct VectorizationState { OperationFolder *folder; private: - void registerReplacement(Value *key, Value *value); + void registerReplacement(ValuePtr key, ValuePtr value); }; } // end namespace @@ -768,7 +768,7 @@ void VectorizationState::finishVectorizationPattern() { } } -void VectorizationState::registerReplacement(Value *key, Value *value) { +void VectorizationState::registerReplacement(ValuePtr key, ValuePtr value) { assert(replacementMap.count(key) == 0 && "replacement already registered"); replacementMap.insert(std::make_pair(key, value)); } @@ -776,7 +776,7 @@ void VectorizationState::registerReplacement(Value *key, Value *value) { // Apply 'map' with 'mapOperands' returning resulting values in 'results'. static void computeMemoryOpIndices(Operation *op, AffineMap map, ValueRange mapOperands, - SmallVectorImpl &results) { + SmallVectorImpl &results) { OpBuilder builder(op); for (auto resultExpr : map.getResults()) { auto singleResMap = @@ -803,7 +803,7 @@ static void computeMemoryOpIndices(Operation *op, AffineMap map, /// Such special cases force us to delay the vectorization of the stores until /// the last step. Here we merely register the store operation. template -static LogicalResult vectorizeRootOrTerminal(Value *iv, +static LogicalResult vectorizeRootOrTerminal(ValuePtr iv, LoadOrStoreOpPointer memoryOp, VectorizationState *state) { auto memRefType = memoryOp.getMemRef()->getType().template cast(); @@ -823,7 +823,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv, if (auto load = dyn_cast(opInst)) { OpBuilder b(opInst); ValueRange mapOperands = load.getMapOperands(); - SmallVector indices; + SmallVector indices; indices.reserve(load.getMemRefType().getRank()); if (load.getAffineMap() != b.getMultiDimIdentityMap(load.getMemRefType().getRank())) { @@ -838,8 +838,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv, LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: "); LLVM_DEBUG(permutationMap.print(dbgs())); auto transfer = b.create( - opInst->getLoc(), vectorType, memoryOp.getMemRef(), - map(makePtrDynCaster(), indices), + opInst->getLoc(), vectorType, memoryOp.getMemRef(), indices, AffineMapAttr::get(permutationMap), // TODO(b/144455320) add a proper padding value, not just 0.0 : f32 state->folder->create(b, opInst->getLoc(), @@ -951,7 +950,8 @@ vectorizeLoopsAndLoadsRecursively(NestedMatch oneMatch, /// element type. /// If `type` is not a valid vector type or if the scalar constant is not a /// valid vector element type, returns nullptr. -static Value *vectorizeConstant(Operation *op, ConstantOp constant, Type type) { +static ValuePtr vectorizeConstant(Operation *op, ConstantOp constant, + Type type) { if (!type || !type.isa() || !VectorType::isValidElementType(constant.getType())) { return nullptr; @@ -989,8 +989,8 @@ static Value *vectorizeConstant(Operation *op, ConstantOp constant, Type type) { /// vectorization is possible with the above logic. Returns nullptr otherwise. /// /// TODO(ntv): handle more complex cases. -static Value *vectorizeOperand(Value *operand, Operation *op, - VectorizationState *state) { +static ValuePtr vectorizeOperand(ValuePtr operand, Operation *op, + VectorizationState *state) { LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: "); LLVM_DEBUG(operand->print(dbgs())); // 1. If this value has already been vectorized this round, we are done. 
@@ -1004,7 +1004,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op, // been vectorized. This would be invalid IR. auto it = state->replacementMap.find(operand); if (it != state->replacementMap.end()) { - auto *res = it->second; + auto res = it->second; LLVM_DEBUG(dbgs() << "-> delayed replacement by: "); LLVM_DEBUG(res->print(dbgs())); return res; @@ -1047,12 +1047,12 @@ static Operation *vectorizeOneOperation(Operation *opInst, if (auto store = dyn_cast(opInst)) { OpBuilder b(opInst); - auto *memRef = store.getMemRef(); - auto *value = store.getValueToStore(); - auto *vectorValue = vectorizeOperand(value, opInst, state); + auto memRef = store.getMemRef(); + auto value = store.getValueToStore(); + auto vectorValue = vectorizeOperand(value, opInst, state); ValueRange mapOperands = store.getMapOperands(); - SmallVector indices; + SmallVector indices; indices.reserve(store.getMemRefType().getRank()); if (store.getAffineMap() != b.getMultiDimIdentityMap(store.getMemRefType().getRank())) { @@ -1081,16 +1081,16 @@ static Operation *vectorizeOneOperation(Operation *opInst, return nullptr; SmallVector vectorTypes; - for (auto *v : opInst->getResults()) { + for (auto v : opInst->getResults()) { vectorTypes.push_back( VectorType::get(state->strategy->vectorSizes, v->getType())); } - SmallVector vectorOperands; - for (auto *v : opInst->getOperands()) { + SmallVector vectorOperands; + for (auto v : opInst->getOperands()) { vectorOperands.push_back(vectorizeOperand(v, opInst, state)); } // Check whether a single operand is null. If so, vectorization failed. - bool success = llvm::all_of(vectorOperands, [](Value *op) { return op; }); + bool success = llvm::all_of(vectorOperands, [](ValuePtr op) { return op; }); if (!success) { LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize"); return nullptr; diff --git a/test/EDSC/builder-api-test.cpp b/test/EDSC/builder-api-test.cpp index 0b105eadf5a9..376fc249a18b 100644 --- a/test/EDSC/builder-api-test.cpp +++ b/test/EDSC/builder-api-test.cpp @@ -484,7 +484,7 @@ TEST_FUNC(select_op_i32) { IndexedValue A(f.getArgument(0)); IndexHandle i, j; AffineLoopNestBuilder({&i, &j}, {zero, zero}, {one, one}, {1, 1})([&]{ - // This test exercises IndexedValue::operator Value*. + // This test exercises IndexedValue::operator Value. // Without it, one must force conversion to ValueHandle as such: // edsc::intrinsics::select( // i == zero, ValueHandle(A(zero, zero)), ValueHandle(ValueA(i, j))) @@ -802,7 +802,7 @@ TEST_FUNC(affine_if_op) { }; auto intSet = IntegerSet::get(2, 2, affineExprs, isEq); - SmallVector affineIfArgs = {zero, zero, ten, ten}; + SmallVector affineIfArgs = {zero, zero, ten, ten}; intrinsics::affine_if(intSet, affineIfArgs, /*withElseRegion=*/false); intrinsics::affine_if(intSet, affineIfArgs, /*withElseRegion=*/true); diff --git a/test/lib/TestDialect/TestDialect.cpp b/test/lib/TestDialect/TestDialect.cpp index 7462db4544f2..12d024f65932 100644 --- a/test/lib/TestDialect/TestDialect.cpp +++ b/test/lib/TestDialect/TestDialect.cpp @@ -100,7 +100,7 @@ struct TestInlinerInterface : public DialectInlinerInterface { /// Handle the given inlined terminator by replacing it with a new operation /// as necessary. void handleTerminator(Operation *op, - ArrayRef valuesToRepl) const final { + ArrayRef valuesToRepl) const final { // Only handle "test.return" here. 
auto returnOp = dyn_cast(op); if (!returnOp) @@ -117,7 +117,7 @@ struct TestInlinerInterface : public DialectInlinerInterface { /// operation that takes 'input' as the only operand, and produces a single /// result of 'resultType'. If a conversion can not be generated, nullptr /// should be returned. - Operation *materializeCallConversion(OpBuilder &builder, Value *input, + Operation *materializeCallConversion(OpBuilder &builder, ValuePtr input, Type resultType, Location conversionLoc) const final { // Only allow conversion for i16/i32 types. @@ -231,7 +231,7 @@ static ParseResult parseWrappingRegionOp(OpAsmParser &parser, // Create a return terminator in the inner region, pass as operand to the // terminator the returned values from the wrapped operation. - SmallVector return_operands(wrapped_op->getResults()); + SmallVector return_operands(wrapped_op->getResults()); OpBuilder builder(parser.getBuilder().getContext()); builder.setInsertionPointToEnd(&block); builder.create(wrapped_op->getLoc(), return_operands); @@ -297,7 +297,7 @@ OpFoldResult TestOpWithRegionFold::fold(ArrayRef operands) { LogicalResult TestOpWithVariadicResultsAndFolder::fold( ArrayRef operands, SmallVectorImpl &results) { - for (Value *input : this->operands()) { + for (ValuePtr input : this->operands()) { results.push_back(input); } return success(); diff --git a/test/lib/TestDialect/TestOps.td b/test/lib/TestDialect/TestOps.td index e33d9c26c7f1..ea071f0ddf42 100644 --- a/test/lib/TestDialect/TestOps.td +++ b/test/lib/TestDialect/TestOps.td @@ -644,7 +644,7 @@ def OpSymbolBindingB : TEST_Op<"symbol_binding_b", []> { let builders = [ OpBuilder< - "Builder *builder, OperationState &state, Value *operand", + "Builder *builder, OperationState &state, ValuePtr operand", [{ state.types.assign({builder->getIntegerType(32)}); state.addOperands({operand}); diff --git a/test/lib/TestDialect/TestPatterns.cpp b/test/lib/TestDialect/TestPatterns.cpp index 94eb792cc66b..1f6224dba3a8 100644 --- a/test/lib/TestDialect/TestPatterns.cpp +++ b/test/lib/TestDialect/TestPatterns.cpp @@ -22,11 +22,12 @@ using namespace mlir; // Native function for testing NativeCodeCall -static Value *chooseOperand(Value *input1, Value *input2, BoolAttr choice) { +static ValuePtr chooseOperand(ValuePtr input1, ValuePtr input2, + BoolAttr choice) { return choice.getValue() ? input1 : input2; } -static void createOpI(PatternRewriter &rewriter, Value *input) { +static void createOpI(PatternRewriter &rewriter, ValuePtr input) { rewriter.create(rewriter.getUnknownLoc(), input); } @@ -73,7 +74,7 @@ struct ReturnTypeOpMatch : public RewritePattern { PatternMatchResult matchAndRewrite(Operation *op, PatternRewriter &rewriter) const final { if (auto retTypeFn = dyn_cast(op)) { - SmallVector values(op->getOperands()); + SmallVector values(op->getOperands()); SmallVector inferedReturnTypes; if (failed(retTypeFn.inferReturnTypes(op->getLoc(), values, op->getAttrs(), op->getRegions(), @@ -132,7 +133,7 @@ struct TestRegionRewriteBlockMovement : public ConversionPattern { : ConversionPattern("test.region", 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { // Inline this region into the parent region. auto &parentRegion = *op->getParentRegion(); @@ -165,7 +166,7 @@ struct TestRegionRewriteUndo : public RewritePattern { // Add an explicitly illegal operation to ensure the conversion fails. 
rewriter.create(op->getLoc(), rewriter.getIntegerType(32)); - rewriter.create(op->getLoc(), ArrayRef()); + rewriter.create(op->getLoc(), ArrayRef()); // Drop this operation. rewriter.eraseOp(op); @@ -182,7 +183,7 @@ struct TestDropOpSignatureConversion : public ConversionPattern { : ConversionPattern("test.drop_region_op", 1, ctx), converter(converter) { } PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { Region ®ion = op->getRegion(0); Block *entry = ®ion.front(); @@ -208,7 +209,7 @@ struct TestPassthroughInvalidOp : public ConversionPattern { TestPassthroughInvalidOp(MLIRContext *ctx) : ConversionPattern("test.invalid", 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { rewriter.replaceOpWithNewOp(op, llvm::None, operands, llvm::None); @@ -220,7 +221,7 @@ struct TestSplitReturnType : public ConversionPattern { TestSplitReturnType(MLIRContext *ctx) : ConversionPattern("test.return", 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { // Check for a return of F32. if (op->getNumOperands() != 1 || !op->getOperand(0)->getType().isF32()) @@ -245,7 +246,7 @@ struct TestChangeProducerTypeI32ToF32 : public ConversionPattern { TestChangeProducerTypeI32ToF32(MLIRContext *ctx) : ConversionPattern("test.type_producer", 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { // If the type is I32, change the type to F32. if (!(*op->result_type_begin()).isInteger(32)) @@ -258,7 +259,7 @@ struct TestChangeProducerTypeF32ToF64 : public ConversionPattern { TestChangeProducerTypeF32ToF64(MLIRContext *ctx) : ConversionPattern("test.type_producer", 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { // If the type is F32, change the type to F64. if (!(*op->result_type_begin()).isF32()) @@ -271,7 +272,7 @@ struct TestChangeProducerTypeF32ToInvalid : public ConversionPattern { TestChangeProducerTypeF32ToInvalid(MLIRContext *ctx) : ConversionPattern("test.type_producer", 10, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { // Always convert to B16, even though it is not a legal type. This tests // that values are unmapped correctly. @@ -283,7 +284,7 @@ struct TestUpdateConsumerType : public ConversionPattern { TestUpdateConsumerType(MLIRContext *ctx) : ConversionPattern("test.type_consumer", 1, ctx) {} PatternMatchResult - matchAndRewrite(Operation *op, ArrayRef operands, + matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const final { // Verify that the incoming operand has been successfully remapped to F64. if (!operands[0]->getType().isF64()) @@ -344,7 +345,7 @@ struct TestTypeConverter : public TypeConverter { /// Override the hook to materialize a conversion. This is necessary because /// we generate 1->N type mappings. 
Operation *materializeConversion(PatternRewriter &rewriter, Type resultType, - ArrayRef inputs, + ArrayRef inputs, Location loc) override { return rewriter.create(loc, resultType, inputs); } @@ -467,13 +468,13 @@ struct OneVResOneVOperandOp1Converter using OpConversionPattern::OpConversionPattern; PatternMatchResult - matchAndRewrite(OneVResOneVOperandOp1 op, ArrayRef operands, + matchAndRewrite(OneVResOneVOperandOp1 op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { auto origOps = op.getOperands(); assert(std::distance(origOps.begin(), origOps.end()) == 1 && "One operand expected"); - Value *origOp = *origOps.begin(); - SmallVector remappedOperands; + ValuePtr origOp = *origOps.begin(); + SmallVector remappedOperands; // Replicate the remapped original operand twice. Note that we don't used // the remapped 'operand' since the goal is testing 'getRemappedValue'. remappedOperands.push_back(rewriter.getRemappedValue(origOp)); diff --git a/test/lib/Transforms/TestLoopMapping.cpp b/test/lib/Transforms/TestLoopMapping.cpp index c25fea9aa131..7f587fc3170a 100644 --- a/test/lib/Transforms/TestLoopMapping.cpp +++ b/test/lib/Transforms/TestLoopMapping.cpp @@ -41,7 +41,7 @@ class TestLoopMappingPass : public FunctionPass { // SSA values for the transformation are created out of thin air by // unregistered "new_processor_id_and_range" operations. This is enough to // emulate mapping conditions. - SmallVector processorIds, numProcessors; + SmallVector processorIds, numProcessors; func.walk([&processorIds, &numProcessors](Operation *op) { if (op->getName().getStringRef() != "new_processor_id_and_range") return; diff --git a/test/lib/Transforms/TestVectorizationUtils.cpp b/test/lib/Transforms/TestVectorizationUtils.cpp index 7efc74f23041..35df0631ca77 100644 --- a/test/lib/Transforms/TestVectorizationUtils.cpp +++ b/test/lib/Transforms/TestVectorizationUtils.cpp @@ -245,7 +245,7 @@ void VectorizerTestPass::testNormalizeMaps() { for (auto m : matches) { auto app = cast(m.getMatchedOperation()); OpBuilder b(m.getMatchedOperation()); - SmallVector operands(app.getOperands()); + SmallVector operands(app.getOperands()); makeComposedAffineApply(b, app.getLoc(), app.getAffineMap(), operands); } } diff --git a/test/mlir-tblgen/op-attribute.td b/test/mlir-tblgen/op-attribute.td index fa73697dba89..004e76622994 100644 --- a/test/mlir-tblgen/op-attribute.td +++ b/test/mlir-tblgen/op-attribute.td @@ -216,9 +216,9 @@ def MixOperandsAndAttrs : NS_Op<"mix_operands_and_attrs", []> { } // DEF-LABEL: MixOperandsAndAttrs definitions -// DEF-DAG: Value *MixOperandsAndAttrs::operand() -// DEF-DAG: Value *MixOperandsAndAttrs::otherArg() -// DEF-DAG: void MixOperandsAndAttrs::build(Builder *tblgen_builder, OperationState &tblgen_state, FloatAttr attr, Value *operand, FloatAttr otherAttr, Value *otherArg) +// DEF-DAG: ValuePtr MixOperandsAndAttrs::operand() +// DEF-DAG: ValuePtr MixOperandsAndAttrs::otherArg() +// DEF-DAG: void MixOperandsAndAttrs::build(Builder *tblgen_builder, OperationState &tblgen_state, FloatAttr attr, ValuePtr operand, FloatAttr otherAttr, ValuePtr otherArg) // DEF-DAG: APFloat MixOperandsAndAttrs::attr() // DEF-DAG: APFloat MixOperandsAndAttrs::otherAttr() diff --git a/test/mlir-tblgen/op-decl.td b/test/mlir-tblgen/op-decl.td index a217a139848b..55952236429e 100644 --- a/test/mlir-tblgen/op-decl.td +++ b/test/mlir-tblgen/op-decl.td @@ -26,7 +26,7 @@ def NS_AOp : NS_Op<"a_op", [NoSideEffect, NoSideEffect]> { ); let regions = (region AnyRegion:$someRegion); - let builders = 
[OpBuilder<"Value *val">]; + let builders = [OpBuilder<"ValuePtr val">]; let parser = [{ foo }]; let printer = [{ bar }]; let verifier = [{ baz }]; @@ -46,12 +46,12 @@ def NS_AOp : NS_Op<"a_op", [NoSideEffect, NoSideEffect]> { // CHECK: class AOpOperandAdaptor { // CHECK: public: -// CHECK: AOpOperandAdaptor(ArrayRef values); -// CHECK: ArrayRef getODSOperands(unsigned index); -// CHECK: Value *a(); -// CHECK: ArrayRef b(); +// CHECK: AOpOperandAdaptor(ArrayRef values); +// CHECK: ArrayRef getODSOperands(unsigned index); +// CHECK: ValuePtr a(); +// CHECK: ArrayRef b(); // CHECK: private: -// CHECK: ArrayRef tblgen_operands; +// CHECK: ArrayRef tblgen_operands; // CHECK: }; // CHECK: class AOp : public Op::Impl, OpTrait::HasNoSideEffect, OpTrait::AtLeastNOperands<1>::Impl @@ -60,18 +60,18 @@ def NS_AOp : NS_Op<"a_op", [NoSideEffect, NoSideEffect]> { // CHECK: using OperandAdaptor = AOpOperandAdaptor; // CHECK: static StringRef getOperationName(); // CHECK: Operation::operand_range getODSOperands(unsigned index); -// CHECK: Value *a(); +// CHECK: ValuePtr a(); // CHECK: Operation::operand_range b(); // CHECK: Operation::result_range getODSResults(unsigned index); -// CHECK: Value *r(); +// CHECK: ValuePtr r(); // CHECK: Region &someRegion(); // CHECK: IntegerAttr attr1Attr() // CHECK: APInt attr1(); // CHECK: FloatAttr attr2Attr() // CHECK: Optional< APFloat > attr2(); -// CHECK: static void build(Value *val); -// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef s, Value *a, ValueRange b, IntegerAttr attr1, /*optional*/FloatAttr attr2) -// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef s, Value *a, ValueRange b, APInt attr1, /*optional*/FloatAttr attr2) +// CHECK: static void build(ValuePtr val); +// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef s, ValuePtr a, ValueRange b, IntegerAttr attr1, /*optional*/FloatAttr attr2) +// CHECK: static void build(Builder *tblgen_builder, OperationState &tblgen_state, Type r, ArrayRef s, ValuePtr a, ValueRange b, APInt attr1, /*optional*/FloatAttr attr2) // CHECK: static void build(Builder *, OperationState &tblgen_state, ArrayRef resultTypes, ValueRange operands, ArrayRef attributes) // CHECK: static ParseResult parse(OpAsmParser &parser, OperationState &result); // CHECK: void print(OpAsmPrinter &p); @@ -111,7 +111,7 @@ def NS_DOp : NS_Op<"op_with_two_operands", []> { def NS_SkipDefaultBuildersOp : NS_Op<"skip_default_builders", []> { let skipDefaultBuilders = 1; - let builders = [OpBuilder<"Value *val">]; + let builders = [OpBuilder<"ValuePtr val">]; } // CHECK-LABEL: NS::SkipDefaultBuildersOp declarations diff --git a/test/mlir-tblgen/op-operand.td b/test/mlir-tblgen/op-operand.td index 872cc474a064..c592686ebd32 100644 --- a/test/mlir-tblgen/op-operand.td +++ b/test/mlir-tblgen/op-operand.td @@ -18,7 +18,7 @@ def OpA : NS_Op<"one_normal_operand_op", []> { // CHECK-NEXT: tblgen_operands = values // CHECK: void OpA::build -// CHECK: Value *input +// CHECK: ValuePtr input // CHECK: tblgen_state.addOperands(input); // CHECK: void OpA::build @@ -39,19 +39,19 @@ def OpD : NS_Op<"mix_variadic_and_normal_inputs_op", [SameVariadicOperandSize]> let arguments = (ins Variadic:$input1, AnyTensor:$input2, Variadic:$input3); } -// CHECK-LABEL: ArrayRef OpDOperandAdaptor::input1 +// CHECK-LABEL: ArrayRef OpDOperandAdaptor::input1 // CHECK-NEXT: return getODSOperands(0); -// CHECK-LABEL: Value *OpDOperandAdaptor::input2 
+// CHECK-LABEL: ValuePtr OpDOperandAdaptor::input2 // CHECK-NEXT: return *getODSOperands(1).begin(); -// CHECK-LABEL: ArrayRef OpDOperandAdaptor::input3 +// CHECK-LABEL: ArrayRef OpDOperandAdaptor::input3 // CHECK-NEXT: return getODSOperands(2); // CHECK-LABEL: Operation::operand_range OpD::input1 // CHECK-NEXT: return getODSOperands(0); -// CHECK-LABEL: Value *OpD::input2 +// CHECK-LABEL: ValuePtr OpD::input2 // CHECK-NEXT: return *getODSOperands(1).begin(); // CHECK-LABEL: OpD::build diff --git a/test/mlir-tblgen/op-result.td b/test/mlir-tblgen/op-result.td index 4ee631986ccc..f9a77ea492ea 100644 --- a/test/mlir-tblgen/op-result.td +++ b/test/mlir-tblgen/op-result.td @@ -23,9 +23,9 @@ def OpB : NS_Op<"same_input_output_type_op", [SameOperandsAndResultType]> { } // CHECK-LABEL: OpB definitions -// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Type y, Value *x) +// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Type y, ValuePtr x) // CHECK: tblgen_state.addTypes(y); -// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Value *x) +// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, ValuePtr x) // CHECK: tblgen_state.addTypes({x->getType()}); def OpC : NS_Op<"three_normal_result_op", []> { @@ -89,7 +89,7 @@ def OpI : NS_Op<"mix_variadic_and_normal_results_op", [SameVariadicResultSize]> // CHECK-LABEL: Operation::result_range OpI::output1 // CHECK-NEXT: return getODSResults(0); -// CHECK-LABEL: Value *OpI::output2 +// CHECK-LABEL: ValuePtr OpI::output2 // CHECK-NEXT: return *getODSResults(1).begin(); // CHECK-LABEL: OpI::build diff --git a/test/mlir-tblgen/predicate.td b/test/mlir-tblgen/predicate.td index 26a5b746fb46..fef1b139dc98 100644 --- a/test/mlir-tblgen/predicate.td +++ b/test/mlir-tblgen/predicate.td @@ -16,7 +16,7 @@ def OpA : NS_Op<"op_for_CPred_containing_multiple_same_placeholder", []> { } // CHECK-LABEL: OpA::verify -// CHECK: for (Value *v : getODSOperands(0)) { +// CHECK: for (ValuePtr v : getODSOperands(0)) { // CHECK: if (!((v->getType().isInteger(32) || v->getType().isF32()))) def OpB : NS_Op<"op_for_And_PredOpTrait", [ @@ -90,5 +90,5 @@ def OpK : NS_Op<"op_for_AnyTensorOf", []> { } // CHECK-LABEL: OpK::verify -// CHECK: for (Value *v : getODSOperands(0)) { +// CHECK: for (ValuePtr v : getODSOperands(0)) { // CHECK: if (!(((v->getType().isa())) && (((v->getType().cast().getElementType().isF32())) || ((v->getType().cast().getElementType().isInteger(32)))))) diff --git a/tools/mlir-tblgen/OpDefinitionsGen.cpp b/tools/mlir-tblgen/OpDefinitionsGen.cpp index dd56458ccb38..df8feb855c5a 100644 --- a/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -713,11 +713,12 @@ void OpEmitter::genAttrGetters() { // Generates the named operand getter methods for the given Operator `op` and // puts them in `opClass`. Uses `rangeType` as the return type of getters that -// return a range of operands (individual operands are `Value *` and each -// element in the range must also be `Value *`); use `rangeBeginCall` to get an -// iterator to the beginning of the operand range; use `rangeSizeCall` to obtain -// the number of operands. 
`getOperandCallPattern` contains the code necessary
-// to obtain a single operand whose position will be substituted instead of
+// return a range of operands (individual operands are `ValuePtr ` and each
+// element in the range must also be `ValuePtr `); use `rangeBeginCall` to get
+// an iterator to the beginning of the operand range; use `rangeSizeCall` to
+// obtain the number of operands. `getOperandCallPattern` contains the code
+// necessary to obtain a single operand whose position will be substituted
+// instead of
// "{0}" marker in the pattern. Note that the pattern should work for any kind
// of ops, in particular for one-operand ops that may not have the
// `getOperand(unsigned)` method.
@@ -790,7 +791,7 @@ static void generateNamedOperandGetters(const Operator &op, Class &opClass,
auto &m = opClass.newMethod(rangeType, operand.name);
m.body() << " return getODSOperands(" << i << ");";
} else {
- auto &m = opClass.newMethod("Value *", operand.name);
+ auto &m = opClass.newMethod("ValuePtr ", operand.name);
m.body() << " return *getODSOperands(" << i << ").begin();";
}
}
@@ -868,7 +869,7 @@ void OpEmitter::genNamedResultGetters() {
auto &m = opClass.newMethod("Operation::result_range", result.name);
m.body() << " return getODSResults(" << i << ");";
} else {
- auto &m = opClass.newMethod("Value *", result.name);
+ auto &m = opClass.newMethod("ValuePtr ", result.name);
m.body() << " return *getODSResults(" << i << ").begin();";
}
}
@@ -1246,7 +1247,7 @@ void OpEmitter::buildParamList(std::string &paramList,
auto argument = op.getArg(i);
if (argument.is()) {
const auto &operand = op.getOperand(numOperands);
- paramList.append(operand.isVariadic() ? ", ValueRange " : ", Value *");
+ paramList.append(operand.isVariadic() ? ", ValueRange " : ", ValuePtr ");
paramList.append(getArgumentName(op, numOperands));
++numOperands;
} else {
@@ -1535,7 +1536,7 @@ void OpEmitter::genOperandResultVerifier(OpMethodBody &body,
continue;
// Emit a loop to check all the dynamic values in the pack.
- body << formatv(" for (Value *v : getODS{0}{1}s({2})) {{\n",
+ body << formatv(" for (ValuePtr v : getODS{0}{1}s({2})) {{\n",
// Capitalize the first letter to match the function name
valueKind.substr(0, 1).upper(), valueKind.substr(1),
staticValue.index());
@@ -1690,7 +1691,7 @@ void OpEmitter::genOpAsmInterface() {
namespace {
// Helper class to emit Op operand adaptors to an output stream. Operand
-// adaptors are wrappers around ArrayRef that provide named operand
+// adaptors are wrappers around ArrayRef that provide named operand
// getters identical to those defined in the Op.
class OpOperandAdaptorEmitter { public: @@ -1706,12 +1707,12 @@ class OpOperandAdaptorEmitter { OpOperandAdaptorEmitter::OpOperandAdaptorEmitter(const Operator &op) : adapterClass(op.getCppClassName().str() + "OperandAdaptor") { - adapterClass.newField("ArrayRef", "tblgen_operands"); - auto &constructor = adapterClass.newConstructor("ArrayRef values"); + adapterClass.newField("ArrayRef", "tblgen_operands"); + auto &constructor = adapterClass.newConstructor("ArrayRef values"); constructor.body() << " tblgen_operands = values;\n"; generateNamedOperandGetters(op, adapterClass, - /*rangeType=*/"ArrayRef", + /*rangeType=*/"ArrayRef", /*rangeBeginCall=*/"tblgen_operands.begin()", /*rangeSizeCall=*/"tblgen_operands.size()", /*getOperandCallPattern=*/"tblgen_operands[{0}]"); diff --git a/tools/mlir-tblgen/RewriterGen.cpp b/tools/mlir-tblgen/RewriterGen.cpp index b2376e8739c5..a74bc23a95ac 100644 --- a/tools/mlir-tblgen/RewriterGen.cpp +++ b/tools/mlir-tblgen/RewriterGen.cpp @@ -576,14 +576,14 @@ void PatternEmitter::emitRewriteLogic() { os.indent(4) << "rewriter.eraseOp(op0);\n"; } else { // Process replacement result patterns. - os.indent(4) << "SmallVector tblgen_repl_values;\n"; + os.indent(4) << "SmallVector tblgen_repl_values;\n"; for (int i = replStartIndex; i < numResultPatterns; ++i) { DagNode resultTree = pattern.getResultPattern(i); auto val = handleResultPattern(resultTree, offsets[i], 0); os.indent(4) << "\n"; // Resolve each symbol for all range use so that we can loop over them. os << symbolInfoMap.getAllRangeUse( - val, " for (auto *v : {0}) {{ tblgen_repl_values.push_back(v); }", + val, " for (auto v : {0}) {{ tblgen_repl_values.push_back(v); }", "\n"); } os.indent(4) << "\n"; @@ -819,7 +819,7 @@ std::string PatternEmitter::handleOpCreation(DagNode tree, int resultIndex, int numResults = resultOp.getNumResults(); if (numResults != 0) { for (int i = 0; i < numResults; ++i) - os.indent(6) << formatv("for (auto *v : castedOp0.getODSResults({0})) {{" + os.indent(6) << formatv("for (auto v : castedOp0.getODSResults({0})) {{" "tblgen_types.push_back(v->getType()); }\n", resultIndex + i); } @@ -835,8 +835,8 @@ void PatternEmitter::createSeparateLocalVarsForOpArgs( Operator &resultOp = node.getDialectOp(opMap); // Now prepare operands used for building this op: - // * If the operand is non-variadic, we create a `Value*` local variable. - // * If the operand is variadic, we create a `SmallVector` local + // * If the operand is non-variadic, we create a `Value` local variable. + // * If the operand is variadic, we create a `SmallVector` local // variable. int valueIndex = 0; // An index for uniquing local variable names. @@ -851,7 +851,7 @@ void PatternEmitter::createSeparateLocalVarsForOpArgs( std::string varName; if (operand->isVariadic()) { varName = formatv("tblgen_values_{0}", valueIndex++); - os.indent(6) << formatv("SmallVector {0};\n", varName); + os.indent(6) << formatv("SmallVector {0};\n", varName); std::string range; if (node.isNestedDagArg(argIndex)) { range = childNodeNames[argIndex]; @@ -861,11 +861,11 @@ void PatternEmitter::createSeparateLocalVarsForOpArgs( // Resolve the symbol for all range use so that we have a uniform way of // capturing the values. 
range = symbolInfoMap.getValueAndRangeUse(range); - os.indent(6) << formatv("for (auto *v : {0}) {1}.push_back(v);\n", range, + os.indent(6) << formatv("for (auto v : {0}) {1}.push_back(v);\n", range, varName); } else { varName = formatv("tblgen_value_{0}", valueIndex++); - os.indent(6) << formatv("Value *{0} = ", varName); + os.indent(6) << formatv("ValuePtr {0} = ", varName); if (node.isNestedDagArg(argIndex)) { os << symbolInfoMap.getValueAndRangeUse(childNodeNames[argIndex]); } else { @@ -934,7 +934,7 @@ void PatternEmitter::createAggregateLocalVarsForOpArgs( Operator &resultOp = node.getDialectOp(opMap); os.indent(6) << formatv( - "SmallVector tblgen_values; (void)tblgen_values;\n"); + "SmallVector tblgen_values; (void)tblgen_values;\n"); os.indent(6) << formatv( "SmallVector tblgen_attrs; (void)tblgen_attrs;\n"); @@ -975,7 +975,7 @@ void PatternEmitter::createAggregateLocalVarsForOpArgs( // capturing the values. range = symbolInfoMap.getValueAndRangeUse(range); os.indent(6) << formatv( - "for (auto *v : {0}) tblgen_values.push_back(v);\n", range); + "for (auto v : {0}) tblgen_values.push_back(v);\n", range); } else { os.indent(6) << formatv("tblgen_values.push_back(", varName); if (node.isNestedDagArg(argIndex)) { diff --git a/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/tools/mlir-tblgen/SPIRVUtilsGen.cpp index f1712efb3198..6d5bcc116ad6 100644 --- a/tools/mlir-tblgen/SPIRVUtilsGen.cpp +++ b/tools/mlir-tblgen/SPIRVUtilsGen.cpp @@ -470,7 +470,7 @@ static void emitDeserializationFunction(const Record *attrClass, emitResultDeserialization(op, record->getLoc(), " ", words, wordIndex, resultTypes, valueID, os); - os << formatv(" SmallVector {0};\n", operands); + os << formatv(" SmallVector {0};\n", operands); os << formatv(" SmallVector {0};\n", attributes); // Operand deserialization emitOperandDeserialization(op, record->getLoc(), " ", words, wordIndex, diff --git a/unittests/IR/OperationSupportTest.cpp b/unittests/IR/OperationSupportTest.cpp index 80f82ac3e5d6..d7dae4648fed 100644 --- a/unittests/IR/OperationSupportTest.cpp +++ b/unittests/IR/OperationSupportTest.cpp @@ -25,7 +25,7 @@ using namespace mlir::detail; namespace { Operation *createOp(MLIRContext *context, bool resizableOperands, - ArrayRef operands = llvm::None, + ArrayRef operands = llvm::None, ArrayRef resultTypes = llvm::None) { return Operation::create( UnknownLoc::get(context), OperationName("foo.bar", context), resultTypes, @@ -39,7 +39,7 @@ TEST(OperandStorageTest, NonResizable) { Operation *useOp = createOp(&context, /*resizableOperands=*/false, /*operands=*/llvm::None, builder.getIntegerType(16)); - Value *operand = useOp->getResult(0); + ValuePtr operand = useOp->getResult(0); // Create a non-resizable operation with one operand. Operation *user = createOp(&context, /*resizableOperands=*/false, operand, @@ -68,7 +68,7 @@ TEST(OperandStorageDeathTest, AddToNonResizable) { Operation *useOp = createOp(&context, /*resizableOperands=*/false, /*operands=*/llvm::None, builder.getIntegerType(16)); - Value *operand = useOp->getResult(0); + ValuePtr operand = useOp->getResult(0); // Create a non-resizable operation with one operand. Operation *user = createOp(&context, /*resizableOperands=*/false, operand, @@ -88,7 +88,7 @@ TEST(OperandStorageTest, Resizable) { Operation *useOp = createOp(&context, /*resizableOperands=*/false, /*operands=*/llvm::None, builder.getIntegerType(16)); - Value *operand = useOp->getResult(0); + ValuePtr operand = useOp->getResult(0); // Create a resizable operation with one operand. 
Operation *user = createOp(&context, /*resizableOperands=*/true, operand,