diff --git a/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp b/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
index 26406ceef082c..7e4a5acb9867d 100644
--- a/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
+++ b/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
@@ -74,12 +74,12 @@ struct CreatorOpShardingInterface
       if (!oldType.isDynamicDim(i) && shardType.isDynamicDim(i)) {
         if (!newSharding) {
           newSharding =
-              builder.create<ShardingOp>(op->getLoc(), resultShardings[0]);
+              ShardingOp::create(builder, op->getLoc(), resultShardings[0]);
           device =
-              builder.create<mesh::ProcessMultiIndexOp>(op->getLoc(), mesh)
+              mesh::ProcessMultiIndexOp::create(builder, op->getLoc(), mesh)
                   .getResults();
-          shapeForDevice = builder.create<mesh::ShardShapeOp>(
-              op->getLoc(), oldType.getShape(), spmdizedOperands,
+          shapeForDevice = mesh::ShardShapeOp::create(
+              builder, op->getLoc(), oldType.getShape(), spmdizedOperands,
               newSharding->getResult(0), device);
         }
         newOperands.emplace_back(shapeForDevice.getResult()[i]);
@@ -88,7 +88,7 @@ struct CreatorOpShardingInterface
         newOperands.emplace_back(spmdizedOperands[++currOldOprndNum]);
       }
     }
-    newOp = builder.create<OpTy>(op->getLoc(), shardType, newOperands);
+    newOp = OpTy::create(builder, op->getLoc(), shardType, newOperands);
     spmdizationMap.map(op->getResult(0), newOp->getResult(0));
   } else {
     // `clone` will populate the mapping of old to new results.
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index b035a53692dcf..7d4b1127a08be 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -53,8 +53,8 @@ Operation *TensorDialect::materializeConstant(OpBuilder &builder,
   if (auto op = arith::ConstantOp::materialize(builder, value, type, loc))
     return op;
   if (complex::ConstantOp::isBuildableWith(value, type))
-    return builder.create<complex::ConstantOp>(loc, type,
-                                               llvm::cast<ArrayAttr>(value));
+    return complex::ConstantOp::create(builder, loc, type,
+                                       llvm::cast<ArrayAttr>(value));
   return nullptr;
 }
@@ -107,7 +107,7 @@ FailureOr<Value> tensor::getOrCreateDestination(OpBuilder &b, Location loc,
   // Create empty tensor.
   Value emptyTensor =
-      b.create<tensor::EmptyOp>(loc, mixedSizes, tensorType.getElementType());
+      tensor::EmptyOp::create(b, loc, mixedSizes, tensorType.getElementType());
   return emptyTensor;
 }
@@ -678,8 +678,8 @@ FailureOr<SmallVector<Value>> ConcatOp::decomposeOperation(OpBuilder &builder) {
     inputShapes.emplace_back(std::move(inputShape));
   }
-  Value replacement = builder.create<tensor::EmptyOp>(
-      loc, outputShape, getType().getElementType());
+  Value replacement = tensor::EmptyOp::create(builder, loc, outputShape,
+                                              getType().getElementType());
   int64_t rank = getType().getRank();
   OpFoldResult one = builder.getIndexAttr(1);
@@ -687,12 +687,12 @@ FailureOr<SmallVector<Value>> ConcatOp::decomposeOperation(OpBuilder &builder) {
   SmallVector<OpFoldResult> offsets(rank, zero);
   for (auto [index, input] : llvm::enumerate(getInputs())) {
     offsets[concatDim] = concatOffsets[index];
-    auto insertSlice = builder.create<tensor::InsertSliceOp>(
-        loc, input, replacement, offsets, inputShapes[index], strides);
+    auto insertSlice = tensor::InsertSliceOp::create(
+        builder, loc, input, replacement, offsets, inputShapes[index], strides);
     replacement = insertSlice.getResult();
   }
   if (replacement.getType() != getType()) {
-    replacement = builder.create<tensor::CastOp>(loc, getType(), replacement);
+    replacement = tensor::CastOp::create(builder, loc, getType(), replacement);
   }
   return SmallVector<Value>{replacement};
 }
@@ -723,7 +723,7 @@ ConcatOp::reifyResultShapes(OpBuilder &builder,
           builder.getIndexAttr(inferredResultType.getDimSize(i)));
     } else {
       reifiedReturnShapes[0][i] =
-          builder.create<tensor::DimOp>(init.getLoc(), init, i).getResult();
+          tensor::DimOp::create(builder, init.getLoc(), init, i).getResult();
     }
   }
@@ -823,8 +823,8 @@ struct InferConcatOperandTypes : public OpRewritePattern<ConcatOp> {
       // Use refined operand type and create cast from original operand.
       auto castOp =
-          rewriter.create<CastOp>(concatOp->getLoc(), inferredOperandType,
-                                  concatOp.getOperand(operandIdx));
+          CastOp::create(rewriter, concatOp->getLoc(), inferredOperandType,
+                         concatOp.getOperand(operandIdx));
       rewriter.modifyOpInPlace(concatOp, [=, operandIdx = operandIdx] {
         concatOp->setOperand(operandIdx, castOp->getResult(0));
       });
@@ -864,8 +864,9 @@ struct InferConcatResultType : public OpRewritePattern<ConcatOp> {
       return failure();
     }
-    auto newConcatOp = rewriter.create<ConcatOp>(
-        concatOp->getLoc(), inferredResultType, dim, concatOp->getOperands());
+    auto newConcatOp =
+        ConcatOp::create(rewriter, concatOp->getLoc(), inferredResultType, dim,
+                         concatOp->getOperands());
     rewriter.replaceOpWithNewOp<CastOp>(concatOp, concatOp.getResultType(),
                                         newConcatOp);
@@ -892,7 +893,7 @@ void DimOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                   int64_t index) {
   auto loc = result.location;
-  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
+  Value indexValue = arith::ConstantIndexOp::create(builder, loc, index);
   build(builder, result, source, indexValue);
 }
@@ -1036,10 +1037,10 @@ struct DimOfReshapeOp : public OpRewritePattern<DimOp> {
     rewriter.setInsertionPointAfter(dim);
     Location loc = dim.getLoc();
     Value extract =
-        rewriter.create<ExtractOp>(loc, reshape.getShape(), dim.getIndex());
+        ExtractOp::create(rewriter, loc, reshape.getShape(), dim.getIndex());
     if (extract.getType() != dim.getType())
       extract =
-          rewriter.create<arith::IndexCastOp>(loc, dim.getType(), extract);
+          arith::IndexCastOp::create(rewriter, loc, dim.getType(), extract);
     rewriter.replaceOp(dim, extract);
     return success();
   }
@@ -1150,8 +1151,8 @@ struct ReplaceEmptyTensorStaticShapeDims : OpRewritePattern<EmptyOp> {
     if (foldedTensorType == op.getType())
       return failure();
-    auto newOp = rewriter.create<EmptyOp>(op.getLoc(), foldedTensorType,
-                                          foldedDynamicSizes);
+    auto newOp = EmptyOp::create(rewriter, op.getLoc(), foldedTensorType,
+                                 foldedDynamicSizes);
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -1326,8 +1327,8 @@ struct ExtractFromCollapseShape : public OpRewritePattern<tensor::ExtractOp> {
       SmallVector<OpFoldResult> basis =
          llvm::map_to_vector(group, [&](int64_t d) { return sourceSizes[d]; });
-      auto delinearize = rewriter.create<affine::AffineDelinearizeIndexOp>(
-          extractOp.getLoc(), index, basis, /*hasOuterBound=*/true);
+      auto delinearize = affine::AffineDelinearizeIndexOp::create(
+          rewriter, extractOp.getLoc(), index, basis, /*hasOuterBound=*/true);
       llvm::append_range(sourceIndices, delinearize.getResults());
     }
     if (collapseOp.getReassociationIndices().empty()) {
@@ -1498,8 +1499,8 @@ struct ExtractElementFromIndexCast
     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
-    auto newExtract = rewriter.create<tensor::ExtractOp>(
-        loc, elementTy, indexCast.getIn(), extract.getIndices());
+    auto newExtract = tensor::ExtractOp::create(
+        rewriter, loc, elementTy, indexCast.getIn(), extract.getIndices());
     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
                                                     newExtract);
@@ -1736,7 +1737,7 @@ struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
     auto loc = generateOp.getLoc();
     auto newOp =
-        rewriter.create<GenerateOp>(loc, foldedTensorType, foldedDynamicSizes);
+        GenerateOp::create(rewriter, loc, foldedTensorType, foldedDynamicSizes);
     rewriter.inlineRegionBefore(generateOp.getBody(), newOp.getBody(),
                                 newOp.getBody().begin());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(generateOp,
@@ -2161,9 +2162,9 @@ struct FoldCollapseOfCastOp : public OpRewritePattern<CollapseShapeOp> {
         collapseShapeOp.getSrcMutable().assign(castOp.getSource());
       });
     } else {
-      auto newOp = rewriter.create<CollapseShapeOp>(
-          collapseShapeOp.getLoc(), newResultType, castOp.getSource(),
-          collapseShapeOp.getReassociation());
+      auto newOp = CollapseShapeOp::create(rewriter, collapseShapeOp.getLoc(),
+                                           newResultType, castOp.getSource(),
+                                           collapseShapeOp.getReassociation());
       rewriter.replaceOpWithNewOp<tensor::CastOp>(
           collapseShapeOp, collapseShapeOp.getResultType(), newOp);
     }
@@ -2240,10 +2241,10 @@ struct ConvertToStaticExpandShape : public OpRewritePattern<ExpandShapeOp> {
         newInputShape, expandOp.getSrcType().getElementType());
     auto outputType = RankedTensorType::get(
         newOutputShape, expandOp.getSrcType().getElementType());
-    auto inputCast = rewriter.create<CastOp>(expandOp.getLoc(), inputType,
-                                             expandOp.getSrc());
-    auto newExpand = rewriter.create<ExpandShapeOp>(
-        expandOp.getLoc(), outputType, inputCast.getResult(),
+    auto inputCast = CastOp::create(rewriter, expandOp.getLoc(), inputType,
+                                    expandOp.getSrc());
+    auto newExpand = ExpandShapeOp::create(
+        rewriter, expandOp.getLoc(), outputType, inputCast.getResult(),
         expandOp.getReassociationIndices(), outputOfr);
     rewriter.replaceOpWithNewOp<CastOp>(expandOp, expandOp.getType(),
                                         newExpand.getResult());
@@ -2555,10 +2556,11 @@ class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
     // Create folded extract.
     Location loc = sliceOp.getLoc();
-    Value newResult = rewriter.create<ExtractSliceOp>(
-        loc, sliceOp.getType(), castOp.getSource(), sliceOp.getOffsets(),
-        sliceOp.getSizes(), sliceOp.getStrides(), sliceOp.getStaticOffsets(),
-        sliceOp.getStaticSizes(), sliceOp.getStaticStrides());
+    Value newResult = ExtractSliceOp::create(
+        rewriter, loc, sliceOp.getType(), castOp.getSource(),
+        sliceOp.getOffsets(), sliceOp.getSizes(), sliceOp.getStrides(),
+        sliceOp.getStaticOffsets(), sliceOp.getStaticSizes(),
+        sliceOp.getStaticStrides());
     rewriter.replaceOp(sliceOp, newResult);
     return success();
   }
@@ -2709,8 +2711,8 @@ struct SliceCanonicalizer {
                        ExtractSliceOp newOp) {
     Value replacement = newOp.getResult();
     if (replacement.getType() != op.getType())
-      replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
-                                                    replacement);
+      replacement = tensor::CastOp::create(rewriter, op.getLoc(), op.getType(),
+                                           replacement);
     rewriter.replaceOp(op, replacement);
   }
 };
@@ -2978,8 +2980,8 @@ class InsertSliceOpConstantArgumentFolder final
       // the parallel case.
       if (std::is_same<InsertOpTy, ParallelInsertSliceOp>::value)
        rewriter.setInsertionPoint(insertSliceOp->getParentOp());
-      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
-                                                 sourceType, toInsert);
+      toInsert = tensor::CastOp::create(rewriter, insertSliceOp.getLoc(),
+                                        sourceType, toInsert);
     }
     rewriter.replaceOpWithNewOp<InsertOpTy>(
         insertSliceOp, toInsert, insertSliceOp.getDest(), mixedOffsets,
@@ -3075,17 +3077,18 @@ struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertOpTy> {
     if (!sliceResult.isValid)
       return failure();
-    Operation *replacement = rewriter.create<InsertOpTy>(
-        insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
-        mixedSizes, insertSliceOp.getMixedStrides());
+    Operation *replacement =
+        InsertOpTy::create(rewriter, insertSliceOp.getLoc(), src, dst,
+                           insertSliceOp.getMixedOffsets(), mixedSizes,
+                           insertSliceOp.getMixedStrides());
 
     // In the parallel case there is no result and so nothing to cast.
     bool isParallelInsert =
         std::is_same<InsertOpTy, ParallelInsertSliceOp>::value;
     if (!isParallelInsert && dst.getType() != insertSliceOp.getDestType()) {
-      replacement = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
-                                                    insertSliceOp.getDestType(),
-                                                    replacement->getResult(0));
+      replacement = tensor::CastOp::create(rewriter, insertSliceOp.getLoc(),
+                                           insertSliceOp.getDestType(),
+                                           replacement->getResult(0));
     }
     rewriter.replaceOp(insertSliceOp, replacement->getResults());
     return success();
@@ -3154,8 +3157,8 @@ struct InsertSliceOpSourceCastInserter final
     // parallel case.
     if (std::is_same<InsertOpTy, ParallelInsertSliceOp>::value)
       rewriter.setInsertionPoint(insertSliceOp->getParentOp());
-    Value cast = rewriter.create<tensor::CastOp>(
-        insertSliceOp.getLoc(), newSrcType, insertSliceOp.getSource());
+    Value cast = tensor::CastOp::create(rewriter, insertSliceOp.getLoc(),
+                                        newSrcType, insertSliceOp.getSource());
     rewriter.replaceOpWithNewOp<InsertOpTy>(
         insertSliceOp, cast, insertSliceOp.getDest(),
         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
@@ -3353,7 +3356,7 @@ void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
   // a guard to reset the insertion point of the builder after it is destroyed.
   OpBuilder::InsertionGuard guard(b);
   b.createBlock(region, region->end(), blockArgTypes, blockArgLocs);
-  b.create<tensor::YieldOp>(result.location, constantPadValue);
+  tensor::YieldOp::create(b, result.location, constantPadValue);
 }
 
 llvm::SmallBitVector PadOp::getPaddedDims() {
@@ -3407,10 +3410,11 @@ struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
         padTensorOp.getSourceMutable().assign(castOp.getSource());
       });
     } else {
-      auto newOp = rewriter.create<PadOp>(
-          padTensorOp->getLoc(), newResultType, padTensorOp.getSource(),
-          padTensorOp.getStaticLow(), padTensorOp.getStaticHigh(),
-          padTensorOp.getLow(), padTensorOp.getHigh(), padTensorOp.getNofold(),
+      auto newOp = PadOp::create(
+          rewriter, padTensorOp->getLoc(), newResultType,
+          padTensorOp.getSource(), padTensorOp.getStaticLow(),
+          padTensorOp.getStaticHigh(), padTensorOp.getLow(),
+          padTensorOp.getHigh(), padTensorOp.getNofold(),
           getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames()));
       IRMapping mapper;
       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
@@ -3439,8 +3443,8 @@ struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
                                     tensorCastOp.getDest().getType()))
       return failure();
-    auto replacementOp = rewriter.create<PadOp>(
-        padTensorOp.getLoc(), tensorCastOp.getDest().getType(),
+    auto replacementOp = PadOp::create(
+        rewriter, padTensorOp.getLoc(), tensorCastOp.getDest().getType(),
         padTensorOp.getSource(), padTensorOp.getStaticLow(),
         padTensorOp.getStaticHigh(), padTensorOp.getLow(),
         padTensorOp.getHigh(), padTensorOp.getNofold(),
@@ -3597,11 +3601,11 @@ struct FoldOrthogonalPaddings : public OpRewritePattern<PadOp> {
     // Create a new tensor::ExtractSliceOp, tensor::PadOp pair that performs
     // the two paddings in one step.
-    auto newSliceOp = rewriter.create<ExtractSliceOp>(
-        padOp.getLoc(), outerSliceOp.getSource(), newOffsets, newSizes,
-        innerSliceOp.getMixedStrides());
-    auto newPadOp = rewriter.create<PadOp>(
-        padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
+    auto newSliceOp = ExtractSliceOp::create(
+        rewriter, padOp.getLoc(), outerSliceOp.getSource(), newOffsets,
+        newSizes, innerSliceOp.getMixedStrides());
+    auto newPadOp = PadOp::create(
+        rewriter, padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
         padOp.getMixedLowPad(), newHighPad, padOp.getNofold(),
         getPrunedAttributeList(padOp, PadOp::getAttributeNames()));
     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
@@ -3697,9 +3701,9 @@ struct FoldStaticPadding : public OpRewritePattern<PadOp> {
     // Rewrite the op using the new static type.
     auto newResultType = RankedTensorType::get(
         newOutDims, padTensorOp.getType().getElementType());
-    auto newOp = rewriter.create<PadOp>(
-        padTensorOp->getLoc(), newResultType, input, staticLow, staticHigh,
-        newLows, newHighs, padTensorOp.getNofold(),
+    auto newOp = PadOp::create(
+        rewriter, padTensorOp->getLoc(), newResultType, input, staticLow,
+        staticHigh, newLows, newHighs, padTensorOp.getNofold(),
         getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames()));
 
     IRMapping mapper;
@@ -3777,9 +3781,9 @@ struct FoldConsecutiveConstantPadding : public OpRewritePattern<tensor::PadOp> {
     SmallVector<OpFoldResult> newLowPad =
         addPaddings(padOp.getMixedLowPad(), producerPad.getMixedLowPad());
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        padOp.getLoc(), padOp.getResultType(), producerPad.getSource(),
-        newLowPad, newHighPad, padOp.getNofold(),
+    auto newPadOp = tensor::PadOp::create(
+        rewriter, padOp.getLoc(), padOp.getResultType(),
+        producerPad.getSource(), newLowPad, newHighPad, padOp.getNofold(),
         getPrunedAttributeList(padOp, tensor::PadOp::getAttributeNames()));
     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
                                 newPadOp.getRegion().begin());
@@ -3803,7 +3807,7 @@ PadOp::reifyResultShapes(OpBuilder &b,
   }
   Location loc = getLoc();
   Value dim = b.createOrFold<tensor::DimOp>(
-      loc, getSource(), b.create<arith::ConstantIndexOp>(loc, i));
+      loc, getSource(), arith::ConstantIndexOp::create(b, loc, i));
   AffineExpr d0, d1, d2;
   bindDims(b.getContext(), d0, d1, d2);
@@ -4108,8 +4112,8 @@ struct FoldTensorCastProducerOp
     for (auto [oldResult, newResult] :
          llvm::zip(op->getResults(), newOp->getResults())) {
       if (newResult.getType() != oldResult.getType()) {
-        replacements.push_back(rewriter.create<tensor::CastOp>(
-            op->getLoc(), oldResult.getType(), newResult));
+        replacements.push_back(tensor::CastOp::create(
+            rewriter, op->getLoc(), oldResult.getType(), newResult));
       } else {
         replacements.push_back(newResult);
       }
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index 437bc5d00faa8..124a63281a37c 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -207,13 +207,13 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     if (isZeroInteger(newLength)) {
       hasZeroLen = true;
     } else if (!hasZeroLen) {
-      Value check = b.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq,
+      Value check = arith::CmpIOp::create(
+          b, loc, arith::CmpIPredicate::eq,
           getValueOrCreateConstantIndexOp(b, loc, newLength),
           getValueOrCreateConstantIndexOp(b, loc, zero));
       dynHasZeroLenCond =
           dynHasZeroLenCond
-              ? b.create<arith::OrIOp>(loc, check, dynHasZeroLenCond)
+              ? arith::OrIOp::create(b, loc, check, dynHasZeroLenCond)
              : check;
     }
@@ -237,7 +237,7 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   auto castResult = [&](Value val) -> Value {
     if (resultType == val.getType())
       return val;
-    return b.create<tensor::CastOp>(loc, resultType, val);
+    return tensor::CastOp::create(b, loc, resultType, val);
   };
 
   // In cases where the original data source is unused: Emit a GenerateOp and
@@ -245,10 +245,10 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // have a dimension of size 0, the semantics of which is unclear.)
   auto createGenerateOp = [&]() {
     // Create GenerateOp.
-    auto generateOp = b.create<tensor::GenerateOp>(
-        loc, resultType, dynDims,
+    auto generateOp = tensor::GenerateOp::create(
+        b, loc, resultType, dynDims,
         [&](OpBuilder &builder, Location gLoc, ValueRange indices) {
-          builder.create<tensor::YieldOp>(gLoc, padValue);
+          tensor::YieldOp::create(builder, gLoc, padValue);
         });
     return generateOp;
   };
@@ -257,10 +257,10 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // the result shape of the new SliceOp has a zero dimension.
   auto createPadOfExtractSlice = [&]() {
     // Create pad(extract_slice(x)).
-    auto newSliceOp = b.create<tensor::ExtractSliceOp>(
-        loc, padOp.getSource(), newOffsets, newLengths, newStrides);
-    auto newPadOp = b.create<PadOp>(
-        loc, Type(), newSliceOp, newLows, newHighs,
+    auto newSliceOp = tensor::ExtractSliceOp::create(
+        b, loc, padOp.getSource(), newOffsets, newLengths, newStrides);
+    auto newPadOp = PadOp::create(
+        b, loc, Type(), newSliceOp, newLows, newHighs,
         /*nofold=*/padOp.getNofold(),
         getPrunedAttributeList(padOp, PadOp::getAttributeNames()));
@@ -287,17 +287,17 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     Operation *thenOp;
     Operation *elseOp;
     Operation *sliceOp;
-    auto result = b.create<scf::IfOp>(
-        loc, dynHasZeroLenCond,
+    auto result = scf::IfOp::create(
+        b, loc, dynHasZeroLenCond,
         /*thenBuilder=*/
         [&](OpBuilder &b, Location loc) {
           thenOp = createGenerateOp();
-          b.create<scf::YieldOp>(loc, castResult(thenOp->getResult(0)));
+          scf::YieldOp::create(b, loc, castResult(thenOp->getResult(0)));
         },
         /*elseBuilder=*/
         [&](OpBuilder &b, Location loc) {
           std::tie(elseOp, sliceOp) = createPadOfExtractSlice();
-          b.create<scf::YieldOp>(loc, castResult(elseOp->getResult(0)));
+          scf::YieldOp::create(b, loc, castResult(elseOp->getResult(0)));
         });
     return TilingResult{
         {elseOp}, SmallVector<Value>(result->getResults()), {sliceOp}};
diff --git a/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp b/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
index 723731b8bed61..ba258402b61b3 100644
--- a/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
+++ b/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
@@ -165,7 +165,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
   converter.addTargetMaterialization([](OpBuilder &builder, Type resultType,
                                         ValueRange inputs,
@@ -177,7 +177,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
 }
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 47b41efbed83b..bc11e567fa2d8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -222,8 +222,8 @@ struct CollapseShapeOpInterface
           MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                           collapseShapeOp.getSrcType().getElementType(),
                           AffineMap(), bufferType.getMemorySpace());
-      buffer = rewriter.create<bufferization::ToBufferOp>(
-          op->getLoc(), memrefType, *tensorAlloc);
+      buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                 memrefType, *tensorAlloc);
     }
 
     // Result type is inferred by the builder.
@@ -349,8 +349,8 @@ struct ExpandShapeOpInterface
     if (failed(buffer))
       return failure();
-    auto memrefExpandShape = rewriter.create<memref::ExpandShapeOp>(
-        op->getLoc(), tensorResultType.getShape(), *buffer,
+    auto memrefExpandShape = memref::ExpandShapeOp::create(
+        rewriter, op->getLoc(), tensorResultType.getShape(), *buffer,
         expandShapeOp.getReassociationIndices(),
         expandShapeOp.getMixedOutputShape());
     replaceOpWithBufferizedValues(rewriter, op,
@@ -398,8 +398,8 @@ struct ExtractSliceOpInterface
         extractSliceOp.getResult(), options, state);
     if (failed(resultMemrefType))
       return failure();
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
+    Value subView = memref::SubViewOp::create(
+        rewriter, loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
         mixedOffsets, mixedSizes, mixedStrides);
 
     replaceOpWithBufferizedValues(rewriter, op, subView);
@@ -469,7 +469,7 @@ static void createStores(RewriterBase &rewriter, Location loc, int dim,
   if (dim == static_cast<int>(shape.size()) - 1) {
     for (int i = 0; i < shape.back(); ++i) {
       indices.back() = constants[i];
-      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
+      memref::StoreOp::create(rewriter, loc, *elementIt, buffer, indices);
       ++elementIt;
     }
     return;
@@ -507,8 +507,8 @@ struct FromElementsOpInterface
         bufferization::getBufferType(*tensorAlloc, options, state);
     if (failed(memrefType))
       return failure();
-    Value buffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), *memrefType, *tensorAlloc);
+    Value buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                     *memrefType, *tensorAlloc);
 
     // Case: tensor<0xelem_type>.
     if (fromElementsOp.getElements().empty()) {
@@ -518,8 +518,8 @@ struct FromElementsOpInterface
 
     // Case: tensor<elem_type>.
     if (shape.empty()) {
-      rewriter.create<memref::StoreOp>(
-          loc, fromElementsOp.getElements().front(), buffer);
+      memref::StoreOp::create(rewriter, loc,
+                              fromElementsOp.getElements().front(), buffer);
       replaceOpWithBufferizedValues(rewriter, op, buffer);
       return success();
     }
@@ -529,7 +529,7 @@ struct FromElementsOpInterface
     SmallVector<Value, 2> constants;
     constants.reserve(maxDim);
     for (int i = 0; i < maxDim; ++i)
-      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
+      constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));
 
     // Traverse all `elements` and create `memref.store` ops.
     auto elementIt = fromElementsOp.getElements().begin();
@@ -576,15 +576,15 @@ static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
   // Create linalg::MapOp.
   OpBuilder::InsertionGuard g(rewriter);
   auto linalgOp =
-      rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                     /*init=*/tensorDestination);
+      linalg::MapOp::create(rewriter, loc, tensorType, /*inputs=*/ValueRange(),
+                            /*init=*/tensorDestination);
   Block &linalgBody = linalgOp.getMapper().emplaceBlock();
 
   // Create linalg::IndexOps.
   rewriter.setInsertionPointToStart(&linalgBody);
   SmallVector<Value> indices;
   for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
-    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));
+    indices.push_back(linalg::IndexOp::create(rewriter, loc, dim));
 
   // Move over body.
   rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
@@ -644,8 +644,8 @@ struct InsertOpInterface
         getBuffer(rewriter, insertOp.getDest(), options, state);
     if (failed(destMemref))
       return failure();
-    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
-                                     *destMemref, insertOp.getIndices());
+    memref::StoreOp::create(rewriter, insertOp.getLoc(), insertOp.getScalar(),
+                            *destMemref, insertOp.getIndices());
     replaceOpWithBufferizedValues(rewriter, op, *destMemref);
     return success();
   }
@@ -713,9 +713,9 @@ struct InsertSliceOpInterface
         memref::SubViewOp::inferRankReducedResultType(
             insertSliceOp.getSourceType().getShape(), dstMemrefType,
             mixedOffsets, mixedSizes, mixedStrides);
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
-        mixedStrides);
+    Value subView =
+        memref::SubViewOp::create(rewriter, loc, subviewMemRefType, *dstMemref,
+                                  mixedOffsets, mixedSizes, mixedStrides);
 
     // Copy tensor. If this tensor.insert_slice has a matching
     // tensor.extract_slice, the copy operation will eventually fold away.
@@ -796,14 +796,14 @@ struct PadOpInterface
     for (int64_t i = 0; i < resultType.getRank(); ++i) {
       if (!resultType.isDynamicDim(i))
         continue;
-      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
+      Value srcDim = tensor::DimOp::create(rewriter, loc, padOp.getSource(), i);
       Value lowPad = toValue(mixedLowPad[i]);
       Value highPad = toValue(mixedHighPad[i]);
       AffineExpr s0, s1, s2;
       bindSymbols(op->getContext(), s0, s1, s2);
       AffineExpr sumExpr = s0 + s1 + s2;
-      Value sum = rewriter.create<affine::AffineApplyOp>(
-          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
+      Value sum = affine::AffineApplyOp::create(
+          rewriter, loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
       dynamicSizes.push_back(sum);
     }
@@ -995,9 +995,9 @@ struct ParallelInsertSliceOpInterface
             parallelInsertSliceOp.getMixedOffsets(),
             parallelInsertSliceOp.getMixedSizes(),
             parallelInsertSliceOp.getMixedStrides());
-    Value subview = rewriter.create<memref::SubViewOp>(
-        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
-        parallelInsertSliceOp.getMixedOffsets(),
+    Value subview = memref::SubViewOp::create(
+        rewriter, parallelInsertSliceOp.getLoc(), subviewMemRefType,
+        *destBuffer, parallelInsertSliceOp.getMixedOffsets(),
         parallelInsertSliceOp.getMixedSizes(),
         parallelInsertSliceOp.getMixedStrides());
@@ -1065,14 +1065,14 @@ struct SplatOpInterface
     if (options.defaultMemorySpaceFn(tensorType) != Attribute())
       return op->emitError("memory space not implemented yet");
-    auto linalgOp =
-        rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                       /*init=*/*tensorAlloc);
+    auto linalgOp = linalg::MapOp::create(rewriter, loc, tensorType,
+                                          /*inputs=*/ValueRange(),
+                                          /*init=*/*tensorAlloc);
     Block &linalgBody = linalgOp.getMapper().emplaceBlock();
 
     // Create linalg::IndexOps.
     rewriter.setInsertionPointToStart(&linalgBody);
-    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
+    linalg::YieldOp::create(rewriter, loc, splatOp.getInput());
     rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);
 
     return success();
@@ -1126,8 +1126,8 @@ struct ConcatOpInterface
     MemRefType memrefType =
         MemRefType::get(concatOp.getResultType().getShape(),
                         concatOp.getResultType().getElementType(), layout);
-    Value dstBuffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), memrefType, *tensorAlloc);
+    Value dstBuffer = bufferization::ToBufferOp::create(
+        rewriter, op->getLoc(), memrefType, *tensorAlloc);
 
     // Extract the dimension for the concat op
     uint64_t concatDim = concatOp.getDim();
@@ -1142,7 +1142,7 @@ struct ConcatOpInterface
     for (const auto &[dimIdx, dimSize] :
          llvm::enumerate(tensorType.getShape())) {
       if (dimSize == ShapedType::kDynamic) {
-        auto dimOp = rewriter.create<memref::DimOp>(loc, dstBuffer, dimIdx);
+        auto dimOp = memref::DimOp::create(rewriter, loc, dstBuffer, dimIdx);
         sizes.push_back(dimOp.getResult());
         if (dimIdx == concatDim)
           dynamicConcatDim = true;
@@ -1157,7 +1157,7 @@ struct ConcatOpInterface
     if (dynamicConcatDim) {
       // One or more operands have dynamic size, so we must accumulate the
       // offset with arith ops.
-      dynamicOffset = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+      dynamicOffset = arith::ConstantIndexOp::create(rewriter, loc, 0);
     }
 
     for (auto operand : concatOp.getInputs()) {
@@ -1174,8 +1174,9 @@ struct ConcatOpInterface
       if (dynamicConcatDim) {
         offsets[concatDim] = dynamicOffset.value();
-        dynamicSize = rewriter.create<memref::DimOp>(loc, *srcBuffer, concatDim)
-                          .getResult();
+        dynamicSize =
+            memref::DimOp::create(rewriter, loc, *srcBuffer, concatDim)
+                .getResult();
         sizes[concatDim] = dynamicSize.value();
       } else {
         sizes[concatDim] = rewriter.getIndexAttr(operandConcatDimSize);
@@ -1188,16 +1189,16 @@ struct ConcatOpInterface
           memref::SubViewOp::inferRankReducedResultType(
               operandTensorType.getShape(), dstMemrefType, offsets, sizes,
              strides);
-      Value subview = rewriter.create<memref::SubViewOp>(
-          loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
+      Value subview = memref::SubViewOp::create(
+          rewriter, loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
 
       // Copy the source buffer into the destination subview.
       if (failed(options.createMemCpy(rewriter, loc, *srcBuffer, subview)))
         return failure();
 
       if (dynamicConcatDim) {
-        dynamicOffset = rewriter.create<arith::AddIOp>(
-            loc, dynamicOffset.value(), dynamicSize.value());
+        dynamicOffset = arith::AddIOp::create(
+            rewriter, loc, dynamicOffset.value(), dynamicSize.value());
       } else {
         concatDimOffset += operandConcatDimSize;
       }
diff --git a/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
index 3c2b0ab42f7a6..670865de6031f 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
@@ -42,8 +42,9 @@ struct FoldEmptyTensorWithReshapeOp : public OpRewritePattern<ReshapeOp> {
     // Create new tensor.empty op.
     // TODO: Do not drop tensor type encoding.
-    Value emptyTensor = rewriter.create<EmptyOp>(
-        loc, resultShapes[0], reshapeOp.getResultType().getElementType());
+    Value emptyTensor =
+        EmptyOp::create(rewriter, loc, resultShapes[0],
+                        reshapeOp.getResultType().getElementType());
     if (emptyTensor.getType() != reshapeOp.getResultType()) {
       rewriter.replaceOpWithNewOp<tensor::CastOp>(
           reshapeOp, reshapeOp.getResultType(), emptyTensor);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
index dd50ae54d17cc..840b4f3121428 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
@@ -53,7 +53,7 @@ static ValueRange invertCollapseShapeIndexing(
   for (int64_t i : reassociation[dim])
     basis.push_back(reshapeSourceShape[i]);
   auto delinearized =
-      b.create<AffineDelinearizeIndexOp>(loc, indexValue, basis);
+      AffineDelinearizeIndexOp::create(b, loc, indexValue, basis);
   return delinearized->getResults();
 }
@@ -142,15 +142,15 @@ tensor::ExtractSliceFromCollapseHelper::emitLoopNestBody(
   SmallVector<Range> extractParams =
       helper.getExtractSliceParams(builder.getContext(), multiIndices);
-  Value subTileResult = builder.create<tensor::ExtractSliceOp>(
-      loc, collapseShapeOp.getSrc(), extractParams);
+  Value subTileResult = tensor::ExtractSliceOp::create(
+      builder, loc, collapseShapeOp.getSrc(), extractParams);
 
   SmallVector<Range> insertParams =
       helper.getInsertSliceParams(builder.getContext(), tileInductionVars);
 
   // Collapse the dimensions of the source slice back down.
-  Value collapsedResult = builder.create<tensor::CollapseShapeOp>(
-      loc, subTileResult, reassociationIndices);
+  Value collapsedResult = tensor::CollapseShapeOp::create(
+      builder, loc, subTileResult, reassociationIndices);
   return std::make_pair(collapsedResult, insertParams);
 }
@@ -173,8 +173,9 @@ tensor::simplifyCollapseShapeWithRankReducingExtractSlice(
   SmallVector<OpFoldResult> sizes =
       tensor::getMixedSizes(rewriter, op.getLoc(), op.getSrc());
   SmallVector<OpFoldResult> strides(sourceType.getRank(), one);
-  auto sliceOp = rewriter.create<tensor::ExtractSliceOp>(
-      op.getLoc(), info->sliceResultType, op.getSrc(), offsets, sizes, strides);
+  auto sliceOp = tensor::ExtractSliceOp::create(
+      rewriter, op.getLoc(), info->sliceResultType, op.getSrc(), offsets, sizes,
+      strides);
 
   if (!info->newReassociationIndices.has_value()) {
     rewriter.replaceOp(op, sliceOp.getResult());
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index 13de55b0672a5..d76c02af7ab16 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -112,9 +112,9 @@ TransferReadOfExtractSliceOpFolder::matchAndRewriteMaskableOp(
       extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
       indices, sourceIndices);
-  Operation *newOp = rewriter.create<vector::TransferReadOp>(
-      readOp.getLoc(), readOp.getVectorType(), extractSliceOp.getSource(),
-      sourceIndices,
+  Operation *newOp = vector::TransferReadOp::create(
+      rewriter, readOp.getLoc(), readOp.getVectorType(),
+      extractSliceOp.getSource(), sourceIndices,
       AffineMapAttr::get(expandDimsToRank(
           readOp.getPermutationMap(), extractSliceOp.getSourceType().getRank(),
           extractSliceOp.getDroppedDims())),
diff --git a/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp b/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
index bad56d4111dca..43d9d704a29ff 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
@@ -64,9 +64,10 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b, tensor::PadOp padOp,
     return padOp.getResult();
 
   // Create a new tensor::PadOp.
-  auto newPadOp = b.create<PadOp>(
-      loc, padOp.getResultType(), padOp.getSource(), newMixedLow, newMixedHigh,
-      constantPadding, padOp.getNofold(), /*attrs=*/ArrayRef<NamedAttribute>{});
+  auto newPadOp =
+      PadOp::create(b, loc, padOp.getResultType(), padOp.getSource(),
+                    newMixedLow, newMixedHigh, constantPadding,
+                    padOp.getNofold(), /*attrs=*/ArrayRef<NamedAttribute>{});
 
   // Create a tensor::ExtractSliceOp.
   // Reify the result sizes of the old tensor::PadOp.
@@ -83,10 +84,10 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b, tensor::PadOp padOp,
       offsets.push_back(b.getIndexAttr(0));
     } else {
       offsets.push_back(
-          b.create<affine::AffineApplyOp>(
-               loc, b.getAffineDimExpr(0) - b.getAffineDimExpr(1),
-               std::initializer_list<Value>{cast<Value>(newMixedLow[i]),
-                                            cast<Value>(prevLow)})
+          affine::AffineApplyOp::create(
+              b, loc, b.getAffineDimExpr(0) - b.getAffineDimExpr(1),
+              std::initializer_list<Value>{cast<Value>(newMixedLow[i]),
+                                           cast<Value>(prevLow)})
              .getResult());
     }
     // size = reified result size
@@ -99,7 +100,7 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b,
     strides.push_back(b.getIndexAttr(1));
   }
 
-  return b.create<ExtractSliceOp>(loc, newPadOp, offsets, sizes, strides)
+  return ExtractSliceOp::create(b, loc, newPadOp, offsets, sizes, strides)
       .getResult();
 }
@@ -124,7 +125,7 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b,
 
   // Create a new tensor::EmptyOp.
   Value newEmptyOp =
-      b.create<EmptyOp>(loc, newSizes, emptyOp.getType().getElementType());
+      EmptyOp::create(b, loc, newSizes, emptyOp.getType().getElementType());
 
   // Create a tensor::ExtractSliceOp.
   SmallVector<OpFoldResult> offsets(newSizes.size(), b.getIndexAttr(0));
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
index 20bb4d1caf019..e0af2f77d44b8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
@@ -76,8 +76,8 @@ struct FoldUnPaddingCollapseIntoExtract
       return rewriter.notifyMatchFailure(collapseShapeOp,
                                          "expected unpadding collapse");
-    Value unPaddedExtractSlice = rewriter.create<tensor::ExtractSliceOp>(
-        extractSliceOp.getLoc(), collapseShapeOp.getResultType(),
+    Value unPaddedExtractSlice = tensor::ExtractSliceOp::create(
+        rewriter, extractSliceOp.getLoc(), collapseShapeOp.getResultType(),
         extractSliceOp.getSource(), extractSliceOp.getMixedOffsets(),
         extractSliceOp.getMixedSizes(), extractSliceOp.getMixedStrides());
     rewriter.replaceOp(collapseShapeOp, unPaddedExtractSlice);
@@ -270,8 +270,8 @@ struct BubbleUpExpandThroughParallelCollapse
     // matches the number of dimensions of the result, then the expand_shape
     // is a no-op.
     if (newExpandReInds.size() != newExpandSizes.size()) {
-      newCollapseSrc = rewriter.create<tensor::ExpandShapeOp>(
-          loc, expandResultType, newCollapseSrc, newExpandReInds,
+      newCollapseSrc = tensor::ExpandShapeOp::create(
+          rewriter, loc, expandResultType, newCollapseSrc, newExpandReInds,
          newExpandSizes);
     }
@@ -280,8 +280,8 @@ struct BubbleUpExpandThroughParallelCollapse
     // is a no-op.
     Value replacement = newCollapseSrc;
     if (newCollapseReInds.size() != newExpandSizes.size()) {
-      replacement = rewriter.create<tensor::CollapseShapeOp>(
-          loc, newCollapseSrc, newCollapseReInds);
+      replacement = tensor::CollapseShapeOp::create(
+          rewriter, loc, newCollapseSrc, newCollapseReInds);
     }
     rewriter.replaceOp(expandOp, replacement);
     return success();
@@ -405,8 +405,8 @@ struct BubbleUpExpandShapeThroughExtractSlice
         shape, expandShapeOp.getResultType().getElementType());
 
     // Create a new ExtractSliceOp and ExpandShapeOp.
-    Value newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
-        loc, expandShapeOp.getSrc(), collapsedOffsets, collapsedSizes,
+    Value newSliceOp = tensor::ExtractSliceOp::create(
+        rewriter, loc, expandShapeOp.getSrc(), collapsedOffsets, collapsedSizes,
         collapsedStrides);
     rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
         sliceOp, resultType, newSliceOp,
@@ -735,9 +735,9 @@ struct BubbleUpCollapseShapeThroughExtractSlice
                                    groupExpandedOffsets.rend());
     }
-    Value newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
-        collapseShapeOp->getLoc(), collapseShapeOp.getSrc(), expandedOffsets,
-        expandedSizes, expandedStrides);
+    Value newSliceOp = tensor::ExtractSliceOp::create(
+        rewriter, collapseShapeOp->getLoc(), collapseShapeOp.getSrc(),
+        expandedOffsets, expandedSizes, expandedStrides);
     rewriter.replaceOpWithNewOp<tensor::CollapseShapeOp>(
         sliceOp, sliceOp.getResultType(), newSliceOp,
         collapseShapeOp.getReassociationIndices());
diff --git a/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp b/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp
index 7c9fced540adb..69e649d2eebe8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp
@@ -196,7 +196,7 @@ struct PadOpToConstant final : public OpRewritePattern<PadOp> {
                                        "tensor type not supported");
 
     if (newOp.getType() != resultType)
-      newOp = rewriter.create<tensor::CastOp>(loc, resultType, newOp);
+      newOp = tensor::CastOp::create(rewriter, loc, resultType, newOp);
 
     rewriter.replaceOp(padTensorOp, newOp);
     return success();
diff --git a/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp b/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp
index 6e3285abffbfc..838ff1f987c63 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp
@@ -47,15 +47,14 @@ struct CastOpInterface
     if (isa<UnrankedTensorType>(srcType)) {
       // Check rank.
-      Value srcRank = builder.create<RankOp>(loc, castOp.getSource());
+      Value srcRank = RankOp::create(builder, loc, castOp.getSource());
       Value resultRank =
-          builder.create<arith::ConstantIndexOp>(loc, resultType.getRank());
-      Value isSameRank = builder.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq, srcRank, resultRank);
-      builder.create<cf::AssertOp>(
-          loc, isSameRank,
-          RuntimeVerifiableOpInterface::generateErrorMessage(op,
-                                                             "rank mismatch"));
+          arith::ConstantIndexOp::create(builder, loc, resultType.getRank());
+      Value isSameRank = arith::CmpIOp::create(
+          builder, loc, arith::CmpIPredicate::eq, srcRank, resultRank);
+      cf::AssertOp::create(builder, loc, isSameRank,
+                           RuntimeVerifiableOpInterface::generateErrorMessage(
+                               op, "rank mismatch"));
     }
 
     // Check dimension sizes.
@@ -70,13 +69,13 @@ struct CastOpInterface
         continue;
 
       Value srcDimSz =
-          builder.create<DimOp>(loc, castOp.getSource(), it.index());
+          DimOp::create(builder, loc, castOp.getSource(), it.index());
       Value resultDimSz =
-          builder.create<arith::ConstantIndexOp>(loc, it.value());
-      Value isSameSz = builder.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq, srcDimSz, resultDimSz);
-      builder.create<cf::AssertOp>(
-          loc, isSameSz,
+          arith::ConstantIndexOp::create(builder, loc, it.value());
+      Value isSameSz = arith::CmpIOp::create(
+          builder, loc, arith::CmpIPredicate::eq, srcDimSz, resultDimSz);
+      cf::AssertOp::create(
+          builder, loc, isSameSz,
           RuntimeVerifiableOpInterface::generateErrorMessage(
               op, "size mismatch of dim " + std::to_string(it.index())));
     }
@@ -89,10 +88,11 @@ struct DimOpInterface
   void generateRuntimeVerification(Operation *op, OpBuilder &builder,
                                    Location loc) const {
     auto dimOp = cast<DimOp>(op);
-    Value rank = builder.create<RankOp>(loc, dimOp.getSource());
-    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
-    builder.create<cf::AssertOp>(
-        loc, generateInBoundsCheck(builder, loc, dimOp.getIndex(), zero, rank),
+    Value rank = RankOp::create(builder, loc, dimOp.getSource());
+    Value zero = arith::ConstantIndexOp::create(builder, loc, 0);
+    cf::AssertOp::create(
+        builder, loc,
+        generateInBoundsCheck(builder, loc, dimOp.getIndex(), zero, rank),
         RuntimeVerifiableOpInterface::generateErrorMessage(
             op, "index is out of bounds"));
   }
@@ -124,7 +124,7 @@ struct ExtractInsertOpInterface
     }
 
     auto indices = extractInsertOp.getIndices();
-    auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+    auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
     Value assertCond;
     for (auto i : llvm::seq<int64_t>(0, rank)) {
       Value dimOp = builder.createOrFold<tensor::DimOp>(loc, tensor, i);
@@ -134,10 +134,9 @@ struct ExtractInsertOpInterface
       assertCond =
          i > 0 ? builder.createOrFold<arith::AndIOp>(loc, assertCond, inBounds)
                : inBounds;
     }
-    builder.create<cf::AssertOp>(
-        loc, assertCond,
-        RuntimeVerifiableOpInterface::generateErrorMessage(
-            op, "out-of-bounds access"));
+    cf::AssertOp::create(builder, loc, assertCond,
+                         RuntimeVerifiableOpInterface::generateErrorMessage(
+                             op, "out-of-bounds access"));
   }
 };
@@ -152,8 +151,8 @@ struct ExtractSliceOpInterface
     // For each dimension, assert that:
     // 0 <= offset < dim_size
     // 0 <= offset + (size - 1) * stride < dim_size
-    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
-    Value one = builder.create<arith::ConstantIndexOp>(loc, 1);
+    Value zero = arith::ConstantIndexOp::create(builder, loc, 0);
+    Value one = arith::ConstantIndexOp::create(builder, loc, 1);
     for (int64_t i = 0, e = sourceType.getRank(); i < e; ++i) {
       Value offset = getValueOrCreateConstantIndexOp(
          builder, loc, extractSliceOp.getMixedOffsets()[i]);
@@ -167,21 +166,21 @@ struct ExtractSliceOpInterface
           loc, extractSliceOp.getSource(), i);
       Value offsetInBounds =
          generateInBoundsCheck(builder, loc, offset, zero, dimSize);
-      builder.create<cf::AssertOp>(
-          loc, offsetInBounds,
+      cf::AssertOp::create(
+          builder, loc, offsetInBounds,
          RuntimeVerifiableOpInterface::generateErrorMessage(
              op, "offset " + std::to_string(i) + " is out-of-bounds"));
 
       // Verify that slice does not run out-of-bounds.
-      Value sizeMinusOne = builder.create<arith::SubIOp>(loc, size, one);
+      Value sizeMinusOne = arith::SubIOp::create(builder, loc, size, one);
       Value sizeMinusOneTimesStride =
-          builder.create<arith::MulIOp>(loc, sizeMinusOne, stride);
+          arith::MulIOp::create(builder, loc, sizeMinusOne, stride);
       Value lastPos =
-          builder.create<arith::AddIOp>(loc, offset, sizeMinusOneTimesStride);
+          arith::AddIOp::create(builder, loc, offset, sizeMinusOneTimesStride);
       Value lastPosInBounds =
           generateInBoundsCheck(builder, loc, lastPos, zero, dimSize);
-      builder.create<cf::AssertOp>(
-          loc, lastPosInBounds,
+      cf::AssertOp::create(
+          builder, loc, lastPosInBounds,
           RuntimeVerifiableOpInterface::generateErrorMessage(
               op, "extract_slice runs out-of-bounds along dimension " +
                       std::to_string(i)));
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp
index d50d7c62b789c..b6fdba360deea 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp
@@ -59,8 +59,8 @@ struct InsertSliceLikeOpSubsetInsertionOpInterface
   Value buildSubsetExtraction(Operation *op, OpBuilder &builder,
                               Location loc) const {
     auto insertSliceOp = cast<OpTy>(op);
-    auto extractOp = builder.create<tensor::ExtractSliceOp>(
-        loc, insertSliceOp.getSourceType(), insertSliceOp.getDest(),
+    auto extractOp = tensor::ExtractSliceOp::create(
+        builder, loc, insertSliceOp.getSourceType(), insertSliceOp.getDest(),
         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
         insertSliceOp.getMixedStrides());
     return extractOp.getResult();
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
index 6df401d4c6962..bdbafa5b01d07 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
@@ -50,9 +50,9 @@ FailureOr<TilingResult> tensor::replaceExtractSliceWithTiledProducer(
                                     builder.getIndexAttr(0));
   SmallVector<OpFoldResult> strides(sliceOp.getSourceType().getRank(),
                                     builder.getIndexAttr(1));
-  auto newSliceOp = builder.create<tensor::ExtractSliceOp>(
-      sliceOp.getLoc(), sliceOp.getType(), tiledResult->tiledValues[0],
-      offsets, sliceOp.getMixedSizes(), strides);
+  auto newSliceOp = tensor::ExtractSliceOp::create(
+      builder, sliceOp.getLoc(), sliceOp.getType(),
+      tiledResult->tiledValues[0], offsets, sliceOp.getMixedSizes(), strides);
   tiledResult->tiledValues[0] = newSliceOp;
 }
diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
index 3f6258b5e4d43..fd75e1090174b 100644
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -56,7 +56,7 @@ PadOp mlir::tensor::createPadHighOp(RankedTensorType resType, Value source,
     high[idx] = affine::makeComposedFoldedAffineApply(b, loc, d0 - d1,
                                                       {outDim, sourceDim});
   }
-  return b.create<PadOp>(loc, resType, source, low, high, pad, nofold);
+  return PadOp::create(b, loc, resType, source, low, high, pad, nofold);
 }
 
 SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
@@ -67,7 +67,7 @@ SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
   for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
     if (en.value() == ShapedType::kDynamic)
       dynamicDims.push_back(
-          b.create<tensor::DimOp>(loc, rankedTensor, en.index()));
+          tensor::DimOp::create(b, loc, rankedTensor, en.index()));
   }
   return dynamicDims;
 }
@@ -119,7 +119,7 @@ mlir::tensor::dropGivenUnitDims(OpBuilder &b, Location loc, Value src,
     reassocMaps.emplace_back(llvm::make_range(seq.begin(), seq.end()));
     nextDimToGroup = setBit + 1;
   }
-  return b.create<tensor::CollapseShapeOp>(loc, src, reassocMaps);
+  return tensor::CollapseShapeOp::create(b, loc, src, reassocMaps);
 }
 
 bool mlir::tensor::isCastLikeInsertSliceOp(InsertSliceOp op) {
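
Note on the pattern applied throughout this patch: every hunk performs the same mechanical rewrite, replacing `builder.create<OpTy>(loc, args...)` with the static `OpTy::create(builder, loc, args...)`; only the call spelling changes, the build arguments after the location are identical. The snippet below is a minimal sketch of the two spellings and is not part of the patch; the helper `buildStaticEmpty` is hypothetical.

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper (illustration only): creates a tensor.empty op for a
// statically shaped tensor type, written in the style this patch adopts.
static Value buildStaticEmpty(OpBuilder &b, Location loc,
                              RankedTensorType type) {
  // Old spelling, removed throughout the diff:
  //   b.create<tensor::EmptyOp>(loc, type.getShape(), type.getElementType());
  // New spelling: a static `create` on the op class, with the builder passed
  // as the first argument and the location second.
  return tensor::EmptyOp::create(b, loc, type.getShape(),
                                 type.getElementType());
}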