Merged
300 changes: 153 additions & 147 deletions mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Large diffs are not rendered by default.
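The LinalgOps.cpp diff is collapsed here, but it applies the same mechanical rewrite as every hunk below: the member-template builder call rewriter.create&lt;OpTy&gt;(loc, ...) becomes the static OpTy::create(rewriter, loc, ...), i.e. the builder moves to the first argument and everything else stays put. In the hunks that follow, each removed run of lines is shown directly above its replacement. A minimal before/after sketch (the constant op is only an illustration, not a line from that file):

// Old form: op created through the builder's member template.
auto oldStyle = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/0);
// New form used throughout this PR: static create, builder passed first.
auto newStyle = arith::ConstantIndexOp::create(rewriter, loc, /*value=*/0);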

30 changes: 16 additions & 14 deletions mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -672,9 +672,10 @@ static Operation *replaceForAllWithNewSignature(
newOuts.push_back(outputs[resultNumber]);

// Create new scf.forall op
auto newforallOp = rewriter.create<scf::ForallOp>(
loc, forallOp.getMixedLowerBound(), forallOp.getMixedUpperBound(),
forallOp.getMixedStep(), newOuts, forallOp.getMapping());
auto newforallOp = scf::ForallOp::create(
rewriter, loc, forallOp.getMixedLowerBound(),
forallOp.getMixedUpperBound(), forallOp.getMixedStep(), newOuts,
forallOp.getMapping());
rewriter.eraseBlock(newforallOp.getBody());
newforallOp.getRegion().takeBody(forallOp.getRegion());
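As a side note on the hunk above (not part of the change itself): the default scf::ForallOp builder materializes its own entry block, which is why the freshly created loop's body is erased before the original region is taken over wholesale. Sketched with the new API and the hunk's names:

// Rebuild the loop with the extra shared outputs...
auto rebuilt = scf::ForallOp::create(
    rewriter, loc, forallOp.getMixedLowerBound(),
    forallOp.getMixedUpperBound(), forallOp.getMixedStep(), newOuts,
    forallOp.getMapping());
// ...then discard its auto-created body and reuse the original one.
rewriter.eraseBlock(rebuilt.getBody());
rebuilt.getRegion().takeBody(forallOp.getRegion());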

@@ -699,8 +700,8 @@ static Operation *replaceForAllWithNewSignature(
Value src = tileAndFuseResult.tiledValues[0];
Value dst = newforallOp.getRegionIterArgs().back();
SmallVector<OpFoldResult> strides(offsets.size(), rewriter.getIndexAttr(1));
rewriter.create<tensor::ParallelInsertSliceOp>(firstYieldOp->getLoc(), src,
dst, offsets, sizes, strides);
tensor::ParallelInsertSliceOp::create(rewriter, firstYieldOp->getLoc(), src,
dst, offsets, sizes, strides);

for (auto result : llvm::enumerate(forallOp.getResults())) {
rewriter.replaceAllUsesWith(result.value(),
@@ -3410,12 +3411,12 @@ transform::TileUsingForOp::apply(transform::TransformRewriter &rewriter,
for (auto [ofrIdx, ofr] : llvm::enumerate(getMixedSizes())) {
if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr)) {
if (scalableSizes[ofrIdx]) {
auto val = b.create<arith::ConstantIndexOp>(
getLoc(), cast<IntegerAttr>(attr).getInt());
auto val = arith::ConstantIndexOp::create(
b, getLoc(), cast<IntegerAttr>(attr).getInt());
Value vscale =
b.create<vector::VectorScaleOp>(getLoc(), b.getIndexType());
vector::VectorScaleOp::create(b, getLoc(), b.getIndexType());
sizes.push_back(
b.create<arith::MulIOp>(getLoc(), val, vscale).getResult());
arith::MulIOp::create(b, getLoc(), val, vscale).getResult());
} else {
sizes.push_back(attr);
}
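The scalable branch above turns a static tile size into a runtime quantity by multiplying it with vector.vscale; restated as a stand-alone sketch with the hunk's names (b, attr, getLoc()):

// Materialize the static size as an index constant.
auto base = arith::ConstantIndexOp::create(
    b, getLoc(), cast<IntegerAttr>(attr).getInt());
// vector.vscale is the runtime multiple of the scalable vector length.
Value vscale = vector::VectorScaleOp::create(b, getLoc(), b.getIndexType());
// Effective tile size = static size * vscale.
sizes.push_back(arith::MulIOp::create(b, getLoc(), base, vscale).getResult());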
@@ -3626,9 +3627,10 @@ static scf::ForallOp normalizeForallLoopOp(RewriterBase &rewriter,
SmallVector<OpFoldResult> normalizedSteps(normalizedUbs.size(),
rewriter.getIndexAttr(1));

auto normalizedForallOp = rewriter.create<scf::ForallOp>(
loc, normalizedLbs, normalizedUbs, normalizedSteps, loop.getOutputs(),
loop.getMapping(), [](OpBuilder &, Location, ValueRange) {});
auto normalizedForallOp = scf::ForallOp::create(
rewriter, loc, normalizedLbs, normalizedUbs, normalizedSteps,
loop.getOutputs(), loop.getMapping(),
[](OpBuilder &, Location, ValueRange) {});

auto normalizedLoopIvs = normalizedForallOp.getInductionVars();
OpBuilder::InsertionGuard g(rewriter);
@@ -4131,8 +4133,8 @@ DiagnosedSilenceableFailure doit(RewriterBase &rewriter, OpTy target,
target->template getParentOfType<scf::InParallelOp>());
}

Value extracted = rewriter.create<tensor::ExtractSliceOp>(
target.getLoc(), target.getDest(), target.getMixedOffsets(),
Value extracted = tensor::ExtractSliceOp::create(
rewriter, target.getLoc(), target.getDest(), target.getMixedOffsets(),
target.getMixedSizes(), target.getMixedStrides());
Value copied = rewriter
.create<linalg::CopyOp>(target.getLoc(),
@@ -184,9 +184,9 @@ struct SoftmaxOpInterface
getBuffer(rewriter, softmaxOp.getOutput(), options, state);
if (failed(outputBuffer))
return failure();
rewriter.create<linalg::SoftmaxOp>(softmaxOp.getLoc(),
/*result=*/TypeRange(), *inputBuffer,
*outputBuffer, softmaxOp.getDimension());
linalg::SoftmaxOp::create(rewriter, softmaxOp.getLoc(),
/*result=*/TypeRange(), *inputBuffer,
*outputBuffer, softmaxOp.getDimension());
replaceOpWithBufferizedValues(rewriter, op, *outputBuffer);
return success();
}
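The bufferization above replaces the tensor-based linalg.softmax with a memref-based clone; a condensed sketch of the whole flow under the hunk's assumptions (softmaxOp, rewriter, options, state in scope):

// Resolve the buffers behind the tensor operands.
FailureOr<Value> inputBuffer =
    getBuffer(rewriter, softmaxOp.getInput(), options, state);
FailureOr<Value> outputBuffer =
    getBuffer(rewriter, softmaxOp.getOutput(), options, state);
if (failed(inputBuffer) || failed(outputBuffer))
  return failure();
// Re-create the op with no tensor results; it now writes into *outputBuffer.
linalg::SoftmaxOp::create(rewriter, softmaxOp.getLoc(), /*result=*/TypeRange(),
                          *inputBuffer, *outputBuffer,
                          softmaxOp.getDimension());
// Uses of the old tensor result now read from the output buffer.
replaceOpWithBufferizedValues(rewriter, op, *outputBuffer);
return success();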
171 changes: 86 additions & 85 deletions mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp

Large diffs are not rendered by default.

79 changes: 40 additions & 39 deletions mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
@@ -37,8 +37,8 @@ static Value createInserts(RewriterBase &rewriter, Location loc, int dim,
if (dim == static_cast<int>(shape.size()) - 1) {
for (int i = 0; i < shape.back(); ++i) {
indices.back() = constants[i];
destination = rewriter.create<tensor::InsertOp>(loc, *elementIt,
destination, indices);
destination = tensor::InsertOp::create(rewriter, loc, *elementIt,
destination, indices);
++elementIt;
}
return destination;
@@ -65,27 +65,27 @@ static void createMemcpy(OpBuilder &b, Location loc, Value tensorSource,
MaterializeInDestination: {
// Note: This is the preferred way of memcpy'ing because no layout map
// and/or memory space must be specified for the source.
auto materializeOp = b.create<bufferization::MaterializeInDestinationOp>(
loc, tensorSource, memrefDest);
auto materializeOp = bufferization::MaterializeInDestinationOp::create(
b, loc, tensorSource, memrefDest);
materializeOp.setWritable(true);
} break;
case linalg::BufferizeToAllocationOptions::MemcpyOp::MemrefCopy: {
// TODO: Support custom memory space on source.
// We do not know the layout map of the source yet, so use a fully dynamic
// layout for best compatibility.
Value toBuffer = b.create<bufferization::ToBufferOp>(
loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
Value toBuffer = bufferization::ToBufferOp::create(
b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
tensorSource, /*readOnly=*/true);
b.create<memref::CopyOp>(loc, toBuffer, memrefDest);
memref::CopyOp::create(b, loc, toBuffer, memrefDest);
} break;
case linalg::BufferizeToAllocationOptions::MemcpyOp::LinalgCopy: {
// TODO: Support custom memory space on source.
// We do not know the layout map of the source yet, so use a fully dynamic
// layout for best compatibility.
Value toBuffer = b.create<bufferization::ToBufferOp>(
loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
Value toBuffer = bufferization::ToBufferOp::create(
b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
tensorSource, /*readOnly=*/true);
b.create<linalg::CopyOp>(loc, toBuffer, memrefDest);
linalg::CopyOp::create(b, loc, toBuffer, memrefDest);
} break;
};
}
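createMemcpy above exposes three lowering strategies for the same tensor-to-memref copy. The trade-off, restated: bufferization.materialize_in_destination needs no layout or memory-space choice for the source, whereas the memref.copy and linalg.copy paths first force the source tensor into a buffer with a fully dynamic layout for maximum compatibility. A minimal sketch of the memref.copy path with the hunk's names:

// Read-only view of the source tensor as a memref with a fully dynamic
// layout, so any actual source layout is acceptable.
Value srcBuffer = bufferization::ToBufferOp::create(
    b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
    tensorSource, /*readOnly=*/true);
// Plain element-wise buffer copy into the destination.
memref::CopyOp::create(b, loc, srcBuffer, memrefDest);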
@@ -120,15 +120,15 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
->materializeConstant(rewriter, constYieldedValue,
yieldedValue.getType(), yieldedValue.getLoc())
->getResult(0);
auto fillOp = rewriter.create<linalg::FillOp>(loc, ValueRange(fillValue),
ValueRange(dest));
auto fillOp = linalg::FillOp::create(rewriter, loc, ValueRange(fillValue),
ValueRange(dest));
return fillOp;
}

if (invariantYieldedValue) {
// Padding with an invariant value.
auto fillOp = rewriter.create<linalg::FillOp>(loc, ValueRange(yieldedValue),
ValueRange(dest));
auto fillOp = linalg::FillOp::create(
rewriter, loc, ValueRange(yieldedValue), ValueRange(dest));
return fillOp;
}

Expand All @@ -137,16 +137,16 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
utils::IteratorType::parallel);
SmallVector<AffineMap> indexingMaps(
1, rewriter.getMultiDimIdentityMap(resultType.getRank()));
auto genericOp = rewriter.create<linalg::GenericOp>(
loc, resultType, /*inputs=*/ValueRange(),
auto genericOp = linalg::GenericOp::create(
rewriter, loc, resultType, /*inputs=*/ValueRange(),
/*outputs=*/ValueRange{dest}, /*indexingMaps=*/
indexingMaps, iteratorTypes);
Block *body = rewriter.createBlock(&genericOp->getRegion(0), {},
resultType.getElementType(), loc);
rewriter.setInsertionPointToStart(body);
SmallVector<Value> bbArgReplacements;
for (int64_t i = 0; i < resultType.getRank(); ++i)
bbArgReplacements.push_back(rewriter.create<linalg::IndexOp>(loc, i));
bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
rewriter.mergeBlocks(padOp.getBody(), body, bbArgReplacements);

// Update terminator.
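movePaddingToFillOrGenericOp above distinguishes two cases. If the padding value is constant or otherwise invariant, a single linalg.fill of the destination is emitted; otherwise the tensor.pad body is inlined into a parallel linalg.generic whose linalg.index results stand in for the pad op's index block arguments. A sketch of the simple case with the hunk's names:

// Invariant padding value: filling the destination reproduces the padding.
auto fillOp = linalg::FillOp::create(rewriter, loc, ValueRange(yieldedValue),
                                     ValueRange(dest));
return fillOp;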
@@ -179,8 +179,8 @@ static SmallVector<Value> reifyOrComputeDynamicSizes(OpBuilder &b,
for (int64_t i = 0; i < tensorType.getRank(); ++i) {
if (tensorType.isDynamicDim(i))
dynSizes.push_back(
b.create<DimOp>(value.getLoc(), value,
b.create<arith::ConstantIndexOp>(value.getLoc(), i)));
DimOp::create(b, value.getLoc(), value,
arith::ConstantIndexOp::create(b, value.getLoc(), i)));
}
return dynSizes;
}
@@ -201,15 +201,15 @@ createAllocationForTensor(RewriterBase &rewriter, Location loc, Value value,
Value alloc;
if (options.allocOp ==
linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloc) {
alloc = rewriter.create<memref::AllocOp>(loc, memrefType, dynamicSizes);
alloc = memref::AllocOp::create(rewriter, loc, memrefType, dynamicSizes);
if (options.emitDealloc) {
// Place deallocation at the end of the block.
rewriter.setInsertionPoint(rewriter.getInsertionBlock()->getTerminator());
rewriter.create<memref::DeallocOp>(loc, alloc);
memref::DeallocOp::create(rewriter, loc, alloc);
}
} else if (options.allocOp ==
linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloca) {
alloc = rewriter.create<memref::AllocaOp>(loc, memrefType, dynamicSizes);
alloc = memref::AllocaOp::create(rewriter, loc, memrefType, dynamicSizes);
// No dealloc is needed.
}
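createAllocationForTensor above picks between memref.alloc and memref.alloca: the alloc path can emit a matching memref.dealloc just before the enclosing block's terminator, while the alloca path needs no cleanup. Sketch of the alloc-plus-dealloc branch with the hunk's names:

alloc = memref::AllocOp::create(rewriter, loc, memrefType, dynamicSizes);
if (options.emitDealloc) {
  // Free the buffer at the end of the block, right before its terminator.
  rewriter.setInsertionPoint(rewriter.getInsertionBlock()->getTerminator());
  memref::DeallocOp::create(rewriter, loc, alloc);
}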

@@ -243,14 +243,14 @@ Value linalg::bufferizeToAllocation(
getMixedSizes(rewriter, loc, padOp.getSource());
SmallVector<OpFoldResult> strides(padOp.getResultType().getRank(),
rewriter.getIndexAttr(1));
Value subview = rewriter.create<memref::SubViewOp>(
loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
Value subview = memref::SubViewOp::create(
rewriter, loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
createMemcpy(rewriter, loc, padOp.getSource(), subview, options);

// Create bufferization.to_tensor with "restrict" and "writable". The returned
// tensor is a new buffer allocation, so it does not alias with any buffer.
Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
Value toTensorOp = bufferization::ToTensorOp::create(
rewriter, loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
/*writable=*/true);
rewriter.replaceOp(padOp, toTensorOp);
return alloc;
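For tensor.pad, bufferizeToAllocation above allocates a buffer of the padded shape, copies the unpadded source into a subview placed at the low-padding offsets, and then re-enters tensor land through bufferization.to_tensor marked restrict and writable, which is legal because a fresh allocation cannot alias any other buffer. Condensed sketch with the hunk's names:

// The source occupies the subview that starts at the low-padding offsets.
Value subview = memref::SubViewOp::create(
    rewriter, loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
createMemcpy(rewriter, loc, padOp.getSource(), subview, options);
// Hand the buffer back as a tensor; restrict/writable are safe on a fresh
// allocation.
Value replacement = bufferization::ToTensorOp::create(
    rewriter, loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
    /*writable=*/true);
rewriter.replaceOp(padOp, replacement);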
@@ -338,8 +338,9 @@ Value linalg::bufferizeToAllocation(

// Create bufferization.to_tensor with "restrict" and "writable". The returned
// tensor is a new buffer allocation, so it does not alias with any buffer.
Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
loc, allocTensorOp.getResult().getType(), alloc, /*restrict=*/true,
Value toTensorOp = bufferization::ToTensorOp::create(
rewriter, loc, allocTensorOp.getResult().getType(), alloc,
/*restrict=*/true,
/*writable=*/true);
rewriter.replaceOp(allocTensorOp, toTensorOp);
return alloc;
@@ -354,7 +355,7 @@ FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
auto shape = tensorType.getShape();

// Create tensor.empty.
auto emptyOp = rewriter.create<EmptyOp>(loc, tensorType, ValueRange());
auto emptyOp = EmptyOp::create(rewriter, loc, tensorType, ValueRange());

// Case: tensor<elem_type>.
if (shape.empty()) {
@@ -369,7 +370,7 @@ FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
SmallVector<Value, 2> constants;
constants.reserve(maxDim);
for (int i = 0; i < maxDim; ++i)
constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));

// Traverse all elements and create tensor.insert ops.
auto elementIt = fromElementsOp.getElements().begin();
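Together with createInserts further up, the tensor.from_elements rewrite is: create a tensor.empty destination, pre-build one arith.constant index per coordinate value that will be needed, then chain one tensor.insert per element into that destination. Sketch of the setup with the hunk's names:

// Destination of the destination-passing-style rewrite; from_elements results
// are statically shaped, so no dynamic sizes are required.
auto emptyOp = EmptyOp::create(rewriter, loc, tensorType, ValueRange());
// Pre-materialize the index constants 0 .. maxDim-1 used as insert coordinates.
SmallVector<Value, 2> constants;
constants.reserve(maxDim);
for (int i = 0; i < maxDim; ++i)
  constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));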
@@ -394,24 +395,24 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
RankedTensorType tensorType = cast<RankedTensorType>(generateOp.getType());

// Create tensor.empty.
auto emptyOp =
rewriter.create<EmptyOp>(loc, tensorType, generateOp.getDynamicExtents());
auto emptyOp = EmptyOp::create(rewriter, loc, tensorType,
generateOp.getDynamicExtents());

// Create linalg.generic.
SmallVector<utils::IteratorType> iteratorTypes(tensorType.getRank(),
utils::IteratorType::parallel);
SmallVector<AffineMap> indexingMaps(
1, rewriter.getMultiDimIdentityMap(tensorType.getRank()));
auto genericOp = rewriter.create<linalg::GenericOp>(
loc, tensorType, /*inputs=*/ValueRange(),
auto genericOp = linalg::GenericOp::create(
rewriter, loc, tensorType, /*inputs=*/ValueRange(),
/*outputs=*/ValueRange{emptyOp.getResult()}, /*indexingMaps=*/
indexingMaps, iteratorTypes);
Block *body = rewriter.createBlock(&genericOp->getRegion(0), {},
tensorType.getElementType(), loc);
rewriter.setInsertionPointToStart(body);
SmallVector<Value> bbArgReplacements;
for (int64_t i = 0; i < tensorType.getRank(); ++i)
bbArgReplacements.push_back(rewriter.create<linalg::IndexOp>(loc, i));
bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
rewriter.mergeBlocks(&generateOp.getBody().front(), body, bbArgReplacements);

// Update terminator.
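The tensor.generate rewrite above follows the same generic-with-index-ops recipe as the pad rewrite: a tensor.empty destination, a parallel linalg.generic with an identity indexing map, and linalg.index results substituted for the generate op's index block arguments before its body is merged in. Sketch of the substitution step with the hunk's names:

// One linalg.index per dimension replaces the corresponding index block
// argument of the tensor.generate body.
SmallVector<Value> bbArgReplacements;
for (int64_t i = 0; i < tensorType.getRank(); ++i)
  bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
rewriter.mergeBlocks(&generateOp.getBody().front(), body, bbArgReplacements);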
@@ -450,13 +451,13 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
llvm::all_of(padOp.getMixedHighPad(), isZeroInteger)) {
using bufferization::AllocTensorOp;
Value allocated =
rewriter.create<AllocTensorOp>(loc, resultType, dynamicSizes);
AllocTensorOp::create(rewriter, loc, resultType, dynamicSizes);
auto copyOp = rewriter.replaceOpWithNewOp<linalg::CopyOp>(
padOp, padOp.getSource(), allocated);
return copyOp.getOperation();
}

Value empty = rewriter.create<EmptyOp>(loc, resultType, dynamicSizes);
Value empty = EmptyOp::create(rewriter, loc, resultType, dynamicSizes);
// Create linalg.fill or linalg.generic.
Operation *fillOp = movePaddingToFillOrGenericOp(rewriter, loc, padOp, empty);
rewriter.setInsertionPointAfter(fillOp);
@@ -567,8 +568,8 @@ Value linalg::bufferizeToAllocation(
createMemcpy(rewriter, op->getLoc(), operand->get(), alloc, options);
}
rewriter.modifyOpInPlace(op, [&]() {
auto toTensorOp = rewriter.create<ToTensorOp>(
op->getLoc(), operand->get().getType(), alloc);
auto toTensorOp = ToTensorOp::create(rewriter, op->getLoc(),
operand->get().getType(), alloc);
operand->set(toTensorOp);
if (options.bufferizeDestinationOnly) {
rewriter.modifyOpInPlace(toTensorOp, [&]() {
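The last hunk rewires a single operand in place: after the memcpy into the new allocation, the buffer is wrapped in a bufferization.to_tensor of the operand's original tensor type and the operand is redirected to it, so the (still tensor-based) op keeps type-checking. Condensed sketch with the hunk's names, omitting the bufferizeDestinationOnly branch:

rewriter.modifyOpInPlace(op, [&]() {
  // Wrap the fresh buffer so the tensor-based op can consume it.
  auto toTensorOp = ToTensorOp::create(rewriter, op->getLoc(),
                                       operand->get().getType(), alloc);
  operand->set(toTensorOp);
});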