Merged
Changes from all commits
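This PR mechanically migrates op construction from the template method on the builder to the static `create` hook on each op class. A minimal sketch of the two equivalent spellings, using `arith::AddIOp` purely as an illustration (it is not one of the ops touched below):

```cpp
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Pre-migration spelling: the template method on OpBuilder names the op type.
static Value addOld(OpBuilder &b, Location loc, Value lhs, Value rhs) {
  return b.create<arith::AddIOp>(loc, lhs, rhs).getResult();
}

// Post-migration spelling: the static hook on the op takes the builder as
// its first argument; the trailing arguments are unchanged.
static Value addNew(OpBuilder &b, Location loc, Value lhs, Value rhs) {
  return arith::AddIOp::create(b, loc, lhs, rhs).getResult();
}
```

The rewrite is behavior-preserving: only the call syntax changes, which is why the hunks below differ purely in spelling and indentation.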
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/AMX/IR/AMXDialect.cpp
@@ -96,9 +96,8 @@ static Value getStride(Location loc, MemRefType mType, Value base,
     MemRefDescriptor memrefDescriptor(base);
     auto attr = rewriter.getI64IntegerAttr(bytes);
     Value scale = LLVM::ConstantOp::create(rewriter, loc, llvmInt64Type, attr);
-    return rewriter
-        .create<LLVM::MulOp>(loc, llvmInt64Type, scale,
-                             memrefDescriptor.stride(rewriter, loc, preLast))
+    return LLVM::MulOp::create(rewriter, loc, llvmInt64Type, scale,
+                               memrefDescriptor.stride(rewriter, loc, preLast))
         .getResult();
   }
   // Use direct constant for static stride.
@@ -688,8 +688,8 @@ FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
   if (failed(bufferType))
     return failure();
   ensureToBufferOpIsValid(value, *bufferType);
-  return rewriter
-      .create<bufferization::ToBufferOp>(value.getLoc(), *bufferType, value)
+  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
+                                           *bufferType, value)
       .getResult();
 }
 
@@ -772,9 +772,8 @@ FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
 
   // Default bufferallocation via AllocOp.
   if (bufferAlignment != 0)
-    return b
-        .create<memref::AllocOp>(loc, type, dynShape,
-                                 b.getI64IntegerAttr(bufferAlignment))
+    return memref::AllocOp::create(b, loc, type, dynShape,
+                                   b.getI64IntegerAttr(bufferAlignment))
         .getResult();
   return memref::AllocOp::create(b, loc, type, dynShape).getResult();
 }
20 changes: 10 additions & 10 deletions mlir/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
@@ -482,10 +482,10 @@ func::FuncOp mlir::bufferization::buildDeallocationLibraryFunction(
 
         // Build the first for loop that computes aliasing with retained
         // memrefs.
-        Value noRetainAlias =
-            builder
-                .create<scf::ForOp>(
-                    loc, c0, toRetainSize, c1, trueValue,
+        Value
+        noRetainAlias =
+            scf::ForOp::create(
+                builder, loc, c0, toRetainSize, c1, trueValue,
                 [&](OpBuilder &builder, Location loc, Value i,
                     ValueRange iterArgs) {
                   Value retainValue = memref::LoadOp::create(
@@ -512,14 +512,14 @@ func::FuncOp mlir::bufferization::buildDeallocationLibraryFunction(
                       builder, loc, iterArgs[0], doesntAlias);
                   scf::YieldOp::create(builder, loc, yieldValue);
                 })
-                .getResult(0);
+            .getResult(0);
 
         // Build the second for loop that adds aliasing with previously
         // deallocated memrefs.
-        Value noAlias =
-            builder
-                .create<scf::ForOp>(
-                    loc, c0, outerIter, c1, noRetainAlias,
+        Value
+        noAlias =
+            scf::ForOp::create(
+                builder, loc, c0, outerIter, c1, noRetainAlias,
                 [&](OpBuilder &builder, Location loc, Value i,
                     ValueRange iterArgs) {
                   Value prevDeallocValue = memref::LoadOp::create(
@@ -531,7 +531,7 @@ func::FuncOp mlir::bufferization::buildDeallocationLibraryFunction(
                       builder, loc, iterArgs[0], doesntAlias);
                   scf::YieldOp::create(builder, loc, yieldValue);
                 })
-                .getResult(0);
+            .getResult(0);
 
         Value shouldDealoc = arith::AndIOp::create(builder, loc, noAlias, cond);
         memref::StoreOp::create(builder, loc, shouldDealoc, deallocCondsMemref,
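Both loops above thread a single boolean through the loop's iter_args and read it back with `.getResult(0)`. A hedged sketch of that pattern in isolation, assuming `builder`, `loc`, and index values `lb`/`ub`/`step` plus i1 values `init`/`flag` are in scope:

```cpp
// AND-reduce a flag across iterations; the body stands in for the aliasing
// checks performed by the deallocation library function above.
scf::ForOp loop = scf::ForOp::create(
    builder, loc, lb, ub, step, /*iterArgs=*/ValueRange{init},
    [&](OpBuilder &b, Location loc, Value iv, ValueRange iterArgs) {
      Value next = arith::AndIOp::create(b, loc, iterArgs[0], flag);
      scf::YieldOp::create(b, loc, next);
    });
Value reduced = loop.getResult(0); // Final value of the iter_arg.
```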
@@ -750,17 +750,16 @@ Value BufferDeallocation::materializeMemrefWithGuaranteedOwnership(
 
   // Insert a runtime check and only clone if we still don't have ownership at
   // runtime.
-  Value maybeClone = builder
-                         .create<scf::IfOp>(
-                             memref.getLoc(), condition,
-                             [&](OpBuilder &builder, Location loc) {
-                               scf::YieldOp::create(builder, loc, newMemref);
-                             },
-                             [&](OpBuilder &builder, Location loc) {
-                               Value clone = bufferization::CloneOp::create(
-                                   builder, loc, newMemref);
-                               scf::YieldOp::create(builder, loc, clone);
-                             })
+  Value maybeClone = scf::IfOp::create(
+                         builder, memref.getLoc(), condition,
+                         [&](OpBuilder &builder, Location loc) {
+                           scf::YieldOp::create(builder, loc, newMemref);
+                         },
+                         [&](OpBuilder &builder, Location loc) {
+                           Value clone = bufferization::CloneOp::create(
+                               builder, loc, newMemref);
+                           scf::YieldOp::create(builder, loc, clone);
+                         })
                          .getResult(0);
   Value trueVal = buildBoolValue(builder, memref.getLoc(), true);
   state.updateOwnership(maybeClone, trueVal);
10 changes: 4 additions & 6 deletions mlir/lib/Dialect/GPU/Transforms/ShuffleRewriter.cpp
@@ -60,14 +60,12 @@ struct GpuShuffleRewriter : public OpRewritePattern<gpu::ShuffleOp> {
 
     // Shuffle the values.
     ValueRange loRes =
-        rewriter
-            .create<gpu::ShuffleOp>(op.getLoc(), lo, op.getOffset(),
-                                    op.getWidth(), op.getMode())
+        gpu::ShuffleOp::create(rewriter, op.getLoc(), lo, op.getOffset(),
+                               op.getWidth(), op.getMode())
             .getResults();
     ValueRange hiRes =
-        rewriter
-            .create<gpu::ShuffleOp>(op.getLoc(), hi, op.getOffset(),
-                                    op.getWidth(), op.getMode())
+        gpu::ShuffleOp::create(rewriter, op.getLoc(), hi, op.getOffset(),
+                               op.getWidth(), op.getMode())
             .getResults();
 
     // Convert lo back to i64.
7 changes: 3 additions & 4 deletions mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
@@ -197,10 +197,9 @@ Value createSubgroupShuffleReduction(OpBuilder &builder, Location loc,
   // Parallel reduction using butterfly shuffles.
   for (unsigned i = ci.clusterStride; i < ci.clusterStride * ci.clusterSize;
        i <<= 1) {
-    Value shuffled = builder
-                         .create<gpu::ShuffleOp>(loc, packFn(laneVal), i,
-                                                 /*width=*/ci.subgroupSize,
-                                                 /*mode=*/gpu::ShuffleMode::XOR)
+    Value shuffled = gpu::ShuffleOp::create(builder, loc, packFn(laneVal), i,
+                                            /*width=*/ci.subgroupSize,
+                                            /*mode=*/gpu::ShuffleMode::XOR)
                          .getShuffleResult();
     laneVal = vector::makeArithReduction(builder, loc,
                                          gpu::convertReductionKind(mode),
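For context, the loop above is a butterfly (xor) reduction: at each step every lane combines its value with the lane whose id differs in exactly one bit, so a full subgroup reduces in log2(width) steps. A hedged sketch with a fixed width of 32 and a float add, eliding the pack/unpack callbacks and cluster bookkeeping of the real code (`builder`, `loc`, and f32 value `laneVal` assumed in scope):

```cpp
// Five xor-shuffle steps (strides 1, 2, 4, 8, 16) reduce all 32 lanes;
// afterwards every lane holds the subgroup-wide sum.
for (unsigned stride = 1; stride < 32; stride <<= 1) {
  Value shuffled = gpu::ShuffleOp::create(builder, loc, laneVal, stride,
                                          /*width=*/32,
                                          /*mode=*/gpu::ShuffleMode::XOR)
                       .getShuffleResult();
  laneVal = arith::AddFOp::create(builder, loc, laneVal, shuffled);
}
```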
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/MemRef/Transforms/IndependenceTransforms.cpp
@@ -56,9 +56,8 @@ FailureOr<Value> memref::buildIndependentOp(OpBuilder &b,
   // Create a memref::SubViewOp.
   SmallVector<OpFoldResult> offsets(newSizes.size(), b.getIndexAttr(0));
   SmallVector<OpFoldResult> strides(newSizes.size(), b.getIndexAttr(1));
-  return b
-      .create<SubViewOp>(loc, newAllocaOp, offsets, allocaOp.getMixedSizes(),
-                         strides)
+  return SubViewOp::create(b, loc, newAllocaOp, offsets,
+                           allocaOp.getMixedSizes(), strides)
       .getResult();
 }
 
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/MemRef/Transforms/RuntimeOpVerification.cpp
@@ -185,9 +185,8 @@ struct CopyOpInterface
                           int64_t dim) -> Value {
       return type.isDynamicDim(dim)
                  ? DimOp::create(builder, loc, memRef, dim).getResult()
-                 : builder
-                       .create<arith::ConstantIndexOp>(loc,
-                                                       type.getDimSize(dim))
+                 : arith::ConstantIndexOp::create(builder, loc,
+                                                  type.getDimSize(dim))
                        .getResult();
     };
     Value sourceDim = getDimSize(copyOp.getSource(), rankedSourceType, i);
84 changes: 40 additions & 44 deletions mlir/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp
@@ -148,16 +148,14 @@ flattenUnrankedTensorAroundAxis(OpBuilder &builder, Location loc, Value input,
   auto axisValue = arith::ConstantIndexOp::create(builder, loc, axis);
   auto axisNextValue = arith::ConstantIndexOp::create(builder, loc, axis + 1);
   auto shapeLeft =
-      builder
-          .create<shape::SplitAtOp>(loc, TypeRange{shapeType, shapeType},
-                                    inputShape, axisValue)
+      shape::SplitAtOp::create(builder, loc, TypeRange{shapeType, shapeType},
+                               inputShape, axisValue)
           .getResult(0);
   auto sizeLeft =
       shape::NumElementsOp::create(builder, loc, indexType, shapeLeft);
   auto shapeRight =
-      builder
-          .create<shape::SplitAtOp>(loc, TypeRange{shapeType, shapeType},
-                                    inputShape, axisNextValue)
+      shape::SplitAtOp::create(builder, loc, TypeRange{shapeType, shapeType},
+                               inputShape, axisNextValue)
          .getResult(1);
   auto sizeRight =
       shape::NumElementsOp::create(builder, loc, indexType, shapeRight);
@@ -557,25 +555,24 @@ Value convertPerChannelRanked(OpBuilder &builder, Location loc, Operation *op,
   SmallVector<AffineMap> indexingMaps{
       builder.getMultiDimIdentityMap(inputRank), channelAxisAffineMap,
       channelAxisAffineMap, builder.getMultiDimIdentityMap(inputRank)};
-  auto result = builder
-                    .create<linalg::GenericOp>(
-                        loc,
-                        init.getType(),                        // resultType
-                        ValueRange{input, scales, zeroPoints}, // inputs
-                        ValueRange{init},                      // outputs
-                        indexingMaps, iteratorTypes,
-                        [&](OpBuilder &builder, Location loc, ValueRange args) {
-                          assert(args.size() == 4);
-                          auto input = args[0];
-                          auto scale = args[1];
-                          auto zeroPoint = args[2];
-
-                          auto result =
-                              convertRanked(builder, loc, op, input, {}, scale,
-                                            zeroPoint, quantizedType);
-
-                          linalg::YieldOp::create(builder, loc, result);
-                        })
+  auto result = linalg::GenericOp::create(
+                    builder, loc,
+                    init.getType(),                        // resultType
+                    ValueRange{input, scales, zeroPoints}, // inputs
+                    ValueRange{init},                      // outputs
+                    indexingMaps, iteratorTypes,
+                    [&](OpBuilder &builder, Location loc, ValueRange args) {
+                      assert(args.size() == 4);
+                      auto input = args[0];
+                      auto scale = args[1];
+                      auto zeroPoint = args[2];
+
+                      auto result =
+                          convertRanked(builder, loc, op, input, {}, scale,
+                                        zeroPoint, quantizedType);
+
+                      linalg::YieldOp::create(builder, loc, result);
+                    })
                     .getResult(0);
 
   return result;
@@ -660,25 +657,24 @@ Value convertSubChannel(OpBuilder &builder, Location loc, Operation *op,
   SmallVector<AffineMap> indexingMaps{
       builder.getMultiDimIdentityMap(inputRank), affineMap, affineMap,
       builder.getMultiDimIdentityMap(inputRank)};
-  auto result = builder
-                    .create<linalg::GenericOp>(
-                        loc,
-                        init.getType(),                        // resultType
-                        ValueRange{input, scales, zeroPoints}, // inputs
-                        ValueRange{init},                      // outputs
-                        indexingMaps, iteratorTypes,
-                        [&](OpBuilder &builder, Location loc, ValueRange args) {
-                          assert(args.size() == 4);
-                          auto input = args[0];
-                          auto scale = args[1];
-                          auto zeroPoint = args[2];
-
-                          auto result =
-                              convertRanked(builder, loc, op, input, {}, scale,
-                                            zeroPoint, quantizedType);
-
-                          linalg::YieldOp::create(builder, loc, result);
-                        })
+  auto result = linalg::GenericOp::create(
+                    builder, loc,
+                    init.getType(),                        // resultType
+                    ValueRange{input, scales, zeroPoints}, // inputs
+                    ValueRange{init},                      // outputs
+                    indexingMaps, iteratorTypes,
+                    [&](OpBuilder &builder, Location loc, ValueRange args) {
+                      assert(args.size() == 4);
+                      auto input = args[0];
+                      auto scale = args[1];
+                      auto zeroPoint = args[2];
+
+                      auto result =
+                          convertRanked(builder, loc, op, input, {}, scale,
+                                        zeroPoint, quantizedType);
+
+                      linalg::YieldOp::create(builder, loc, result);
+                    })
                     .getResult(0);
 
   return result;
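Both quantization rewrites above drive `linalg::GenericOp::create` with identity indexing maps and a scalar body region. A hedged sketch of the same shape on a simpler body, an elementwise add, assuming `builder`, `loc`, and ranked tensor values `a`, `b`, `init` of matching shape are in scope:

```cpp
// Elementwise add as a linalg.generic in the post-migration style.
unsigned rank = cast<RankedTensorType>(init.getType()).getRank();
SmallVector<AffineMap> maps(3, builder.getMultiDimIdentityMap(rank));
SmallVector<utils::IteratorType> iters(rank, utils::IteratorType::parallel);
Value sum = linalg::GenericOp::create(
                builder, loc, init.getType(), ValueRange{a, b},
                ValueRange{init}, maps, iters,
                [&](OpBuilder &b, Location loc, ValueRange args) {
                  // args = {element of a, element of b, element of init}.
                  Value r = arith::AddFOp::create(b, loc, args[0], args[1]);
                  linalg::YieldOp::create(b, loc, r);
                })
                .getResult(0);
```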
@@ -497,10 +497,10 @@ getBbArgReplacements(RewriterBase &rewriter, Block::BlockArgListType bbArgs,
     size_t idx = it.index();
     Value val = it.value();
     if (tensorIndices.contains(idx)) {
-      result.push_back(rewriter
-                           .create<bufferization::ToTensorOp>(
-                               val.getLoc(), oldBbArgs[idx].getType(), val)
-                           .getResult());
+      result.push_back(
+          bufferization::ToTensorOp::create(rewriter, val.getLoc(),
+                                            oldBbArgs[idx].getType(), val)
+              .getResult());
     } else {
       result.push_back(val);
     }
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -827,9 +827,8 @@ static Value getProductOfIntsOrIndexes(RewriterBase &rewriter, Location loc,
       productOf = v;
   }
   if (!productOf) {
-    productOf = rewriter
-                    .create<arith::ConstantOp>(
-                        loc, rewriter.getOneAttr(getType(values.front())))
+    productOf = arith::ConstantOp::create(
+                    rewriter, loc, rewriter.getOneAttr(getType(values.front())))
                    .getResult();
   }
   return productOf.value();
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -1702,9 +1702,8 @@ struct ShapeOfOpToConstShapeOp : public OpRewritePattern<shape::ShapeOfOp> {
       return failure();
     Location loc = op.getLoc();
     Value constShape =
-        rewriter
-            .create<ConstShapeOp>(loc,
-                                  rewriter.getIndexTensorAttr(type.getShape()))
+        ConstShapeOp::create(rewriter, loc,
+                             rewriter.getIndexTensorAttr(type.getShape()))
            .getResult();
     if (constShape.getType() != op.getResult().getType())
       constShape = tensor::CastOp::create(rewriter, loc,
25 changes: 11 additions & 14 deletions mlir/lib/Dialect/Shard/Transforms/Partition.cpp
@@ -70,10 +70,8 @@ splitLastAxisInResharding(ImplicitLocOpBuilder &builder,
                           TypedValue<ShapedType> sourceShard, GridOp grid,
                           int64_t splitTensorAxis, GridAxis splitGridAxis) {
   TypedValue<ShapedType> targetShard = cast<TypedValue<ShapedType>>(
-      builder
-          .create<AllSliceOp>(sourceShard, grid,
-                              ArrayRef<GridAxis>(splitGridAxis),
-                              splitTensorAxis)
+      AllSliceOp::create(builder, sourceShard, grid,
+                         ArrayRef<GridAxis>(splitGridAxis), splitTensorAxis)
          .getResult());
   Sharding targetSharding = targetShardingInSplitLastAxis(
       builder.getContext(), sourceSharding, splitTensorAxis, splitGridAxis);
@@ -420,16 +418,15 @@ tryUpdateHaloInResharding(ImplicitLocOpBuilder &builder, GridOp grid,
 
   // Finally update the halo.
   auto updateHaloResult =
-      builder
-          .create<UpdateHaloOp>(
-              sourceShard.getLoc(),
-              RankedTensorType::get(outShape,
-                                    sourceShard.getType().getElementType()),
-              initOprnd, grid.getSymName(),
-              GridAxesArrayAttr::get(builder.getContext(),
-                                     sourceSharding.getSplitAxes()),
-              targetSharding.getDynamicHaloSizes(),
-              targetSharding.getStaticHaloSizes())
+      UpdateHaloOp::create(
+          builder, sourceShard.getLoc(),
+          RankedTensorType::get(outShape,
+                                sourceShard.getType().getElementType()),
+          initOprnd, grid.getSymName(),
+          GridAxesArrayAttr::get(builder.getContext(),
+                                 sourceSharding.getSplitAxes()),
+          targetSharding.getDynamicHaloSizes(),
+          targetSharding.getStaticHaloSizes())
          .getResult();
   return std::make_tuple(cast<TypedValue<ShapedType>>(updateHaloResult),
                          targetSharding);
@@ -931,10 +931,9 @@ createQuickSort(OpBuilder &builder, ModuleOp module, func::FuncOp func,
   FlatSymbolRefAttr partitionFunc = getMangledSortHelperFunc(
       builder, func, {IndexType::get(context)}, kPartitionFuncNamePrefix, xPerm,
       ny, args.drop_back(nTrailingP), createPartitionFunc);
-  Value p = builder
-                .create<func::CallOp>(loc, partitionFunc,
-                                      TypeRange{IndexType::get(context)},
-                                      args.drop_back(nTrailingP))
+  Value p = func::CallOp::create(builder, loc, partitionFunc,
+                                 TypeRange{IndexType::get(context)},
+                                 args.drop_back(nTrailingP))
                 .getResult(0);
 
   Value lenLow = arith::SubIOp::create(builder, loc, p, lo);
@@ -1028,9 +1027,8 @@ static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
   FlatSymbolRefAttr searchFunc = getMangledSortHelperFunc(
       builder, func, {IndexType::get(context)}, kBinarySearchFuncNamePrefix,
       xPerm, ny, operands, createBinarySearchFunc);
-  Value p = builder
-                .create<func::CallOp>(loc, searchFunc, TypeRange{c1.getType()},
-                                      operands)
+  Value p = func::CallOp::create(builder, loc, searchFunc,
+                                 TypeRange{c1.getType()}, operands)
                .getResult(0);
 
   // Move the value at data[i] to a temporary location.
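The two call sites above share one shape: build a `func.call` to a mangled helper by symbol reference, then take its single index result. A hedged sketch of that shape in isolation, with an illustrative symbol name (not the real helper prefix) and assuming `builder`, `loc`, `ctx`, and `operands` are in scope as in the sort-lowering functions above:

```cpp
// Call a helper function by symbol and unpack its single index result.
auto callee = FlatSymbolRefAttr::get(ctx, "example_sort_helper");
Value p = func::CallOp::create(builder, loc, callee,
                               TypeRange{IndexType::get(ctx)}, operands)
              .getResult(0);
```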