diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -343,10 +343,9 @@
 namespace {
-// The padding value for a given Op depends on the semantics of the Op.
-// The identity value for ConvOp and PoolingSumOp is 0, for PoolingMaxOp is -inf
-// or minInt and for PoolingMinOp is inf or maxInt.
-
+/// The padding value for a given Op depends on the semantics of the Op.
+/// The identity value for ConvOp and PoolingSumOp is 0, for PoolingMaxOp is
+/// -inf or minInt and for PoolingMinOp is inf or maxInt.
 template <typename OpType>
 Attribute getPadValueAttr(Type type) {
   llvm_unreachable("Unexpected op type for getPadValueAttr");
   return {};
 }
 
@@ -357,8 +356,9 @@ Attribute getPadValueAttr<PoolingMaxOp>(Type type) {
   auto &b = ScopedContext::getBuilderRef();
   if (auto floatType = type.dyn_cast<FloatType>()) {
-    return b.getFloatAttr(floatType,
-                          APFloat::getInf(floatType.getFloatSemantics(), true));
+    return b.getFloatAttr(
+        floatType,
+        APFloat::getInf(floatType.getFloatSemantics(), /*Negative*/ true));
   }
   if (auto intType = type.dyn_cast<IntegerType>()) {
     unsigned width = intType.getWidth();
@@ -435,11 +435,11 @@
   // when there is non-zero padding.
   if (hasPadding(convOp)) {
     Type type = convOp.input().getType().cast<MemRefType>().getElementType();
-    Value zero = std_constant(type, getPadValueAttr<ConvOp>(type));
+    Value padValue = std_constant(type, getPadValueAttr<ConvOp>(type));
     Value paddedInput = getPaddedInput(
         convOp.input(), imIdx,
         /* Only need to pad the window dimensions */
-        {0, static_cast<int>(imIdx.size()) - 1}, zero);
+        {0, static_cast<int>(imIdx.size()) - 1}, padValue);
     O(oIdx) += F(fIdx) * paddedInput;
   } else {
     IndexedValueType I(convOp.input());