diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp
@@ -86,9 +86,9 @@
       loc, dstType, minusOneIntegerAttribute(srcType, rewriter));
 }
 
-/// This is a utility function for bit manipulations ops (`BitFieldInsert`)
-/// and operates on their `Count` or `Offset` values. It casts the given
-/// value to match the target type.
+/// This is a utility function for bit manipulations ops (`BitFieldInsert`,
+/// `BitFieldSExtract` and `BitFieldUExtract`), and operates on their `Count`
+/// or `Offset` values. It casts the given value to match the target type.
 static Value optionallyCast(Location loc, Value value, Type dstType,
                             ConversionPatternRewriter &rewriter) {
   auto llvmType = dstType.cast<LLVM::LLVMType>();
@@ -196,6 +196,130 @@
   }
 };
 
+/// Converts `spv.BitFieldSExtract` to LLVM dialect: the extracted field is
+/// shifted to the top of the word and then arithmetic-shifted back down so
+/// the sign bit of the field fills the upper bits.
+class BitFieldSExtractPattern
+    : public SPIRVToLLVMConversion<spirv::BitFieldSExtractOp> {
+public:
+  using SPIRVToLLVMConversion<spirv::BitFieldSExtractOp>::SPIRVToLLVMConversion;
+
+  LogicalResult
+  matchAndRewrite(spirv::BitFieldSExtractOp op, ArrayRef<Value> operands,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto srcType = op.getType();
+    auto dstType = this->typeConverter.convertType(srcType);
+    if (!dstType)
+      return failure();
+    Location loc = op.getLoc();
+
+    // Broadcast `Offset` and `Count` to match the type of `Base`.
+    // If `Base` is of a vector type, construct a vector that has:
+    // - same number of elements as `Base`
+    // - each element has the type that is the same as the type of `Offset` or
+    //   `Count`
+    // - each element has the same value as `Offset` or `Count`
+    Value offset;
+    Value count;
+    if (auto vectorType = srcType.dyn_cast<VectorType>()) {
+      int64_t numElements = static_cast<int64_t>(vectorType.getNumElements());
+      broadcast(loc, op.offset(), offset, numElements, typeConverter, rewriter);
+      broadcast(loc, op.count(), count, numElements, typeConverter, rewriter);
+    } else {
+      offset = op.offset();
+      count = op.count();
+    }
+
+    // Need to zero extend `Offset` and `Count` if their bit width is different
+    // from the result bit width.
+    Value optionallyCastedCount = optionallyCast(loc, count, dstType, rewriter);
+    Value optionallyCastedOffset =
+        optionallyCast(loc, offset, dstType, rewriter);
+
+    // Create a constant that holds the size of the `Base`.
+    IntegerType integerType;
+    if (auto vecType = srcType.dyn_cast<VectorType>())
+      integerType = vecType.getElementType().cast<IntegerType>();
+    else
+      integerType = srcType.cast<IntegerType>();
+
+    auto baseSize = rewriter.getIntegerAttr(integerType, getBitWidth(srcType));
+    Value size =
+        srcType.isa<VectorType>()
+            ? rewriter.create<LLVM::ConstantOp>(
+                  loc, dstType,
+                  SplatElementsAttr::get(srcType.cast<ShapedType>(), baseSize))
+            : rewriter.create<LLVM::ConstantOp>(loc, dstType, baseSize);
+
+    // Shift `Base` left by [sizeof(Base) - (Count + Offset)], so that the bit
+    // at Offset + Count - 1 is the most significant bit now.
+    Value countPlusOffset = rewriter.create<LLVM::AddOp>(
+        loc, dstType, optionallyCastedCount, optionallyCastedOffset);
+    Value amountToShiftLeft =
+        rewriter.create<LLVM::SubOp>(loc, dstType, size, countPlusOffset);
+    Value baseShiftedLeft = rewriter.create<LLVM::ShlOp>(
+        loc, dstType, op.base(), amountToShiftLeft);
+
+    // Shift the result right, filling the bits with the sign bit.
+    // Shift amount is [sizeof(Base) - Count] = Offset + amountToShiftLeft.
+    Value amountToShiftRight = rewriter.create<LLVM::AddOp>(
+        loc, dstType, optionallyCastedOffset, amountToShiftLeft);
+    rewriter.replaceOpWithNewOp<LLVM::AShrOp>(op, dstType, baseShiftedLeft,
+                                              amountToShiftRight);
+    return success();
+  }
+};
+
+/// Converts `spv.BitFieldUExtract` to LLVM dialect: the base is shifted right
+/// by `Offset` and masked with `Count` low bits set.
+class BitFieldUExtractPattern
+    : public SPIRVToLLVMConversion<spirv::BitFieldUExtractOp> {
+public:
+  using SPIRVToLLVMConversion<spirv::BitFieldUExtractOp>::SPIRVToLLVMConversion;
+
+  LogicalResult
+  matchAndRewrite(spirv::BitFieldUExtractOp op, ArrayRef<Value> operands,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto srcType = op.getType();
+    auto dstType = this->typeConverter.convertType(srcType);
+    if (!dstType)
+      return failure();
+    Location loc = op.getLoc();
+
+    // Broadcast `Offset` and `Count` to match the type of `Base`.
+    // If `Base` is of a vector type, construct a vector that has:
+    // - same number of elements as `Base`
+    // - each element has the type that is the same as the type of `Offset` or
+    //   `Count`
+    // - each element has the same value as `Offset` or `Count`
+    Value offset;
+    Value count;
+    if (auto vectorType = srcType.dyn_cast<VectorType>()) {
+      int64_t numElements = static_cast<int64_t>(vectorType.getNumElements());
+      broadcast(loc, op.offset(), offset, numElements, typeConverter, rewriter);
+      broadcast(loc, op.count(), count, numElements, typeConverter, rewriter);
+    } else {
+      offset = op.offset();
+      count = op.count();
+    }
+
+    // Create a mask with all bits set of the same type as `srcType`.
+    Value minusOne = createConstantAllBitsSet(loc, srcType, dstType, rewriter);
+
+    // Need to cast `Offset` and `Count` if their bit width is different from
+    // the result bit width.
+    Value optionallyCastedCount = optionallyCast(loc, count, dstType, rewriter);
+    Value optionallyCastedOffset =
+        optionallyCast(loc, offset, dstType, rewriter);
+
+    // Create a mask with bits set at [0, Count - 1].
+    Value maskShiftedByCount = rewriter.create<LLVM::ShlOp>(
+        loc, dstType, minusOne, optionallyCastedCount);
+    Value mask = rewriter.create<LLVM::XOrOp>(loc, dstType, maskShiftedByCount,
+                                              minusOne);
+
+    // Shift `Base` by `Offset` and apply the mask on it.
+    Value shiftedBase = rewriter.create<LLVM::LShrOp>(loc, dstType, op.base(),
+                                                      optionallyCastedOffset);
+    rewriter.replaceOpWithNewOp<LLVM::AndOp>(op, dstType, shiftedBase, mask);
+    return success();
+  }
+};
+
 /// Converts SPIR-V operations that have straightforward LLVM equivalent
 /// into LLVM dialect operations.
 template <typename SPIRVOp, typename LLVMOp>
@@ -506,7 +630,7 @@
       DirectConversionPattern<spirv::UModOp, LLVM::URemOp>,
 
       // Bitwise ops
-      BitFieldInsertPattern,
+      BitFieldInsertPattern, BitFieldUExtractPattern, BitFieldSExtractPattern,
       DirectConversionPattern<spirv::BitwiseAndOp, LLVM::AndOp>,
       DirectConversionPattern<spirv::BitwiseOrOp, LLVM::OrOp>,
       DirectConversionPattern<spirv::BitwiseXorOp, LLVM::XOrOp>,
diff --git a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
--- a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
@@ -132,6 +132,237 @@
   return
 }
 
+//===----------------------------------------------------------------------===//
+// spv.BitFieldSExtract
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: func @bitfield_sextract_scalar_same_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
+func @bitfield_sextract_scalar_same_bit_width(%base: i64, %offset: i64, %count: i64) {
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(64 : i64) : !llvm.i64
+  // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : !llvm.i64
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i64
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i64
+  // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : !llvm.i64
+  // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i64
+  %0 = spv.BitFieldSExtract %base, %offset, %count : i64, i64, i64
+  return
+}
+
+// CHECK-LABEL: func @bitfield_sextract_scalar_smaller_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8
+func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %count: i8) {
+  // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i32
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(32 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : !llvm.i32
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
+  // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : !llvm.i32
+  // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
+  %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i8, i8
+  return
+}
+
+// CHECK-LABEL: func @bitfield_sextract_scalar_greater_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
+func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %count: i64) {
+  // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i32
+  // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i64 to !llvm.i32
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(32 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : !llvm.i32
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
+  // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : !llvm.i32
+  // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
+  %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i64, i64
+  return
+}
+
+// CHECK-LABEL: func @bitfield_sextract_vector_same_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+func @bitfield_sextract_vector_same_bit_width(%base: vector<2xi32>, %offset: i32, %count: i32) {
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.add %[[COUNT_V2]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET_V2]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i32, i32
+  return
+}
+
+// CHECK-LABEL: func @bitfield_sextract_vector_smaller_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8
+func @bitfield_sextract_vector_smaller_bit_width(%base: vector<2xi32>, %offset: i8, %count: i8) {
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i8>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i8>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT_V2]] : !llvm<"<2 x i8>"> to !llvm<"<2 x i32>">
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET_V2]] : !llvm<"<2 x i8>"> to !llvm<"<2 x i32>">
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i8, i8
+  return
+}
+
+// CHECK-LABEL: func @bitfield_sextract_vector_greater_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
+func @bitfield_sextract_vector_greater_bit_width(%base: vector<2xi32>, %offset: i64, %count: i64) {
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i64>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i64>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i64>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i64>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i64>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i64>">
+  // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT_V2]] : !llvm<"<2 x i64>"> to !llvm<"<2 x i32>">
+  // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET_V2]] : !llvm<"<2 x i64>"> to !llvm<"<2 x i32>">
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i64, i64
+  return
+}
+
+//===----------------------------------------------------------------------===//
+// spv.BitFieldUExtract
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: func @bitfield_uextract_scalar_same_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+func @bitfield_uextract_scalar_same_bit_width(%base: i32, %offset: i32, %count: i32) {
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : !llvm.i32
+  // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
+  %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
+  return
+}
+
+// CHECK-LABEL: func @bitfield_uextract_scalar_smaller_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8
+func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %count: i8) {
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
+  // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i16 to !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i32
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : !llvm.i32
+  // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
+  %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i16, i8
+  return
+}
+
+// CHECK-LABEL: func @bitfield_uextract_scalar_greater_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm.i8, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8
+func @bitfield_uextract_scalar_greater_bit_width(%base: i8, %offset: i16, %count: i8) {
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : !llvm.i8
+  // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i16 to !llvm.i8
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i8
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i8
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : !llvm.i8
+  // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i8
+  %0 = spv.BitFieldUExtract %base, %offset, %count : i8, i16, i8
+  return
+}
+
+// CHECK-LABEL: func @bitfield_uextract_vector_same_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
+func @bitfield_uextract_vector_same_bit_width(%base: vector<2xi32>, %offset: i32, %count: i32) {
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldUExtract %base, %offset, %count : vector<2xi32>, i32, i32
+  return
+}
+
+// CHECK-LABEL: func @bitfield_uextract_vector_smaller_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i32
+func @bitfield_uextract_vector_smaller_bit_width(%base: vector<2xi32>, %offset: i8, %count: i32) {
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i8>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET_V2]] : !llvm<"<2 x i8>"> to !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : !llvm<"<2 x i32>">
+  // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldUExtract %base, %offset, %count : vector<2xi32>, i8, i32
+  return
+}
+
+// CHECK-LABEL: func @bitfield_uextract_vector_greater_bit_width
+// CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i8>">, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i32
+func @bitfield_uextract_vector_greater_bit_width(%base: vector<2xi8>, %offset: i8, %count: i32) {
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i8>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i8>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi8>) : !llvm<"<2 x i8>">
+  // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT_V2]] : !llvm<"<2 x i32>"> to !llvm<"<2 x i8>">
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : !llvm<"<2 x i8>">
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i8>">
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET_V2]] : !llvm<"<2 x i8>">
+  // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm<"<2 x i8>">
+  %0 = spv.BitFieldUExtract %base, %offset, %count : vector<2xi8>, i8, i32
+  return
+}
+
 //===----------------------------------------------------------------------===//
 // spv.BitwiseAnd
 //===----------------------------------------------------------------------===//