diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -630,7 +630,10 @@
   getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower();
 
   // TODO: Custom lowering for v2s32, v4s32, v2s64.
-  getActionDefinitionsBuilder(G_BITREVERSE).legalFor({s32, s64, v8s8, v16s8});
+  getActionDefinitionsBuilder(G_BITREVERSE)
+      .legalFor({s32, s64, v8s8, v16s8})
+      .widenScalarToNextPow2(0, /*Min = */ 32)
+      .clampScalar(0, s32, s64);
 
   getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower();
 
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bitreverse.mir
@@ -9,6 +9,7 @@
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: s32_legal
+    ; CHECK: liveins: $w0
     ; CHECK: %copy:_(s32) = COPY $w0
     ; CHECK: %bitreverse:_(s32) = G_BITREVERSE %copy
     ; CHECK: $w0 = COPY %bitreverse(s32)
@@ -25,6 +26,7 @@
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: s64_legal
+    ; CHECK: liveins: $x0
     ; CHECK: %copy:_(s64) = COPY $x0
     ; CHECK: %bitreverse:_(s64) = G_BITREVERSE %copy
     ; CHECK: $x0 = COPY %bitreverse(s64)
@@ -41,6 +43,7 @@
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: v8s8_legal
+    ; CHECK: liveins: $x0
     ; CHECK: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
     ; CHECK: %bitreverse:_(<8 x s8>) = G_BITREVERSE %vec
     ; CHECK: $x0 = COPY %bitreverse(<8 x s8>)
@@ -57,6 +60,7 @@
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: v16s8_legal
+    ; CHECK: liveins: $q0
     ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
     ; CHECK: %bitreverse:_(<16 x s8>) = G_BITREVERSE %vec
     ; CHECK: $q0 = COPY %bitreverse(<16 x s8>)
@@ -66,3 +70,66 @@
     $q0 = COPY %bitreverse
     RET_ReallyLR implicit $q0
 ...
+---
+name: s8_widen
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $b0
+    ; CHECK-LABEL: name: s8_widen
+    ; CHECK: liveins: $b0
+    ; CHECK: %copy:_(s8) = COPY $b0
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %copy(s8)
+    ; CHECK: [[BITREVERSE:%[0-9]+]]:_(s32) = G_BITREVERSE [[ANYEXT]]
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITREVERSE]], [[C]](s64)
+    ; CHECK: %bitreverse:_(s8) = G_TRUNC [[LSHR]](s32)
+    ; CHECK: $b0 = COPY %bitreverse(s8)
+    ; CHECK: RET_ReallyLR implicit $b0
+    %copy:_(s8) = COPY $b0
+    %bitreverse:_(s8) = G_BITREVERSE %copy
+    $b0 = COPY %bitreverse
+    RET_ReallyLR implicit $b0
+...
+---
+name: s3_widen
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $b0
+    ; CHECK-LABEL: name: s3_widen
+    ; CHECK: liveins: $b0
+    ; CHECK: %copy:_(s8) = COPY $b0
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %copy(s8)
+    ; CHECK: [[BITREVERSE:%[0-9]+]]:_(s32) = G_BITREVERSE [[ANYEXT]]
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 29
+    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITREVERSE]], [[C]](s64)
+    ; CHECK: %ext:_(s8) = G_TRUNC [[LSHR]](s32)
+    ; CHECK: $b0 = COPY %ext(s8)
+    ; CHECK: RET_ReallyLR implicit $b0
+    %copy:_(s8) = COPY $b0
+    %trunc:_(s3) = G_TRUNC %copy
+    %bitreverse:_(s3) = G_BITREVERSE %trunc
+    %ext:_(s8) = G_ANYEXT %bitreverse
+    $b0 = COPY %ext
+    RET_ReallyLR implicit $b0
+...
+---
+name: s128_narrow
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+    ; CHECK-LABEL: name: s128_narrow
+    ; CHECK: liveins: $q0
+    ; CHECK: %copy:_(s128) = COPY $q0
+    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy(s128)
+    ; CHECK: [[BITREVERSE:%[0-9]+]]:_(s64) = G_BITREVERSE [[UV1]]
+    ; CHECK: [[BITREVERSE1:%[0-9]+]]:_(s64) = G_BITREVERSE [[UV]]
+    ; CHECK: %bitreverse:_(s128) = G_MERGE_VALUES [[BITREVERSE]](s64), [[BITREVERSE1]](s64)
+    ; CHECK: $q0 = COPY %bitreverse(s128)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %copy:_(s128) = COPY $q0
+    %bitreverse:_(s128) = G_BITREVERSE %copy
+    $q0 = COPY %bitreverse
+    RET_ReallyLR implicit $q0
+...
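Aside (not part of the patch): a minimal C++ sketch of the identity the new s8/s3 widening relies on, namely that bit-reversing the any-extended 32-bit value and then logically shifting right by the width difference (24 for s8, 29 for s3) leaves the same low bits as reversing the narrow value directly. This is what the G_ANYEXT / G_BITREVERSE / G_LSHR / G_TRUNC sequences in the s8_widen and s3_widen CHECK lines encode; the helper names below are made up for illustration.

#include <cassert>
#include <cstdint>

// Reference 32-bit and 8-bit bit reversals (illustrative only, not LLVM code).
static uint32_t rev32(uint32_t v) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i)
    r |= ((v >> i) & 1u) << (31 - i);
  return r;
}

static uint8_t rev8(uint8_t v) {
  uint8_t r = 0;
  for (int i = 0; i < 8; ++i)
    r |= ((v >> i) & 1u) << (7 - i);
  return r;
}

int main() {
  // rev8(x) == rev32(x) >> 24 for every 8-bit value. Bits above bit 7 of the
  // widened source (undefined after G_ANYEXT) end up below bit 24 after the
  // reversal and are shifted out, so they never reach the truncated result.
  for (unsigned x = 0; x < 256; ++x)
    assert(rev8(uint8_t(x)) == uint8_t(rev32(x) >> 24));
  return 0;
}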