diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
@@ -35,6 +35,8 @@
                       MachineInstr &MI) const override;
 
 private:
+  bool legalizeBSwap(MachineInstr &MI, MachineRegisterInfo &MRI,
+                     MachineIRBuilder &MIRBuilder) const;
   bool legalizeVaArg(MachineInstr &MI, MachineRegisterInfo &MRI,
                      MachineIRBuilder &MIRBuilder) const;
   bool legalizeLoadStore(MachineInstr &MI, MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -103,7 +103,8 @@
   getActionDefinitionsBuilder(G_BSWAP)
       .legalFor({s32, s64, v4s32, v2s32, v2s64})
       .clampScalar(0, s32, s64)
-      .widenScalarToNextPow2(0);
+      .widenScalarToNextPow2(0)
+      .customIf(typeIs(0, v2s16));
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
       .legalFor({s32, s64, v2s32, v4s32, v4s16, v8s16, v16s8, v8s8})
@@ -784,6 +785,8 @@
   case TargetOpcode::G_LOAD:
   case TargetOpcode::G_STORE:
     return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
+  case TargetOpcode::G_BSWAP:
+    return legalizeBSwap(MI, MRI, MIRBuilder);
   case TargetOpcode::G_SHL:
   case TargetOpcode::G_ASHR:
   case TargetOpcode::G_LSHR:
@@ -994,6 +997,40 @@
   return true;
 }
 
+bool AArch64LegalizerInfo::legalizeBSwap(MachineInstr &MI,
+                                         MachineRegisterInfo &MRI,
+                                         MachineIRBuilder &MIRBuilder) const {
+  assert(MI.getOpcode() == TargetOpcode::G_BSWAP);
+
+  // Custom-lower G_BSWAP on <2 x s16>: swap the two bytes of each 16-bit
+  // element. There is no 32-bit-wide vector REV16, so move the 32 payload
+  // bits into lane 0 of a 64-bit vector register, byte-reverse every
+  // halfword with REV16v8i8, and extract the swapped payload again.
+  // (REV32 + USHR #16 would byte-reverse the whole 32-bit word and then
+  // shift the high element's result away, zeroing element 1.)
+  Register ValReg = MI.getOperand(1).getReg();
+  const LLT v2s32 = LLT::fixed_vector(2, 32);
+  const LLT v8s8 = LLT::fixed_vector(8, 8);
+  const LLT s32 = LLT::scalar(32);
+  assert(LLT::fixed_vector(2, 16) == MRI.getType(ValReg));
+
+  auto Undef = MIRBuilder.buildUndef(v2s32);
+  auto Bitcast = MIRBuilder.buildBitcast(s32, ValReg);
+  auto Zero = MIRBuilder.buildConstant(s32, 0);
+  auto Insert =
+      MIRBuilder.buildInsertVectorElement(v2s32, Undef, Bitcast, Zero);
+  auto Copy = MIRBuilder.buildBitcast(v8s8, Insert);
+  auto Rev16 = MIRBuilder.buildInstr(AArch64::REV16v8i8, {v8s8}, {Copy});
+  constrainSelectedInstRegOperands(*Rev16, *ST->getInstrInfo(),
+                                   *MRI.getTargetRegisterInfo(),
+                                   *ST->getRegBankInfo());
+  auto Rev16Vec = MIRBuilder.buildBitcast(v2s32, Rev16);
+  auto Extract = MIRBuilder.buildExtractVectorElement(s32, Rev16Vec, Zero);
+  MIRBuilder.buildBitcast({MI.getOperand(0).getReg()}, Extract);
+  MI.eraseFromParent();
+  return true;
+}
+
 bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                          MachineRegisterInfo &MRI,
                                          MachineIRBuilder &MIRBuilder) const {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir
@@ -8,6 +8,8 @@
   define i16 @bswap_s16(i16 %a) { ret i16 0 }
 
+  define <2 x i16> @bswap_2xi16(<2 x i16> %a) { ret <2 x i16> zeroinitializer }
+
   attributes #0 = { nounwind readnone speculatable willreturn }
 
 ...
 
@@ -42,3 +44,40 @@
     RET_ReallyLR implicit $w0
 
 ...
+---
+name:            bswap_2xi16
+alignment:       4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+liveins:
+  - { reg: '$s0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $s0
+
+    ; CHECK-LABEL: name: bswap_2xi16
+    ; CHECK: liveins: $s0
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $s0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[IVEC:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[BITCAST]](s32), [[C]](s32)
+    ; CHECK: [[BITCAST1:%[0-9]+]]:fpr64(<8 x s8>) = G_BITCAST [[IVEC]](<2 x s32>)
+    ; CHECK: [[REV16v8i8_:%[0-9]+]]:fpr64(<8 x s8>) = REV16v8i8 [[BITCAST1]](<8 x s8>)
+    ; CHECK: [[BITCAST2:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[REV16v8i8_]](<8 x s8>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BITCAST2]](<2 x s32>), [[C1]](s64)
+    ; CHECK: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[EVEC]](s32)
+    ; CHECK: $s0 = COPY [[BITCAST3]](<2 x s16>)
+    ; CHECK: RET_ReallyLR
+    %0:_(<2 x s16>) = COPY $s0
+    %1:_(<2 x s16>) = G_BSWAP %0
+    $s0 = COPY %1(<2 x s16>)
+    RET_ReallyLR
+
+...