diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -239,11 +239,29 @@
 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
   switch (Opcode) {
   case TargetOpcode::G_SDIV:
-    assert((Size == 32 || Size == 64) && "Unsupported size");
-    return Size == 64 ? RTLIB::SDIV_I64 : RTLIB::SDIV_I32;
+    assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
+    switch (Size) {
+    case 32:
+      return RTLIB::SDIV_I32;
+    case 64:
+      return RTLIB::SDIV_I64;
+    case 128:
+      return RTLIB::SDIV_I128;
+    default:
+      llvm_unreachable("unexpected size");
+    }
   case TargetOpcode::G_UDIV:
-    assert((Size == 32 || Size == 64) && "Unsupported size");
-    return Size == 64 ? RTLIB::UDIV_I64 : RTLIB::UDIV_I32;
+    assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
+    switch (Size) {
+    case 32:
+      return RTLIB::UDIV_I32;
+    case 64:
+      return RTLIB::UDIV_I64;
+    case 128:
+      return RTLIB::UDIV_I128;
+    default:
+      llvm_unreachable("unexpected size");
+    }
   case TargetOpcode::G_SREM:
     assert((Size == 32 || Size == 64) && "Unsupported size");
     return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32;
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -2956,7 +2956,8 @@
   const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
   const LLT WideTy = MRI.getType(SrcReg);
   (void)WideTy;
-  assert(WideTy.isVector() && "can only unmerge from vector types!");
+  assert((WideTy.isVector() || WideTy == LLT::scalar(128)) &&
+         "can only unmerge from vector types!");
   assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
          "source register size too small!");
 
diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -112,6 +112,7 @@
   getActionDefinitionsBuilder({G_SDIV, G_UDIV})
       .legalFor({s32, s64})
+      .libcallFor({s128})
       .clampScalar(0, s32, s64)
       .widenScalarToNextPow2(0)
       .scalarize(0);
 
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir
@@ -0,0 +1,93 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64-apple-ios -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64-apple-ios"
+
+  define void @udiv_test(i128* %v1ptr, i128* %v2ptr) { ret void }
+
+  define void @sdiv_test(i128* %v1ptr, i128* %v2ptr) { ret void }
+
+...
+---
+name: udiv_test
+alignment: 2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+  - { reg: '$x1' }
+machineFunctionInfo: {}
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: udiv_test
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.v1ptr)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.v2ptr)
+    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
+    ; CHECK: $x0 = COPY [[UV]](s64)
+    ; CHECK: $x1 = COPY [[UV1]](s64)
+    ; CHECK: $x2 = COPY [[UV2]](s64)
+    ; CHECK: $x3 = COPY [[UV3]](s64)
+    ; CHECK: BL &__udivti3, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit-def $x0, implicit-def $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16 into %ir.v1ptr)
+    ; CHECK: RET_ReallyLR
+    %0:_(p0) = COPY $x0
+    %1:_(p0) = COPY $x1
+    %2:_(s128) = G_LOAD %0(p0) :: (load 16 from %ir.v1ptr)
+    %3:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.v2ptr)
+    %4:_(s128) = G_UDIV %2, %3
+    G_STORE %4(s128), %0(p0) :: (store 16 into %ir.v1ptr)
+    RET_ReallyLR
+
+...
+---
+name: sdiv_test
+alignment: 2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+  - { reg: '$x1' }
+machineFunctionInfo: {}
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: sdiv_test
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.v1ptr)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.v2ptr)
+    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
+    ; CHECK: $x0 = COPY [[UV]](s64)
+    ; CHECK: $x1 = COPY [[UV1]](s64)
+    ; CHECK: $x2 = COPY [[UV2]](s64)
+    ; CHECK: $x3 = COPY [[UV3]](s64)
+    ; CHECK: BL &__divti3, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit-def $x0, implicit-def $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16 into %ir.v1ptr)
+    ; CHECK: RET_ReallyLR
+    %0:_(p0) = COPY $x0
+    %1:_(p0) = COPY $x1
+    %2:_(s128) = G_LOAD %0(p0) :: (load 16 from %ir.v1ptr)
+    %3:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.v2ptr)
+    %4:_(s128) = G_SDIV %2, %3
+    G_STORE %4(s128), %0(p0) :: (store 16 into %ir.v1ptr)
+    RET_ReallyLR
+
+...
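
For reference, a minimal C++ sketch (illustrative only, not part of the patch) of the kind of source that now legalizes through this path: when built for AArch64 with GlobalISel enabled (e.g. at -O0, where it is the default), the 128-bit divisions below become s128 G_SDIV/G_UDIV and are lowered to the __divti3/__udivti3 libcalls checked in the test above. __int128 is a Clang/GCC extension, and the function names are hypothetical.

// Illustrative sketch only; not part of this change.
// Signed 128-bit division: legalized to a call to __divti3.
__int128 sdiv128(__int128 A, __int128 B) { return A / B; }

// Unsigned 128-bit division: legalized to a call to __udivti3.
unsigned __int128 udiv128(unsigned __int128 A, unsigned __int128 B) {
  return A / B;
}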