Index: include/llvm/Support/TargetOpcodes.def
===================================================================
--- include/llvm/Support/TargetOpcodes.def
+++ include/llvm/Support/TargetOpcodes.def
@@ -520,6 +520,12 @@
 /// Floating point ceil.
 HANDLE_TARGET_OPCODE(G_FCEIL)
 
+/// Floating point cosine.
+HANDLE_TARGET_OPCODE(G_FCOS)
+
+/// Floating point sine.
+HANDLE_TARGET_OPCODE(G_FSIN)
+
 /// Generic AddressSpaceCast.
 HANDLE_TARGET_OPCODE(G_ADDRSPACE_CAST)
Index: include/llvm/Target/GenericOpcodes.td
===================================================================
--- include/llvm/Target/GenericOpcodes.td
+++ include/llvm/Target/GenericOpcodes.td
@@ -555,6 +555,20 @@
   let hasSideEffects = 0;
 }
 
+// Floating point cosine of a value.
+def G_FCOS : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
+// Floating point sine of a value.
+def G_FSIN : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1);
+  let hasSideEffects = 0;
+}
+
 //------------------------------------------------------------------------------
 // Opcodes for LLVM Intrinsics
 //------------------------------------------------------------------------------
Index: include/llvm/Target/GlobalISel/SelectionDAGCompat.td
===================================================================
--- include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -89,6 +89,8 @@
 def : GINodeEquiv;
 def : GINodeEquiv;
 def : GINodeEquiv;
+def : GINodeEquiv<G_FCOS, fcos>;
+def : GINodeEquiv<G_FSIN, fsin>;
 
 // Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
 // complications that tablegen must take care of. For example, Predicates such
Index: lib/CodeGen/GlobalISel/IRTranslator.cpp
===================================================================
--- lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1055,6 +1055,16 @@
         .addDef(getOrCreateVReg(CI))
         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
     return true;
+  case Intrinsic::cos:
+    MIRBuilder.buildInstr(TargetOpcode::G_FCOS)
+        .addDef(getOrCreateVReg(CI))
+        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    return true;
+  case Intrinsic::sin:
+    MIRBuilder.buildInstr(TargetOpcode::G_FSIN)
+        .addDef(getOrCreateVReg(CI))
+        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    return true;
   }
   return false;
 }
Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -122,6 +122,14 @@
   case TargetOpcode::G_FMA:
     assert((Size == 32 || Size == 64) && "Unsupported size");
     return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
+  case TargetOpcode::G_FSIN:
+    assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
+    return Size == 128 ? RTLIB::SIN_F128
+                       : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32;
+  case TargetOpcode::G_FCOS:
+    assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
+    return Size == 128 ? RTLIB::COS_F128
+                       : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32;
   }
   llvm_unreachable("Unknown libcall function");
 }
@@ -213,7 +221,9 @@
   case TargetOpcode::G_FDIV:
   case TargetOpcode::G_FMA:
   case TargetOpcode::G_FPOW:
-  case TargetOpcode::G_FREM: {
+  case TargetOpcode::G_FREM:
+  case TargetOpcode::G_FCOS:
+  case TargetOpcode::G_FSIN: {
     Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
     auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
     if (Status != Legalized)
@@ -1034,6 +1044,8 @@
   case TargetOpcode::G_FDIV:
   case TargetOpcode::G_FREM:
   case TargetOpcode::G_FCEIL:
+  case TargetOpcode::G_FCOS:
+  case TargetOpcode::G_FSIN:
     assert(TypeIdx == 0);
     Observer.changingInstr(MI);
@@ -1290,7 +1302,9 @@
   case TargetOpcode::G_FDIV:
   case TargetOpcode::G_FREM:
   case TargetOpcode::G_FMA:
-  case TargetOpcode::G_FCEIL: {
+  case TargetOpcode::G_FCEIL:
+  case TargetOpcode::G_FCOS:
+  case TargetOpcode::G_FSIN: {
     unsigned NarrowSize = NarrowTy.getSizeInBits();
     unsigned DstReg = MI.getOperand(0).getReg();
     unsigned Flags = MI.getFlags();
Index: lib/Target/AArch64/AArch64LegalizerInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -142,6 +142,13 @@
           [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
       .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});
 
+  getActionDefinitionsBuilder({G_FCOS, G_FSIN})
+      // We need a call for these, so we always need to scalarize.
+      .scalarize(0)
+      // Regardless of FP16 support, widen 16-bit elements to 32-bits.
+      .minScalar(0, s32)
+      .libcallFor({s32, s64, v2s32, v4s32, v2s64});
+
   getActionDefinitionsBuilder(G_INSERT)
       .unsupportedIf([=](const LegalityQuery &Query) {
         return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
       })
@@ -444,7 +451,11 @@
       });
 
   getActionDefinitionsBuilder(G_BUILD_VECTOR)
-      .legalFor({{v4s16, s16}, {v8s16, s16}, {v4s32, s32}, {v2s64, s64}})
+      .legalFor({{v4s16, s16},
+                 {v8s16, s16},
+                 {v2s32, s32},
+                 {v4s32, s32},
+                 {v2s64, s64}})
       .clampNumElements(0, v4s32, v4s32)
       .clampNumElements(0, v2s64, v2s64)
Index: lib/Target/AArch64/AArch64RegisterBankInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -392,6 +392,8 @@
   case TargetOpcode::G_FPEXT:
   case TargetOpcode::G_FPTRUNC:
   case TargetOpcode::G_FCEIL:
+  case TargetOpcode::G_FCOS:
+  case TargetOpcode::G_FSIN:
     return true;
   }
   return false;
Index: test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -2283,3 +2283,19 @@
   %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x)
   ret <2 x double> %y
 }
+
+declare float @llvm.cos.f32(float)
+define float @test_cos_f32(float %x) {
+  ; CHECK-LABEL: name: test_cos_f32
+  ; CHECK: %{{[0-9]+}}:_(s32) = G_FCOS %{{[0-9]+}}
+  %y = call float @llvm.cos.f32(float %x)
+  ret float %y
+}
+
+declare float @llvm.sin.f32(float)
+define float @test_sin_f32(float %x) {
+  ; CHECK-LABEL: name: test_sin_f32
+  ; CHECK: %{{[0-9]+}}:_(s32) = G_FSIN %{{[0-9]+}}
+  %y = call float @llvm.sin.f32(float %x)
+  ret float %y
+}
Index: test/CodeGen/AArch64/GlobalISel/legalize-cos.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/GlobalISel/legalize-cos.mir
@@ -0,0 +1,227 @@
+# RUN: llc -verify-machineinstrs -mtriple aarch64--- \
+# RUN:     -run-pass=legalizer -mattr=+fullfp16 -global-isel %s -o - \
+# RUN:     | FileCheck %s
+...
+---
+name: test_v4f16.cos
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $d0
+    ; CHECK-LABEL: name: test_v4f16.cos
+    ; CHECK: [[V1:%[0-9]+]]:_(s16), [[V2:%[0-9]+]]:_(s16), [[V3:%[0-9]+]]:_(s16), [[V4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES %{{[0-9]+}}(<4 x s16>)
+
+    ; CHECK-DAG: [[V1_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V1]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V1_S32]](s32)
+    ; CHECK-NEXT: BL &cosf
+    ; CHECK-NEXT: [[ELT1_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT1:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT1_S32]](s32)
+
+    ; CHECK-DAG: [[V2_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V2]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V2_S32]](s32)
+    ; CHECK-NEXT: BL &cosf
+    ; CHECK-NEXT: [[ELT2_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT2:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT2_S32]](s32)
+
+    ; CHECK-DAG: [[V3_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V3]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V3_S32]](s32)
+    ; CHECK-NEXT: BL &cosf
+    ; CHECK-NEXT: [[ELT3_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT3:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT3_S32]](s32)
+
+    ; CHECK-DAG: [[V4_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V4]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V4_S32]](s32)
+    ; CHECK-NEXT: BL &cosf
+    ; CHECK-NEXT: [[ELT4_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT4:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT4_S32]](s32)
+
+    ; CHECK-DAG: %{{[0-9]+}}:_(<4 x s16>) = G_BUILD_VECTOR [[ELT1]](s16), [[ELT2]](s16), [[ELT3]](s16), [[ELT4]](s16)
+
+    %0:_(<4 x s16>) = COPY $d0
+    %1:_(<4 x s16>) = G_FCOS %0
+    $d0 = COPY %1(<4 x s16>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v8f16.cos
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_v8f16.cos
+
+    ; This is big, so let's just check for the 8 calls to cosf, the
+    ; G_UNMERGE_VALUES, and the G_BUILD_VECTOR. The other instructions ought
+    ; to be covered by the other tests.
+
+    ; CHECK: G_UNMERGE_VALUES
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: G_BUILD_VECTOR
+
+    %0:_(<8 x s16>) = COPY $q0
+    %1:_(<8 x s16>) = G_FCOS %0
+    $q0 = COPY %1(<8 x s16>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f32.cos
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_v2f32.cos
+    ; CHECK: [[V1:%[0-9]+]]:_(s32), [[V2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %{{[0-9]+}}(<2 x s32>)
+
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V1]](s32)
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: [[ELT1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V2]](s32)
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: [[ELT2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: %1:_(<2 x s32>) = G_BUILD_VECTOR [[ELT1]](s32), [[ELT2]](s32)
+
+    %0:_(<2 x s32>) = COPY $d0
+    %1:_(<2 x s32>) = G_FCOS %0
+    $d0 = COPY %1(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v4f32.cos
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $q0
+    ; CHECK-LABEL: name: test_v4f32.cos
+    ; CHECK: [[V1:%[0-9]+]]:_(s32), [[V2:%[0-9]+]]:_(s32), [[V3:%[0-9]+]]:_(s32), [[V4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %{{[0-9]+}}(<4 x s32>)
+
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V1]](s32)
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: [[ELT1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V2]](s32)
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: [[ELT2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V3]](s32)
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: [[ELT3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V4]](s32)
+    ; CHECK-DAG: BL &cosf
+    ; CHECK-DAG: [[ELT4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: %1:_(<4 x s32>) = G_BUILD_VECTOR [[ELT1]](s32), [[ELT2]](s32), [[ELT3]](s32), [[ELT4]](s32)
+
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = G_FCOS %0
+    $q0 = COPY %1(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f64.cos
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_v2f64.cos
+    ; CHECK: [[V1:%[0-9]+]]:_(s64), [[V2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %{{[0-9]+}}(<2 x s64>)
+
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $d0 = COPY [[V1]](s64)
+    ; CHECK-DAG: BL &cos
+    ; CHECK-DAG: [[ELT1:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $d0 = COPY [[V2]](s64)
+    ; CHECK-DAG: BL &cos
+    ; CHECK-DAG: [[ELT2:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: %1:_(<2 x s64>) = G_BUILD_VECTOR [[ELT1]](s64), [[ELT2]](s64)
+
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = G_FCOS %0
+    $q0 = COPY %1(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_cos_half
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $h0
+    ; CHECK-LABEL: name: test_cos_half
+    ; CHECK: [[REG1:%[0-9]+]]:_(s32) = G_FPEXT %0(s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[REG1]](s32)
+    ; CHECK-NEXT: BL &cosf
+    ; CHECK-NEXT: [[REG2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s16) = G_FPTRUNC [[REG2]](s32)
+
+    %0:_(s16) = COPY $h0
+    %1:_(s16) = G_FCOS %0
+    $h0 = COPY %1(s16)
+    RET_ReallyLR implicit $h0
Index: test/CodeGen/AArch64/GlobalISel/legalize-sin.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/GlobalISel/legalize-sin.mir
@@ -0,0 +1,227 @@
+# RUN: llc -verify-machineinstrs -mtriple aarch64--- \
+# RUN:     -run-pass=legalizer -mattr=+fullfp16 -global-isel %s -o - \
+# RUN:     | FileCheck %s
+...
+---
+name: test_v4f16.sin
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $d0
+    ; CHECK-LABEL: name: test_v4f16.sin
+    ; CHECK: [[V1:%[0-9]+]]:_(s16), [[V2:%[0-9]+]]:_(s16), [[V3:%[0-9]+]]:_(s16), [[V4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES %{{[0-9]+}}(<4 x s16>)
+
+    ; CHECK-DAG: [[V1_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V1]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V1_S32]](s32)
+    ; CHECK-NEXT: BL &sinf
+    ; CHECK-NEXT: [[ELT1_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT1:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT1_S32]](s32)
+
+    ; CHECK-DAG: [[V2_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V2]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V2_S32]](s32)
+    ; CHECK-NEXT: BL &sinf
+    ; CHECK-NEXT: [[ELT2_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT2:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT2_S32]](s32)
+
+    ; CHECK-DAG: [[V3_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V3]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V3_S32]](s32)
+    ; CHECK-NEXT: BL &sinf
+    ; CHECK-NEXT: [[ELT3_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT3:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT3_S32]](s32)
+
+    ; CHECK-DAG: [[V4_S32:%[0-9]+]]:_(s32) = G_FPEXT [[V4]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[V4_S32]](s32)
+    ; CHECK-NEXT: BL &sinf
+    ; CHECK-NEXT: [[ELT4_S32:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[ELT4:%[0-9]+]]:_(s16) = G_FPTRUNC [[ELT4_S32]](s32)
+
+    ; CHECK-DAG: %{{[0-9]+}}:_(<4 x s16>) = G_BUILD_VECTOR [[ELT1]](s16), [[ELT2]](s16), [[ELT3]](s16), [[ELT4]](s16)
+
+    %0:_(<4 x s16>) = COPY $d0
+    %1:_(<4 x s16>) = G_FSIN %0
+    $d0 = COPY %1(<4 x s16>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v8f16.sin
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_v8f16.sin
+
+    ; This is big, so let's just check for the 8 calls to sinf, the
+    ; G_UNMERGE_VALUES, and the G_BUILD_VECTOR. The other instructions ought
+    ; to be covered by the other tests.
+
+    ; CHECK: G_UNMERGE_VALUES
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: G_BUILD_VECTOR
+
+    %0:_(<8 x s16>) = COPY $q0
+    %1:_(<8 x s16>) = G_FSIN %0
+    $q0 = COPY %1(<8 x s16>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f32.sin
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_v2f32.sin
+    ; CHECK: [[V1:%[0-9]+]]:_(s32), [[V2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %{{[0-9]+}}(<2 x s32>)
+
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V1]](s32)
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: [[ELT1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V2]](s32)
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: [[ELT2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: %1:_(<2 x s32>) = G_BUILD_VECTOR [[ELT1]](s32), [[ELT2]](s32)
+
+    %0:_(<2 x s32>) = COPY $d0
+    %1:_(<2 x s32>) = G_FSIN %0
+    $d0 = COPY %1(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v4f32.sin
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $q0
+    ; CHECK-LABEL: name: test_v4f32.sin
+    ; CHECK: [[V1:%[0-9]+]]:_(s32), [[V2:%[0-9]+]]:_(s32), [[V3:%[0-9]+]]:_(s32), [[V4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %{{[0-9]+}}(<4 x s32>)
+
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V1]](s32)
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: [[ELT1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V2]](s32)
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: [[ELT2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V3]](s32)
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: [[ELT3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $s0 = COPY [[V4]](s32)
+    ; CHECK-DAG: BL &sinf
+    ; CHECK-DAG: [[ELT4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: %1:_(<4 x s32>) = G_BUILD_VECTOR [[ELT1]](s32), [[ELT2]](s32), [[ELT3]](s32), [[ELT4]](s32)
+
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = G_FSIN %0
+    $q0 = COPY %1(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f64.sin
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_v2f64.sin
+    ; CHECK: [[V1:%[0-9]+]]:_(s64), [[V2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %{{[0-9]+}}(<2 x s64>)
+
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $d0 = COPY [[V1]](s64)
+    ; CHECK-DAG: BL &sin
+    ; CHECK-DAG: [[ELT1:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: ADJCALLSTACKDOWN
+    ; CHECK-DAG: $d0 = COPY [[V2]](s64)
+    ; CHECK-DAG: BL &sin
+    ; CHECK-DAG: [[ELT2:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-DAG: ADJCALLSTACKUP
+
+    ; CHECK-DAG: %1:_(<2 x s64>) = G_BUILD_VECTOR [[ELT1]](s64), [[ELT2]](s64)
+
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = G_FSIN %0
+    $q0 = COPY %1(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_sin_half
+alignment: 2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _ }
+  - { id: 1, class: _ }
+body: |
+  bb.0:
+    liveins: $h0
+    ; CHECK-LABEL: name: test_sin_half
+    ; CHECK: [[REG1:%[0-9]+]]:_(s32) = G_FPEXT %0(s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN
+    ; CHECK-NEXT: $s0 = COPY [[REG1]](s32)
+    ; CHECK-NEXT: BL &sinf
+    ; CHECK-NEXT: [[REG2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP
+    ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s16) = G_FPTRUNC [[REG2]](s32)
+
+    %0:_(s16) = COPY $h0
+    %1:_(s16) = G_FSIN %0
+    $h0 = COPY %1(s16)
+    RET_ReallyLR implicit $h0
Index: test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -331,7 +331,13 @@
 # DEBUG: .. the first uncovered type index: 1, OK
 #
 # DEBUG-NEXT: G_FCEIL (opcode {{[0-9]+}}): 1 type index
-# DEBUG: .. the first uncovered type index: 1, OK
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
+#
+# DEBUG-NEXT: G_FCOS (opcode {{[0-9]+}}): 1 type index
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
+#
+# DEBUG-NEXT: G_FSIN (opcode {{[0-9]+}}): 1 type index
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
 
 # CHECK-NOT: ill-defined
Index: test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
+++ test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
@@ -30,17 +30,25 @@
   %1 = call %v4f16 @llvm.powi.v4f16(%v4f16 %a, i32 %b)
   ret %v4f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f16.sin
 define %v4f16 @test_v4f16.sin(%v4f16 %a) {
   ; This operation is expanded, whether with or without +fullfp16.
   ; CHECK-LABEL: test_v4f16.sin:
   ; CHECK-COUNT-4: bl sinf
+  ; GISEL-LABEL: test_v4f16.sin:
+  ; GISEL-COUNT-4: bl sinf
   %1 = call %v4f16 @llvm.sin.v4f16(%v4f16 %a)
   ret %v4f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f16.cos
 define %v4f16 @test_v4f16.cos(%v4f16 %a) {
   ; This operation is expanded, whether with or without +fullfp16.
   ; CHECK-LABEL: test_v4f16.cos:
   ; CHECK-COUNT-4: bl cosf
+  ; GISEL-LABEL: test_v4f16.cos:
+  ; GISEL-COUNT-4: bl cosf
   %1 = call %v4f16 @llvm.cos.v4f16(%v4f16 %a)
   ret %v4f16 %1
 }
@@ -193,17 +201,25 @@
   %1 = call %v8f16 @llvm.powi.v8f16(%v8f16 %a, i32 %b)
   ret %v8f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v8f16.sin
 define %v8f16 @test_v8f16.sin(%v8f16 %a) {
   ; This operation is expanded, whether with or without +fullfp16.
   ; CHECK-LABEL: test_v8f16.sin:
   ; CHECK-COUNT-8: bl sinf
+  ; GISEL-LABEL: test_v8f16.sin:
+  ; GISEL-COUNT-8: bl sinf
   %1 = call %v8f16 @llvm.sin.v8f16(%v8f16 %a)
   ret %v8f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v8f16.cos
 define %v8f16 @test_v8f16.cos(%v8f16 %a) {
   ; This operation is expanded, whether with or without +fullfp16.
   ; CHECK-LABEL: test_v8f16.cos:
   ; CHECK-COUNT-8: bl cosf
+  ; GISEL-LABEL: test_v8f16.cos:
+  ; GISEL-COUNT-8: bl cosf
   %1 = call %v8f16 @llvm.cos.v8f16(%v8f16 %a)
   ret %v8f16 %1
 }
@@ -352,15 +368,21 @@
   %1 = call %v2f32 @llvm.powi.v2f32(%v2f32 %a, i32 %b)
   ret %v2f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f32.sin
 ; CHECK: test_v2f32.sin:
 define %v2f32 @test_v2f32.sin(%v2f32 %a) {
   ; CHECK: sin
+  ; GISEL: sin
   %1 = call %v2f32 @llvm.sin.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f32.cos
 ; CHECK: test_v2f32.cos:
 define %v2f32 @test_v2f32.cos(%v2f32 %a) {
   ; CHECK: cos
+  ; GISEL: cos
   %1 = call %v2f32 @llvm.cos.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
@@ -480,15 +502,21 @@
   %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
   ret %v4f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f32.sin
 ; CHECK: test_v4f32.sin:
 define %v4f32 @test_v4f32.sin(%v4f32 %a) {
   ; CHECK: sin
+  ; GISEL: sin
   %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
   ret %v4f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f32.cos
 ; CHECK: test_v4f32.cos:
 define %v4f32 @test_v4f32.cos(%v4f32 %a) {
   ; CHECK: cos
+  ; GISEL: cos
   %1 = call %v4f32 @llvm.cos.v4f32(%v4f32 %a)
   ret %v4f32 %1
 }
@@ -607,15 +635,21 @@
   %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
   ret %v2f64 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f64.sin
 ; CHECK: test_v2f64.sin:
 define %v2f64 @test_v2f64.sin(%v2f64 %a) {
   ; CHECK: sin
+  ; GISEL: sin
   %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
   ret %v2f64 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f64.cos
 ; CHECK: test_v2f64.cos:
 define %v2f64 @test_v2f64.cos(%v2f64 %a) {
   ; CHECK: cos
+  ; GISEL: cos
   %1 = call %v2f64 @llvm.cos.v2f64(%v2f64 %a)
   ret %v2f64 %1
 }
Index: test/CodeGen/AArch64/f16-instructions.ll
===================================================================
--- test/CodeGen/AArch64/f16-instructions.ll
+++ test/CodeGen/AArch64/f16-instructions.ll
@@ -4,13 +4,13 @@
 ; RUN: llc < %s -mtriple aarch64-unknown-unknown -aarch64-neon-syntax=apple \
 ; RUN:   -asm-verbose=false -disable-post-ra -frame-pointer=all -global-isel \
 ; RUN:   -global-isel-abort=2 -pass-remarks-missed=gisel-* 2>&1 | FileCheck %s \
-; RUN:   --check-prefixes=FALLBACK,GISEL-CVT
+; RUN:   --check-prefixes=FALLBACK,GISEL-CVT,GISEL
 
 ; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fullfp16 \
 ; RUN:   -aarch64-neon-syntax=apple -asm-verbose=false -disable-post-ra \
 ; RUN:   -frame-pointer=all -global-isel -global-isel-abort=2 \
 ; RUN:   -pass-remarks-missed=gisel-* 2>&1 | FileCheck %s \
-; RUN:   --check-prefixes=FALLBACK-FP16,GISEL-FP16
+; RUN:   --check-prefixes=FALLBACK-FP16,GISEL-FP16,GISEL
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -809,6 +809,9 @@
   ret half %r
 }
 
+; FALLBACK-NOT: remark:{{.*}}test_sin
+; FALLBACK-FP16-NOT: remark:{{.*}}test_sin
+
 ; CHECK-COMMON-LABEL: test_sin:
 ; CHECK-COMMON-NEXT: stp x29, x30, [sp, #-16]!
 ; CHECK-COMMON-NEXT: mov x29, sp
@@ -817,11 +820,23 @@
 ; CHECK-COMMON-NEXT: fcvt h0, s0
 ; CHECK-COMMON-NEXT: ldp x29, x30, [sp], #16
 ; CHECK-COMMON-NEXT: ret
+
+; GISEL-LABEL: test_sin:
+; GISEL-NEXT: stp x29, x30, [sp, #-16]!
+; GISEL-NEXT: mov x29, sp
+; GISEL-NEXT: fcvt s0, h0
+; GISEL-NEXT: bl {{_?}}sinf
+; GISEL-NEXT: fcvt h0, s0
+; GISEL-NEXT: ldp x29, x30, [sp], #16
+; GISEL-NEXT: ret
 define half @test_sin(half %a) #0 {
   %r = call half @llvm.sin.f16(half %a)
   ret half %r
 }
 
+; FALLBACK-NOT: remark:{{.*}}test_cos
+; FALLBACK-FP16-NOT: remark:{{.*}}test_cos
+
 ; CHECK-COMMON-LABEL: test_cos:
 ; CHECK-COMMON-NEXT: stp x29, x30, [sp, #-16]!
 ; CHECK-COMMON-NEXT: mov x29, sp
@@ -830,6 +845,15 @@
 ; CHECK-COMMON-NEXT: fcvt h0, s0
 ; CHECK-COMMON-NEXT: ldp x29, x30, [sp], #16
 ; CHECK-COMMON-NEXT: ret
+
+; GISEL-LABEL: test_cos:
+; GISEL-NEXT: stp x29, x30, [sp, #-16]!
+; GISEL-NEXT: mov x29, sp
+; GISEL-NEXT: fcvt s0, h0
+; GISEL-NEXT: bl {{_?}}cosf
+; GISEL-NEXT: fcvt h0, s0
+; GISEL-NEXT: ldp x29, x30, [sp], #16
+; GISEL-NEXT: ret
 define half @test_cos(half %a) #0 {
   %r = call half @llvm.cos.f16(half %a)
   ret half %r
Index: test/CodeGen/AArch64/sincospow-vector-expansion.ll
===================================================================
--- test/CodeGen/AArch64/sincospow-vector-expansion.ll
+++ test/CodeGen/AArch64/sincospow-vector-expansion.ll
@@ -1,19 +1,30 @@
 ; RUN: llc -o - %s -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+neon | FileCheck %s
+; RUN: llc -o - %s -verify-machineinstrs -mtriple=aarch64-linux-gnu \
+; RUN:     -mattr=+neon -global-isel -global-isel-abort=2 \
+; RUN:     -pass-remarks-missed=gisel* \
+; RUN:     2>&1 | FileCheck %s --check-prefixes=FALLBACK,GISEL
 
-
+; FALLBACK-NOT: remark{{.*}}test_cos_v2f64
 define <2 x float> @test_cos_v2f64(<2 x double> %v1) {
 ; CHECK-LABEL: test_cos_v2f64:
 ; CHECK: bl cos
 ; CHECK: bl cos
+; GISEL-LABEL: test_cos_v2f64:
+; GISEL: bl cos
+; GISEL: bl cos
   %1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %v1)
   %2 = fptrunc <2 x double> %1 to <2 x float>
   ret <2 x float> %2
 }
 
+; FALLBACK-NOT: remark{{.*}}test_sin_v2f64
 define <2 x float> @test_sin_v2f64(<2 x double> %v1) {
 ; CHECK-LABEL: test_sin_v2f64:
 ; CHECK: bl sin
 ; CHECK: bl sin
+; GISEL-LABEL: test_sin_v2f64:
+; GISEL: bl sin
+; GISEL: bl sin
   %1 = call <2 x double> @llvm.sin.v2f64(<2 x double> %v1)
   %2 = fptrunc <2 x double> %1 to <2 x float>
   ret <2 x float> %2
@@ -32,18 +43,26 @@
 declare <2 x double> @llvm.sin.v2f64(<2 x double>)
 declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
 
+; FALLBACK-NOT: remark{{.*}}test_cos_v2f32
 define <2 x float> @test_cos_v2f32(<2 x float> %v1) {
 ; CHECK-LABEL: test_cos_v2f32:
 ; CHECK: bl cos
 ; CHECK: bl cos
+; GISEL-LABEL: test_cos_v2f32:
+; GISEL: bl cos
+; GISEL: bl cos
   %1 = call <2 x float> @llvm.cos.v2f32(<2 x float> %v1)
   ret <2 x float> %1
 }
 
+; FALLBACK-NOT: remark{{.*}}test_sin_v2f32
 define <2 x float> @test_sin_v2f32(<2 x float> %v1) {
 ; CHECK-LABEL: test_sin_v2f32:
 ; CHECK: bl sin
 ; CHECK: bl sin
+; GISEL-LABEL: test_sin_v2f32:
+; GISEL: bl sin
+; GISEL: bl sin
  %1 = call <2 x float> @llvm.sin.v2f32(<2 x float> %v1)
   ret <2 x float> %1
 }
@@ -60,22 +79,34 @@
 declare <2 x float> @llvm.sin.v2f32(<2 x float>)
 declare <2 x float> @llvm.pow.v2f32(<2 x float>, <2 x float>)
 
+; FALLBACK-NOT: remark{{.*}}test_cos_v4f32
 define <4 x float> @test_cos_v4f32(<4 x float> %v1) {
 ; CHECK-LABEL: test_cos_v4f32:
 ; CHECK: bl cos
 ; CHECK: bl cos
 ; CHECK: bl cos
 ; CHECK: bl cos
+; GISEL-LABEL: test_cos_v4f32:
+; GISEL: bl cos
+; GISEL: bl cos
+; GISEL: bl cos
+; GISEL: bl cos
   %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %v1)
   ret <4 x float> %1
 }
 
+; FALLBACK-NOT: remark{{.*}}test_sin_v4f32
 define <4 x float> @test_sin_v4f32(<4 x float> %v1) {
 ; CHECK-LABEL: test_sin_v4f32:
 ; CHECK: bl sin
 ; CHECK: bl sin
 ; CHECK: bl sin
 ; CHECK: bl sin
+; GISEL-LABEL: test_sin_v4f32:
+; GISEL: bl sin
+; GISEL: bl sin
+; GISEL: bl sin
+; GISEL: bl sin
   %1 = call <4 x float> @llvm.sin.v4f32(<4 x float> %v1)
   ret <4 x float> %1
 }
@@ -93,4 +124,3 @@
 declare <4 x float> @llvm.cos.v4f32(<4 x float>)
 declare <4 x float> @llvm.sin.v4f32(<4 x float>)
 declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
-
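For anyone who wants to try the new path by hand, here is a minimal standalone reproducer; it is not part of the patch, and the file contents, function name, and RUN line are illustrative only. With the changes above, the IRTranslator should emit G_FCOS for the f32 case and the AArch64 legalizer should lower it to a cosf libcall, so the generated assembly contains a "bl cosf":

; Hypothetical reproducer, not included in this patch.
; RUN: llc -mtriple=aarch64-- -global-isel -verify-machineinstrs -o - %s | FileCheck %s
declare float @llvm.cos.f32(float)

define float @cos_f32(float %x) {
; CHECK-LABEL: cos_f32:
; CHECK: bl cosf
  %r = call float @llvm.cos.f32(float %x)
  ret float %r
}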