Index: cmake/builtin-config-ix.cmake
===================================================================
--- cmake/builtin-config-ix.cmake
+++ cmake/builtin-config-ix.cmake
@@ -25,6 +25,7 @@
 set(ARM64 aarch64)
 set(ARM32 arm armhf armv6m armv7m armv7em armv7 armv7s armv7k)
+set(HEXAGON hexagon)
 set(X86 i386)
 set(X86_64 x86_64)
 set(MIPS32 mips mipsel)
@@ -42,7 +43,7 @@
 endif()
 
 set(ALL_BUILTIN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64}
-  ${MIPS32} ${MIPS64} ${PPC64} ${RISCV32} ${RISCV64} ${WASM32} ${WASM64})
+  ${HEXAGON} ${MIPS32} ${MIPS64} ${PPC64} ${RISCV32} ${RISCV64} ${WASM32} ${WASM64})
 
 include(CompilerRTUtils)
 include(CompilerRTDarwinUtils)
Index: cmake/config-ix.cmake
===================================================================
--- cmake/config-ix.cmake
+++ cmake/config-ix.cmake
@@ -174,6 +174,7 @@
 set(ARM64 aarch64)
 set(ARM32 arm armhf)
+set(HEXAGON hexagon)
 set(X86 i386)
 set(X86_64 x86_64)
 set(MIPS32 mips mipsel)
Index: lib/builtins/CMakeLists.txt
===================================================================
--- lib/builtins/CMakeLists.txt
+++ lib/builtins/CMakeLists.txt
@@ -459,6 +459,42 @@
 set(armv7m_SOURCES ${arm_SOURCES})
 set(armv7em_SOURCES ${arm_SOURCES})
 
+# hexagon arch
+# Note: the generic sources are appended inside the same set() call; a
+# separate set(hexagon_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES})
+# followed by a second set() would be silently overwritten and drop them.
+set(hexagon_SOURCES
+  hexagon/common_entry_exit_abi1.S
+  hexagon/common_entry_exit_abi2.S
+  hexagon/common_entry_exit_legacy.S
+  hexagon/dfaddsub.S
+  hexagon/dfdiv.S
+  hexagon/dffma.S
+  hexagon/dfminmax.S
+  hexagon/dfmul.S
+  hexagon/dfsqrt.S
+  hexagon/divdi3.S
+  hexagon/divsi3.S
+  hexagon/fabs_opt.S
+  hexagon/fastmath2_dlib_asm.S
+  hexagon/fastmath2_ldlib_asm.S
+  hexagon/fastmath_dlib_asm.S
+  hexagon/fma_opt.S
+  hexagon/fmax_opt.S
+  hexagon/fmin_opt.S
+  hexagon/memcpy_forward_vp4cp4n2.S
+  hexagon/memcpy_likely_aligned.S
+  hexagon/moddi3.S
+  hexagon/modsi3.S
+  hexagon/sfdiv_opt.S
+  hexagon/sfsqrt_opt.S
+  hexagon/udivdi3.S
+  hexagon/udivmoddi4.S
+  hexagon/udivmodsi4.S
+  hexagon/udivsi3.S
+  hexagon/umoddi3.S
+  hexagon/umodsi3.S
+  ${GENERIC_SOURCES}
+  ${GENERIC_TF_SOURCES})
+
 set(mips_SOURCES ${GENERIC_SOURCES})
 set(mipsel_SOURCES ${mips_SOURCES})
 set(mips64_SOURCES ${GENERIC_TF_SOURCES}
Index: lib/builtins/hexagon/common_entry_exit_abi1.S
===================================================================
--- lib/builtins/hexagon/common_entry_exit_abi1.S
+++ lib/builtins/hexagon/common_entry_exit_abi1.S
@@ -0,0 +1,103 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+/* Functions that implement common sequences in function prologues and
+   epilogues, used to save code size. */
+
+  .macro FUNCTION_BEGIN name
+  .text
+  .globl \name
+  .type \name, @function
+  .falign
+\name:
+  .endm
+
+  .macro FUNCTION_END name
+  .size \name, . - \name
+  .endm
+
+  .macro FALLTHROUGH_TAIL_CALL name0 name1
+  .size \name0, . - \name0
+  .globl \name1
+  .type \name1, @function
+  .falign
+\name1:
+  .endm
+
+
+
+
+/* Save r25:24 at fp+#-8 and r27:26 at fp+#-16. */
+
+
+
+
+/* The compiler knows that the __save_* functions clobber LR. No other
+   registers should be used without informing the compiler. */
+
+/* Since we can only issue one store per packet, we don't hurt performance by
+   simply jumping to the right point in this sequence of stores.
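+
+   Illustration (an editor's sketch, not part of the patch): a prologue that
+   must save r24..r27 does "call __save_r24_through_r27" and executes both
+   stores; one that only needs r25:24 does "call __save_r24_through_r25",
+   entering the same sequence at its final store. Either way the cost per
+   saved pair is one packet, the same as inline stores, while the store
+   sequence itself exists only once.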
*/ + +FUNCTION_BEGIN __save_r24_through_r27 + memd(fp+#-16) = r27:26 +FALLTHROUGH_TAIL_CALL __save_r24_through_r27 __save_r24_through_r25 + { + memd(fp+#-8) = r25:24 + jumpr lr + } +FUNCTION_END __save_r24_through_r25 + + + + +/* For each of the *_before_tailcall functions, jumpr lr is executed in parallel + with deallocframe. That way, the return gets the old value of lr, which is + where these functions need to return, and at the same time, lr gets the value + it needs going into the tail call. */ + +FUNCTION_BEGIN __restore_r24_through_r27_and_deallocframe_before_tailcall + r27:26 = memd(fp+#-16) +FALLTHROUGH_TAIL_CALL __restore_r24_through_r27_and_deallocframe_before_tailcall __restore_r24_through_r25_and_deallocframe_before_tailcall + { + r25:24 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r24_through_r25_and_deallocframe_before_tailcall + + + + +/* Here we use the extra load bandwidth to restore LR early, allowing the return + to occur in parallel with the deallocframe. */ + +FUNCTION_BEGIN __restore_r24_through_r27_and_deallocframe + { + lr = memw(fp+#4) + r27:26 = memd(fp+#-16) + } + { + r25:24 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r24_through_r27_and_deallocframe + + + + +/* Here the load bandwidth is maximized. */ + +FUNCTION_BEGIN __restore_r24_through_r25_and_deallocframe + { + r25:24 = memd(fp+#-8) + deallocframe + } + jumpr lr +FUNCTION_END __restore_r24_through_r25_and_deallocframe Index: lib/builtins/hexagon/common_entry_exit_abi2.S =================================================================== --- lib/builtins/hexagon/common_entry_exit_abi2.S +++ lib/builtins/hexagon/common_entry_exit_abi2.S @@ -0,0 +1,268 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +/* Functions that implement common sequences in function prologues and epilogues + used to save code size */ + + .macro FUNCTION_BEGIN name + .p2align 2 + .section .text.\name,"ax",@progbits + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + .macro FALLTHROUGH_TAIL_CALL name0 name1 + .p2align 2 + .size \name0, . - \name0 + .globl \name1 + .type \name1, @function +\name1: + .endm + + + + +/* Save r17:16 at fp+#-8, r19:18 at fp+#-16, r21:20 at fp+#-24, r23:22 at + fp+#-32, r25:24 at fp+#-40, and r27:26 at fp+#-48. + The compiler knows that the __save_* functions clobber LR. No other + registers should be used without informing the compiler. 
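+
+   Laid out as a table (an illustrative restatement of the offsets above, not
+   a new contract):
+
+       fp-8    r17:16        fp-32   r23:22
+       fp-16   r19:18        fp-40   r25:24
+       fp-24   r21:20        fp-48   r27:26
+
+   A function spilling some prefix of this list pairs one "call __save_..."
+   in its prologue with one "jump __restore_..._and_deallocframe" (or the
+   *_before_tailcall variant) in its epilogue.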
*/
+
+FUNCTION_BEGIN __save_r16_through_r27
+    {
+      memd(fp+#-48) = r27:26
+      memd(fp+#-40) = r25:24
+    }
+    {
+      memd(fp+#-32) = r23:22
+      memd(fp+#-24) = r21:20
+    }
+    {
+      memd(fp+#-16) = r19:18
+      memd(fp+#-8) = r17:16
+      jumpr lr
+    }
+FUNCTION_END __save_r16_through_r27
+
+FUNCTION_BEGIN __save_r16_through_r25
+    {
+      memd(fp+#-40) = r25:24
+      memd(fp+#-32) = r23:22
+    }
+    {
+      memd(fp+#-24) = r21:20
+      memd(fp+#-16) = r19:18
+    }
+    {
+      memd(fp+#-8) = r17:16
+      jumpr lr
+    }
+FUNCTION_END __save_r16_through_r25
+
+FUNCTION_BEGIN __save_r16_through_r23
+    {
+      memd(fp+#-32) = r23:22
+      memd(fp+#-24) = r21:20
+    }
+    {
+      memd(fp+#-16) = r19:18
+      memd(fp+#-8) = r17:16
+      jumpr lr
+    }
+FUNCTION_END __save_r16_through_r23
+
+FUNCTION_BEGIN __save_r16_through_r21
+    {
+      memd(fp+#-24) = r21:20
+      memd(fp+#-16) = r19:18
+    }
+    {
+      memd(fp+#-8) = r17:16
+      jumpr lr
+    }
+FUNCTION_END __save_r16_through_r21
+
+FUNCTION_BEGIN __save_r16_through_r19
+    {
+      memd(fp+#-16) = r19:18
+      memd(fp+#-8) = r17:16
+      jumpr lr
+    }
+FUNCTION_END __save_r16_through_r19
+
+FUNCTION_BEGIN __save_r16_through_r17
+    {
+      memd(fp+#-8) = r17:16
+      jumpr lr
+    }
+FUNCTION_END __save_r16_through_r17
+
+/* For each of the *_before_tailcall functions, jumpr lr is executed in parallel
+   with deallocframe. That way, the return gets the old value of lr, which is
+   where these functions need to return, and at the same time, lr gets the value
+   it needs going into the tail call. */
+
+FUNCTION_BEGIN __restore_r16_through_r27_and_deallocframe_before_tailcall
+    r27:26 = memd(fp+#-48)
+    {
+      r25:24 = memd(fp+#-40)
+      r23:22 = memd(fp+#-32)
+    }
+    {
+      r21:20 = memd(fp+#-24)
+      r19:18 = memd(fp+#-16)
+    }
+    {
+      r17:16 = memd(fp+#-8)
+      deallocframe
+      jumpr lr
+    }
+FUNCTION_END __restore_r16_through_r27_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r25_and_deallocframe_before_tailcall
+    {
+      r25:24 = memd(fp+#-40)
+      r23:22 = memd(fp+#-32)
+    }
+    {
+      r21:20 = memd(fp+#-24)
+      r19:18 = memd(fp+#-16)
+    }
+    {
+      r17:16 = memd(fp+#-8)
+      deallocframe
+      jumpr lr
+    }
+FUNCTION_END __restore_r16_through_r25_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r23_and_deallocframe_before_tailcall
+    {
+      r23:22 = memd(fp+#-32)
+      r21:20 = memd(fp+#-24)
+    }
+    r19:18 = memd(fp+#-16)
+    {
+      r17:16 = memd(fp+#-8)
+      deallocframe
+      jumpr lr
+    }
+FUNCTION_END __restore_r16_through_r23_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r21_and_deallocframe_before_tailcall
+    {
+      r21:20 = memd(fp+#-24)
+      r19:18 = memd(fp+#-16)
+    }
+    {
+      r17:16 = memd(fp+#-8)
+      deallocframe
+      jumpr lr
+    }
+FUNCTION_END __restore_r16_through_r21_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r19_and_deallocframe_before_tailcall
+    r19:18 = memd(fp+#-16)
+    {
+      r17:16 = memd(fp+#-8)
+      deallocframe
+      jumpr lr
+    }
+FUNCTION_END __restore_r16_through_r19_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r17_and_deallocframe_before_tailcall
+    {
+      r17:16 = memd(fp+#-8)
+      deallocframe
+      jumpr lr
+    }
+FUNCTION_END __restore_r16_through_r17_and_deallocframe_before_tailcall
+
+
+FUNCTION_BEGIN __restore_r16_through_r27_and_deallocframe
+    r27:26 = memd(fp+#-48)
+    {
+      r25:24 = memd(fp+#-40)
+      r23:22 = memd(fp+#-32)
+    }
+    {
+      r21:20 = memd(fp+#-24)
+      r19:18 = memd(fp+#-16)
+    }
+    {
+      r17:16 = memd(fp+#-8)
+      dealloc_return
+    }
+FUNCTION_END __restore_r16_through_r27_and_deallocframe
+
+FUNCTION_BEGIN __restore_r16_through_r25_and_deallocframe
+    {
+      r25:24 = memd(fp+#-40)
+      r23:22 = memd(fp+#-32)
+    }
+    {
+      r21:20 = 
memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r25_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r23_and_deallocframe + { + r23:22 = memd(fp+#-32) + } + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r23_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r21_and_deallocframe + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r21_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r19_and_deallocframe + { + r19:18 = memd(fp+#-16) + r17:16 = memd(fp+#-8) + } + { + dealloc_return + } +FUNCTION_END __restore_r16_through_r19_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r17_and_deallocframe + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r17_and_deallocframe + +FUNCTION_BEGIN __deallocframe + dealloc_return +FUNCTION_END __deallocframe Index: lib/builtins/hexagon/common_entry_exit_legacy.S =================================================================== --- lib/builtins/hexagon/common_entry_exit_legacy.S +++ lib/builtins/hexagon/common_entry_exit_legacy.S @@ -0,0 +1,157 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + +/* Functions that implement common sequences in function prologues and epilogues + used to save code size */ + + .macro FUNCTION_BEGIN name + .text + .globl \name + .type \name, @function + .falign +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + .macro FALLTHROUGH_TAIL_CALL name0 name1 + .size \name0, . - \name0 + .globl \name1 + .type \name1, @function + .falign +\name1: + .endm + + + + +/* Save r27:26 at fp+#-8, r25:24 at fp+#-16, r23:22 at fp+#-24, r21:20 at + fp+#-32, r19:18 at fp+#-40, and r17:16 at fp+#-48. */ + + + + +/* The compiler knows that the __save_* functions clobber LR. No other + registers should be used without informing the compiler. */ + +/* Since we can only issue one store per packet, we don't hurt performance by + simply jumping to the right point in this sequence of stores. */ + +FUNCTION_BEGIN __save_r27_through_r16 + memd(fp+#-48) = r17:16 +FALLTHROUGH_TAIL_CALL __save_r27_through_r16 __save_r27_through_r18 + memd(fp+#-40) = r19:18 +FALLTHROUGH_TAIL_CALL __save_r27_through_r18 __save_r27_through_r20 + memd(fp+#-32) = r21:20 +FALLTHROUGH_TAIL_CALL __save_r27_through_r20 __save_r27_through_r22 + memd(fp+#-24) = r23:22 +FALLTHROUGH_TAIL_CALL __save_r27_through_r22 __save_r27_through_r24 + memd(fp+#-16) = r25:24 + { + memd(fp+#-8) = r27:26 + jumpr lr + } +FUNCTION_END __save_r27_through_r24 + + + + +/* For each of the *_before_sibcall functions, jumpr lr is executed in parallel + with deallocframe. That way, the return gets the old value of lr, which is + where these functions need to return, and at the same time, lr gets the value + it needs going into the sibcall. 
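+
+   Sketch of the packet semantics (commentary only, no additional code): every
+   read in a packet sees the register values from before the packet executes,
+   so in the packet
+
+       { r27:26 = memd(fp+#-8)
+         deallocframe
+         jumpr lr }
+
+   "jumpr lr" reads the LR set by the call into this helper (the return point
+   in the function's epilogue), while deallocframe simultaneously reloads the
+   original caller's LR from the frame, which is exactly the value the
+   upcoming sibcall needs.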
*/ + +FUNCTION_BEGIN __restore_r27_through_r20_and_deallocframe_before_sibcall + { + r21:20 = memd(fp+#-32) + r23:22 = memd(fp+#-24) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r20_and_deallocframe_before_sibcall __restore_r27_through_r24_and_deallocframe_before_sibcall + { + r25:24 = memd(fp+#-16) + jump __restore_r27_through_r26_and_deallocframe_before_sibcall + } +FUNCTION_END __restore_r27_through_r24_and_deallocframe_before_sibcall + + + + +FUNCTION_BEGIN __restore_r27_through_r16_and_deallocframe_before_sibcall + r17:16 = memd(fp+#-48) +FALLTHROUGH_TAIL_CALL __restore_r27_through_r16_and_deallocframe_before_sibcall __restore_r27_through_r18_and_deallocframe_before_sibcall + { + r19:18 = memd(fp+#-40) + r21:20 = memd(fp+#-32) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r18_and_deallocframe_before_sibcall __restore_r27_through_r22_and_deallocframe_before_sibcall + { + r23:22 = memd(fp+#-24) + r25:24 = memd(fp+#-16) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r22_and_deallocframe_before_sibcall __restore_r27_through_r26_and_deallocframe_before_sibcall + { + r27:26 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r27_through_r26_and_deallocframe_before_sibcall + + + + +/* Here we use the extra load bandwidth to restore LR early, allowing the return + to occur in parallel with the deallocframe. */ + +FUNCTION_BEGIN __restore_r27_through_r16_and_deallocframe + { + r17:16 = memd(fp+#-48) + r19:18 = memd(fp+#-40) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r16_and_deallocframe __restore_r27_through_r20_and_deallocframe + { + r21:20 = memd(fp+#-32) + r23:22 = memd(fp+#-24) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r20_and_deallocframe __restore_r27_through_r24_and_deallocframe + { + lr = memw(fp+#4) + r25:24 = memd(fp+#-16) + } + { + r27:26 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r27_through_r24_and_deallocframe + + + + +/* Here the load bandwidth is maximized for all three functions. */ + +FUNCTION_BEGIN __restore_r27_through_r18_and_deallocframe + { + r19:18 = memd(fp+#-40) + r21:20 = memd(fp+#-32) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r18_and_deallocframe __restore_r27_through_r22_and_deallocframe + { + r23:22 = memd(fp+#-24) + r25:24 = memd(fp+#-16) + } +FALLTHROUGH_TAIL_CALL __restore_r27_through_r22_and_deallocframe __restore_r27_through_r26_and_deallocframe + { + r27:26 = memd(fp+#-8) + deallocframe + } + jumpr lr +FUNCTION_END __restore_r27_through_r26_and_deallocframe Index: lib/builtins/hexagon/dfaddsub.S =================================================================== --- lib/builtins/hexagon/dfaddsub.S +++ lib/builtins/hexagon/dfaddsub.S @@ -0,0 +1,398 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+//
+//===----------------------------------------------------------------------===//
+
+/* Double Precision Add/Subtract */
+
+#define A r1:0
+#define AH r1
+#define AL r0
+#define B r3:2
+#define BH r3
+#define BL r2
+
+#define EXPA r4
+#define EXPB r5
+#define EXPB_A r5:4
+
+#define ZTMP r7:6
+#define ZTMPH r7
+#define ZTMPL r6
+
+#define ATMP r13:12
+#define ATMPH r13
+#define ATMPL r12
+
+#define BTMP r9:8
+#define BTMPH r9
+#define BTMPL r8
+
+#define ATMP2 r11:10
+#define ATMP2H r11
+#define ATMP2L r10
+
+#define EXPDIFF r15
+#define EXTRACTOFF r14
+#define EXTRACTAMT r15:14
+
+#define TMP r28
+
+#define MANTBITS 52
+#define HI_MANTBITS 20
+#define EXPBITS 11
+#define BIAS 1024
+#define MANTISSA_TO_INT_BIAS 52
+#define SR_BIT_INEXACT 5
+
+#ifndef SR_ROUND_OFF
+#define SR_ROUND_OFF 22
+#endif
+
+#define NORMAL p3
+#define BIGB p2
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG
+#define END(TAG) .size TAG,.-TAG
+
+    .text
+    .global __hexagon_adddf3
+    .global __hexagon_subdf3
+    .type __hexagon_adddf3, @function
+    .type __hexagon_subdf3, @function
+
+Q6_ALIAS(adddf3)
+FAST_ALIAS(adddf3)
+FAST2_ALIAS(adddf3)
+Q6_ALIAS(subdf3)
+FAST_ALIAS(subdf3)
+FAST2_ALIAS(subdf3)
+
+    .p2align 5
+__hexagon_adddf3:
+    {
+      EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS)
+      EXPB = extractu(BH,#EXPBITS,#HI_MANTBITS)
+      ATMP = combine(##0x20000000,#0)
+    }
+    {
+      NORMAL = dfclass(A,#2)
+      NORMAL = dfclass(B,#2)
+      BTMP = ATMP
+      BIGB = cmp.gtu(EXPB,EXPA)               // Is B substantially greater than A?
+    }
+    {
+      if (!NORMAL) jump .Ladd_abnormal        // If abnormal, go to special code
+      if (BIGB) A = B                         // if B >> A, swap A and B
+      if (BIGB) B = A                         // If B >> A, swap A and B
+      if (BIGB) EXPB_A = combine(EXPA,EXPB)   // swap exponents
+    }
+    {
+      ATMP = insert(A,#MANTBITS,#EXPBITS-2)   // Q1.62
+      BTMP = insert(B,#MANTBITS,#EXPBITS-2)   // Q1.62
+      EXPDIFF = sub(EXPA,EXPB)
+      ZTMP = combine(#62,#1)
+    }
+#undef BIGB
+#undef NORMAL
+#define B_POS p3
+#define A_POS p2
+#define NO_STICKIES p1
+.Ladd_continue:
+    {
+      EXPDIFF = min(EXPDIFF,ZTMPH)            // If exponent difference >= ~60,
+                                              // will collapse to sticky bit
+      ATMP2 = neg(ATMP)
+      A_POS = cmp.gt(AH,#-1)
+      EXTRACTOFF = #0
+    }
+    {
+      if (!A_POS) ATMP = ATMP2
+      ATMP2 = extractu(BTMP,EXTRACTAMT)
+      BTMP = ASR(BTMP,EXPDIFF)
+#undef EXTRACTAMT
+#undef EXPDIFF
+#undef EXTRACTOFF
+#define ZERO r15:14
+      ZERO = #0
+    }
+    {
+      NO_STICKIES = cmp.eq(ATMP2,ZERO)
+      if (!NO_STICKIES.new) BTMPL = or(BTMPL,ZTMPL)
+      EXPB = add(EXPA,#-BIAS-60)
+      B_POS = cmp.gt(BH,#-1)
+    }
+    {
+      ATMP = add(ATMP,BTMP)                   // ADD!!!
+      ATMP2 = sub(ATMP,BTMP)                  // Negate and ADD --> SUB!!!
+      ZTMP = combine(#54,##2045)
+    }
+    {
+      p0 = cmp.gtu(EXPA,ZTMPH)                // must be pretty high in case of large cancellation
+      p0 = !cmp.gtu(EXPA,ZTMPL)
+      if (!p0.new) jump:nt .Ladd_ovf_unf
+      if (!B_POS) ATMP = ATMP2                // if B neg, pick difference
+    }
+    {
+      A = convert_d2df(ATMP)                  // Convert to Double Precision, taking care of flags, etc. So nice!
+      p0 = cmp.eq(ATMPH,#0)
+      p0 = cmp.eq(ATMPL,#0)
+      if (p0.new) jump:nt .Ladd_zero          // or maybe conversion handles zero case correctly?
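+      // (Answering the question above: it cannot, because the sign of an
+      // exact zero result depends on the rounding mode. IEEE-754 requires
+      // x + (-x) = +0 in every rounding mode except round-toward-negative-
+      // infinity, where it is -0, so .Ladd_zero below inspects USR's
+      // rounding bits for that one case.)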
+ } + { + AH += asl(EXPB,#HI_MANTBITS) + jumpr r31 + } + .falign +__hexagon_subdf3: + { + BH = togglebit(BH,#31) + jump __qdsp_adddf3 + } + + + .falign +.Ladd_zero: + // True zero, full cancellation + // +0 unless round towards negative infinity + { + TMP = USR + A = #0 + BH = #1 + } + { + TMP = extractu(TMP,#2,#22) + BH = asl(BH,#31) + } + { + p0 = cmp.eq(TMP,#2) + if (p0.new) AH = xor(AH,BH) + jumpr r31 + } + .falign +.Ladd_ovf_unf: + // Overflow or Denormal is possible + // Good news: Underflow flag is not possible! + /* + * ATMP has 2's complement value + * + * EXPA has A's exponent, EXPB has EXPA-BIAS-60 + * + * Convert, extract exponent, add adjustment. + * If > 2046, overflow + * If <= 0, denormal + * + * Note that we've not done our zero check yet, so do that too + * + */ + { + A = convert_d2df(ATMP) + p0 = cmp.eq(ATMPH,#0) + p0 = cmp.eq(ATMPL,#0) + if (p0.new) jump:nt .Ladd_zero + } + { + TMP = extractu(AH,#EXPBITS,#HI_MANTBITS) + AH += asl(EXPB,#HI_MANTBITS) + } + { + EXPB = add(EXPB,TMP) + B = combine(##0x00100000,#0) + } + { + p0 = cmp.gt(EXPB,##BIAS+BIAS-2) + if (p0.new) jump:nt .Ladd_ovf + } + { + p0 = cmp.gt(EXPB,#0) + if (p0.new) jumpr:t r31 + TMP = sub(#1,EXPB) + } + { + B = insert(A,#MANTBITS,#0) + A = ATMP + } + { + B = lsr(B,TMP) + } + { + A = insert(B,#63,#0) + jumpr r31 + } + .falign +.Ladd_ovf: + // We get either max finite value or infinity. Either way, overflow+inexact + { + A = ATMP // 2's complement value + TMP = USR + ATMP = combine(##0x7fefffff,#-1) // positive max finite + } + { + EXPB = extractu(TMP,#2,#SR_ROUND_OFF) // rounding bits + TMP = or(TMP,#0x28) // inexact + overflow + BTMP = combine(##0x7ff00000,#0) // positive infinity + } + { + USR = TMP + EXPB ^= lsr(AH,#31) // Does sign match rounding? + TMP = EXPB // unmodified rounding mode + } + { + p0 = !cmp.eq(TMP,#1) // If not round-to-zero and + p0 = !cmp.eq(EXPB,#2) // Not rounding the other way, + if (p0.new) ATMP = BTMP // we should get infinity + } + { + A = insert(ATMP,#63,#0) // insert inf/maxfinite, leave sign + } + { + p0 = dfcmp.eq(A,A) + jumpr r31 + } + +.Ladd_abnormal: + { + ATMP = extractu(A,#63,#0) // strip off sign + BTMP = extractu(B,#63,#0) // strip off sign + } + { + p3 = cmp.gtu(ATMP,BTMP) + if (!p3.new) A = B // sort values + if (!p3.new) B = A // sort values + } + { + // Any NaN --> NaN, possibly raise invalid if sNaN + p0 = dfclass(A,#0x0f) // A not NaN? 
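+    // dfclass with mask 0x0f accepts zero, subnormal, normal, and infinite
+    // operands, i.e. anything that is not a NaN, so the !p0 branch below
+    // catches both quiet and signaling NaNs.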
+ if (!p0.new) jump:nt .Linvalid_nan_add + if (!p3) ATMP = BTMP + if (!p3) BTMP = ATMP + } + { + // Infinity + non-infinity number is infinity + // Infinity + infinity --> inf or nan + p1 = dfclass(A,#0x08) // A is infinity + if (p1.new) jump:nt .Linf_add + } + { + p2 = dfclass(B,#0x01) // B is zero + if (p2.new) jump:nt .LB_zero // so return A or special 0+0 + ATMP = #0 + } + // We are left with adding one or more subnormals + { + p0 = dfclass(A,#4) + if (p0.new) jump:nt .Ladd_two_subnormal + ATMP = combine(##0x20000000,#0) + } + { + EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS) + EXPB = #1 + // BTMP already ABS(B) + BTMP = asl(BTMP,#EXPBITS-2) + } +#undef ZERO +#define EXTRACTOFF r14 +#define EXPDIFF r15 + { + ATMP = insert(A,#MANTBITS,#EXPBITS-2) + EXPDIFF = sub(EXPA,EXPB) + ZTMP = combine(#62,#1) + jump .Ladd_continue + } + +.Ladd_two_subnormal: + { + ATMP = extractu(A,#63,#0) + BTMP = extractu(B,#63,#0) + } + { + ATMP = neg(ATMP) + BTMP = neg(BTMP) + p0 = cmp.gt(AH,#-1) + p1 = cmp.gt(BH,#-1) + } + { + if (p0) ATMP = A + if (p1) BTMP = B + } + { + ATMP = add(ATMP,BTMP) + } + { + BTMP = neg(ATMP) + p0 = cmp.gt(ATMPH,#-1) + B = #0 + } + { + if (!p0) A = BTMP + if (p0) A = ATMP + BH = ##0x80000000 + } + { + if (!p0) AH = or(AH,BH) + p0 = dfcmp.eq(A,B) + if (p0.new) jump:nt .Lzero_plus_zero + } + { + jumpr r31 + } + +.Linvalid_nan_add: + { + TMP = convert_df2sf(A) // will generate invalid if sNaN + p0 = dfclass(B,#0x0f) // if B is not NaN + if (p0.new) B = A // make it whatever A is + } + { + BL = convert_df2sf(B) // will generate invalid if sNaN + A = #-1 + jumpr r31 + } + .falign +.LB_zero: + { + p0 = dfcmp.eq(ATMP,A) // is A also zero? + if (!p0.new) jumpr:t r31 // If not, just return A + } + // 0 + 0 is special + // if equal integral values, they have the same sign, which is fine for all rounding + // modes. + // If unequal in sign, we get +0 for all rounding modes except round down +.Lzero_plus_zero: + { + p0 = cmp.eq(A,B) + if (p0.new) jumpr:t r31 + } + { + TMP = USR + } + { + TMP = extractu(TMP,#2,#SR_ROUND_OFF) + A = #0 + } + { + p0 = cmp.eq(TMP,#2) + if (p0.new) AH = ##0x80000000 + jumpr r31 + } +.Linf_add: + // adding infinities is only OK if they are equal + { + p0 = !cmp.eq(AH,BH) // Do they have different signs + p0 = dfclass(B,#8) // And is B also infinite? + if (!p0.new) jumpr:t r31 // If not, just a normal inf + } + { + BL = ##0x7f800001 // sNAN + } + { + A = convert_sf2df(BL) // trigger invalid, set NaN + jumpr r31 + } +END(__hexagon_adddf3) Index: lib/builtins/hexagon/dfdiv.S =================================================================== --- lib/builtins/hexagon/dfdiv.S +++ lib/builtins/hexagon/dfdiv.S @@ -0,0 +1,492 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +/* Double Precision Divide */ + +#define A r1:0 +#define AH r1 +#define AL r0 + +#define B r3:2 +#define BH r3 +#define BL r2 + +#define Q r5:4 +#define QH r5 +#define QL r4 + +#define PROD r7:6 +#define PRODHI r7 +#define PRODLO r6 + +#define SFONE r8 +#define SFDEN r9 +#define SFERROR r10 +#define SFRECIP r11 + +#define EXPBA r13:12 +#define EXPB r13 +#define EXPA r12 + +#define REMSUB2 r15:14 + + + +#define SIGN r28 + +#define Q_POSITIVE p3 +#define NORMAL p2 +#define NO_OVF_UNF p1 +#define P_TMP p0 + +#define RECIPEST_SHIFT 3 +#define QADJ 61 + +#define DFCLASS_NORMAL 0x02 +#define DFCLASS_NUMBER 0x0F +#define DFCLASS_INFINITE 0x08 +#define DFCLASS_ZERO 0x01 +#define DFCLASS_NONZERO (DFCLASS_NUMBER ^ DFCLASS_ZERO) +#define DFCLASS_NONINFINITE (DFCLASS_NUMBER ^ DFCLASS_INFINITE) + +#define DF_MANTBITS 52 +#define DF_EXPBITS 11 +#define SF_MANTBITS 23 +#define SF_EXPBITS 8 +#define DF_BIAS 0x3ff + +#define SR_ROUND_OFF 22 + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG +#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG +#define END(TAG) .size TAG,.-TAG + + .text + .global __hexagon_divdf3 + .type __hexagon_divdf3,@function + Q6_ALIAS(divdf3) + FAST_ALIAS(divdf3) + FAST2_ALIAS(divdf3) + .p2align 5 +__hexagon_divdf3: + { + NORMAL = dfclass(A,#DFCLASS_NORMAL) + NORMAL = dfclass(B,#DFCLASS_NORMAL) + EXPBA = combine(BH,AH) + SIGN = xor(AH,BH) + } +#undef A +#undef AH +#undef AL +#undef B +#undef BH +#undef BL +#define REM r1:0 +#define REMHI r1 +#define REMLO r0 +#define DENOM r3:2 +#define DENOMHI r3 +#define DENOMLO r2 + { + if (!NORMAL) jump .Ldiv_abnormal + PROD = extractu(DENOM,#SF_MANTBITS,#DF_MANTBITS-SF_MANTBITS) + SFONE = ##0x3f800001 + } + { + SFDEN = or(SFONE,PRODLO) + EXPB = extractu(EXPB,#DF_EXPBITS,#DF_MANTBITS-32) + EXPA = extractu(EXPA,#DF_EXPBITS,#DF_MANTBITS-32) + Q_POSITIVE = cmp.gt(SIGN,#-1) + } +#undef SIGN +#define ONE r28 +.Ldenorm_continue: + { + SFRECIP,P_TMP = sfrecipa(SFONE,SFDEN) + SFERROR = and(SFONE,#-2) + ONE = #1 + EXPA = sub(EXPA,EXPB) + } +#undef EXPB +#define RECIPEST r13 + { + SFERROR -= sfmpy(SFRECIP,SFDEN):lib + REMHI = insert(ONE,#DF_EXPBITS+1,#DF_MANTBITS-32) + RECIPEST = ##0x00800000 << RECIPEST_SHIFT + } + { + SFRECIP += sfmpy(SFRECIP,SFERROR):lib + DENOMHI = insert(ONE,#DF_EXPBITS+1,#DF_MANTBITS-32) + SFERROR = and(SFONE,#-2) + } + { + SFERROR -= sfmpy(SFRECIP,SFDEN):lib + QH = #-DF_BIAS+1 + QL = #DF_BIAS-1 + } + { + SFRECIP += sfmpy(SFRECIP,SFERROR):lib + NO_OVF_UNF = cmp.gt(EXPA,QH) + NO_OVF_UNF = !cmp.gt(EXPA,QL) + } + { + RECIPEST = insert(SFRECIP,#SF_MANTBITS,#RECIPEST_SHIFT) + Q = #0 + EXPA = add(EXPA,#-QADJ) + } +#undef SFERROR +#undef SFRECIP +#define TMP r10 +#define TMP1 r11 + { + RECIPEST = add(RECIPEST,#((-3) << RECIPEST_SHIFT)) + } + +#define DIV_ITER1B(QSHIFTINSN,QSHIFT,REMSHIFT,EXTRA) \ + { \ + PROD = mpyu(RECIPEST,REMHI); \ + REM = asl(REM,# ## ( REMSHIFT )); \ + }; \ + { \ + PRODLO = # ## 0; \ + REM -= mpyu(PRODHI,DENOMLO); \ + REMSUB2 = mpyu(PRODHI,DENOMHI); \ + }; \ + { \ + Q += QSHIFTINSN(PROD, # ## ( QSHIFT )); \ + REM -= asl(REMSUB2, # ## 32); \ + EXTRA \ + } + + + DIV_ITER1B(ASL,14,15,) + DIV_ITER1B(ASR,1,15,) + DIV_ITER1B(ASR,16,15,) + DIV_ITER1B(ASR,31,15,PROD=# ( 0 );) + +#undef REMSUB2 +#define TMPPAIR r15:14 +#define TMPPAIRHI r15 +#define TMPPAIRLO r14 
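+
+// A reviewer's sketch of the algorithm at this point (commentary only, no new
+// code): sfrecipa above seeds r ~= 1/d, and each pair of packets of the form
+// "SFERROR -= sfmpy(SFRECIP,SFDEN):lib" / "SFRECIP += sfmpy(SFRECIP,SFERROR):lib"
+// is one Newton-Raphson step,
+//
+//      e = 1 - d*r ;   r = r + r*e
+//
+// which roughly doubles the number of correct bits; two steps give enough
+// precision for the four DIV_ITER1B multiply-and-subtract quotient steps.
+// The packets below apply the final correction: one more compare/subtract of
+// DENOM decides the last quotient increment, and a nonzero remainder sets the
+// quotient's sticky bit (QL |= ONE) so rounding comes out correctly.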
+#undef RECIPEST +#define EXPB r13 + { + // compare or sub with carry + TMPPAIR = sub(REM,DENOM) + P_TMP = cmp.gtu(DENOM,REM) + // set up amt to add to q + if (!P_TMP.new) PRODLO = #2 + } + { + Q = add(Q,PROD) + if (!P_TMP) REM = TMPPAIR + TMPPAIR = #0 + } + { + P_TMP = cmp.eq(REM,TMPPAIR) + if (!P_TMP.new) QL = or(QL,ONE) + } + { + PROD = neg(Q) + } + { + if (!Q_POSITIVE) Q = PROD + } +#undef REM +#undef REMHI +#undef REMLO +#undef DENOM +#undef DENOMLO +#undef DENOMHI +#define A r1:0 +#define AH r1 +#define AL r0 +#define B r3:2 +#define BH r3 +#define BL r2 + { + A = convert_d2df(Q) + if (!NO_OVF_UNF) jump .Ldiv_ovf_unf + } + { + AH += asl(EXPA,#DF_MANTBITS-32) + jumpr r31 + } + +.Ldiv_ovf_unf: + { + AH += asl(EXPA,#DF_MANTBITS-32) + EXPB = extractu(AH,#DF_EXPBITS,#DF_MANTBITS-32) + } + { + PROD = abs(Q) + EXPA = add(EXPA,EXPB) + } + { + P_TMP = cmp.gt(EXPA,##DF_BIAS+DF_BIAS) // overflow + if (P_TMP.new) jump:nt .Ldiv_ovf + } + { + P_TMP = cmp.gt(EXPA,#0) + if (P_TMP.new) jump:nt .Lpossible_unf // round up to normal possible... + } + /* Underflow */ + /* We know what the infinite range exponent should be (EXPA) */ + /* Q is 2's complement, PROD is abs(Q) */ + /* Normalize Q, shift right, add a high bit, convert, change exponent */ + +#define FUDGE1 7 // how much to shift right +#define FUDGE2 4 // how many guard/round to keep at lsbs + + { + EXPB = add(clb(PROD),#-1) // doesn't need to be added in since + EXPA = sub(#FUDGE1,EXPA) // we extract post-converted exponent + TMP = USR + TMP1 = #63 + } + { + EXPB = min(EXPA,TMP1) + TMP1 = or(TMP,#0x030) + PROD = asl(PROD,EXPB) + EXPA = #0 + } + { + TMPPAIR = extractu(PROD,EXPBA) // bits that will get shifted out + PROD = lsr(PROD,EXPB) // shift out bits + B = #1 + } + { + P_TMP = cmp.gtu(B,TMPPAIR) + if (!P_TMP.new) PRODLO = or(BL,PRODLO) + PRODHI = setbit(PRODHI,#DF_MANTBITS-32+FUDGE2) + } + { + Q = neg(PROD) + P_TMP = bitsclr(PRODLO,#(1<