Index: llvm/test/CodeGen/RISCV/shlimm-addimm.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/shlimm-addimm.ll
@@ -0,0 +1,408 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+;; Test that (add (shl x, c0), c1) can be transformed to
+;; (add (shl (add x, c1>>c0), c0), c1-(c1>>c0<<c0)) if profitable.
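+;;
+;; Worked example of that identity (illustration only; c0=5 and c1=47969 are
+;; taken from the tests below): c1>>c0 = 1499 and c1-(c1>>c0<<c0) =
+;; 47969-47968 = 1, so (x<<5)+47969 == ((x+1499)<<5)+1. Both 1499 and 1 fit
+;; in a signed 12-bit immediate while 47969 does not, so the rewritten form
+;; could avoid materializing the constant with lui.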
+
+define i32 @shl5_add1184_a(i32 %x) {
+; RV32I-LABEL: shl5_add1184_a:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    addi a0, a0, 1184
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add1184_a:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    addiw a0, a0, 1184
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 1184
+  ret i32 %tmp1
+}
+
+define signext i32 @shl5_add1184_b(i32 signext %x) {
+; RV32I-LABEL: shl5_add1184_b:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    addi a0, a0, 1184
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add1184_b:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    addiw a0, a0, 1184
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 1184
+  ret i32 %tmp1
+}
+
+define i64 @shl5_add1184_c(i64 %x) {
+; RV32I-LABEL: shl5_add1184_c:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 27
+; RV32I-NEXT:    slli a1, a1, 5
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 5
+; RV32I-NEXT:    addi a0, a2, 1184
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add1184_c:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    addi a0, a0, 1184
+; RV64I-NEXT:    ret
+  %tmp0 = shl i64 %x, 5
+  %tmp1 = add i64 %tmp0, 1184
+  ret i64 %tmp1
+}
+
+define i32 @shl5_add101024_a(i32 %x) {
+; RV32I-LABEL: shl5_add101024_a:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 25
+; RV32I-NEXT:    addi a1, a1, -1376
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add101024_a:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 25
+; RV64I-NEXT:    addiw a1, a1, -1376
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 101024
+  ret i32 %tmp1
+}
+
+define signext i32 @shl5_add101024_b(i32 signext %x) {
+; RV32I-LABEL: shl5_add101024_b:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 25
+; RV32I-NEXT:    addi a1, a1, -1376
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add101024_b:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 25
+; RV64I-NEXT:    addiw a1, a1, -1376
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 101024
+  ret i32 %tmp1
+}
+
+define i64 @shl5_add101024_c(i64 %x) {
+; RV32I-LABEL: shl5_add101024_c:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 27
+; RV32I-NEXT:    slli a1, a1, 5
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 5
+; RV32I-NEXT:    lui a0, 25
+; RV32I-NEXT:    addi a0, a0, -1376
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add101024_c:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    lui a1, 25
+; RV64I-NEXT:    addiw a1, a1, -1376
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i64 %x, 5
+  %tmp1 = add i64 %tmp0, 101024
+  ret i64 %tmp1
+}
+
+define i32 @shl5_add47968_a(i32 %x) {
+; RV32I-LABEL: shl5_add47968_a:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 12
+; RV32I-NEXT:    addi a1, a1, -1184
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add47968_a:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 12
+; RV64I-NEXT:    addiw a1, a1, -1184
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 47968
+  ret i32 %tmp1
+}
+
+define signext i32 @shl5_add47968_b(i32 signext %x) {
+; RV32I-LABEL: shl5_add47968_b:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 12
+; RV32I-NEXT:    addi a1, a1, -1184
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add47968_b:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 12
+; RV64I-NEXT:    addiw a1, a1, -1184
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 47968
+  ret i32 %tmp1
+}
+
+define i64 @shl5_add47968_c(i64 %x) {
+; RV32I-LABEL: shl5_add47968_c:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 27
+; RV32I-NEXT:    slli a1, a1, 5
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 5
+; RV32I-NEXT:    lui a0, 12
+; RV32I-NEXT:    addi a0, a0, -1184
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add47968_c:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    lui a1, 12
+; RV64I-NEXT:    addiw a1, a1, -1184
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i64 %x, 5
+  %tmp1 = add i64 %tmp0, 47968
+  ret i64 %tmp1
+}
+
+define i32 @shl5_add47969_a(i32 %x) {
+; RV32I-LABEL: shl5_add47969_a:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 12
+; RV32I-NEXT:    addi a1, a1, -1183
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add47969_a:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 12
+; RV64I-NEXT:    addiw a1, a1, -1183
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 47969
+  ret i32 %tmp1
+}
+
+define signext i32 @shl5_add47969_b(i32 signext %x) {
+; RV32I-LABEL: shl5_add47969_b:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 12
+; RV32I-NEXT:    addi a1, a1, -1183
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add47969_b:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 12
+; RV64I-NEXT:    addiw a1, a1, -1183
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, 47969
+  ret i32 %tmp1
+}
+
+define i64 @shl5_add47969_c(i64 %x) {
+; RV32I-LABEL: shl5_add47969_c:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 27
+; RV32I-NEXT:    slli a1, a1, 5
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 5
+; RV32I-NEXT:    lui a0, 12
+; RV32I-NEXT:    addi a0, a0, -1183
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_add47969_c:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    lui a1, 12
+; RV64I-NEXT:    addiw a1, a1, -1183
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i64 %x, 5
+  %tmp1 = add i64 %tmp0, 47969
+  ret i64 %tmp1
+}
+
+define i32 @shl5_sub47968_a(i32 %x) {
+; RV32I-LABEL: shl5_sub47968_a:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 1048564
+; RV32I-NEXT:    addi a1, a1, 1184
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_sub47968_a:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 1048564
+; RV64I-NEXT:    addiw a1, a1, 1184
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, -47968
+  ret i32 %tmp1
+}
+
+define signext i32 @shl5_sub47968_b(i32 signext %x) {
+; RV32I-LABEL: shl5_sub47968_b:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 1048564
+; RV32I-NEXT:    addi a1, a1, 1184
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_sub47968_b:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 1048564
+; RV64I-NEXT:    addiw a1, a1, 1184
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, -47968
+  ret i32 %tmp1
+}
+
+define i64 @shl5_sub47968_c(i64 %x) {
+; RV32I-LABEL: shl5_sub47968_c:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 27
+; RV32I-NEXT:    slli a1, a1, 5
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 5
+; RV32I-NEXT:    lui a0, 1048564
+; RV32I-NEXT:    addi a0, a0, 1184
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_sub47968_c:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    lui a1, 1048564
+; RV64I-NEXT:    addiw a1, a1, 1184
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i64 %x, 5
+  %tmp1 = add i64 %tmp0, -47968
+  ret i64 %tmp1
+}
+
+define i32 @shl5_sub47969_a(i32 %x) {
+; RV32I-LABEL: shl5_sub47969_a:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 1048564
+; RV32I-NEXT:    addi a1, a1, 1183
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_sub47969_a:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 1048564
+; RV64I-NEXT:    addiw a1, a1, 1183
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, -47969
+  ret i32 %tmp1
+}
+
+define signext i32 @shl5_sub47969_b(i32 signext %x) {
+; RV32I-LABEL: shl5_sub47969_b:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    lui a1, 1048564
+; RV32I-NEXT:    addi a1, a1, 1183
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_sub47969_b:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    lui a1, 1048564
+; RV64I-NEXT:    addiw a1, a1, 1183
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i32 %x, 5
+  %tmp1 = add i32 %tmp0, -47969
+  ret i32 %tmp1
+}
+
+define i64 @shl5_sub47969_c(i64 %x) {
+; RV32I-LABEL: shl5_sub47969_c:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 27
+; RV32I-NEXT:    slli a1, a1, 5
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 5
+; RV32I-NEXT:    lui a0, 1048564
+; RV32I-NEXT:    addi a0, a0, 1183
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl5_sub47969_c:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    lui a1, 1048564
+; RV64I-NEXT:    addiw a1, a1, 1183
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp0 = shl i64 %x, 5
+  %tmp1 = add i64 %tmp0, -47969
+  ret i64 %tmp1
+}