diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -140,8 +140,16 @@
       }
     } else {
       if (Mapped == 0) {
-        dbgs() << "Processed value not in any map!";
-        Failed = true;
+        SDValue NodeById = IdToValueMap.lookup(ResId);
+        // It's possible the node has been remapped to another node and had
+        // its Id updated in the Value to Id table. The node it was remapped
+        // to may not have been processed yet. Look up the Id in the Id to
+        // Value table to see if it is the same node and whether it has been
+        // processed.
+        if (NodeById->getNodeId() == Processed) {
+          dbgs() << "Processed value not in any map!";
+          Failed = true;
+        }
       } else if (Mapped & (Mapped - 1)) {
         dbgs() << "Value in multiple maps!";
         Failed = true;
diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -1,13 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -enable-legalize-types-checking < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
 ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZBB
 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZBB
 
+; NOTE: -enable-legalize-types-checking is on one command line due to a previous
+; assertion failure on an expensive checks build for @rotr_32_mask_multiple.
+
 ; These IR sequences are idioms for rotates. If rotate instructions are
 ; supported, they will be turned into ISD::ROTL or ISD::ROTR.
 
@@ -550,3 +553,333 @@
   %d = or i64 %b, %c
   ret i64 %d
 }
+
+define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; RV32I-LABEL: rotl_32_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sll a3, a0, a2
+; RV32I-NEXT:    neg a4, a2
+; RV32I-NEXT:    srl a0, a0, a4
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    sll a2, a1, a2
+; RV32I-NEXT:    srl a1, a1, a4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotl_32_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a3, a0, a2
+; RV64I-NEXT:    negw a4, a2
+; RV64I-NEXT:    srlw a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    sllw a2, a1, a2
+; RV64I-NEXT:    srlw a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotl_32_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    andi a2, a2, 31
+; RV32ZBB-NEXT:    rol a0, a0, a2
+; RV32ZBB-NEXT:    rol a1, a1, a2
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotl_32_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    andi a2, a2, 31
+; RV64ZBB-NEXT:    rolw a0, a0, a2
+; RV64ZBB-NEXT:    rolw a1, a1, a2
+; RV64ZBB-NEXT:    addw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt)
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
+; RV32I-LABEL: rotl_64_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a5, a4, 26
+; RV32I-NEXT:    srli a5, a5, 31
+; RV32I-NEXT:    mv a6, a1
+; RV32I-NEXT:    bnez a5, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a6, a0
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    bnez a5, .LBB9_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    sll a7, a6, a4
+; RV32I-NEXT:    srli t0, a0, 1
+; RV32I-NEXT:    not a1, a4
+; RV32I-NEXT:    srl t0, t0, a1
+; RV32I-NEXT:    sll t1, a0, a4
+; RV32I-NEXT:    srli a0, a6, 1
+; RV32I-NEXT:    srl t2, a0, a1
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    bnez a5, .LBB9_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:  .LBB9_6:
+; RV32I-NEXT:    or a6, a7, t0
+; RV32I-NEXT:    or a7, t1, t2
+; RV32I-NEXT:    sll t0, a0, a4
+; RV32I-NEXT:    bnez a5, .LBB9_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:  .LBB9_8:
+; RV32I-NEXT:    srli a3, a2, 1
+; RV32I-NEXT:    srl a3, a3, a1
+; RV32I-NEXT:    or a3, t0, a3
+; RV32I-NEXT:    sll a2, a2, a4
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    add a1, a7, a0
+; RV32I-NEXT:    add a0, a6, a3
+; RV32I-NEXT:    sltu a2, a0, a6
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotl_64_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a3, a0, a2
+; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    srl a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    sll a2, a1, a2
+; RV64I-NEXT:    srl a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotl_64_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    slli a5, a4, 26
+; RV32ZBB-NEXT:    srli a5, a5, 31
+; RV32ZBB-NEXT:    mv a6, a1
+; RV32ZBB-NEXT:    bnez a5, .LBB9_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    mv a6, a0
+; RV32ZBB-NEXT:  .LBB9_2:
+; RV32ZBB-NEXT:    bnez a5, .LBB9_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    mv a0, a1
+; RV32ZBB-NEXT:  .LBB9_4:
+; RV32ZBB-NEXT:    sll a7, a6, a4
+; RV32ZBB-NEXT:    srli t0, a0, 1
+; RV32ZBB-NEXT:    not a1, a4
+; RV32ZBB-NEXT:    srl t0, t0, a1
+; RV32ZBB-NEXT:    sll t1, a0, a4
+; RV32ZBB-NEXT:    srli a0, a6, 1
+; RV32ZBB-NEXT:    srl t2, a0, a1
+; RV32ZBB-NEXT:    mv a0, a3
+; RV32ZBB-NEXT:    bnez a5, .LBB9_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:  .LBB9_6:
+; RV32ZBB-NEXT:    or a6, a7, t0
+; RV32ZBB-NEXT:    or a7, t1, t2
+; RV32ZBB-NEXT:    sll t0, a0, a4
+; RV32ZBB-NEXT:    bnez a5, .LBB9_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv a2, a3
+; RV32ZBB-NEXT:  .LBB9_8:
+; RV32ZBB-NEXT:    srli a3, a2, 1
+; RV32ZBB-NEXT:    srl a3, a3, a1
+; RV32ZBB-NEXT:    or a3, t0, a3
+; RV32ZBB-NEXT:    sll a2, a2, a4
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    srl a0, a0, a1
+; RV32ZBB-NEXT:    or a0, a2, a0
+; RV32ZBB-NEXT:    add a1, a7, a0
+; RV32ZBB-NEXT:    add a0, a6, a3
+; RV32ZBB-NEXT:    sltu a2, a0, a6
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotl_64_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    andi a2, a2, 63
+; RV64ZBB-NEXT:    rol a0, a0, a2
+; RV64ZBB-NEXT:    rol a1, a1, a2
+; RV64ZBB-NEXT:    add a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i64 %amt, 63
+  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %maskedamt)
+  %2 = tail call i64 @llvm.fshl.i64(i64 %b, i64 %b, i64 %maskedamt)
+  %3 = add i64 %1, %2
+  ret i64 %3
+}
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+
+define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; RV32I-LABEL: rotr_32_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srl a3, a0, a2
+; RV32I-NEXT:    neg a4, a2
+; RV32I-NEXT:    sll a0, a0, a4
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    srl a2, a1, a2
+; RV32I-NEXT:    sll a1, a1, a4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotr_32_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a3, a0, a2
+; RV64I-NEXT:    negw a4, a2
+; RV64I-NEXT:    sllw a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    srlw a2, a1, a2
+; RV64I-NEXT:    sllw a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotr_32_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    andi a2, a2, 31
+; RV32ZBB-NEXT:    ror a0, a0, a2
+; RV32ZBB-NEXT:    ror a1, a1, a2
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotr_32_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    andi a2, a2, 31
+; RV64ZBB-NEXT:    rorw a0, a0, a2
+; RV64ZBB-NEXT:    rorw a1, a1, a2
+; RV64ZBB-NEXT:    addw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
+; RV32I-LABEL: rotr_64_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a5, a4, 32
+; RV32I-NEXT:    mv a6, a0
+; RV32I-NEXT:    beqz a5, .LBB11_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a6, a1
+; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    beqz a5, .LBB11_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:  .LBB11_4:
+; RV32I-NEXT:    srl a7, a6, a4
+; RV32I-NEXT:    slli t0, a1, 1
+; RV32I-NEXT:    not a0, a4
+; RV32I-NEXT:    sll t0, t0, a0
+; RV32I-NEXT:    srl t1, a1, a4
+; RV32I-NEXT:    slli a1, a6, 1
+; RV32I-NEXT:    sll t2, a1, a0
+; RV32I-NEXT:    mv a6, a2
+; RV32I-NEXT:    beqz a5, .LBB11_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv a6, a3
+; RV32I-NEXT:  .LBB11_6:
+; RV32I-NEXT:    or a1, t0, a7
+; RV32I-NEXT:    or a7, t2, t1
+; RV32I-NEXT:    srl t0, a6, a4
+; RV32I-NEXT:    beqz a5, .LBB11_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv a3, a2
+; RV32I-NEXT:  .LBB11_8:
+; RV32I-NEXT:    slli a2, a3, 1
+; RV32I-NEXT:    sll a2, a2, a0
+; RV32I-NEXT:    or a2, a2, t0
+; RV32I-NEXT:    srl a3, a3, a4
+; RV32I-NEXT:    slli a4, a6, 1
+; RV32I-NEXT:    sll a0, a4, a0
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    add a3, a7, a0
+; RV32I-NEXT:    add a0, a1, a2
+; RV32I-NEXT:    sltu a1, a0, a1
+; RV32I-NEXT:    add a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotr_64_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a3, a0, a2
+; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    sll a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    srl a2, a1, a2
+; RV64I-NEXT:    sll a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotr_64_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    andi a5, a4, 32
+; RV32ZBB-NEXT:    mv a6, a0
+; RV32ZBB-NEXT:    beqz a5, .LBB11_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    mv a6, a1
+; RV32ZBB-NEXT:  .LBB11_2:
+; RV32ZBB-NEXT:    beqz a5, .LBB11_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    mv a1, a0
+; RV32ZBB-NEXT:  .LBB11_4:
+; RV32ZBB-NEXT:    srl a7, a6, a4
+; RV32ZBB-NEXT:    slli t0, a1, 1
+; RV32ZBB-NEXT:    not a0, a4
+; RV32ZBB-NEXT:    sll t0, t0, a0
+; RV32ZBB-NEXT:    srl t1, a1, a4
+; RV32ZBB-NEXT:    slli a1, a6, 1
+; RV32ZBB-NEXT:    sll t2, a1, a0
+; RV32ZBB-NEXT:    mv a6, a2
+; RV32ZBB-NEXT:    beqz a5, .LBB11_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv a6, a3
+; RV32ZBB-NEXT:  .LBB11_6:
+; RV32ZBB-NEXT:    or a1, t0, a7
+; RV32ZBB-NEXT:    or a7, t2, t1
+; RV32ZBB-NEXT:    srl t0, a6, a4
+; RV32ZBB-NEXT:    beqz a5, .LBB11_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv a3, a2
+; RV32ZBB-NEXT:  .LBB11_8:
+; RV32ZBB-NEXT:    slli a2, a3, 1
+; RV32ZBB-NEXT:    sll a2, a2, a0
+; RV32ZBB-NEXT:    or a2, a2, t0
+; RV32ZBB-NEXT:    srl a3, a3, a4
+; RV32ZBB-NEXT:    slli a4, a6, 1
+; RV32ZBB-NEXT:    sll a0, a4, a0
+; RV32ZBB-NEXT:    or a0, a0, a3
+; RV32ZBB-NEXT:    add a3, a7, a0
+; RV32ZBB-NEXT:    add a0, a1, a2
+; RV32ZBB-NEXT:    sltu a1, a0, a1
+; RV32ZBB-NEXT:    add a1, a3, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotr_64_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    andi a2, a2, 63
+; RV64ZBB-NEXT:    ror a0, a0, a2
+; RV64ZBB-NEXT:    ror a1, a1, a2
+; RV64ZBB-NEXT:    add a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i64 %amt, 63
+  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %maskedamt)
+  %2 = tail call i64 @llvm.fshr.i64(i64 %b, i64 %b, i64 %maskedamt)
+  %3 = add i64 %1, %2
+  ret i64 %3
+}
+declare i64 @llvm.fshr.i64(i64, i64, i64)
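
The intent of the LegalizeTypes.cpp hunk above: a value can be marked `Processed` yet appear in none of the legalizer's result maps when it was remapped to a replacement node that has not been processed yet; only if its result id still resolves to a node marked `Processed` is the state truly inconsistent. Below is a minimal standalone sketch of that decision under a toy node model; `MiniNode`, the `std::map`-based `IdToValueMap`, and the driver in `main` are illustrative stand-ins (the sentinel values are modeled on the `NodeIdFlags` enum in LegalizeTypes.h), not LLVM's actual types.

```cpp
#include <cstdio>
#include <map>

// Sentinels modeled on LegalizeTypes.h's NodeIdFlags (illustrative only).
enum NodeIdFlags { ReadyToProcess = 0, NewNode = -1, Unanalyzed = -2, Processed = -3 };

struct MiniNode {
  int NodeId = Unanalyzed;
  int getNodeId() const { return NodeId; }
};

int main() {
  // Analogue of IdToValueMap: result id -> node currently holding that value.
  std::map<int, MiniNode *> IdToValueMap;

  MiniNode Old;                 // value that finished legalization...
  Old.NodeId = Processed;
  MiniNode Replacement;         // ...then got remapped to a new, unprocessed node
  Replacement.NodeId = NewNode;

  const int ResId = 42;
  IdToValueMap[ResId] = &Replacement; // the Value-to-Id table was already updated

  const unsigned Mapped = 0;    // Old appears in none of the result maps
  bool Failed = false;
  if (Mapped == 0) {
    // Old check: unconditionally report "Processed value not in any map!".
    // New check: if the id now resolves to a node that is not itself marked
    // Processed, the value was merely remapped and this state is legal.
    MiniNode *NodeById = IdToValueMap[ResId];
    if (NodeById->getNodeId() == Processed) {
      std::printf("Processed value not in any map!\n");
      Failed = true;
    }
  }
  std::printf("Failed = %d\n", Failed); // prints "Failed = 0": no false positive
  return 0;
}
```

With -enable-legalize-types-checking on the RV64I RUN line, @rotr_32_mask_multiple exercises exactly this remap-then-verify sequence, which previously tripped the assertion on expensive-checks builds.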