diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -994,13 +994,6 @@
 
   if (ShiftAmtIsConstant) {
     Known = KF(Known2, Known);
-
-    // If the known bits conflict, this must be an overflowing left shift, so
-    // the shift result is poison. We can return anything we want. Choose 0 for
-    // the best folding opportunity.
-    if (Known.hasConflict())
-      Known.setAllZero();
-
     return;
   }
 
diff --git a/llvm/lib/Support/KnownBits.cpp b/llvm/lib/Support/KnownBits.cpp
--- a/llvm/lib/Support/KnownBits.cpp
+++ b/llvm/lib/Support/KnownBits.cpp
@@ -183,9 +183,11 @@
   unsigned MinTrailingZeros = LHS.countMinTrailingZeros();
 
   APInt MinShiftAmount = RHS.getMinValue();
-  if (MinShiftAmount.uge(BitWidth))
-    // Always poison. Return unknown because we don't like returning conflict.
+  if (MinShiftAmount.uge(BitWidth)) {
+    // Always poison. Return zero because we don't like returning conflict.
+    Known.setAllZero();
     return Known;
+  }
 
   // Minimum shift amount low bits are known zero.
   MinTrailingZeros += MinShiftAmount.getZExtValue();
@@ -240,9 +242,11 @@
 
   // Minimum shift amount high bits are known zero.
   APInt MinShiftAmount = RHS.getMinValue();
-  if (MinShiftAmount.uge(BitWidth))
-    // Always poison. Return unknown because we don't like returning conflict.
+  if (MinShiftAmount.uge(BitWidth)) {
+    // Always poison. Return zero because we don't like returning conflict.
+    Known.setAllZero();
     return Known;
+  }
 
   MinLeadingZeros += MinShiftAmount.getZExtValue();
   MinLeadingZeros = std::min(MinLeadingZeros, BitWidth);
@@ -295,9 +299,11 @@
 
   // Minimum shift amount high bits are known sign bits.
   APInt MinShiftAmount = RHS.getMinValue();
-  if (MinShiftAmount.uge(BitWidth))
-    // Always poison. Return unknown because we don't like returning conflict.
+  if (MinShiftAmount.uge(BitWidth)) {
+    // Always poison. Return zero because we don't like returning conflict.
+    Known.setAllZero();
     return Known;
+  }
 
   if (MinLeadingZeros) {
     MinLeadingZeros += MinShiftAmount.getZExtValue();
diff --git a/llvm/test/Analysis/ScalarEvolution/ashr.ll b/llvm/test/Analysis/ScalarEvolution/ashr.ll
--- a/llvm/test/Analysis/ScalarEvolution/ashr.ll
+++ b/llvm/test/Analysis/ScalarEvolution/ashr.ll
@@ -51,7 +51,7 @@
 ; CHECK-LABEL: 't4'
 ; CHECK-NEXT:  Classifying expressions for: @t4
 ; CHECK-NEXT:    %i0 = ashr exact i32 %x, 32
-; CHECK-NEXT:    --> %i0 U: full-set S: full-set
+; CHECK-NEXT:    --> %i0 U: [0,1) S: [0,1)
 ; CHECK-NEXT:  Determining loop execution counts for: @t4
 ;
   %i0 = ashr exact i32 %x, 32
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
@@ -95,8 +95,7 @@
     ; CHECK-NEXT: %x:_(s32) = COPY $w0
     ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 32
     ; CHECK-NEXT: %shift:_(s32) = G_ASHR %x, %lsb(s32)
-    ; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %shift, 1
-    ; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
+    ; CHECK-NEXT: $w0 = COPY %shift(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 32
@@ -122,8 +121,7 @@
     ; CHECK-NEXT: %x:_(s32) = COPY $w0
     ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: %shift:_(s32) = G_ASHR %x, %lsb(s32)
-    ; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %shift, 1
-    ; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
+    ; CHECK-NEXT: $w0 = COPY %shift(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 -1
diff --git a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
--- a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
+++ b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
@@ -374,10 +374,9 @@
 define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-LABEL: rotl_64_mask_or_128_or_64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    sll.w $a3, $a0, $a2
 ; LA32-NEXT:    sub.w $a0, $zero, $a2
 ; LA32-NEXT:    srl.w $a0, $a1, $a0
-; LA32-NEXT:    move $a1, $a3
+; LA32-NEXT:    move $a1, $zero
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_64_mask_or_128_or_64:
@@ -499,10 +498,8 @@
 define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-LABEL: rotr_64_mask_or_128_or_64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    srl.w $a3, $a1, $a2
-; LA32-NEXT:    sub.w $a1, $zero, $a2
-; LA32-NEXT:    sll.w $a1, $a0, $a1
-; LA32-NEXT:    move $a0, $a3
+; LA32-NEXT:    srl.w $a0, $a1, $a2
+; LA32-NEXT:    move $a1, $zero
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_64_mask_or_128_or_64:
diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -516,20 +516,15 @@
 define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotl_32_mask_or_64_or_32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    ori a2, a1, 64
-; RV32I-NEXT:    sll a2, a0, a2
 ; RV32I-NEXT:    neg a1, a1
 ; RV32I-NEXT:    ori a1, a1, 32
 ; RV32I-NEXT:    srl a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_32_mask_or_64_or_32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sllw a2, a0, a1
 ; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    srlw a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotl_32_mask_or_64_or_32:
@@ -670,20 +665,13 @@
 define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotr_32_mask_or_64_or_32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    ori a2, a1, 64
-; RV32I-NEXT:    srl a2, a0, a2
-; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    ori a1, a1, 32
-; RV32I-NEXT:    sll a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    ori a1, a1, 64
+; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_32_mask_or_64_or_32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srlw a2, a0, a1
-; RV64I-NEXT:    negw a1, a1
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotr_32_mask_or_64_or_32:
@@ -1013,28 +1001,23 @@
 define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV32I-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sll a3, a0, a2
 ; RV32I-NEXT:    neg a0, a2
 ; RV32I-NEXT:    srl a0, a1, a0
-; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_64_mask_or_128_or_64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    ori a2, a1, 128
-; RV64I-NEXT:    sll a2, a0, a2
 ; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    ori a1, a1, 64
 ; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    sll a3, a0, a2
 ; RV32ZBB-NEXT:    neg a0, a2
 ; RV32ZBB-NEXT:    srl a0, a1, a0
-; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: rotl_64_mask_or_128_or_64:
@@ -1044,10 +1027,9 @@
 ;
 ; RV32XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32XTHEADBB:       # %bb.0:
-; RV32XTHEADBB-NEXT:    sll a3, a0, a2
 ; RV32XTHEADBB-NEXT:    neg a0, a2
 ; RV32XTHEADBB-NEXT:    srl a0, a1, a0
-; RV32XTHEADBB-NEXT:    mv a1, a3
+; RV32XTHEADBB-NEXT:    li a1, 0
 ; RV32XTHEADBB-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
@@ -1359,28 +1341,20 @@
 define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV32I-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a3, a1, a2
-; RV32I-NEXT:    neg a1, a2
-; RV32I-NEXT:    sll a1, a0, a1
-; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    srl a0, a1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_64_mask_or_128_or_64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    ori a2, a1, 128
-; RV64I-NEXT:    srl a2, a0, a2
-; RV64I-NEXT:    negw a1, a1
-; RV64I-NEXT:    ori a1, a1, 64
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ori a1, a1, 128
+; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srl a3, a1, a2
-; RV32ZBB-NEXT:    neg a1, a2
-; RV32ZBB-NEXT:    sll a1, a0, a1
-; RV32ZBB-NEXT:    mv a0, a3
+; RV32ZBB-NEXT:    srl a0, a1, a2
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: rotr_64_mask_or_128_or_64:
@@ -1390,10 +1364,8 @@
 ;
 ; RV32XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32XTHEADBB:       # %bb.0:
-; RV32XTHEADBB-NEXT:    srl a3, a1, a2
-; RV32XTHEADBB-NEXT:    neg a1, a2
-; RV32XTHEADBB-NEXT:    sll a1, a0, a1
-; RV32XTHEADBB-NEXT:    mv a0, a3
+; RV32XTHEADBB-NEXT:    srl a0, a1, a2
+; RV32XTHEADBB-NEXT:    li a1, 0
 ; RV32XTHEADBB-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
diff --git a/llvm/test/Transforms/InstCombine/not-add.ll b/llvm/test/Transforms/InstCombine/not-add.ll
--- a/llvm/test/Transforms/InstCombine/not-add.ll
+++ b/llvm/test/Transforms/InstCombine/not-add.ll
@@ -170,11 +170,6 @@
 define void @pr50370(i32 %x) {
 ; CHECK-LABEL: @pr50370(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[X:%.*]], 1
-; CHECK-NEXT:    [[B15:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537)), [[XOR]]
-; CHECK-NEXT:    [[B12:%.*]] = add nsw i32 [[B15]], ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537))
-; CHECK-NEXT:    [[B:%.*]] = xor i32 [[B12]], -1
-; CHECK-NEXT:    store i32 [[B]], ptr undef, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
diff --git a/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll b/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
--- a/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
+++ b/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
@@ -9,7 +9,7 @@
 ; CHECK:       cond.true:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    ret i1 false
+; CHECK-NEXT:    ret i1 [[C1]]
 ;
 entry:
   br i1 %c1, label %cond.true, label %end
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1743,12 +1743,6 @@
 ; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26135
 define void @ashr_out_of_range_1(ptr %A) {
 ; CHECK-LABEL: @ashr_out_of_range_1(
-; CHECK-NEXT:    [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[G11:%.*]] = getelementptr i177, ptr [[A]], i64 -1
-; CHECK-NEXT:    [[B24_LOBIT:%.*]] = ashr i177 [[L]], 175
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i177 [[B24_LOBIT]] to i64
-; CHECK-NEXT:    [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP1]]
-; CHECK-NEXT:    store i177 0, ptr [[G62]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %L = load i177, ptr %A, align 4
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -694,7 +694,7 @@
       "  %A = ashr i32 %a, -1\n"
       "  ret i32 %A\n"
       "}\n");
-  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u);
+  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 32u);
 }
 
 // No guarantees for canonical IR in this analysis, so this just bails out.
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
@@ -1630,12 +1630,12 @@
 
   KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
 
-  // We don't know what the result of the shift is, but we should not crash
+  // Result can be anything, but we should not crash.
   EXPECT_TRUE(EqSizeRes.One.isZero());
-  EXPECT_TRUE(EqSizeRes.Zero.isZero());
+  EXPECT_TRUE(EqSizeRes.Zero.isAllOnes());
 
   EXPECT_TRUE(BiggerSizeRes.One.isZero());
-  EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
+  EXPECT_TRUE(BiggerSizeRes.Zero.isAllOnes());
 }
 
 TEST_F(AArch64GISelMITest, TestKnownBitsAssertZext) {
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -1456,10 +1456,10 @@
   KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
 
   EXPECT_TRUE(EqSizeRes.One.isZero());
-  EXPECT_TRUE(EqSizeRes.Zero.isZero());
+  EXPECT_TRUE(EqSizeRes.Zero.isAllOnes());
 
   EXPECT_TRUE(BiggerSizeRes.One.isZero());
-  EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
+  EXPECT_TRUE(BiggerSizeRes.Zero.isAllOnes());
 }
 
 TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertZext) {
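
For reviewers who want to try the change locally, here is a minimal standalone sketch of the new behavior; it is illustrative only and not part of the patch, and the file layout, bit width, and shift amount are my own choices. It shows that an always-out-of-range shift amount now yields all-zero known bits from KnownBits::shl instead of a fully unknown result.

#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>

using namespace llvm;

int main() {
  // An i8 value with nothing known, shifted left by a constant amount of 8.
  // The shift amount is >= the bit width, so the shift is always poison.
  KnownBits Lhs(8);
  KnownBits Shamt = KnownBits::makeConstant(APInt(8, 8));

  KnownBits Res = KnownBits::shl(Lhs, Shamt);

  // Before this patch the result was unknown (Zero == One == 0). Now the
  // always-poison shift reports a known-zero value (Zero is all ones, One is
  // all zeros), which gives callers a concrete value to fold with.
  assert(Res.isZero());
  return 0;
}

The same now holds for KnownBits::lshr and KnownBits::ashr, which this patch changes in the same way.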