Index: lib/Target/ARM/ARMCodeGenPrepare.cpp
===================================================================
--- lib/Target/ARM/ARMCodeGenPrepare.cpp
+++ lib/Target/ARM/ARMCodeGenPrepare.cpp
@@ -461,7 +461,7 @@
   // > The operators that can wrap are: add, sub, mul and shl.
   // > shl interprets its second operand as unsigned and if the first operand
   //   is an immediate, it will need zext to be nuw.
-  // > I'm assuming mul cannot be nuw while using a negative immediate...
+  // > I'm assuming mul has to interpret immediates as unsigned for nuw.
   // > Which leaves the nuw add and sub to be handled; as with shl, if an
   //   immediate is used as operand 0, it will need zext to be nuw.
   // - We also allow add and sub to safely overflow in certain circumstances
@@ -486,8 +486,8 @@
       break;
 
     unsigned Opc = I->getOpcode();
-    assert((Opc == Instruction::Add || Opc == Instruction::Sub) &&
-           "expected only an add or sub to use a negative imm");
+    if (Opc != Instruction::Add && Opc != Instruction::Sub)
+      continue;
 
     LLVM_DEBUG(dbgs() << "ARM CGP: Adjusting " << *I << "\n");
     auto *NewConst = ConstantInt::get(Ctx, Const->getValue().abs());
Index: test/CodeGen/ARM/CGP/arm-cgp-icmps.ll
===================================================================
--- test/CodeGen/ARM/CGP/arm-cgp-icmps.ll
+++ test/CodeGen/ARM/CGP/arm-cgp-icmps.ll
@@ -310,3 +310,25 @@
   ret i32 %conv1
 }
 
+; CHECK-COMMON-LABEL: mul_with_neg_imm
+; CHECK-COMMON-NOT: uxtb
+; CHECK-COMMON: and [[BIT0:r[0-9]+]], r0, #1
+; CHECK-COMMON: add.w [[MUL32:r[0-9]+]], [[BIT0]], [[BIT0]], lsl #5
+; CHECK-COMMON: cmp.w r0, [[MUL32]], lsl #2
+define void @mul_with_neg_imm(i32* %a, i32* %b, i32* %c) {
+entry:
+  store i32 -59772, i32* %c, align 4
+  %0 = load i32, i32* %a, align 4
+  %1 = trunc i32 %0 to i8
+  %2 = and i8 %1, 1
+  %conv.i = mul nuw i8 %2, -124
+  %tobool = icmp eq i8 %conv.i, 0
+  br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+  store i32 0, i32* %b, align 4
+  br label %if.end
+
+if.end:
+  ret void
+}
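
Note (not part of the patch): a minimal standalone C++ sketch of the reasoning behind the updated comment and the new test. In mul_with_neg_imm the i8 immediate -124 reads as 132 when interpreted as unsigned, and the other operand is masked to 0 or 1, so the product never exceeds 255; the nuw flag on the mul is therefore legitimate, and the pass can encounter a mul with a negative-looking immediate, which is why the assert is relaxed to a continue. The values below are taken from the test; everything else is illustrative only.

// Standalone sketch: why 'mul nuw i8 %2, -124' never wraps when the
// immediate is read as unsigned. Values mirror the test above.
#include <cstdint>
#include <cstdio>

int main() {
  // The i8 immediate -124 has the bit pattern 0x84, i.e. 132 as unsigned.
  const unsigned Imm = static_cast<uint8_t>(-124);
  // '%2 = and i8 %1, 1' restricts the other operand to 0 or 1.
  for (unsigned Bit = 0; Bit <= 1; ++Bit) {
    unsigned Product = Bit * Imm;      // 0 or 132
    bool Wraps = Product > UINT8_MAX;  // never true here, so nuw holds
    printf("%u * %u = %u, unsigned wrap: %s\n", Bit, Imm, Product,
           Wraps ? "yes" : "no");
  }
  return 0;
}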