diff --git a/llvm/test/CodeGen/AArch64/shift-amount-mod.ll b/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
--- a/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
+++ b/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
@@ -56,6 +56,24 @@
   store i32 %shifted, i32* %valptr
   ret void
 }
+define void @modify32_shl_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+; CHECK-LABEL: modify32_shl_by_negated_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    ldr w9, [x0]
+; CHECK-NEXT:    mov w10, #32
+; CHECK-NEXT:    lsl w8, w9, w8
+; CHECK-NEXT:    sub w9, w10, w1
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    str w9, [x2]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  store i32 %negshamt, i32* %shamtptr
+  ret void
+}
 
 define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
 ; CHECK-LABEL: reg64_shl_by_negated:
@@ -105,6 +123,24 @@
   store i64 %shifted, i64* %valptr
   ret void
 }
+define void @modify64_shl_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+; CHECK-LABEL: modify64_shl_by_negated_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    mov w10, #64
+; CHECK-NEXT:    lsl x8, x9, x8
+; CHECK-NEXT:    sub x9, x10, x1
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    str x9, [x2]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  store i64 %negshamt, i64* %shamtptr
+  ret void
+}
 
 ; logical shift right
 ;------------------------------------------------------------------------------;
@@ -157,6 +193,24 @@
   store i32 %shifted, i32* %valptr
   ret void
 }
+define void @modify32_lshr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+; CHECK-LABEL: modify32_lshr_by_negated_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    ldr w9, [x0]
+; CHECK-NEXT:    mov w10, #32
+; CHECK-NEXT:    lsr w8, w9, w8
+; CHECK-NEXT:    sub w9, w10, w1
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    str w9, [x2]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  store i32 %negshamt, i32* %shamtptr
+  ret void
+}
 
 define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
 ; CHECK-LABEL: reg64_lshr_by_negated:
@@ -206,6 +260,24 @@
   store i64 %shifted, i64* %valptr
   ret void
 }
+define void @modify64_lshr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+; CHECK-LABEL: modify64_lshr_by_negated_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    mov w10, #64
+; CHECK-NEXT:    lsr x8, x9, x8
+; CHECK-NEXT:    sub x9, x10, x1
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    str x9, [x2]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  store i64 %negshamt, i64* %shamtptr
+  ret void
+}
 
 ; arithmetic shift right
 ;------------------------------------------------------------------------------;
@@ -258,6 +330,24 @@
   store i32 %shifted, i32* %valptr
   ret void
 }
+define void @modify32_ashr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+; CHECK-LABEL: modify32_ashr_by_negated_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    ldr w9, [x0]
+; CHECK-NEXT:    mov w10, #32
+; CHECK-NEXT:    asr w8, w9, w8
+; CHECK-NEXT:    sub w9, w10, w1
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    str w9, [x2]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  store i32 %negshamt, i32* %shamtptr
+  ret void
+}
 
 define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
 ; CHECK-LABEL: reg64_ashr_by_negated:
@@ -307,6 +397,24 @@
   store i64 %shifted, i64* %valptr
   ret void
 }
+define void @modify64_ashr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+; CHECK-LABEL: modify64_ashr_by_negated_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    mov w10, #64
+; CHECK-NEXT:    asr x8, x9, x8
+; CHECK-NEXT:    sub x9, x10, x1
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    str x9, [x2]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  store i64 %negshamt, i64* %shamtptr
+  ret void
+}
 
 ;==============================================================================;
 ; the shift amount is complemented (shiftbitwidth - 1 - shiftamt)
@@ -367,6 +475,23 @@
   store i32 %shifted, i32* %valptr
   ret void
 }
+define void @modify32_shl_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+; CHECK-LABEL: modify32_shl_by_complemented_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    ldr w9, [x0]
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    lsl w9, w9, w8
+; CHECK-NEXT:    str w9, [x0]
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  store i32 %negshamt, i32* %shamtptr
+  ret void
+}
 
 define i64 @reg64_shl_by_complemented(i64 %val, i64 %shamt) nounwind {
 ; CHECK-LABEL: reg64_shl_by_complemented:
@@ -420,6 +545,23 @@
   store i64 %shifted, i64* %valptr
   ret void
 }
+define void @modify64_shl_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+; CHECK-LABEL: modify64_shl_by_complemented_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    lsl x9, x9, x8
+; CHECK-NEXT:    str x9, [x0]
+; CHECK-NEXT:    str x8, [x2]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  store i64 %negshamt, i64* %shamtptr
+  ret void
+}
 
 ; logical shift right
 ;------------------------------------------------------------------------------;
@@ -476,6 +618,23 @@
   store i32 %shifted, i32* %valptr
   ret void
 }
+define void @modify32_lshr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+; CHECK-LABEL: modify32_lshr_by_complemented_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    ldr w9, [x0]
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    lsr w9, w9, w8
+; CHECK-NEXT:    str w9, [x0]
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  store i32 %negshamt, i32* %shamtptr
+  ret void
+}
 
 define i64 @reg64_lshr_by_complemented(i64 %val, i64 %shamt) nounwind {
 ; CHECK-LABEL: reg64_lshr_by_complemented:
@@ -529,6 +688,23 @@
   store i64 %shifted, i64* %valptr
   ret void
 }
+define void @modify64_lshr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+; CHECK-LABEL: modify64_lshr_by_complemented_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    lsr x9, x9, x8
+; CHECK-NEXT:    str x9, [x0]
+; CHECK-NEXT:    str x8, [x2]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  store i64 %negshamt, i64* %shamtptr
+  ret void
+}
 
 ; arithmetic shift right
 ;------------------------------------------------------------------------------;
@@ -585,6 +761,23 @@
   store i32 %shifted, i32* %valptr
   ret void
 }
+define void @modify32_ashr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+; CHECK-LABEL: modify32_ashr_by_complemented_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    ldr w9, [x0]
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    asr w9, w9, w8
+; CHECK-NEXT:    str w9, [x0]
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  store i32 %negshamt, i32* %shamtptr
+  ret void
+}
 
 define i64 @reg64_ashr_by_complemented(i64 %val, i64 %shamt) nounwind {
 ; CHECK-LABEL: reg64_ashr_by_complemented:
@@ -638,6 +831,23 @@
   store i64 %shifted, i64* %valptr
   ret void
 }
+define void @modify64_ashr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+; CHECK-LABEL: modify64_ashr_by_complemented_multi_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    ldr x9, [x0]
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    asr x9, x9, x8
+; CHECK-NEXT:    str x9, [x0]
+; CHECK-NEXT:    str x8, [x2]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  store i64 %negshamt, i64* %shamtptr
+  ret void
+}
 
 ;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
 ; next let's only test simple reg pattern, and only lshr.