diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -212,6 +212,6 @@
                         form_bitfield_extract, rotate_out_of_range,
                         icmp_to_true_false_known_bits, merge_unmerge,
                         select_combines, fold_merge_to_zext,
-                        constant_fold]> {
+                        constant_fold, identity_combines]> {
   let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
 }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
@@ -350,10 +350,8 @@
 ; CHECK-LLSC-O1-NEXT:  .LBB4_1: // %atomicrmw.start
 ; CHECK-LLSC-O1-NEXT:  // =>This Inner Loop Header: Depth=1
 ; CHECK-LLSC-O1-NEXT:    ldxp x9, x8, [x2]
-; CHECK-LLSC-O1-NEXT:    lsr x8, x8, #0
-; CHECK-LLSC-O1-NEXT:    lsr x10, x8, #0
-; CHECK-LLSC-O1-NEXT:    stxp w11, x9, x10, [x2]
-; CHECK-LLSC-O1-NEXT:    cbnz w11, .LBB4_1
+; CHECK-LLSC-O1-NEXT:    stxp w10, x9, x8, [x2]
+; CHECK-LLSC-O1-NEXT:    cbnz w10, .LBB4_1
 ; CHECK-LLSC-O1-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-LLSC-O1-NEXT:    mov v0.d[0], x9
 ; CHECK-LLSC-O1-NEXT:    mov v0.d[1], x8
@@ -365,10 +363,8 @@
 ; CHECK-CAS-O1-NEXT:  .LBB4_1: // %atomicrmw.start
 ; CHECK-CAS-O1-NEXT:  // =>This Inner Loop Header: Depth=1
 ; CHECK-CAS-O1-NEXT:    ldxp x9, x8, [x2]
-; CHECK-CAS-O1-NEXT:    lsr x8, x8, #0
-; CHECK-CAS-O1-NEXT:    lsr x10, x8, #0
-; CHECK-CAS-O1-NEXT:    stxp w11, x9, x10, [x2]
-; CHECK-CAS-O1-NEXT:    cbnz w11, .LBB4_1
+; CHECK-CAS-O1-NEXT:    stxp w10, x9, x8, [x2]
+; CHECK-CAS-O1-NEXT:    cbnz w10, .LBB4_1
 ; CHECK-CAS-O1-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-CAS-O1-NEXT:    mov v0.d[0], x9
 ; CHECK-CAS-O1-NEXT:    mov v0.d[1], x8
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir
@@ -0,0 +1,24 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            shift_of_zero
+alignment:       4
+legalized:       true
+liveins:
+  - { reg: '$w0' }
+body:             |
+  bb.1.entry:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: shift_of_zero
+    ; CHECK: %a:_(s64) = COPY $x0
+    ; CHECK: $x0 = COPY %a(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = G_CONSTANT i64 0
+    %res:_(s64) = G_LSHR %a, %b
+    $x0 = COPY %res(s64)
+    RET_ReallyLR implicit $x0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-select.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-select.mir
@@ -9,9 +9,7 @@
     liveins: $x0, $x1
     ; CHECK-LABEL: name: test_combine_select_same_res
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY]]
-    ; CHECK: $x0 = COPY [[SELECT]](s64)
+    ; CHECK: $x0 = COPY [[COPY]](s64)
     %0:_(s64) = COPY $x0
     %1:_(s1) = G_TRUNC %0
     %2:_(s64) = G_SELECT %1, %0, %0
diff --git a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
--- a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
@@ -133,7 +133,6 @@
 ; GISEL-NEXT:    mov v0.d[1], x8
 ; GISEL-NEXT:    mov d0, v0.d[1]
 ; GISEL-NEXT:    fmov x8, d0
-; GISEL-NEXT:    lsr x8, x8, #0
 ; GISEL-NEXT:    ldr w0, [x8, #20]
 ; GISEL-NEXT:    ret
