diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -289,4 +289,58 @@
   ret void
 }
 
+define i64 @i64_or_lhs_bitfield_positioning(i64 %tmp1, i64 %tmp2) {
+; CHECK-LABEL: i64_or_lhs_bitfield_positioning:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl w8, w1, #7
+; CHECK-NEXT:    and x8, x8, #0x3f80
+; CHECK-NEXT:    orr x0, x8, x0
+; CHECK-NEXT:    ret
+entry:
+  %and = shl i64 %tmp2, 7
+  %shl = and i64 %and, 16256 ; 0x3f80
+  %or = or i64 %shl, %tmp1
+  ret i64 %or
+}
+
+define i64 @i64_or_rhs_bitfield_positioning(i64 %tmp1, i64 %tmp2) {
+; CHECK-LABEL: i64_or_rhs_bitfield_positioning:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl w8, w1, #7
+; CHECK-NEXT:    and x8, x8, #0x3f80
+; CHECK-NEXT:    orr x0, x0, x8
+; CHECK-NEXT:    ret
+entry:
+  %and = shl i64 %tmp2, 7
+  %shl = and i64 %and, 16256 ; 0x3f80
+  %or = or i64 %tmp1, %shl
+  ret i64 %or
+}
+
+define i32 @i32_or_lhs_bitfield_positioning(i32 %tmp1, i32 %tmp2) {
+; CHECK-LABEL: i32_or_lhs_bitfield_positioning:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ubfiz w8, w1, #7, #7
+; CHECK-NEXT:    orr w0, w8, w0
+; CHECK-NEXT:    ret
+entry:
+  %and = shl i32 %tmp2, 7
+  %shl = and i32 %and, 16256 ; 0x3f80
+  %or = or i32 %shl, %tmp1
+  ret i32 %or
+}
+
+define i32 @i32_or_rhs_bitfield_positioning(i32 %tmp1, i32 %tmp2) {
+; CHECK-LABEL: i32_or_rhs_bitfield_positioning:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ubfiz w8, w1, #7, #7
+; CHECK-NEXT:    orr w0, w0, w8
+; CHECK-NEXT:    ret
+entry:
+  %and = shl i32 %tmp2, 7
+  %shl = and i32 %and, 16256 ; 0x3f80
+  %or = or i32 %tmp1, %shl
+  ret i32 %or
+}
+
 !1 = !{!"branch_weights", i32 1, i32 1}