Index: lib/CodeGen/MIRParser/MILexer.cpp
===================================================================
--- lib/CodeGen/MIRParser/MILexer.cpp
+++ lib/CodeGen/MIRParser/MILexer.cpp
@@ -410,17 +410,26 @@
   return isIdentifierChar(C) && C != '.';
 }
 
-static Cursor maybeLexRegister(Cursor C, MIToken &Token) {
-  if (C.peek() != '%')
+static Cursor maybeLexRegister(Cursor C, MIToken &Token,
+                               ErrorCallbackType ErrorCallback) {
+  if (C.peek() != '%' && C.peek() != '$')
     return None;
-  if (isdigit(C.peek(1)))
-    return lexVirtualRegister(C, Token);
+
+  if (C.peek() == '%') {
+    if (isdigit(C.peek(1)))
+      return lexVirtualRegister(C, Token);
+
+    // ErrorCallback(Token.location(), "Named vregs are not yet supported.");
+    return None;
+  }
+
+  assert(C.peek() == '$');
   auto Range = C;
-  C.advance(); // Skip '%'
+  C.advance(); // Skip '$'
   while (isRegisterChar(C.peek()))
     C.advance();
   Token.reset(MIToken::NamedRegister, Range.upto(C))
-      .setStringValue(Range.upto(C).drop_front(1)); // Drop the '%'
+      .setStringValue(Range.upto(C).drop_front(1)); // Drop the '$'
   return C;
 }
 
@@ -642,7 +651,7 @@
     return R.remaining();
   if (Cursor R = maybeLexIRValue(C, Token, ErrorCallback))
     return R.remaining();
-  if (Cursor R = maybeLexRegister(C, Token))
+  if (Cursor R = maybeLexRegister(C, Token, ErrorCallback))
     return R.remaining();
   if (Cursor R = maybeLexGlobalValue(C, Token, ErrorCallback))
     return R.remaining();
Index: lib/CodeGen/TargetRegisterInfo.cpp
===================================================================
--- lib/CodeGen/TargetRegisterInfo.cpp
+++ lib/CodeGen/TargetRegisterInfo.cpp
@@ -89,15 +89,15 @@
                        unsigned SubIdx) {
   return Printable([Reg, TRI, SubIdx](raw_ostream &OS) {
     if (!Reg)
-      OS << "%noreg";
+      OS << "$noreg";
     else if (TargetRegisterInfo::isStackSlot(Reg))
       OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
     else if (TargetRegisterInfo::isVirtualRegister(Reg))
       OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
     else if (!TRI)
-      OS << '%' << "physreg" << Reg;
+      OS << '$' << "physreg" << Reg;
     else if (Reg < TRI->getNumRegs()) {
-      OS << '%';
+      OS << '$';
       printLowerCase(TRI->getName(Reg), OS);
     } else
       llvm_unreachable("Register kind is unsupported.");
Index: test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
+++ test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
@@ -13,9 +13,9 @@
 ; CHECK: [[F_ONE:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
 ; CHECK: [[TWO:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
 
-; CHECK: %w0 = COPY [[ANSWER]]
-; CHECK: %d0 = COPY [[D_ONE]]
-; CHECK: %x1 = COPY [[TWELVE]]
+; CHECK: $w0 = COPY [[ANSWER]]
+; CHECK: $d0 = COPY [[D_ONE]]
+; CHECK: $x1 = COPY [[TWELVE]]
 ; CHECK: G_STORE [[THREE]](s8), {{%[0-9]+}}(p0) :: (store 1 into stack, align 0)
 ; CHECK: G_STORE [[ONE]](s16), {{%[0-9]+}}(p0) :: (store 2 into stack + 8, align 0)
 ; CHECK: G_STORE [[FOUR]](s32), {{%[0-9]+}}(p0) :: (store 4 into stack + 16, align 0)
Index: test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
+++ test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
@@ -4,15 +4,15 @@
 target triple = "aarch64-linux-gnu"
 
 ; CHECK-LABEL: name: args_i32
-; CHECK: %[[ARG0:[0-9]+]]:_(s32) = COPY %w0
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w1
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w2
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w3
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w4
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w5
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w6
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w7
-; CHECK: %w0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(s32) = COPY $w0
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w1
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w2
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w3
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w4
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w5
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w6
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w7
+; CHECK: $w0 = COPY %[[ARG0]]
 define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3, i32 %w4, i32 %w5, i32 %w6, i32 %w7) {
@@ -20,15 +20,15 @@
 }
 
 ; CHECK-LABEL: name: args_i64
-; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY %x0
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x1
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x2
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x3
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x4
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x5
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x6
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x7
-; CHECK: %x0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY $x0
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x1
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x2
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x3
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x4
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x5
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x6
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x7
+; CHECK: $x0 = COPY %[[ARG0]]
 define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3, i64 %x4, i64 %x5, i64 %x6, i64 %x7) {
   ret i64 %x0
@@ -36,23 +36,23 @@
 
 ; CHECK-LABEL: name: args_ptrs
-; CHECK: %[[ARG0:[0-9]+]]:_(p0) = COPY %x0
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x1
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x2
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x3
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x4
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x5
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x6
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x7
-; CHECK: %x0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(p0) = COPY $x0
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x1
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x2
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x3
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x4
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x5
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x6
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x7
+; CHECK: $x0 = COPY %[[ARG0]]
 define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3, [3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
   ret i8* %x0
 }
 
 ; CHECK-LABEL: name: args_arr
-; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY %d0
-; CHECK: %d0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY $d0
+; CHECK: $d0 = COPY %[[ARG0]]
 define [1 x double] @args_arr([1 x double] %d0) {
   ret [1 x double] %d0
 }
@@ -67,16 +67,16 @@
 ; CHECK: [[F_ONE:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
 ; CHECK: [[TWO:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
 
-; CHECK: %w0 = COPY [[ANSWER]]
-; CHECK: %d0 = COPY [[D_ONE]]
-; CHECK: %x1 = COPY [[TWELVE]]
+; CHECK: $w0 = COPY [[ANSWER]]
+; CHECK: $d0 = COPY [[D_ONE]]
+; CHECK: $x1 = COPY [[TWELVE]]
 ; CHECK: [[THREE_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[THREE]]
-; CHECK: %w2 = COPY [[THREE_TMP]](s32)
+; CHECK: $w2 = COPY [[THREE_TMP]](s32)
 ; CHECK: [[ONE_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[ONE]]
-; CHECK: %w3 = COPY [[ONE_TMP]](s32)
-; CHECK: %w4 = COPY [[FOUR]](s32)
-; CHECK: %s1 = COPY [[F_ONE]](s32)
-; CHECK: %d2 = COPY [[TWO]](s64)
+; CHECK: $w3 = COPY [[ONE_TMP]](s32)
+; CHECK: $w4 = COPY [[FOUR]](s32)
+; CHECK: $s1 = COPY [[F_ONE]](s32)
+; CHECK: $d2 = COPY [[TWO]](s64)
 declare void @varargs(i32, double, i64, ...)
 define void @test_varargs() {
   call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i8 3, i16 1, i32 4, float 1.0, double 2.0)
Index: test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -7,22 +7,22 @@
 
 ; Tests for add.
 ; CHECK-LABEL: name: addi64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_ADD [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @addi64(i64 %arg1, i64 %arg2) {
   %res = add i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: muli64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_MUL [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @muli64(i64 %arg1, i64 %arg2) {
   %res = mul i64 %arg1, %arg2
   ret i64 %res
@@ -107,7 +107,7 @@
 ; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000),
 ; CHECK:                  %[[FALSE:bb.[0-9]+]](0x40000000)
 ;
-; CHECK: [[ADDR:%.*]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%.*]]:_(p0) = COPY $x0
 ;
 ; Check that we emit the correct branch.
 ; CHECK: [[TST:%.*]]:_(s1) = G_LOAD [[ADDR]](p0)
@@ -135,7 +135,7 @@
 ;
 ; CHECK: bb.{{[a-zA-Z0-9.]+}}:
 ; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+]](0x40000000)
-; CHECK: %0:_(s32) = COPY %w0
+; CHECK: %0:_(s32) = COPY $w0
 ; CHECK: %[[reg100:[0-9]+]]:_(s32) = G_CONSTANT i32 100
 ; CHECK: %[[reg200:[0-9]+]]:_(s32) = G_CONSTANT i32 200
 ; CHECK: %[[reg0:[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -171,8 +171,8 @@
 ;
 ; CHECK: [[BB_RET]].{{[a-zA-Z0-9.]+}}:
 ; CHECK-NEXT: %[[regret:[0-9]+]]:_(s32) = G_PHI %[[regretdefault]](s32), %[[BB_DEFAULT]], %[[regretc100]](s32), %[[BB_CASE100]]
-; CHECK: %w0 = COPY %[[regret]](s32)
-; CHECK: RET_ReallyLR implicit %w0
+; CHECK: $w0 = COPY %[[regret]](s32)
+; CHECK: RET_ReallyLR implicit $w0
 ;
 define i32 @switch(i32 %argc) {
 entry:
@@ -289,22 +289,22 @@
 
 ; Tests for or.
 ; CHECK-LABEL: name: ori64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_OR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @ori64(i64 %arg1, i64 %arg2) {
   %res = or i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: ori32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_OR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @ori32(i32 %arg1, i32 %arg2) {
   %res = or i32 %arg1, %arg2
   ret i32 %res
@@ -312,22 +312,22 @@
 
 ; Tests for xor.
 ; CHECK-LABEL: name: xori64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_XOR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @xori64(i64 %arg1, i64 %arg2) {
   %res = xor i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: xori32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_XOR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @xori32(i32 %arg1, i32 %arg2) {
   %res = xor i32 %arg1, %arg2
   ret i32 %res
@@ -335,22 +335,22 @@
 
 ; Tests for and.
 ; CHECK-LABEL: name: andi64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_AND [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @andi64(i64 %arg1, i64 %arg2) {
   %res = and i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: andi32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_AND [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @andi32(i32 %arg1, i32 %arg2) {
   %res = and i32 %arg1, %arg2
   ret i32 %res
@@ -358,58 +358,58 @@
 
 ; Tests for sub.
 ; CHECK-LABEL: name: subi64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_SUB [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @subi64(i64 %arg1, i64 %arg2) {
   %res = sub i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: subi32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SUB [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @subi32(i32 %arg1, i32 %arg2) {
   %res = sub i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: ptrtoint
-; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[ARG1]]
-; CHECK: %x0 = COPY [[RES]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64 @ptrtoint(i64* %a) {
   %val = ptrtoint i64* %a to i64
   ret i64 %val
 }
 
 ; CHECK-LABEL: name: inttoptr
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_INTTOPTR [[ARG1]]
-; CHECK: %x0 = COPY [[RES]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64* @inttoptr(i64 %a) {
   %val = inttoptr i64 %a to i64*
   ret i64* %val
 }
 
 ; CHECK-LABEL: name: trivial_bitcast
-; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: %x0 = COPY [[ARG1]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: $x0 = COPY [[ARG1]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64* @trivial_bitcast(i8* %a) {
   %val = bitcast i8* %a to i64*
   ret i64* %val
 }
 
 ; CHECK-LABEL: name: trivial_bitcast_with_copy
-; CHECK: [[A:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[A:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: G_BR %[[CAST:bb\.[0-9]+]]
 
 ; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}:
@@ -429,11 +429,11 @@
 }
 
 ; CHECK-LABEL: name: bitcast
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[RES1:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[ARG1]]
 ; CHECK: [[RES2:%[0-9]+]]:_(s64) = G_BITCAST [[RES1]]
-; CHECK: %x0 = COPY [[RES2]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[RES2]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64 @bitcast(i64 %a) {
   %res1 = bitcast i64 %a to <2 x i32>
   %res2 = bitcast <2 x i32> %res1 to i64
@@ -441,7 +441,7 @@
 }
 
 ; CHECK-LABEL: name: trunc
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_LOAD
 ; CHECK: [[RES1:%[0-9]+]]:_(s8) = G_TRUNC [[ARG1]]
 ; CHECK: [[RES2:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[VEC]]
@@ -454,15 +454,15 @@
 }
 
 ; CHECK-LABEL: name: load
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY %x1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 16)
 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42, addrspace 42)
 ; CHECK: [[SUM2:%.*]]:_(s64) = G_ADD [[VAL1]], [[VAL2]]
 ; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load 8 from %ir.addr)
 ; CHECK: [[SUM3:%[0-9]+]]:_(s64) = G_ADD [[SUM2]], [[VAL3]]
-; CHECK: %x0 = COPY [[SUM3]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[SUM3]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
   %val1 = load i64, i64* %addr, align 16
@@ -475,10 +475,10 @@
 }
 
 ; CHECK-LABEL: name: store
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY %x1
-; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY %x3
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
+; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY $x3
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 16)
 ; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42, addrspace 42)
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store 8 into %ir.addr)
@@ -492,8 +492,8 @@
 }
 
 ; CHECK-LABEL: name: intrinsics
-; CHECK: [[CUR:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[BITS:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[CUR:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[BITS:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[CREG:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), [[CREG]]
 ; CHECK: [[PTR_VEC:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.ptr.vec
@@ -522,7 +522,7 @@
 ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_LOAD
 
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]]
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 define i32 @test_phi(i32* %addr1, i32* %addr2, i1 %tst) {
   br i1 %tst, label %true, label %false
@@ -551,14 +551,14 @@
 ; It's important that constants are after argument passing, but before the
 ; rest of the entry block.
 ; CHECK-LABEL: name: constant_int
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 
 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
 ; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
 ; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]]
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 define i32 @constant_int(i32 %in) {
   br label %next
@@ -581,7 +581,7 @@
 
 ; CHECK-LABEL: name: test_undef
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-; CHECK: %w0 = COPY [[UNDEF]]
+; CHECK: $w0 = COPY [[UNDEF]]
 define i32 @test_undef() {
   ret i32 undef
 }
@@ -589,7 +589,7 @@
 ; CHECK-LABEL: name: test_constant_inttoptr
 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ONE]]
-; CHECK: %x0 = COPY [[PTR]]
+; CHECK: $x0 = COPY [[PTR]]
 define i8* @test_constant_inttoptr() {
   ret i8* inttoptr(i64 1 to i8*)
 }
@@ -598,35 +598,35 @@
 ; functions, so reuse the "i64 1" from above.
 ; CHECK-LABEL: name: test_reused_constant
 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-; CHECK: %x0 = COPY [[ONE]]
+; CHECK: $x0 = COPY [[ONE]]
 define i64 @test_reused_constant() {
   ret i64 1
 }
 
 ; CHECK-LABEL: name: test_sext
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_SEXT [[IN]]
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_sext(i32 %in) {
   %res = sext i32 %in to i64
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: test_zext
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ZEXT [[IN]]
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_zext(i32 %in) {
   %res = zext i32 %in to i64
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: test_shl
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SHL [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_shl(i32 %arg1, i32 %arg2) {
   %res = shl i32 %arg1, %arg2
   ret i32 %res
@@ -634,66 +634,66 @@
 }
 
 
 ; CHECK-LABEL: name: test_lshr
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_LSHR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_lshr(i32 %arg1, i32 %arg2) {
   %res = lshr i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_ashr
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_ASHR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_ashr(i32 %arg1, i32 %arg2) {
   %res = ashr i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_sdiv
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SDIV [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_sdiv(i32 %arg1, i32 %arg2) {
   %res = sdiv i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_udiv
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UDIV [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_udiv(i32 %arg1, i32 %arg2) {
   %res = udiv i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_srem
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SREM [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_srem(i32 %arg1, i32 %arg2) {
   %res = srem i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_urem
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UREM [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_urem(i32 %arg1, i32 %arg2) {
   %res = urem i32 %arg1, %arg2
   ret i32 %res
@@ -701,13 +701,13 @@
 
 ; CHECK-LABEL: name: test_constant_null
 ; CHECK: [[NULL:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
-; CHECK: %x0 = COPY [[NULL]]
+; CHECK: $x0 = COPY [[NULL]]
 define i8* @test_constant_null() {
   ret i8* null
 }
 
 ; CHECK-LABEL: name: test_struct_memops
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 4)
 ; CHECK: G_STORE [[VAL]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 4)
 define void @test_struct_memops({ i8, i32 }* %addr) {
@@ -717,7 +717,7 @@
 }
 
 ; CHECK-LABEL: name: test_i1_memops
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load 1 from %ir.addr)
 ; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store 1 into %ir.addr)
 define void @test_i1_memops(i1* %addr) {
@@ -727,9 +727,9 @@
 }
 
 ; CHECK-LABEL: name: int_comparison
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LHS]](s32), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
 define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
@@ -739,9 +739,9 @@
 }
 
 ; CHECK-LABEL: name: ptr_comparison
-; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LHS]](p0), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
 define void @ptr_comparison(i8* %a, i8* %b, i1* %addr) {
@@ -751,64 +751,64 @@
 }
 
 ; CHECK-LABEL: name: test_fadd
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FADD [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fadd(float %arg1, float %arg2) {
   %res = fadd float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_fsub
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FSUB [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fsub(float %arg1, float %arg2) {
   %res = fsub float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_fmul
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FMUL [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fmul(float %arg1, float %arg2) {
   %res = fmul float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_fdiv
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FDIV [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fdiv(float %arg1, float %arg2) {
   %res = fdiv float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_frem
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FREM [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_frem(float %arg1, float %arg2) {
   %res = frem float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_sadd_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -822,9 +822,9 @@
 }
 
 ; CHECK-LABEL: name: test_uadd_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[ZERO:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDE [[LHS]], [[RHS]], [[ZERO]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
@@ -839,9 +839,9 @@
 }
 
 ; CHECK-LABEL: name: test_ssub_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -855,9 +855,9 @@
 }
 
 ; CHECK-LABEL: name: test_usub_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[ZERO:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBE [[LHS]], [[RHS]], [[ZERO]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
@@ -872,9 +872,9 @@
 }
 
 ; CHECK-LABEL: name: test_smul_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -888,9 +888,9 @@
 }
 
 ; CHECK-LABEL: name: test_umul_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -906,7 +906,7 @@
 ; CHECK-LABEL: name: test_extractvalue
 ; CHECK: [[STRUCT:%[0-9]+]]:_(s128) = G_LOAD
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT [[STRUCT]](s128), 64
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 %struct.nested = type {i8, { i8, i32 }, i32}
 define i32 @test_extractvalue(%struct.nested* %addr) {
   %struct = load %struct.nested, %struct.nested* %addr
@@ -926,7 +926,7 @@
 }
 
 ; CHECK-LABEL: name: test_insertvalue
-; CHECK: [[VAL:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[VAL:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[STRUCT:%[0-9]+]]:_(s128) = G_LOAD
 ; CHECK: [[NEWSTRUCT:%[0-9]+]]:_(s128) = G_INSERT [[STRUCT]], [[VAL]](s32), 64
 ; CHECK: G_STORE [[NEWSTRUCT]](s128),
@@ -939,20 +939,20 @@
 
 define [1 x i64] @test_trivial_insert([1 x i64] %s, i64 %val) {
 ; CHECK-LABEL: name: test_trivial_insert
-; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[VAL:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[VAL:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = COPY [[VAL]](s64)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
   %res = insertvalue [1 x i64] %s, i64 %val, 0
   ret [1 x i64] %res
 }
 
 define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
 ; CHECK-LABEL: name: test_trivial_insert_ptr
-; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[VAL]](p0)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
   %res = insertvalue [1 x i8*] %s, i8* %val, 0
   ret [1 x i8*] %res
 }
@@ -971,48 +971,48 @@
 }
 
 ; CHECK-LABEL: name: test_select
-; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w2
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 define i32 @test_select(i1 %tst, i32 %lhs, i32 %rhs) {
   %res = select i1 %tst, i32 %lhs, i32 %rhs
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_select_ptr
-; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
-; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i8* @test_select_ptr(i1 %tst, i8* %lhs, i8* %rhs) {
   %res = select i1 %tst, i8* %lhs, i8* %rhs
   ret i8* %res
 }
 
 ; CHECK-LABEL: name: test_select_vec
-; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
-; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY %q0
-; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY %q1
+; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
-; CHECK: %q0 = COPY [[RES]]
+; CHECK: $q0 = COPY [[RES]]
 define <4 x i32> @test_select_vec(i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs) {
   %res = select i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs
   ret <4 x i32> %res
 }
 
 ; CHECK-LABEL: name: test_vselect_vec
-; CHECK: [[TST32:%[0-9]+]]:_(<4 x s32>) = COPY %q0
-; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY %q1
-; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY %q2
+; CHECK: [[TST32:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q2
 ; CHECK: [[TST:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[TST32]](<4 x s32>)
 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](<4 x s1>), [[LHS]], [[RHS]]
-; CHECK: %q0 = COPY [[RES]]
+; CHECK: $q0 = COPY [[RES]]
 define <4 x i32> @test_vselect_vec(<4 x i32> %tst32, <4 x i32> %lhs, <4 x i32> %rhs) {
   %tst = trunc <4 x i32> %tst32 to <4 x i1>
   %res = select <4 x i1> %tst, <4 x i32> %lhs, <4 x i32> %rhs
@@ -1020,10 +1020,10 @@
 }
 
 ; CHECK-LABEL: name: test_fptosi
-; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOSI [[FP]](s32)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_fptosi(float* %fp.addr) {
   %fp = load float, float* %fp.addr
   %res = fptosi float %fp to i64
@@ -1031,10 +1031,10 @@
 }
 
 ; CHECK-LABEL: name: test_fptoui
-; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOUI [[FP]](s32)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_fptoui(float* %fp.addr) {
   %fp = load float, float* %fp.addr
   %res = fptoui float %fp to i64
@@ -1042,8 +1042,8 @@
 }
 
 ; CHECK-LABEL: name: test_sitofp
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_SITOFP [[IN]](s32)
 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
 define void @test_sitofp(double* %addr, i32 %in) {
@@ -1053,8 +1053,8 @@
 }
 
 ; CHECK-LABEL: name: test_uitofp
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_UITOFP [[IN]](s32)
 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
 define void @test_uitofp(double* %addr, i32 %in) {
@@ -1064,25 +1064,25 @@
 }
 
 ; CHECK-LABEL: name: test_fpext
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPEXT [[IN]](s32)
-; CHECK: %d0 = COPY [[RES]]
+; CHECK: $d0 = COPY [[RES]]
 define double @test_fpext(float %in) {
   %res = fpext float %in to double
   ret double %res
 }
 
 ; CHECK-LABEL: name: test_fptrunc
-; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY %d0
+; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY $d0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPTRUNC [[IN]](s64)
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
 define float @test_fptrunc(double %in) {
   %res = fptrunc double %in to float
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_constant_float
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[TMP:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+00
 ; CHECK: G_STORE [[TMP]](s32), [[ADDR]](p0)
 define void @test_constant_float(float* %addr) {
@@ -1091,9 +1091,9 @@
 }
 
 ; CHECK-LABEL: name: float_comparison
-; CHECK: [[LHSADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[RHSADDR:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHSADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[RHSADDR:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = G_LOAD [[LHSADDR]](p0)
 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
@@ -1124,7 +1124,7 @@
 define i32* @test_global() {
 ; CHECK-LABEL: name: test_global
 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var{{$}}
-; CHECK: %x0 = COPY [[TMP]](p0)
+; CHECK: $x0 = COPY [[TMP]](p0)
 
   ret i32* @var
 }
@@ -1133,7 +1133,7 @@
 define i32 addrspace(42)* @test_global_addrspace() {
 ; CHECK-LABEL: name: test_global
 ; CHECK: [[TMP:%[0-9]+]]:_(p42) = G_GLOBAL_VALUE @var1{{$}}
-; CHECK: %x0 = COPY [[TMP]](p42)
+; CHECK: $x0 = COPY [[TMP]](p42)
 
   ret i32 addrspace(42)* @var1
 }
@@ -1142,7 +1142,7 @@
 define void()* @test_global_func() {
 ; CHECK-LABEL: name: test_global_func
 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @allocai64{{$}}
-; CHECK: %x0 = COPY [[TMP]](p0)
+; CHECK: $x0 = COPY [[TMP]](p0)
 
   ret void()* @allocai64
 }
@@ -1150,13 +1150,13 @@
 declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
 define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
 ; CHECK-LABEL: name: test_memcpy
-; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: %x0 = COPY [[DST]]
-; CHECK: %x1 = COPY [[SRC]]
-; CHECK: %x2 = COPY [[SIZE]]
-; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: $x0 = COPY [[DST]]
+; CHECK: $x1 = COPY [[SRC]]
+; CHECK: $x2 = COPY [[SIZE]]
+; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
@@ -1164,13 +1164,13 @@
 declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
 define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
 ; CHECK-LABEL: name: test_memmove
-; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: %x0 = COPY [[DST]]
-; CHECK: %x1 = COPY [[SRC]]
-; CHECK: %x2 = COPY [[SIZE]]
-; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: $x0 = COPY [[DST]]
+; CHECK: $x1 = COPY [[SRC]]
+; CHECK: $x2 = COPY [[SIZE]]
+; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
@@ -1178,15 +1178,15 @@
 declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
 define void @test_memset(i8* %dst, i8 %val, i64 %size) {
 ; CHECK-LABEL: name: test_memset
-; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
-; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: %x0 = COPY [[DST]]
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: $x0 = COPY [[DST]]
 ; CHECK: [[SRC_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[SRC]]
-; CHECK: %w1 = COPY [[SRC_TMP]]
-; CHECK: %x2 = COPY [[SIZE]]
-; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %w1, implicit %x2
+; CHECK: $w1 = COPY [[SRC_TMP]]
+; CHECK: $x2 = COPY [[SIZE]]
+; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2
   call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
   ret void
 }
@@ -1195,8 +1195,8 @@
 declare i32 @llvm.objectsize.i32(i8*, i1)
 define void @test_objectsize(i8* %addr0, i8* %addr1) {
 ; CHECK-LABEL: name: test_objectsize
-; CHECK: [[ADDR0:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK: [[ADDR0:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: {{%[0-9]+}}:_(s64) = G_CONSTANT i64 -1
 ; CHECK: {{%[0-9]+}}:_(s64) = G_CONSTANT i64 0
 ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 -1
@@ -1210,7 +1210,7 @@
 
 define void @test_large_const(i128* %addr) {
 ; CHECK-LABEL: name: test_large_const
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s128) = G_CONSTANT i128 42
 ; CHECK: G_STORE [[VAL]](s128), [[ADDR]](p0)
   store i128 42, i128* %addr
@@ -1245,7 +1245,7 @@
 
 define void @test_va_arg(i8* %list) {
 ; CHECK-LABEL: test_va_arg
-; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: G_VAARG [[LIST]](p0), 8
 ; CHECK: G_VAARG [[LIST]](p0), 1
 ; CHECK: G_VAARG [[LIST]](p0), 16
@@ -1259,10 +1259,10 @@
 declare float @llvm.pow.f32(float, float)
 define float @test_pow_intrin(float %l, float %r) {
 ; CHECK-LABEL: name: test_pow_intrin
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPOW [[LHS]], [[RHS]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.pow.f32(float %l, float %r)
   ret float %res
 }
@@ -1270,11 +1270,11 @@
 declare float @llvm.fma.f32(float, float, float)
 define float @test_fma_intrin(float %a, float %b, float %c) {
 ; CHECK-LABEL: name: test_fma_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: [[B:%[0-9]+]]:_(s32) = COPY %s1
-; CHECK: [[C:%[0-9]+]]:_(s32) = COPY %s2
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
+; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $s2
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FMA [[A]], [[B]], [[C]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.fma.f32(float %a, float %b, float %c)
   ret float %res
 }
@@ -1282,9 +1282,9 @@
 declare float @llvm.exp.f32(float)
 define float @test_exp_intrin(float %a) {
 ; CHECK-LABEL: name: test_exp_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.exp.f32(float %a)
   ret float %res
 }
@@ -1292,9 +1292,9 @@
 declare float @llvm.exp2.f32(float)
 define float @test_exp2_intrin(float %a) {
 ; CHECK-LABEL: name: test_exp2_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP2 [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.exp2.f32(float %a)
   ret float %res
 }
@@ -1302,9 +1302,9 @@
 declare float @llvm.log.f32(float)
 define float @test_log_intrin(float %a) {
 ; CHECK-LABEL: name: test_log_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.log.f32(float %a)
   ret float %res
 }
@@ -1312,9 +1312,9 @@
 declare float @llvm.log2.f32(float)
 define float @test_log2_intrin(float %a) {
 ; CHECK-LABEL: name: test_log2_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG2 [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.log2.f32(float %a)
   ret float %res
 }
@@ -1331,7 +1331,7 @@
 
 define void @test_load_store_atomics(i8* %addr) {
 ; CHECK-LABEL: name: test_load_store_atomics
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
 ; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
 ; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
@@ -1352,18 +1352,18 @@
 
 define float @test_fneg_f32(float %x) {
 ; CHECK-LABEL: name: test_fneg_f32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FNEG [[ARG]]
-; CHECK: %s0 = COPY [[RES]](s32)
+; CHECK: $s0 = COPY [[RES]](s32)
   %neg = fsub float -0.000000e+00, %x
   ret float %neg
 }
 
 define double @test_fneg_f64(double %x) {
 ; CHECK-LABEL: name: test_fneg_f64
-; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY $d0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FNEG [[ARG]]
-; CHECK: %d0 = COPY [[RES]](s64)
+; CHECK: $d0 = COPY [[RES]](s64)
   %neg = fsub double -0.000000e+00, %x
   ret double %neg
 }
@@ -1379,31 +1379,31 @@
 
 define <2 x i32> @test_insertelement(<2 x i32> %vec, i32 %elt, i32 %idx){
 ; CHECK-LABEL: name: test_insertelement
-; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY %d0
-; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[RES:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[VEC]], [[ELT]](s32), [[IDX]](s32)
-; CHECK: %d0 = COPY [[RES]](<2 x s32>)
+; CHECK: $d0 = COPY [[RES]](<2 x s32>)
   %res = insertelement <2 x i32> %vec, i32 %elt, i32 %idx
   ret <2 x i32> %res
 }
 
 define i32 @test_extractelement(<2 x i32> %vec, i32 %idx) {
 ; CHECK-LABEL: name: test_extractelement
-; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY %d0
-; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDX]](s32)
-; CHECK: %w0 = COPY [[RES]](s32)
+; CHECK: $w0 = COPY [[RES]](s32)
   %res = extractelement <2 x i32> %vec, i32 %idx
   ret i32 %res
 }
 
 define i32 @test_singleelementvector(i32 %elt){
 ; CHECK-LABEL: name: test_singleelementvector
-; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK-NOT: G_INSERT_VECTOR_ELT
 ; CHECK-NOT: G_EXTRACT_VECTOR_ELT
-; CHECK: %w0 = COPY [[ELT]](s32)
+; CHECK: $w0 = COPY [[ELT]](s32)
   %vec = insertelement <1 x i32> undef, i32 %elt, i32 0
   %res = extractelement <1 x i32> %vec, i32 0
   ret i32 %res
@@ -1413,7 +1413,7 @@
 ; CHECK-LABEL: name: test_constantaggzerovector_v2i32
 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   ret <2 x i32> zeroinitializer
 }
 
@@ -1421,7 +1421,7 @@
 ; CHECK-LABEL: name: test_constantaggzerovector_v2f32
 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   ret <2 x float> zeroinitializer
 }
 
@@ -1439,7 +1439,7 @@
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   ret <2 x i32> <i32 1, i32 2>
 }
 
@@ -1461,7 +1461,7 @@
 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
 ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
-; CHECK: %q0 = COPY [[VEC]](<4 x s32>)
+; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
   ret <4 x i32> <i32 1, i32 2, i32 3, i32 4>
 }
 
@@ -1470,13 +1470,13 @@
 ; CHECK: [[FC1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
 ; CHECK: [[FC2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s64>) = G_MERGE_VALUES [[FC1]](s64), [[FC2]](s64)
-; CHECK: %q0 = COPY [[VEC]](<2 x s64>)
+; CHECK: $q0 = COPY [[VEC]](<2 x s64>)
   ret <2 x double> <double 1.0, double 2.0>
 }
 
 define i32 @test_constantaggzerovector_v1s32(i32 %arg){
 ; CHECK-LABEL: name: test_constantaggzerovector_v1s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-NOT: G_MERGE_VALUES
 ; CHECK: G_ADD [[ARG]], [[C0]]
@@ -1488,7 +1488,7 @@
 
 define i32 @test_constantdatavector_v1s32(i32 %arg){
 ; CHECK-LABEL: name: test_constantdatavector_v1s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-NOT: G_MERGE_VALUES
 ; CHECK: G_ADD [[ARG]], [[C1]]
@@ -1501,21 +1501,21 @@
 declare ghccc float @different_call_conv_target(float %x)
 define float @test_different_call_conv_target(float %x) {
 ; CHECK-LABEL: name: test_different_call_conv
-; CHECK: [[X:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: %s8 = COPY [[X]]
-; CHECK: BL @different_call_conv_target, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s8, implicit-def %s0
+; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK: $s8 = COPY [[X]]
+; CHECK: BL @different_call_conv_target, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s8, implicit-def $s0
   %res = call ghccc float @different_call_conv_target(float %x)
   ret float %res
 }
 
 define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
 ; CHECK-LABEL: name: test_shufflevector_s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
   %res = shufflevector <1 x i32> %vec, <1 x i32> undef, <2 x i32> zeroinitializer
   ret <2 x i32> %res
@@ -1523,11 +1523,11 @@
 
 define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_s32
-; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[C1]](s32)
-; CHECK: %w0 = COPY [[RES]](s32)
+; CHECK: $w0 = COPY [[RES]](s32)
   %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <1 x i32> <i32 1>
   %res = extractelement <1 x i32> %vec, i32 0
   ret i32 %res
@@ -1535,20 +1535,20 @@
 
 define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
   ret <2 x i32> %res
 }
 
 define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
-; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -1562,28 +1562,28 @@
 
 define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg2) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
-; CHECK: [[ARG1:%[0-9]+]]:_(<2 x s32>) = COPY %d0
-; CHECK: [[ARG2:%[0-9]+]]:_(<2 x s32>) = COPY %d1
+; CHECK: [[ARG1:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+; CHECK: [[ARG2:%[0-9]+]]:_(<2 x s32>) = COPY $d1
 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
 ; CHECK: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[ARG1]](<2 x s32>), [[ARG2]], [[MASK]](<4 x s32>)
-; CHECK: %q0 = COPY [[VEC]](<4 x s32>)
+; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
   %res = shufflevector <2 x i32> %arg1, <2 x i32> %arg2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %res
 }
 
 define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(<4 x s32>) = COPY %q0
+; CHECK: [[ARG:%[0-9]+]]:_(<4 x s32>) = COPY $q0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-DAG: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<4 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   %res = shufflevector <4 x i32> %arg, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
   ret <2 x i32> %res
 }
@@ -1591,8 +1591,8 @@
 
 define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2) {
 ; CHECK-LABEL: name: test_shufflevector_v8s8_v16s8
-; CHECK: [[ARG1:%[0-9]+]]:_(<8 x s8>) = COPY %d0
-; CHECK: [[ARG2:%[0-9]+]]:_(<8 x s8>) = COPY %d1
+; CHECK: [[ARG1:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+; CHECK: [[ARG2:%[0-9]+]]:_(<8 x s8>) = COPY $d1
 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -1611,7 +1611,7 @@
 ; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
 ; CHECK: [[MASK:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C8]](s32), [[C1]](s32), [[C9]](s32), [[C2]](s32), [[C10]](s32), [[C3]](s32), [[C11]](s32), [[C4]](s32), [[C12]](s32), [[C5]](s32), [[C13]](s32), [[C6]](s32), [[C14]](s32), [[C7]](s32), [[C15]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[ARG1]](<8 x s8>), [[ARG2]], [[MASK]](<16 x s32>)
-; CHECK: %q0 = COPY [[VEC]](<16 x s8>)
+; CHECK: $q0 = COPY [[VEC]](<16 x s8>)
   %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   ret <16 x i8> %res
 }
@@ -1620,14 +1620,14 @@
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
 ; CHECK: [[F:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
 ; CHECK: [[M:%[0-9]+]]:_(<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
-; CHECK: %d0 = COPY [[M]](<4 x s16>)
+; CHECK: $d0 = COPY [[M]](<4 x s16>)
 define <4 x half> @test_constant_vector() {
   ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
 }
 
 define i32 @test_target_mem_intrinsic(i32* %addr) {
 ; CHECK-LABEL: name: test_target_mem_intrinsic
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load 4 from %ir.addr)
 ; CHECK: G_TRUNC [[VAL]](s64)
   %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
Index: test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
+++ test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
@@ -122,10 +122,10 @@
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: defaultMapping
    ; CHECK: %1:gpr(s32) = G_ADD %0
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
 ...
@@ -140,11 +140,11 @@
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: defaultMappingVector
-    ; CHECK: %0:fpr(<2 x s32>) = COPY %d0
+    ; CHECK: %0:fpr(<2 x s32>) = COPY $d0
     ; CHECK: %1:fpr(<2 x s32>) = G_ADD %0
-    %0(<2 x s32>) = COPY %d0
+    %0(<2 x s32>) = COPY $d0
     %1(<2 x s32>) = G_ADD %0, %0
 ...
@@ -160,14 +160,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %s0, %x0
+    liveins: $s0, $x0
 
     ; CHECK-LABEL: name: defaultMapping1Repair
-    ; CHECK: %0:fpr(s32) = COPY %s0
-    ; CHECK-NEXT: %1:gpr(s32) = COPY %w0
+    ; CHECK: %0:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: %1:gpr(s32) = COPY $w0
     ; CHECK-NEXT: %3:gpr(s32) = COPY %0
     ; CHECK-NEXT: %2:gpr(s32) = G_ADD %3, %1
-    %0(s32) = COPY %s0
-    %1(s32) = COPY %w0
+    %0(s32) = COPY $s0
+    %1(s32) = COPY $w0
     %2(s32) = G_ADD %0, %1
 ...
@@ -179,13 +179,13 @@
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %s0, %x0
+    liveins: $s0, $x0
 
     ; CHECK-LABEL: name: defaultMapping2Repairs
-    ; CHECK: %0:fpr(s32) = COPY %s0
+    ; CHECK: %0:fpr(s32) = COPY $s0
     ; CHECK-NEXT: %2:gpr(s32) = COPY %0
     ; CHECK-NEXT: %3:gpr(s32) = COPY %0
     ; CHECK-NEXT: %1:gpr(s32) = G_ADD %2, %3
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_ADD %0, %0
 ...
@@ -201,12 +201,12 @@
   - { id: 1, class: fpr }
 body: |
   bb.0.entry:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: defaultMappingDefRepair
-    ; CHECK: %0:gpr(s32) = COPY %w0
+    ; CHECK: %0:gpr(s32) = COPY $w0
     ; CHECK-NEXT: %2:gpr(s32) = G_ADD %0, %0
     ; CHECK-NEXT: %1:fpr(s32) = COPY %2
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
 ...
@@ -231,12 +231,12 @@
 body: |
   bb.0.entry:
     successors: %bb.2.end, %bb.1.then
-    liveins: %x0, %x1, %w2
+    liveins: $x0, $x1, $w2
 
-    %0 = LDRWui killed %x0, 0 :: (load 4 from %ir.src)
+    %0 = LDRWui killed $x0, 0 :: (load 4 from %ir.src)
     %5(s32) = COPY %0
-    %1(p0) = COPY %x1
-    %2 = COPY %w2
+    %1(p0) = COPY $x1
+    %2 = COPY $w2
     TBNZW killed %2, 0, %bb.2.end
 
   bb.1.then:
@@ -259,14 +259,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %w0, %s0
+    liveins: $w0, $s0
 
     ; CHECK-LABEL: name: defaultMappingUseRepairPhysReg
-    ; CHECK: %0:gpr(s32) = COPY %w0
-    ; CHECK-NEXT: %1:fpr(s32) = COPY %s0
+    ; CHECK: %0:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: %1:fpr(s32) = COPY $s0
     ; CHECK-NEXT: %3:gpr(s32) = COPY %1
     ; CHECK-NEXT: %2:gpr(s32) = G_ADD %0, %3
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %s0
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $s0
     %2(s32) = G_ADD %0, %1
 ...
@@ -279,14 +279,14 @@
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: defaultMappingDefRepairPhysReg
-    ; CHECK: %0:gpr(s32) = COPY %w0
+    ; CHECK: %0:gpr(s32) = COPY $w0
     ; CHECK-NEXT: %1:gpr(s32) = G_ADD %0, %0
-    ; CHECK-NEXT: %s0 = COPY %1
-    %0(s32) = COPY %w0
+    ; CHECK-NEXT: $s0 = COPY %1
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
-    %s0 = COPY %1
+    $s0 = COPY %1
 ...
 
---
@@ -300,9 +300,9 @@
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1
-    ; CHECK: %0:gpr(<2 x s32>) = COPY %x0
-    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY %x1
+    liveins: $x0, $x1
+    ; CHECK: %0:gpr(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY $x1
 
     ; Fast mode tries to reuse the source of the copy for the destination.
     ; Now, the default mapping says that %0 and %1 need to be in FPR.
@@ -314,8 +314,8 @@
 
    ; Greedy mode remapped the instruction on the GPR bank.
    ; GREEDY-NEXT: %2:gpr(<2 x s32>) = G_OR %0, %1
-    %0(<2 x s32>) = COPY %x0
-    %1(<2 x s32>) = COPY %x1
+    %0(<2 x s32>) = COPY $x0
+    %1(<2 x s32>) = COPY $x1
     %2(<2 x s32>) = G_OR %0, %1
 ...
@@ -331,11 +331,11 @@
   - { id: 2, class: fpr }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: greedyMappingOrWithConstraints
-    ; CHECK: %0:gpr(<2 x s32>) = COPY %x0
-    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY %x1
+    ; CHECK: %0:gpr(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY $x1
 
     ; Fast mode tries to reuse the source of the copy for the destination.
     ; Now, the default mapping says that %0 and %1 need to be in FPR.
@@ -349,8 +349,8 @@
     ; GREEDY-NEXT: %3:gpr(<2 x s32>) = G_OR %0, %1
     ; We need to keep %2 into FPR because we do not know anything about it.
     ; GREEDY-NEXT: %2:fpr(<2 x s32>) = COPY %3
-    %0(<2 x s32>) = COPY %x0
-    %1(<2 x s32>) = COPY %x1
+    %0(<2 x s32>) = COPY $x0
+    %1(<2 x s32>) = COPY $x1
     %2(<2 x s32>) = G_OR %0, %1
 ...
@@ -366,17 +366,17 @@
   - { id: 1, class: gpr64 }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    ; CHECK: %0:gpr64 = COPY %x0
+    ; CHECK: %0:gpr64 = COPY $x0
     ; CHECK-NEXT: %1:gpr64 = ADDXrr %0, %0
-    ; CHECK-NEXT: %x0 = COPY %1
-    ; CHECK-NEXT: RET_ReallyLR implicit %x0
+    ; CHECK-NEXT: $x0 = COPY %1
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
 
-    %0 = COPY %x0
+    %0 = COPY $x0
     %1 = ADDXrr %0, %0
-    %x0 = COPY %1
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %1
+    RET_ReallyLR implicit $x0
 ...
 
 ---
@@ -404,13 +404,13 @@
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:gpr(s32) = COPY %w0
+# CHECK: %0:gpr(s32) = COPY $w0
 # CHECK: %1:gpr(s32) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_BITCAST %0
 ...
@@ -427,13 +427,13 @@
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:fpr(<2 x s16>) = COPY %s0
+# CHECK: %0:fpr(<2 x s16>) = COPY $s0
 # CHECK: %1:fpr(<2 x s16>) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
-    %0(<2 x s16>) = COPY %s0
+    %0(<2 x s16>) = COPY $s0
     %1(<2 x s16>) = G_BITCAST %0
 ...
@@ -451,14 +451,14 @@
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:gpr(s32) = COPY %w0
+# CHECK: %0:gpr(s32) = COPY $w0
 # FAST: %1:fpr(<2 x s16>) = G_BITCAST %0
 # GREEDY: %1:gpr(<2 x s16>) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(<2 x s16>) = G_BITCAST %0
 ...
@@ -470,14 +470,14 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:fpr(<2 x s16>) = COPY %s0
+# CHECK: %0:fpr(<2 x s16>) = COPY $s0
 # FAST: %1:gpr(s32) = G_BITCAST %0
 # GREEDY: %1:fpr(s32) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
-    %0(<2 x s16>) = COPY %s0
+    %0(<2 x s16>) = COPY $s0
     %1(s32) = G_BITCAST %0
 ...
@@ -489,13 +489,13 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:gpr(s64) = COPY %x0
+# CHECK: %0:gpr(s64) = COPY $x0
 # CHECK: %1:gpr(s64) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(s64) = G_BITCAST %0
 ...
@@ -507,13 +507,13 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:fpr(<2 x s32>) = COPY %d0
+# CHECK: %0:fpr(<2 x s32>) = COPY $d0
 # CHECK: %1:fpr(<2 x s32>) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
-    %0(<2 x s32>) = COPY %d0
+    %0(<2 x s32>) = COPY $d0
     %1(<2 x s32>) = G_BITCAST %0
 ...
@@ -525,14 +525,14 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:gpr(s64) = COPY %x0
+# CHECK: %0:gpr(s64) = COPY $x0
 # FAST: %1:fpr(<2 x s32>) = G_BITCAST %0
 # GREEDY: %1:gpr(<2 x s32>) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(<2 x s32>) = G_BITCAST %0
 ...
@@ -544,14 +544,14 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 
 # CHECK: body:
-# CHECK: %0:fpr(<2 x s32>) = COPY %d0
+# CHECK: %0:fpr(<2 x s32>) = COPY $d0
 # FAST: %1:gpr(s64) = G_BITCAST %0
 # GREEDY: %1:fpr(s64) = G_BITCAST %0
 body: |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
-    %0(<2 x s32>) = COPY %d0
+    %0(<2 x s32>) = COPY $d0
     %1(s64) = G_BITCAST %0
 ...
@@ -569,13 +569,13 @@
 # CHECK: %2:fpr(<2 x s64>) = G_BITCAST %3(s128)
 body: |
   bb.1:
-    liveins: %x0, %x1
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    liveins: $x0, $x1
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %3(s128) = G_MERGE_VALUES %0(s64), %1(s64)
     %2(<2 x s64>) = G_BITCAST %3(s128)
-    %q0 = COPY %2(<2 x s64>)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
 
 ...
@@ -598,14 +598,14 @@
 # CHECK-NEXT: %2:fpr(<2 x s64>) = G_BITCAST %4(s128)
 body: |
   bb.1:
-    liveins: %x0, %x1
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    liveins: $x0, $x1
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %3(s128) = G_MERGE_VALUES %0(s64), %1(s64)
     %4(s128) = COPY %3(s128)
     %2(<2 x s64>) = G_BITCAST %4(s128)
-    %q0 = COPY %2(<2 x s64>)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
 
 ...
@@ -630,11 +630,11 @@
 # CHECK: %1:fpr(s128) = COPY %0
 body: |
   bb.1:
-    liveins: %x0
-    %0 = LDRQui killed %x0, 0
+    liveins: $x0
+    %0 = LDRQui killed $x0, 0
     %1(s128) = COPY %0
-    %q0 = COPY %1(s128)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %1(s128)
+    RET_ReallyLR implicit $q0
 
 ...
@@ -654,15 +654,15 @@
 # CHECK: registers:
 # CHECK:  - { id: 0, class: gpr, preferred-register: '' }
 # CHECK:  - { id: 1, class: gpr, preferred-register: '' }
-# CHECK: %0:gpr(s32) = COPY %w0
+# CHECK: %0:gpr(s32) = COPY $w0
 # CHECK-NEXT: %1:gpr(s16) = G_TRUNC %0(s32)
 body: |
   bb.1:
-    liveins: %w0
-    %0(s32) = COPY %w0
+    liveins: $w0
+    %0(s32) = COPY $w0
     %1(s16) = G_TRUNC %0(s32)
-    %h0 = COPY %1(s16)
-    RET_ReallyLR implicit %h0
+    $h0 = COPY %1(s16)
+    RET_ReallyLR implicit $h0
 
 ...
@@ -682,8 +682,8 @@
   - { id: 4, class: _ }
   - { id: 5, class: _ }
 # No repairing should be necessary for both modes.
-# CHECK: %0:gpr(s64) = COPY %x0
-# CHECK-NEXT: %1:gpr(p0) = COPY %x1
+# CHECK: %0:gpr(s64) = COPY $x0
+# CHECK-NEXT: %1:gpr(p0) = COPY $x1
 # FAST-NEXT: %2:fpr(<2 x s32>) = G_BITCAST %0(s64)
 # FAST-NEXT: %3:fpr(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
 # FAST-NEXT: %4:fpr(<2 x s32>) = G_OR %2, %3
@@ -691,20 +691,20 @@
 # GREEDY-NEXT: %3:gpr(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
 # GREEDY-NEXT: %4:gpr(<2 x s32>) = G_OR %2, %3
 # CHECK-NEXT: %5:gpr(s64) = G_BITCAST %4(<2 x s32>)
-# CHECK-NEXT: %x0 = COPY %5(s64)
-# CHECK-NEXT: RET_ReallyLR implicit %x0
+# CHECK-NEXT: $x0 = COPY %5(s64)
+# CHECK-NEXT: RET_ReallyLR implicit $x0
 body: |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %0(s64) = COPY %x0
-    %1(p0) = COPY %x1
+    %0(s64) = COPY $x0
+    %1(p0) = COPY $x1
     %2(<2 x s32>) = G_BITCAST %0(s64)
     %3(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
     %4(<2 x s32>) = G_OR %2, %3
     %5(s64) = G_BITCAST %4(<2 x s32>)
-    %x0 = COPY %5(s64)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %5(s64)
+    RET_ReallyLR implicit $x0
 
 ...
@@ -728,25 +728,25 @@
   - { id: 3, class: _ }
 # No repairing should be necessary for both modes.
-# CHECK: %0:gpr(s64) = COPY %x0
-# CHECK-NEXT: %1:gpr(p0) = COPY %x1
+# CHECK: %0:gpr(s64) = COPY $x0
+# CHECK-NEXT: %1:gpr(p0) = COPY $x1
 # CHECK-NEXT: %2:fpr(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
 # %0 has been mapped to GPR, we need to repair to match FPR.
 # CHECK-NEXT: %4:fpr(s64) = COPY %0
 # CHECK-NEXT: %3:fpr(s64) = G_FADD %4, %2
-# CHECK-NEXT: %x0 = COPY %3(s64)
-# CHECK-NEXT: RET_ReallyLR implicit %x0
+# CHECK-NEXT: $x0 = COPY %3(s64)
+# CHECK-NEXT: RET_ReallyLR implicit $x0
 body: |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %0(s64) = COPY %x0
-    %1(p0) = COPY %x1
+    %0(s64) = COPY $x0
+    %1(p0) = COPY $x1
     %2(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
     %3(s64) = G_FADD %0, %2
-    %x0 = COPY %3(s64)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
 
 ...
@@ -768,8 +768,8 @@
   - { id: 1, class: _ }
   - { id: 2, class: _ }
 
-# CHECK: %0:gpr(s64) = COPY %x0
-# CHECK-NEXT: %1:gpr(p0) = COPY %x1
+# CHECK: %0:gpr(s64) = COPY $x0
+# CHECK-NEXT: %1:gpr(p0) = COPY $x1
 # %0 has been mapped to GPR, we need to repair to match FPR.
 # CHECK-NEXT: %3:fpr(s64) = COPY %0
 # CHECK-NEXT: %4:fpr(s64) = COPY %0
@@ -779,10 +779,10 @@
 body: |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %0(s64) = COPY %x0
-    %1(p0) = COPY %x1
+    %0(s64) = COPY $x0
+    %1(p0) = COPY $x1
     %2(s64) = G_FADD %0, %0
     G_STORE %2(s64), %1(p0) :: (store 8 into %ir.addr)
     RET_ReallyLR
@@ -804,23 +804,23 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# CHECK: %1:gpr(s32) = COPY %w0
+# CHECK: %1:gpr(s32) = COPY $w0
 # CHECK-NEXT: %0:gpr(s16) = G_TRUNC %1
 # %0 has been mapped to GPR, we need to repair to match FPR.
 # CHECK-NEXT: %3:fpr(s16) = COPY %0
 # CHECK-NEXT: %2:fpr(s32) = G_FPEXT %3
-# CHECK-NEXT: %s0 = COPY %2
+# CHECK-NEXT: $s0 = COPY %2
 # CHECK-NEXT: RET_ReallyLR
 body: |
   bb.1:
-    liveins: %w0
+    liveins: $w0
 
-    %1(s32) = COPY %w0
+    %1(s32) = COPY $w0
     %0(s16) = G_TRUNC %1(s32)
     %2(s32) = G_FPEXT %0(s16)
-    %s0 = COPY %2(s32)
-    RET_ReallyLR implicit %s0
+    $s0 = COPY %2(s32)
+    RET_ReallyLR implicit $s0
 
 ...
@@ -839,23 +839,23 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# CHECK: %1:gpr(s32) = COPY %w0
+# CHECK: %1:gpr(s32) = COPY $w0
 # CHECK-NEXT: %0:gpr(s16) = G_TRUNC %1
 # %0 has been mapped to GPR, we need to repair to match FPR.
 # CHECK-NEXT: %3:fpr(s16) = COPY %0
 # CHECK-NEXT: %2:fpr(s64) = G_FPEXT %3
-# CHECK-NEXT: %d0 = COPY %2
+# CHECK-NEXT: $d0 = COPY %2
 # CHECK-NEXT: RET_ReallyLR
 body: |
   bb.1:
-    liveins: %w0
+    liveins: $w0
 
-    %1(s32) = COPY %w0
+    %1(s32) = COPY $w0
     %0(s16) = G_TRUNC %1(s32)
     %2(s64) = G_FPEXT %0(s16)
-    %d0 = COPY %2(s64)
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %2(s64)
+    RET_ReallyLR implicit $d0
 
 ...
@@ -872,20 +872,20 @@
 registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
-# CHECK: %0:gpr(s32) = COPY %w0
+# CHECK: %0:gpr(s32) = COPY $w0
 # %0 has been mapped to GPR, we need to repair to match FPR.
 # CHECK-NEXT: %2:fpr(s32) = COPY %0
 # CHECK-NEXT: %1:fpr(s64) = G_FPEXT %2
-# CHECK-NEXT: %d0 = COPY %1
+# CHECK-NEXT: $d0 = COPY %1
 # CHECK-NEXT: RET_ReallyLR
 body: |
   bb.1:
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s64) = G_FPEXT %0(s32)
-    %d0 = COPY %1(s64)
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %1(s64)
+    RET_ReallyLR implicit $d0
 
 ...
@@ -894,8 +894,8 @@
 # CHECK-LABEL: name: passFp16
 # CHECK: registers:
 # CHECK:  - { id: 0, class: fpr, preferred-register: '' }
-# CHECK: %0:fpr(s16) = COPY %h0
-# CHECK-NEXT: %h0 = COPY %0(s16)
+# CHECK: %0:fpr(s16) = COPY $h0
+# CHECK-NEXT: $h0 = COPY %0(s16)
 name: passFp16
 alignment: 2
 legalized: true
@@ -903,11 +903,11 @@
   - { id: 0, class: _ }
 body: |
   bb.1.entry:
-    liveins: %h0
+    liveins: $h0
 
-    %0(s16) = COPY %h0
-    %h0 = COPY %0(s16)
-    RET_ReallyLR implicit %h0
+    %0(s16) = COPY $h0
+    $h0 = COPY %0(s16)
+    RET_ReallyLR implicit $h0
 
 ...
 ---
@@ -919,7 +919,7 @@
 # CHECK:  - { id: 1, class: gpr, preferred-register: '' }
 # CHECK:  - { id: 2, class: fpr, preferred-register: '' }
 #
-# CHECK: %0:fpr(s16) = COPY %h0
+# CHECK: %0:fpr(s16) = COPY $h0
 # CHECK-NEXT: %1:gpr(p0) = G_FRAME_INDEX %stack.0.p.addr
 # If we didn't look through the copy for %0, the default mapping
 # would have been on GPR and we would have to insert a copy to move
@@ -929,7 +929,7 @@
 # would have been on GPR and we would have to insert a copy to move
 # the value to FPR (h0).
 # CHECK-NEXT: %2:fpr(s16) = G_LOAD %1(p0) :: (load 2 from %ir.p.addr)
-# CHECK-NEXT: %h0 = COPY %2(s16)
+# CHECK-NEXT: $h0 = COPY %2(s16)
 name: passFp16ViaAllocas
 alignment: 2
 legalized: true
@@ -944,13 +944,13 @@
   - { id: 0, name: p.addr, size: 2, alignment: 2, stack-id: 0 }
 body: |
   bb.1.entry:
-    liveins: %h0
+    liveins: $h0
 
-    %0(s16) = COPY %h0
+    %0(s16) = COPY $h0
     %1(p0) = G_FRAME_INDEX %stack.0.p.addr
     G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p.addr)
     %2(s16) = G_LOAD %1(p0) :: (load 2 from %ir.p.addr)
-    %h0 = COPY %2(s16)
-    RET_ReallyLR implicit %h0
+    $h0 = COPY %2(s16)
+    RET_ReallyLR implicit $h0
 
 ...
Index: test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
+++ test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
@@ -11,7 +11,7 @@
 ; CHECK: [[RHS:%[0-9]+]]:_(s8) = G_LOAD [[RHS_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK8]], align 0)
 ; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[LHS]], [[RHS]]
 ; CHECK: [[SUM32:%[0-9]+]]:_(s32) = G_SEXT [[SUM]](s8)
-; CHECK: %w0 = COPY [[SUM32]](s32)
+; CHECK: $w0 = COPY [[SUM32]](s32)
 define signext i8 @test_stack_slots([8 x i64], i8 signext %lhs, i8 signext %rhs) {
   %sum = add i8 %lhs, %rhs
   ret i8 %sum
@@ -20,11 +20,11 @@
 ; CHECK-LABEL: name: test_call_stack
 ; CHECK: [[C42:%[0-9]+]]:_(s8) = G_CONSTANT i8 42
 ; CHECK: [[C12:%[0-9]+]]:_(s8) = G_CONSTANT i8 12
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
 ; CHECK: G_STORE [[C42]](s8), [[C42_LOC]](p0) :: (store 1 into stack, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 ; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C12_OFFS]](s64)
 ; CHECK: G_STORE [[C12]](s8), [[C12_LOC]](p0) :: (store 1 into stack + 1, align 0)
@@ -35,9 +35,9 @@
 }
 
 ; CHECK-LABEL: name: test_128bit_struct
-; CHECK: %x0 = COPY
-; CHECK: %x1 = COPY
-; CHECK: %x2 = COPY
+; CHECK: $x0 = COPY
+; CHECK: $x1 = COPY
+; CHECK: $x2 = COPY
 ; CHECK: BL @take_128bit_struct
 define void @test_128bit_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -46,9 +46,9 @@
 }
 
 ; CHECK-LABEL: name: take_128bit_struct
-; CHECK: {{%.*}}:_(p0) = COPY %x0
-; CHECK: {{%.*}}:_(s64) = COPY %x1
-; CHECK: {{%.*}}:_(s64) = COPY %x2
+; CHECK: {{%.*}}:_(p0) = COPY $x0
+; CHECK: {{%.*}}:_(s64) = COPY $x1
+; CHECK: {{%.*}}:_(s64) = COPY $x2
 define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
   store [2 x i64] %in, [2 x i64]* %ptr
   ret void
@@ -59,12 +59,12 @@
 ; CHECK: [[LO:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 0
 ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 64
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[LO]](s64), [[ADDR]](p0) :: (store 8 into stack, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[HI]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 0)
Index: test/CodeGen/AArch64/GlobalISel/call-translator.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
 
 ; CHECK-LABEL: name: test_trivial_call
-; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp
-; CHECK: BL @trivial_callee, csr_aarch64_aapcs, implicit-def %lr
-; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; CHECK: BL @trivial_callee, csr_aarch64_aapcs, implicit-def $lr
+; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 declare void @trivial_callee()
 define void @test_trivial_call() {
   call void @trivial_callee()
@@ -11,10 +11,10 @@
 }
 
 ; CHECK-LABEL: name: test_simple_return
-; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit-def %x0
-; CHECK: [[RES:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: %x0 = COPY [[RES]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit-def $x0
+; CHECK: [[RES:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: $x0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $x0
 declare i64 @simple_return_callee()
 define i64 @test_simple_return() {
   %res = call i64 @simple_return_callee()
@@ -22,9 +22,9 @@
 }
 
 ; CHECK-LABEL: name: test_simple_arg
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: %w0 = COPY [[IN]]
-; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: $w0 = COPY [[IN]]
+; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 ; CHECK: RET_ReallyLR
 declare void @simple_arg_callee(i32 %in)
 define void @test_simple_arg(i32 %in) {
@@ -36,8 +36,8 @@
 ; CHECK: registers:
 ; Make sure the register feeding the indirect call is properly constrained.
 ; CHECK: - { id: [[FUNC:[0-9]+]], class: gpr64, preferred-register: '' }
-; CHECK: %[[FUNC]]:gpr64(p0) = COPY %x0
-; CHECK: BLR %[[FUNC]](p0), csr_aarch64_aapcs, implicit-def %lr, implicit %sp
+; CHECK: %[[FUNC]]:gpr64(p0) = COPY $x0
+; CHECK: BLR %[[FUNC]](p0), csr_aarch64_aapcs, implicit-def $lr, implicit $sp
 ; CHECK: RET_ReallyLR
 define void @test_indirect_call(void()* %func) {
   call void %func()
@@ -45,11 +45,11 @@
 }
 
 ; CHECK-LABEL: name: test_multiple_args
-; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[ANSWER:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
-; CHECK: %w0 = COPY [[ANSWER]]
-; CHECK: %x1 = COPY [[IN]]
-; CHECK: BL @multiple_args_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit %x1
+; CHECK: $w0 = COPY [[ANSWER]]
+; CHECK: $x1 = COPY [[IN]]
+; CHECK: BL @multiple_args_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $x1
 ; CHECK: RET_ReallyLR
 declare void @multiple_args_callee(i32, i64)
 define void @test_multiple_args(i64 %in) {
@@ -59,11 +59,11 @@
 
 ; CHECK-LABEL: name: test_struct_formal
-; CHECK: [[DBL:%[0-9]+]]:_(s64) = COPY %d0
-; CHECK: [[I64:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[I8_C:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[DBL:%[0-9]+]]:_(s64) = COPY $d0
+; CHECK: [[I64:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[I8_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[I8:%[0-9]+]]:_(s8) = G_TRUNC [[I8_C]]
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s192) = G_IMPLICIT_DEF
 ; CHECK: [[ARG0:%[0-9]+]]:_(s192) = G_INSERT [[UNDEF]], [[DBL]](s64), 0
@@ -80,17 +80,17 @@
 
 ; CHECK-LABEL: name: test_struct_return
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s192) = G_LOAD [[ADDR]](p0)
 
 ; CHECK: [[DBL:%[0-9]+]]:_(s64) = G_EXTRACT [[VAL]](s192), 0
 ; CHECK: [[I64:%[0-9]+]]:_(s64) = G_EXTRACT [[VAL]](s192), 64
 ; CHECK: [[I32:%[0-9]+]]:_(s32) = G_EXTRACT [[VAL]](s192), 128
-; CHECK: %d0 = COPY [[DBL]](s64)
-; CHECK: %x0 = COPY [[I64]](s64)
-; CHECK: %w1 = COPY [[I32]](s32)
-; CHECK: RET_ReallyLR implicit %d0, implicit %x0, implicit %w1
+; CHECK: $d0 = COPY [[DBL]](s64)
+; CHECK: $x0 = COPY [[I64]](s64)
+; CHECK: $w1 = COPY [[I32]](s32)
+; CHECK: RET_ReallyLR implicit $d0, implicit $x0, implicit $w1
 define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
   %val = load {double, i64, i32}, {double, i64, i32}* %addr
   ret {double, i64, i32} %val
@@ -105,15 +105,15 @@
 ; CHECK: [[E2:%[0-9]+]]:_(s64) = G_EXTRACT [[ARG]](s256), 128
 ; CHECK: [[E3:%[0-9]+]]:_(s64) = G_EXTRACT [[ARG]](s256), 192
 
-; CHECK: %x0 = COPY [[E0]](s64)
-; CHECK: %x1 = COPY [[E1]](s64)
-; CHECK: %x2 = COPY [[E2]](s64)
-; CHECK: %x3 = COPY [[E3]](s64)
-; CHECK: BL @arr_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2, implicit %x3, implicit-def %x0, implicit-def %x1, implicit-def %x2, implicit-def %x3
-; CHECK: [[E0:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[E1:%[0-9]+]]:_(s64) = COPY %x1
-; CHECK: [[E2:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: [[E3:%[0-9]+]]:_(s64) = COPY %x3
+; CHECK: $x0 = COPY [[E0]](s64)
+; CHECK: $x1 = COPY [[E1]](s64)
+; CHECK: $x2 = COPY [[E2]](s64)
+; CHECK: $x3 = COPY [[E3]](s64)
+; CHECK: BL @arr_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit-def $x0, implicit-def $x1, implicit-def $x2, implicit-def $x3
+; CHECK: [[E0:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[E1:%[0-9]+]]:_(s64) = COPY $x1
+; CHECK: [[E2:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: [[E3:%[0-9]+]]:_(s64) = COPY $x3
 ; CHECK: [[RES:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[E0]](s64), [[E1]](s64), [[E2]](s64), [[E3]](s64)
 ; CHECK: G_EXTRACT [[RES]](s256), 64
 declare [4 x i64] @arr_callee([4 x i64])
@@ -128,14 +128,14 @@
 ; CHECK-LABEL: name: test_abi_exts_call
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_LOAD
 ; CHECK: [[VAL_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[VAL]]
-; CHECK: %w0 = COPY [[VAL_TMP]]
-; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: $w0 = COPY [[VAL_TMP]]
+; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_SEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[SVAL]](s32)
-; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: $w0 = COPY [[SVAL]](s32)
+; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 ; CHECK: [[ZVAL:%[0-9]+]]:_(s32) = G_ZEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[ZVAL]](s32)
-; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: $w0 = COPY [[ZVAL]](s32)
+; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 declare void @take_char(i8)
 define void @test_abi_exts_call(i8* %addr) {
   %val = load i8, i8* %addr
@@ -148,8 +148,8 @@
 ; CHECK-LABEL: name: test_abi_sext_ret
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_LOAD
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_SEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[SVAL]](s32)
-; CHECK: RET_ReallyLR implicit %w0
+; CHECK: $w0 = COPY [[SVAL]](s32)
+; CHECK: RET_ReallyLR implicit $w0
 define signext i8 @test_abi_sext_ret(i8* %addr) {
   %val = load i8, i8* %addr
   ret i8 %val
@@ -158,8 +158,8 @@
 ; CHECK-LABEL: name: test_abi_zext_ret
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_LOAD
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_ZEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[SVAL]](s32)
-; CHECK: RET_ReallyLR implicit %w0
+; CHECK: $w0 = COPY [[SVAL]](s32)
+; CHECK: RET_ReallyLR implicit $w0
 define zeroext i8 @test_abi_zext_ret(i8* %addr) {
   %val = load i8, i8* %addr
   ret i8 %val
@@ -188,21 +188,21 @@
 ; CHECK: [[C42:%[0-9]+]]:_(s64) = G_CONSTANT i64 42
 ; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
-; CHECK: ADJCALLSTACKDOWN 24, 0, implicit-def %sp, implicit %sp
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
 ; CHECK: G_STORE [[C42]](s64), [[C42_LOC]](p0) :: (store 8 into stack, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C12_OFFS]](s64)
 ; CHECK: G_STORE [[C12]](s64), [[C12_LOC]](p0) :: (store 8 into stack + 8, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[PTR_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; CHECK: [[PTR_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[PTR_OFFS]](s64)
 ; CHECK: G_STORE [[PTR]](p0), [[PTR_LOC]](p0) :: (store 8 into stack + 16, align 0)
 ; CHECK: BL @test_stack_slots
-; CHECK: ADJCALLSTACKUP 24, 0, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
 define void @test_call_stack() {
   call void @test_stack_slots([8 x i64] undef, i64 42, i64 12, i64* null)
   ret void
@@ -219,9 +219,9 @@
 }
 
 ; CHECK-LABEL: name: test_128bit_struct
-; CHECK: %x0 = COPY
-; CHECK: %x1 = COPY
-; CHECK: %x2 = COPY
+; CHECK: $x0 = COPY
+; CHECK: $x1 = COPY
+; CHECK: $x2 = COPY
 ; CHECK: BL @take_128bit_struct
 define void @test_128bit_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -230,9 +230,9 @@
 }
 
 ; CHECK-LABEL: name: take_128bit_struct
-; CHECK: {{%.*}}:_(p0) = COPY %x0
-; CHECK: {{%.*}}:_(s64) = COPY %x1
-; CHECK: {{%.*}}:_(s64) = COPY %x2
+; CHECK: {{%.*}}:_(p0) = COPY $x0
+; CHECK: {{%.*}}:_(s64) = COPY $x1
+; CHECK: {{%.*}}:_(s64) = COPY $x2
 define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
   store [2 x i64] %in, [2 x i64]* %ptr
   ret void
@@ -243,12 +243,12 @@
 ; CHECK: [[LO:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 0
 ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 64
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[LO]](s64), [[ADDR]](p0) :: (store 8 into stack, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[HI]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 0)
Index: test/CodeGen/AArch64/GlobalISel/debug-insts.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/debug-insts.ll
+++ test/CodeGen/AArch64/GlobalISel/debug-insts.ll
@@ -6,7 +6,7 @@
 ; CHECK: - { id: {{.*}}, name: in.addr, type: default, offset: 0, size: {{.*}}, alignment: {{.*}},
 ; CHECK-NEXT: callee-saved-register: '', callee-saved-restored: true,
 ; CHECK-NEXT: di-variable: '!11', di-expression: '!DIExpression()',
-; CHECK: DBG_VALUE debug-use %0(s32), debug-use %noreg, !11, !DIExpression(), debug-location !12
+; CHECK: DBG_VALUE debug-use %0(s32), debug-use $noreg, !11, !DIExpression(), debug-location !12
 define void @debug_declare(i32 %in) #0 !dbg !7 {
 entry:
   %in.addr = alloca i32, align 4
@@ -17,7 +17,7 @@
 }
 
 ; CHECK-LABEL: name: debug_declare_vla
-; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use %noreg, !14, !DIExpression(), debug-location !15
+; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use $noreg, !14, !DIExpression(), debug-location !15
 define void @debug_declare_vla(i32 %in) #0 !dbg !13 {
 entry:
   %vla.addr = alloca i32, i32 %in
@@ -26,19 +26,19 @@
 }
 
 ; CHECK-LABEL: name: debug_value
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 define void @debug_value(i32 %in) #0 !dbg !16 {
   %addr = alloca i32
-; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use %noreg, !17, !DIExpression(), debug-location !18
+; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use $noreg, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   store i32 %in, i32* %addr
-; CHECK: DBG_VALUE debug-use %1(p0), debug-use %noreg, !17, !DIExpression(DW_OP_deref), debug-location !18
+; CHECK: DBG_VALUE debug-use %1(p0), debug-use $noreg, !17, !DIExpression(DW_OP_deref), debug-location !18
   call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !17, metadata !DIExpression(DW_OP_deref)), !dbg !18
 ; CHECK: DBG_VALUE 123, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
 ; CHECK: DBG_VALUE float 1.000000e+00, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
-; CHECK: DBG_VALUE %noreg, 0, !17, !DIExpression(), debug-location !18
+; CHECK: DBG_VALUE $noreg, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   ret void
 }
Index: test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
+++ test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
@@ -1,47 +1,47 @@
 ; RUN: llc -mtriple=aarch64 -global-isel %s -o - -stop-after=irtranslator | FileCheck %s
 
 ; CHECK-LABEL: name: test_simple_alloca
-; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TYPE_SIZE:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
 ; CHECK: [[NUMELTS_64:%[0-9]+]]:_(s64) = G_ZEXT [[NUMELTS]](s32)
 ; CHECK: [[NUMBYTES:%[0-9]+]]:_(s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
-; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
 ; CHECK: [[ALIGNED_ALLOC:%[0-9]+]]:_(p0) = G_PTR_MASK [[ALLOC]], 4
-; CHECK: %sp = COPY [[ALIGNED_ALLOC]]
+; CHECK: $sp = COPY [[ALIGNED_ALLOC]]
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = COPY [[ALIGNED_ALLOC]]
-; CHECK: %x0 = COPY [[ALLOC]]
+; CHECK: $x0 = COPY [[ALLOC]]
 define i8* @test_simple_alloca(i32 %numelts) {
   %addr = alloca i8, i32 %numelts
  ret i8* %addr
 }
 
 ; CHECK-LABEL: name: test_aligned_alloca
-; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TYPE_SIZE:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
 ; CHECK: [[NUMELTS_64:%[0-9]+]]:_(s64) = G_ZEXT [[NUMELTS]](s32)
 ; CHECK: [[NUMBYTES:%[0-9]+]]:_(s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
-; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
 ; CHECK: [[ALIGNED_ALLOC:%[0-9]+]]:_(p0) = G_PTR_MASK [[ALLOC]], 5
-; CHECK: %sp = COPY [[ALIGNED_ALLOC]]
+; CHECK: $sp = COPY [[ALIGNED_ALLOC]]
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = COPY [[ALIGNED_ALLOC]]
-; CHECK: %x0 = COPY [[ALLOC]]
+; CHECK: $x0 = COPY [[ALLOC]]
 define i8* @test_aligned_alloca(i32 %numelts) {
   %addr = alloca i8, i32 %numelts, align 32
   ret i8* %addr
 }
 
 ; CHECK-LABEL: name: test_natural_alloca
-; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TYPE_SIZE:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
 ; CHECK: [[NUMELTS_64:%[0-9]+]]:_(s64) = G_ZEXT [[NUMELTS]](s32)
 ; CHECK: [[NUMBYTES:%[0-9]+]]:_(s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
-; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
-; CHECK: %sp = COPY [[ALLOC]]
+; CHECK: $sp = COPY [[ALLOC]]
 ; CHECK: [[ALLOC_TMP:%[0-9]+]]:_(p0) = COPY [[ALLOC]]
-; CHECK: %x0 = COPY [[ALLOC_TMP]]
+; CHECK: $x0 = COPY [[ALLOC_TMP]]
 define i128* @test_natural_alloca(i32 %numelts) {
   %addr = alloca i128, i32 %numelts
   ret i128* %addr
Index: test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
+++ test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
@@ -28,17 +28,17 @@
     di-variable: '', di-expression: '', di-location: '' }
 body: |
   bb.1.entry:
-    liveins: %q0
+    liveins: $q0
 
     ; This test just checks we don't crash on G_FNEG of FP128 types. Expect to fall
     ; back until support is added for fp128.
     ; CHECK: ret
-    %0:_(s128) = COPY %q0
+    %0:_(s128) = COPY $q0
     %1:_(p0) = G_FRAME_INDEX %stack.0.a.addr
     G_STORE %0(s128), %1(p0) :: (store 16 into %ir.a.addr)
     %2:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.a.addr)
     %3:_(s128) = G_FNEG %2
-    %q0 = COPY %3(s128)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %3(s128)
+    RET_ReallyLR implicit $q0
 
 ...
Index: test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
+++ test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
@@ -24,7 +24,7 @@
 
 ; At this point we mapped 46 values. The 'i32 100' constant will grow the map.
 ; CHECK: %46:_(s32) = G_CONSTANT i32 100
-; CHECK: %w0 = COPY %46(s32)
+; CHECK: $w0 = COPY %46(s32)
   %res = bitcast i32 100 to i32
   ret i32 %res
 }
Index: test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -11,24 +11,24 @@
 ; CHECK-NEXT: bb.1 (%ir-block.0):
 ; CHECK: successors: %[[GOOD:bb.[0-9]+]]{{.*}}%[[BAD:bb.[0-9]+]]
 ; CHECK: EH_LABEL
-; CHECK: %w0 = COPY
-; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit-def %w0
-; CHECK: {{%[0-9]+}}:_(s32) = COPY %w0
+; CHECK: $w0 = COPY
+; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit-def $w0
+; CHECK: {{%[0-9]+}}:_(s32) = COPY $w0
 ; CHECK: EH_LABEL
 ; CHECK: G_BR %[[GOOD]]
 
 ; CHECK: [[BAD]].{{[a-z]+}} (landing-pad):
 ; CHECK: EH_LABEL
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
-; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL_WITH_PTR:%[0-9]+]]:_(s128) = G_INSERT [[UNDEF]], [[PTR]](p0), 0
-; CHECK: [[SEL_PTR:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK: [[SEL_PTR:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]]
 ; CHECK: [[PTR_SEL:%[0-9]+]]:_(s128) = G_INSERT [[VAL_WITH_PTR]], [[SEL]](s32), 64
 ; CHECK: [[PTR_RET:%[0-9]+]]:_(s64) = G_EXTRACT [[PTR_SEL]](s128), 0
 ; CHECK: [[SEL_RET:%[0-9]+]]:_(s32) = G_EXTRACT [[PTR_SEL]](s128), 64
-; CHECK: %x0 = COPY [[PTR_RET]]
-; CHECK: %w1 = COPY [[SEL_RET]]
+; CHECK: $x0 = COPY [[PTR_RET]]
+; CHECK: $w1 = COPY [[SEL_RET]]
 
 ; CHECK: [[GOOD]].{{[a-z]+}}:
 ; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -49,7 +49,7 @@
 }
 
 ; CHECK-LABEL: name: test_invoke_indirect
-; CHECK: [[CALLEE:%[0-9]+]]:gpr64(p0) = COPY %x0
+; CHECK: [[CALLEE:%[0-9]+]]:gpr64(p0) = COPY $x0
 ; CHECK: BLR [[CALLEE]]
 define void @test_invoke_indirect(void()* %callee) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
   invoke void %callee() to label %continue unwind label %broken
@@ -68,14 +68,14 @@
 ; CHECK: [[ANSWER:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.0
 
-; CHECK: %x0 = COPY [[NULL]]
+; CHECK: $x0 = COPY [[NULL]]
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFFSET]](s64)
 ; CHECK: G_STORE [[ANSWER]](s32), [[SLOT]]
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFFSET]](s64)
 ; CHECK: G_STORE [[ONE]](s32), [[SLOT]]
Index: test/CodeGen/AArch64/GlobalISel/legalize-add.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-add.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-add.mir
@@ -30,29 +30,29 @@
 name: test_scalar_add_big
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_add_big
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY %x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY %x3
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
     ; CHECK: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY]], [[COPY2]], [[TRUNC]]
     ; CHECK: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDE1]]
-    ; CHECK: %x0 = COPY [[UADDE]](s64)
-    ; CHECK: %x1 = COPY [[UADDE2]](s64)
+    ; CHECK: $x0 = COPY [[UADDE]](s64)
+    ; CHECK: $x1 = COPY [[UADDE2]](s64)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(s64) = COPY %x2
-    %3:_(s64) = COPY %x3
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %3:_(s64) = COPY $x3
     %4:_(s128) = G_MERGE_VALUES %0, %1
     %5:_(s128) = G_MERGE_VALUES %2, %3
     %6:_(s128) = G_ADD %4, %5
     %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6
-    %x0 = COPY %7
-    %x1 = COPY %8
+    $x0 = COPY %7
+    $x1 = COPY %8
 ...
 
 ---
@@ -70,7 +70,7 @@
   - { id: 9, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_scalar_add_big_nonpow2
     ; CHECK-NOT: G_MERGE_VALUES
     ; CHECK-NOT: G_UNMERGE_VALUES
@@ -81,71 +81,71 @@
     ; CHECK: [[RES_HI:%[0-9]+]]:_(s64), {{%.*}}(s1) = G_UADDE %2, %3, [[CARRY2]]
     ; CHECK-NOT: G_MERGE_VALUES
     ; CHECK-NOT: G_UNMERGE_VALUES
-    ; CHECK: %x0 = COPY [[RES_LO]]
-    ; CHECK: %x1 = COPY [[RES_MI]]
-    ; CHECK: %x2 = COPY [[RES_HI]]
+    ; CHECK: $x0 = COPY [[RES_LO]]
+    ; CHECK: $x1 = COPY [[RES_MI]]
+    ; CHECK: $x2 = COPY [[RES_HI]]
 
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
-    %2(s64) = COPY %x2
-    %3(s64) = COPY %x3
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
+    %2(s64) = COPY $x2
+    %3(s64) = COPY $x3
     %4(s192) = G_MERGE_VALUES %0, %1, %2
     %5(s192) = G_MERGE_VALUES %1, %2, %3
     %6(s192) = G_ADD %4, %5
     %7(s64), %8(s64), %9(s64) = G_UNMERGE_VALUES %6
-    %x0 = COPY %7
-    %x1 = COPY %8
-    %x2 = COPY %9
+    $x0 = COPY %7
+    $x1 = COPY %8
+    $x2 = COPY %9
 ...
 ---
 name: test_scalar_add_small
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_add_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
-    ; CHECK: %x0 = COPY [[ANYEXT]](s64)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s8) = G_TRUNC %0
     %3:_(s8) = G_TRUNC %1
     %4:_(s8) = G_ADD %2, %3
     %5:_(s64) = G_ANYEXT %4
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...
 
 ---
 name: test_vector_add
 body: |
   bb.0.entry:
-    liveins: %q0, %q1, %q2, %q3
+    liveins: $q0, $q1, $q2, $q3
 
     ; CHECK-LABEL: name: test_vector_add
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY %q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY %q1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY %q2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY %q3
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
     ; CHECK: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY]], [[COPY2]]
     ; CHECK: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY1]], [[COPY3]]
-    ; CHECK: %q0 = COPY [[ADD]](<2 x s64>)
-    ; CHECK: %q1 = COPY [[ADD1]](<2 x s64>)
+    ; CHECK: $q0 = COPY [[ADD]](<2 x s64>)
+    ; CHECK: $q1 = COPY [[ADD1]](<2 x s64>)
-    %0:_(<2 x s64>) = COPY %q0
-    %1:_(<2 x s64>) = COPY %q1
-    %2:_(<2 x s64>) = COPY %q2
-    %3:_(<2 x s64>) = COPY %q3
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = COPY $q2
+    %3:_(<2 x s64>) = COPY $q3
     %4:_(<4 x s64>) = G_MERGE_VALUES %0, %1
     %5:_(<4 x s64>) = G_MERGE_VALUES %2, %3
     %6:_(<4 x s64>) = G_ADD %4, %5
     %7:_(<2 x s64>), %8:_(<2 x s64>) = G_UNMERGE_VALUES %6
-    %q0 = COPY %7
-    %q1 = COPY %8
+    $q0 = COPY %7
+    $q1 = COPY %8
 ...
 
 ---
 name: test_vector_add_nonpow2
@@ -162,7 +162,7 @@
   - { id: 9, class: _ }
 body: |
   bb.0.entry:
-    liveins: %q0, %q1, %q2, %q3
+    liveins: $q0, $q1, $q2, $q3
     ; CHECK-LABEL: name: test_vector_add_nonpow2
     ; CHECK-NOT: G_EXTRACT
     ; CHECK-NOT: G_SEQUENCE
@@ -171,19 +171,19 @@
     ; CHECK: [[RES_HI:%[0-9]+]]:_(<2 x s64>) = G_ADD %2, %3
     ; CHECK-NOT: G_EXTRACT
     ; CHECK-NOT: G_SEQUENCE
-    ; CHECK: %q0 = COPY [[RES_LO]]
-    ; CHECK: %q1 = COPY [[RES_MI]]
-    ; CHECK: %q2 = COPY [[RES_HI]]
+    ; CHECK: $q0 = COPY [[RES_LO]]
+    ; CHECK: $q1 = COPY [[RES_MI]]
+    ; CHECK: $q2 = COPY [[RES_HI]]
 
-    %0(<2 x s64>) = COPY %q0
-    %1(<2 x s64>) = COPY %q1
-    %2(<2 x s64>) = COPY %q2
-    %3(<2 x s64>) = COPY %q3
+    %0(<2 x s64>) = COPY $q0
+    %1(<2 x s64>) = COPY $q1
+    %2(<2 x s64>) = COPY $q2
+    %3(<2 x s64>) = COPY $q3
     %4(<6 x s64>) = G_MERGE_VALUES %0, %1, %2
     %5(<6 x s64>) = G_MERGE_VALUES %1, %2, %3
     %6(<6 x s64>) = G_ADD %4, %5
     %7(<2 x s64>), %8(<2 x s64>), %9(<2 x s64>) = G_UNMERGE_VALUES %6
-    %q0 = COPY %7
-    %q1 = COPY %8
-    %q2 = COPY %9
+    $q0 = COPY %7
+    $q1 = COPY %8
+    $q2 = COPY %9
 ...
Index: test/CodeGen/AArch64/GlobalISel/legalize-and.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-and.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-and.mir
@@ -22,25 +22,25 @@
   - { id: 6, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_and_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[AND]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[COPY3]](s64)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[COPY3]](s64)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
     %4(s8) = G_AND %2, %3
     %6(s32) = G_ANYEXT %4
-    %w0 = COPY %6
+    $w0 = COPY %6
     %5(s64) = G_ANYEXT %2
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...
Index: test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
@@ -14,72 +14,72 @@
 name: cmpxchg_i8
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CST2:%[0-9]+]]:_(s8) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s8) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST2]] :: (load store monotonic 1 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s8) = G_CONSTANT i8 1
     %2:_(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 1 on %ir.addr)
     %3:_(s32) = G_ANYEXT %2
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
 name: cmpxchg_i16
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CST2:%[0-9]+]]:_(s16) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s16) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST2]] :: (load store monotonic 2 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s16) = G_CONSTANT i16 1
     %2:_(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 2 on %ir.addr)
     %3:_(s32) = G_ANYEXT %2
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 ---
 name: cmpxchg_i32
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST]] :: (load store monotonic 4 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 1
     %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 4 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
 name: cmpxchg_i64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
Index: test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
@@ -30,13 +30,13 @@
   - { id: 14, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_icmp
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sge), [[COPY]](s64), [[COPY1]]
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
@@ -45,27 +45,27 @@
     ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C1]]
     ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND]](s32), [[AND1]]
     ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
-    ; CHECK: %w0 = COPY [[COPY3]](s32)
+    ; CHECK: $w0 = COPY [[COPY3]](s32)
     ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64)
     ; CHECK: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[INTTOPTR]](p0), [[INTTOPTR]]
     ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP2]](s32)
-    ; CHECK: %w0 = COPY [[COPY4]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY4]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x0
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
 
     %4(s1) = G_ICMP intpred(sge), %0, %1
     %11(s32) = G_ANYEXT %4
-    %w0 = COPY %11
+    $w0 = COPY %11
 
     %8(s1) = G_ICMP intpred(ult), %2, %3
     %12(s32) = G_ANYEXT %8
-    %w0 = COPY %12
+    $w0 = COPY %12
 
     %9(p0) = G_INTTOPTR %0(s64)
     %10(s1) = G_ICMP intpred(eq), %9(p0), %9(p0)
     %14(s32) = G_ANYEXT %10
-    %w0 = COPY %14
+    $w0 = COPY %14
 ...
Index: test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
@@ -13,24 +13,24 @@
 
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
     ; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s32), [[CMP]]
     ; CHECK: [[SRES32:%[0-9]+]]:_(s32) = COPY [[SRES]]
     ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[RES]], [[SRES32]]
-    ; CHECK: %w0 = COPY [[MUL]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[MUL]]
+    %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(s32) = G_CONSTANT i32 1
     %3:_(s32), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
     %5:_(s32) = G_ANYEXT %4
     %6:_(s32) = G_MUL %3, %5
-    %w0 = COPY %6(s32)
+    $w0 = COPY %6(s32)
 ...
 
 ---
@@ -38,22 +38,22 @@
 
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
     ; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s64), [[CMP]]
     ; CHECK: [[SRES64:%[0-9]+]]:_(s64) = G_ANYEXT [[SRES]]
     ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[RES]], [[SRES64]]
-    ; CHECK: %x0 = COPY [[MUL]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[MUL]]
+    %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 0
     %2:_(s64) = G_CONSTANT i64 1
     %3:_(s64), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
     %5:_(s64) = G_ANYEXT %4
     %6:_(s64) = G_MUL %3, %5
-    %x0 = COPY %6(s64)
+    $x0 = COPY %6(s64)
 ...
Index: test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
@@ -14,82 +14,82 @@
 name: cmpxchg_i8
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CMPT:%[0-9]+]]:_(s8) = G_TRUNC [[CMP]]
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CSTT:%[0-9]+]]:_(s8) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s8) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMPT]], [[CSTT]] :: (load store monotonic 1 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]](s8)
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s8) = G_CONSTANT i8 0
     %2:_(s8) = G_CONSTANT i8 1
     %3:_(s8) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 1 on %ir.addr)
     %4:_(s32) = G_ANYEXT %3
-    %w0 = COPY %4(s32)
+    $w0 = COPY %4(s32)
 ...
 ---
 name: cmpxchg_i16
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CMPT:%[0-9]+]]:_(s16) = G_TRUNC [[CMP]]
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CSTT:%[0-9]+]]:_(s16) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s16) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMPT]], [[CSTT]] :: (load store monotonic 2 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]](s16)
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s16) = G_CONSTANT i16 0
     %2:_(s16) = G_CONSTANT i16 1
     %3:_(s16) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 2 on %ir.addr)
     %4:_(s32) = G_ANYEXT %3
-    %w0 = COPY %4(s32)
+    $w0 = COPY %4(s32)
 ...
 
 ---
 name: cmpxchg_i32
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 4 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(s32) = G_CONSTANT i32 1
     %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 4 on %ir.addr)
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
 name: cmpxchg_i64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 0
     %2:_(s64) = G_CONSTANT i64 1
     %3:_(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %3(s64)
+    $x0 = COPY %3(s64)
 ...
Index: test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
@@ -14,78 +14,78 @@
 name: test_combines_2
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; Here the types don't match.
     ; CHECK-LABEL: name: test_combines_2
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
     ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s1) = G_EXTRACT [[MV]](s64), 0
     ; CHECK: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[MV]](s64), 0
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s32) = G_ADD %0, %0
     %2:_(s64) = G_MERGE_VALUES %0, %1
 
     %3:_(s1) = G_EXTRACT %2, 0
     %5:_(s32) = G_ANYEXT %3
-    %w0 = COPY %5
+    $w0 = COPY %5
 
     %4:_(s64) = G_EXTRACT %2, 0
-    %x0 = COPY %4
+    $x0 = COPY %4
 ...
--- name: test_combines_3 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_combines_3 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]] ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]] - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s32) = G_ADD %0, %0 %2:_(s64) = G_MERGE_VALUES %0, %1 %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2 %5:_(s32) = G_ADD %3, %4 - %w0 = COPY %5 + $w0 = COPY %5 ... --- name: test_combines_4 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_combines_4 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY1]] - %0:_(s64) = COPY %x0 + %0:_(s64) = COPY $x0 %1:_(s128) = G_MERGE_VALUES %0, %0 %2:_(s64) = G_EXTRACT %1, 0 %3:_(s64) = G_ADD %2, %2 - %w0 = COPY %3 + $w0 = COPY %3 ... --- name: test_combines_5 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_combines_5 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]] ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]] - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s32) = G_ADD %0, %0 %2:_(s64) = G_MERGE_VALUES %0, %1 %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2 %5:_(s32) = G_ADD %3, %4 - %w0 = COPY %5 + $w0 = COPY %5 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-constant.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-constant.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-constant.mir @@ -31,34 +31,34 @@ ; CHECK-LABEL: name: test_constant ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C]](s32) - ; CHECK: %w0 = COPY [[COPY]](s32) + ; CHECK: $w0 = COPY [[COPY]](s32) ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 42 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) + ; CHECK: $w0 = COPY [[COPY1]](s32) ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) - ; CHECK: %w0 = COPY [[COPY2]](s32) + ; CHECK: $w0 = COPY [[COPY2]](s32) ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; CHECK: %w0 = COPY [[C3]](s32) + ; CHECK: $w0 = COPY [[C3]](s32) ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 - ; CHECK: %x0 = COPY [[C4]](s64) + ; CHECK: $x0 = COPY [[C4]](s64) ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; CHECK: %x0 = COPY [[C5]](s64) + ; CHECK: $x0 = COPY [[C5]](s64) %0(s1) = G_CONSTANT i1 0 %6:_(s32) = G_ANYEXT %0 - %w0 = COPY %6 + $w0 = COPY %6 %1(s8) = G_CONSTANT i8 42 %7:_(s32) = G_ANYEXT %1 - %w0 = COPY %7 + $w0 = COPY %7 %2(s16) = G_CONSTANT i16 65535 %8:_(s32) = G_ANYEXT %2 - %w0 = COPY %8 + $w0 = COPY %8 %3(s32) = G_CONSTANT i32 -1 - %w0 = COPY %3 + $w0 = COPY %3 %4(s64) = G_CONSTANT i64 1 - %x0 = COPY %4 + $x0 = COPY %4 %5(s64) = G_CONSTANT i64 0 - %x0 = COPY %5 + $x0 = COPY %5 ... --- @@ -98,8 +98,8 @@ ; CHECK-LABEL: name: test_global ; CHECK: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0) - ; CHECK: %x0 = COPY [[PTRTOINT]](s64) + ; CHECK: $x0 = COPY [[PTRTOINT]](s64) %0(p0) = G_GLOBAL_VALUE @var %1:_(s64) = G_PTRTOINT %0 - %x0 = COPY %1 + $x0 = COPY %1 ... 
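A note on the convention all of these test updates exercise: after this change, MIR prints and parses physical registers with a '$' prefix, while virtual registers keep '%'. A minimal sketch of the new syntax (the function name and vreg numbers are illustrative only, not taken from any test in this patch):

    name: reg_prefix_sketch
    body: |
      bb.0:
        liveins: $w0                 ; physical registers now use the '$' prefix
        %0:_(s32) = COPY $w0         ; virtual registers keep '%'
        $w0 = COPY %0(s32)           ; writing the result back to a physreg
        RET_ReallyLR implicit $w0
    ...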
Index: test/CodeGen/AArch64/GlobalISel/legalize-div.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-div.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-div.mir @@ -21,10 +21,10 @@ - { id: 5, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_div - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]] @@ -35,7 +35,7 @@ ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]] ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]] ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32) - ; CHECK: %w0 = COPY [[COPY2]](s32) + ; CHECK: $w0 = COPY [[COPY2]](s32) ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]] @@ -44,20 +44,20 @@ ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]] ; CHECK: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[AND]], [[AND1]] ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32) - ; CHECK: %w0 = COPY [[COPY3]](s32) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $w0 = COPY [[COPY3]](s32) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 %4(s8) = G_SDIV %2, %3 %6:_(s32) = G_ANYEXT %4 - %w0 = COPY %6 + $w0 = COPY %6 %5(s8) = G_UDIV %2, %3 %7:_(s32) = G_ANYEXT %5 - %w0 = COPY %7 + $w0 = COPY %7 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll +++ test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll @@ -15,10 +15,10 @@ ; CHECK: [[LP]].{{[a-z]+}} (landing-pad): ; CHECK: EH_LABEL -; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0 +; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[STRUCT_PTR:%[0-9]+]]:_(s64) = G_PTRTOINT [[PTR]](p0) -; CHECK: [[SEL_PTR:%[0-9]+]]:_(p0) = COPY %x1 +; CHECK: [[SEL_PTR:%[0-9]+]]:_(p0) = COPY $x1 ; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]] ; CHECK: [[STRUCT_SEL:%[0-9]+]]:_(s64) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 0 Index: test/CodeGen/AArch64/GlobalISel/legalize-ext.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-ext.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-ext.mir @@ -34,110 +34,110 @@ - { id: 18, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_ext - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC]](s32) + ; CHECK: $w0 = COPY [[TRUNC]](s32) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC1]](s32) + ; CHECK: $w0 = COPY [[TRUNC1]](s32) ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC2]](s32) + ; CHECK: $w0 = COPY [[TRUNC2]](s32) ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC3]](s32) + ; CHECK: $w0 = COPY [[TRUNC3]](s32) ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) - ; CHECK: %x0 = COPY [[COPY1]](s64) + ; CHECK: 
$x0 = COPY [[COPY1]](s64) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]] - ; CHECK: %x0 = COPY [[AND]](s64) + ; CHECK: $x0 = COPY [[AND]](s64) ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) - ; CHECK: %x0 = COPY [[COPY3]](s64) + ; CHECK: $x0 = COPY [[COPY3]](s64) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C1]] ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]] - ; CHECK: %x0 = COPY [[ASHR]](s64) + ; CHECK: $x0 = COPY [[ASHR]](s64) ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[C2]] ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C2]] - ; CHECK: %w0 = COPY [[ASHR1]](s32) + ; CHECK: $w0 = COPY [[ASHR1]](s32) ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C3]] - ; CHECK: %w0 = COPY [[AND1]](s32) + ; CHECK: $w0 = COPY [[AND1]](s32) ; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC6]](s32) + ; CHECK: $w0 = COPY [[TRUNC6]](s32) ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C4]] - ; CHECK: %w0 = COPY [[AND2]](s32) + ; CHECK: $w0 = COPY [[AND2]](s32) ; CHECK: [[TRUNC8:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC8]](s32) + ; CHECK: $w0 = COPY [[TRUNC8]](s32) ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CHECK: [[TRUNC9:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[TRUNC9]], [[C5]] ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C5]] - ; CHECK: %w0 = COPY [[ASHR2]](s32) + ; CHECK: $w0 = COPY [[ASHR2]](s32) ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[TRUNC10:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC3]]4(s32) ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]]1, [[TRUNC3]]2 ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[TRUNC3]]3(s32) - ; CHECK: %w0 = COPY [[COPY6]](s32) + ; CHECK: $w0 = COPY [[COPY6]](s32) ; CHECK: [[TRUNC11:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC11]](s32) + ; CHECK: $w0 = COPY [[TRUNC11]](s32) ; CHECK: [[TRUNC12:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) - ; CHECK: %w0 = COPY [[TRUNC12]](s32) + ; CHECK: $w0 = COPY [[TRUNC12]](s32) ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[TRUNC12]](s32) - ; CHECK: %x0 = COPY [[FPEXT]](s64) - %0(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[FPEXT]](s64) + %0(s64) = COPY $x0 %1(s1) = G_TRUNC %0 %19:_(s32) = G_ANYEXT %1 - %w0 = COPY %19 + $w0 = COPY %19 %2(s8) = G_TRUNC %0 %20:_(s32) = G_ANYEXT %2 - %w0 = COPY %20 + $w0 = COPY %20 %3(s16) = G_TRUNC %0 %21:_(s32) = G_ANYEXT %3 - %w0 = COPY %21 + $w0 = COPY %21 %4(s32) = G_TRUNC %0 - %w0 = COPY %4 + $w0 = COPY %4 %5(s64) = G_ANYEXT %1 - %x0 = COPY %5 + $x0 = COPY %5 %6(s64) = G_ZEXT %2 - %x0 = COPY %6 + $x0 = COPY %6 %7(s64) = G_ANYEXT %3 - %x0 = COPY %7 + $x0 = COPY %7 %8(s64) = G_SEXT %4 - %x0 = COPY %8 + $x0 = COPY %8 %9(s32) = G_SEXT %1 - %w0 = COPY %9 + $w0 = COPY %9 %10(s32) = G_ZEXT %2 - %w0 = COPY %10 + $w0 = COPY %10 %11(s32) = G_ANYEXT %3 - %w0 = COPY %11 + $w0 = COPY %11 %12(s32) = G_ZEXT %1 - 
%w0 = COPY %12 + $w0 = COPY %12 %13(s32) = G_ANYEXT %2 - %w0 = COPY %13 + $w0 = COPY %13 %14(s32) = G_SEXT %3 - %w0 = COPY %14 + $w0 = COPY %14 %15(s8) = G_ZEXT %1 %22:_(s32) = G_ANYEXT %15 - %w0 = COPY %22 + $w0 = COPY %22 %16(s16) = G_ANYEXT %2 %23:_(s32) = G_ANYEXT %16 - %w0 = COPY %23 + $w0 = COPY %23 %17(s32) = G_TRUNC %0 - %w0 = COPY %17 + $w0 = COPY %17 %18(s64) = G_FPEXT %17 - %x0 = COPY %18 + $x0 = COPY %18 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir @@ -5,13 +5,13 @@ name: test_extracts_1 body: | bb.0: - liveins: %w0 + liveins: $w0 ; Low part of extraction takes the entirety of the low register, so the ; value stored is forwarded directly from the first load. ; CHECK-LABEL: name: test_extracts_1 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x2 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x2 ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 16) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64) @@ -19,9 +19,9 @@ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[LOAD]](s64) ; CHECK: G_STORE [[COPY1]](s64), [[COPY]](p0) :: (store 8) ; CHECK: RET_ReallyLR - %0:_(s64) = COPY %x0 - %1:_(s32) = COPY %w1 - %2:_(p0) = COPY %x2 + %0:_(s64) = COPY $x0 + %1:_(s32) = COPY $w1 + %2:_(p0) = COPY $x2 %3:_(s128) = G_LOAD %2(p0) :: (load 16) %4:_(s64) = G_EXTRACT %3(s128), 0 G_STORE %4(s64), %2(p0) :: (store 8) @@ -32,11 +32,11 @@ name: test_extracts_2 body: | bb.0: - liveins: %w0 + liveins: $w0 ; Low extraction takes the whole low register. High extraction is real. ; CHECK-LABEL: name: test_extracts_2 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x2 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x2 ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 16) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64) @@ -47,9 +47,9 @@ ; CHECK: G_STORE [[COPY1]](s64), [[COPY]](p0) :: (store 8) ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 4) ; CHECK: RET_ReallyLR - %0:_(s64) = COPY %x0 - %1:_(s32) = COPY %w1 - %2:_(p0) = COPY %x2 + %0:_(s64) = COPY $x0 + %1:_(s32) = COPY $w1 + %2:_(p0) = COPY $x2 %3:_(s128) = G_LOAD %2(p0) :: (load 16) %4:_(s64) = G_EXTRACT %3(s128), 0 %5:_(s32) = G_EXTRACT %3(s128), 64 @@ -62,22 +62,22 @@ name: test_extracts_3 body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 ; CHECK-LABEL: name: test_extracts_3 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](s64), 32 ; CHECK: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](s64), 0 ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[EXTRACT]](s32), [[EXTRACT1]](s32) - ; CHECK: %x0 = COPY [[MV]](s64) + ; CHECK: $x0 = COPY [[MV]](s64) ; CHECK: RET_ReallyLR - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 %2:_(s128) = G_MERGE_VALUES %0, %1 %3:_(s64) = G_EXTRACT %2, 32 - %x0 = COPY %3 + $x0 = COPY %3 RET_ReallyLR ...
@@ -85,19 +85,19 @@ name: test_extracts_4 body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 ; CHECK-LABEL: name: test_extracts_4 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](s64), 32 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) + ; CHECK: $w0 = COPY [[COPY1]](s32) ; CHECK: RET_ReallyLR - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 %2:_(s128) = G_MERGE_VALUES %0, %1 %3:_(s32) = G_EXTRACT %2, 32 - %w0 = COPY %3 + $w0 = COPY %3 RET_ReallyLR ... Index: test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir @@ -23,25 +23,25 @@ - { id: 7, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_icmp - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(oge), [[COPY]](s64), [[COPY1]] - ; CHECK: %w0 = COPY [[FCMP]](s32) + ; CHECK: $w0 = COPY [[FCMP]](s32) ; CHECK: [[FCMP1:%[0-9]+]]:_(s32) = G_FCMP floatpred(uno), [[TRUNC]](s32), [[TRUNC1]] - ; CHECK: %w0 = COPY [[FCMP1]](s32) - %0(s64) = COPY %x0 - %1(s64) = COPY %x0 + ; CHECK: $w0 = COPY [[FCMP1]](s32) + %0(s64) = COPY $x0 + %1(s64) = COPY $x0 %2(s32) = G_TRUNC %0 %3(s32) = G_TRUNC %1 %4(s32) = G_FCMP floatpred(oge), %0, %1 - %w0 = COPY %4 + $w0 = COPY %4 %5(s32) = G_FCMP floatpred(uno), %2, %3 - %w0 = COPY %5 + $w0 = COPY %5 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir @@ -20,15 +20,15 @@ - { id: 1, class: _ } body: | bb.1: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fneg_f32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $s0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -0.000000e+00 ; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[C]], [[COPY]] - ; CHECK: %s0 = COPY [[FSUB]](s32) - %0(s32) = COPY %s0 + ; CHECK: $s0 = COPY [[FSUB]](s32) + %0(s32) = COPY $s0 %1(s32) = G_FNEG %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- name: test_fneg_f64 @@ -37,13 +37,13 @@ - { id: 1, class: _ } body: | bb.1: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: test_fneg_f64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -0.000000e+00 ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[C]], [[COPY]] - ; CHECK: %d0 = COPY [[FSUB]](s64) - %0(s64) = COPY %d0 + ; CHECK: $d0 = COPY [[FSUB]](s64) + %0(s64) = COPY $d0 %1(s64) = G_FNEG %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... 
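A note on the expansion the fneg tests above check: the legalizer lowers G_FNEG as a subtraction from negative zero, fneg x == -0.0 - x, which preserves the sign of zero (0.0 - x would map +0.0 to +0.0 instead of -0.0). A condensed restatement of the checked sequence (vreg numbers are illustrative):

    %0:_(s32) = COPY $s0
    %1:_(s32) = G_FCONSTANT float -0.000000e+00
    %2:_(s32) = G_FSUB %1, %0          ; -0.0 - x == fneg x
    $s0 = COPY %2(s32)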
Index: test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir @@ -29,112 +29,112 @@ name: test_fptosi_s32_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptosi_s32_s32 ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32) - ; CHECK: %w0 = COPY [[FPTOSI]](s32) + ; CHECK: $w0 = COPY [[FPTOSI]](s32) %0:_(s32) = G_IMPLICIT_DEF %1:_(s32) = G_FPTOSI %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_fptoui_s32_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptoui_s32_s32 ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[DEF]](s32) - ; CHECK: %w0 = COPY [[FPTOUI]](s32) + ; CHECK: $w0 = COPY [[FPTOUI]](s32) %0:_(s32) = G_IMPLICIT_DEF %1:_(s32) = G_FPTOUI %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_fptosi_s32_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_fptosi_s32_s64 ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32) - ; CHECK: %w0 = COPY [[FPTOSI]](s32) + ; CHECK: $w0 = COPY [[FPTOSI]](s32) %0:_(s32) = G_IMPLICIT_DEF %1:_(s32) = G_FPTOSI %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_fptoui_s32_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_fptoui_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64) - ; CHECK: %w0 = COPY [[FPTOUI]](s32) - %0:_(s64) = COPY %x0 + ; CHECK: $w0 = COPY [[FPTOUI]](s32) + %0:_(s64) = COPY $x0 %1:_(s32) = G_FPTOUI %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_fptosi_s64_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptosi_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s32) - ; CHECK: %x0 = COPY [[FPTOSI]](s64) - %0:_(s32) = COPY %w0 + ; CHECK: $x0 = COPY [[FPTOSI]](s64) + %0:_(s32) = COPY $w0 %1:_(s64) = G_FPTOSI %0 - %x0 = COPY %1 + $x0 = COPY %1 ... --- name: test_fptoui_s64_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptoui_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s32) - ; CHECK: %x0 = COPY [[FPTOUI]](s64) - %0:_(s32) = COPY %w0 + ; CHECK: $x0 = COPY [[FPTOUI]](s64) + %0:_(s32) = COPY $w0 %1:_(s64) = G_FPTOUI %0 - %x0 = COPY %1 + $x0 = COPY %1 ... --- name: test_fptosi_s64_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_fptosi_s64_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s64) - ; CHECK: %x0 = COPY [[FPTOSI]](s64) - %0:_(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[FPTOSI]](s64) + %0:_(s64) = COPY $x0 %1:_(s64) = G_FPTOSI %0 - %x0 = COPY %1 + $x0 = COPY %1 ... 
--- name: test_fptoui_s64_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_fptoui_s64_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s64) - ; CHECK: %x0 = COPY [[FPTOUI]](s64) - %0:_(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[FPTOUI]](s64) + %0:_(s64) = COPY $x0 %1:_(s64) = G_FPTOUI %0 - %x0 = COPY %1 + $x0 = COPY %1 ... @@ -143,93 +143,93 @@ name: test_fptosi_s1_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptosi_s1_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32) ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[FPTOSI]](s32) - ; CHECK: %x0 = COPY [[TRUNC]](s1) - %0:_(s32) = COPY %w0 + ; CHECK: $x0 = COPY [[TRUNC]](s1) + %0:_(s32) = COPY $w0 %1:_(s1) = G_FPTOSI %0 - %x0 = COPY %1 + $x0 = COPY %1 ... --- name: test_fptoui_s1_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptoui_s1_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32) ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOUI]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) - %0:_(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY1]](s32) + %0:_(s32) = COPY $w0 %1:_(s1) = G_FPTOUI %0 %2:_(s32) = G_ANYEXT %1 - %w0 = COPY %2 + $w0 = COPY %2 ... --- name: test_fptosi_s8_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_fptosi_s8_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64) ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOSI]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) - %0:_(s64) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY1]](s32) + %0:_(s64) = COPY $x0 %1:_(s8) = G_FPTOSI %0 %2:_(s32) = G_ANYEXT %1 - %w0 = COPY %2 + $w0 = COPY %2 ... --- name: test_fptoui_s8_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_fptoui_s8_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64) ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOUI]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) - %0:_(s64) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY1]](s32) + %0:_(s64) = COPY $x0 %1:_(s8) = G_FPTOUI %0 %2:_(s32) = G_ANYEXT %1 - %w0 = COPY %2 + $w0 = COPY %2 ... --- name: test_fptosi_s16_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptosi_s16_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32) ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOSI]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) - %0:_(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY1]](s32) + %0:_(s32) = COPY $w0 %1:_(s16) = G_FPTOSI %0 %2:_(s32) = G_ANYEXT %1 - %w0 = COPY %2 + $w0 = COPY %2 ... 
--- name: test_fptoui_s16_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_fptoui_s16_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32) ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOUI]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) - %0:_(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY1]](s32) + %0:_(s32) = COPY $w0 %1:_(s16) = G_FPTOUI %0 %2:_(s32) = G_ANYEXT %1 - %w0 = COPY %2 + $w0 = COPY %2 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-gep.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-gep.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-gep.mir @@ -19,20 +19,20 @@ - { id: 3, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_gep_small - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]] ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]] ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[ASHR]](s64) - ; CHECK: %x0 = COPY [[GEP]](p0) - %0(p0) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[GEP]](p0) + %0(p0) = COPY $x0 + %1(s64) = COPY $x1 %2(s8) = G_TRUNC %1 %3(p0) = G_GEP %0, %2(s8) - %x0 = COPY %3 + $x0 = COPY %3 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir @@ -14,13 +14,13 @@ - { id: 0, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_copy - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: %x0 = COPY [[COPY]](s64) - %0(s64) = COPY %x0 - %x0 = COPY %0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: $x0 = COPY [[COPY]](s64) + %0(s64) = COPY $x0 + $x0 = COPY %0 ... --- Index: test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir @@ -16,7 +16,7 @@ name: test_inserts_1 body: | bb.0: - liveins: %w0 + liveins: $w0 ; Low part of insertion wipes out the old register entirely, so %0 gets ; forwarded to the G_STORE. Hi part is unchanged so (split) G_LOAD gets @@ -26,9 +26,9 @@ ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD ; CHECK: G_STORE %0(s64) ; CHECK: G_STORE [[HI]] - %0:_(s64) = COPY %x0 - %1:_(s32) = COPY %w1 - %2:_(p0) = COPY %x2 + %0:_(s64) = COPY $x0 + %1:_(s32) = COPY $w1 + %2:_(p0) = COPY $x2 %3:_(s128) = G_LOAD %2(p0) :: (load 16) %4:_(s128) = G_INSERT %3(s128), %0(s64), 0 G_STORE %4(s128), %2(p0) :: (store 16) @@ -39,7 +39,7 @@ name: test_inserts_2 body: | bb.0: - liveins: %w0 + liveins: $w0 ; Low insertion wipes out the old register entirely, so %0 gets forwarded ; to the G_STORE again. Second insertion is real. 
@@ -49,9 +49,9 @@ ; CHECK: [[NEWHI:%[0-9]+]]:_(s64) = G_INSERT [[HI]], %1(s32), 0 ; CHECK: G_STORE %0(s64) ; CHECK: G_STORE [[NEWHI]] - %0:_(s64) = COPY %x0 - %1:_(s32) = COPY %w1 - %2:_(p0) = COPY %x2 + %0:_(s64) = COPY $x0 + %1:_(s32) = COPY $w1 + %2:_(p0) = COPY $x2 %3:_(s128) = G_LOAD %2(p0) :: (load 16) %4:_(s128) = G_INSERT %3(s128), %0(s64), 0 %5:_(s128) = G_INSERT %4(s128), %1(s32), 64 @@ -63,7 +63,7 @@ name: test_inserts_3 body: | bb.0: - liveins: %w0 + liveins: $w0 ; I'm not entirely convinced inserting a p0 into an s64 is valid, but it's ; certainly better than the alternative of directly forwarding the value @@ -74,9 +74,9 @@ ; CHECK: [[NEWLO:%[0-9]+]]:_(s64) = G_PTRTOINT %0(p0) ; CHECK: G_STORE [[NEWLO]](s64) ; CHECK: G_STORE [[HI]] - %0:_(p0) = COPY %x0 - %1:_(s32) = COPY %w1 - %2:_(p0) = COPY %x2 + %0:_(p0) = COPY $x0 + %1:_(s32) = COPY $w1 + %2:_(p0) = COPY $x2 %3:_(s128) = G_LOAD %2(p0) :: (load 16) %4:_(s128) = G_INSERT %3(s128), %0(p0), 0 G_STORE %4(s128), %2(p0) :: (store 16) @@ -87,18 +87,18 @@ name: test_inserts_4 body: | bb.0: - liveins: %w0 + liveins: $w0 ; A narrow insert gets surrounded by a G_ANYEXT/G_TRUNC pair. ; CHECK-LABEL: name: test_inserts_4 ; CHECK: [[VALEXT:%[0-9]+]]:_(s32) = COPY %2(s32) ; CHECK: [[VAL:%[0-9]+]]:_(s32) = G_INSERT [[VALEXT]], %1(s1), 0 ; CHECK: %5:_(s8) = G_TRUNC [[VAL]](s32) - %4:_(s32) = COPY %w0 + %4:_(s32) = COPY $w0 %0:_(s1) = G_TRUNC %4 - %5:_(s32) = COPY %w1 + %5:_(s32) = COPY $w1 %1:_(s8) = G_TRUNC %5 - %2:_(p0) = COPY %x2 + %2:_(p0) = COPY $x2 %3:_(s8) = G_INSERT %1(s8), %0(s1), 0 G_STORE %3(s8), %2(p0) :: (store 1) RET_ReallyLR @@ -108,7 +108,7 @@ name: test_inserts_5 body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 ; CHECK-LABEL: name: test_inserts_5 @@ -117,13 +117,13 @@ ; CHECK: [[INS_HI:%[0-9]+]]:_(s32) = G_EXTRACT %2(s64), 32 ; CHECK: [[VAL_HI:%[0-9]+]]:_(s64) = G_INSERT %1, [[INS_HI]](s32), 0 ; CHECK: %4:_(s128) = G_MERGE_VALUES [[VAL_LO]](s64), [[VAL_HI]](s64) - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 - %2:_(s64) = COPY %x2 + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 + %2:_(s64) = COPY $x2 %3:_(s128) = G_MERGE_VALUES %0, %1 %4:_(s128) = G_INSERT %3, %2, 32 %5:_(s64) = G_TRUNC %4 - %x0 = COPY %5 + $x0 = COPY %5 RET_ReallyLR ... @@ -131,19 +131,19 @@ name: test_inserts_6 body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 ; CHECK-LABEL: name: test_inserts_6 ; CHECK: [[VAL_LO:%[0-9]+]]:_(s64) = G_INSERT %0, %2(s32), 32 ; CHECK: %4:_(s128) = G_MERGE_VALUES [[VAL_LO]](s64), %1(s64) - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 - %2:_(s32) = COPY %w2 + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 + %2:_(s32) = COPY $w2 %3:_(s128) = G_MERGE_VALUES %0, %1 %4:_(s128) = G_INSERT %3, %2, 32 %5:_(s64) = G_TRUNC %4 - %x0 = COPY %5 + $x0 = COPY %5 RET_ReallyLR ... @@ -151,19 +151,19 @@ name: test_inserts_nonpow2 body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 ; CHECK-LABEL: name: test_inserts_nonpow2 - ; CHECK: [[C:%[0-9]+]]:_(s64) = COPY %x3 - ; CHECK: %x0 = COPY [[C]] - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 - %2:_(s64) = COPY %x2 - %3:_(s64) = COPY %x3 + ; CHECK: [[C:%[0-9]+]]:_(s64) = COPY $x3 + ; CHECK: $x0 = COPY [[C]] + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 + %2:_(s64) = COPY $x2 + %3:_(s64) = COPY $x3 %4:_(s192) = G_MERGE_VALUES %0, %1, %2 %5:_(s192) = G_INSERT %4, %3, 0 %6:_(s64), %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %5 - %x0 = COPY %6 + $x0 = COPY %6 RET_ReallyLR ... 
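The insert tests above share one pattern worth spelling out: when a G_INSERT targets a value wider than the largest legal type, the legalizer splits the value into legal-sized pieces, and only the pieces actually overlapped by the inserted value get a real, narrower G_INSERT; a fully covered piece is replaced outright and an untouched piece is forwarded as-is. A schematic of the s128 case (vreg numbers illustrative, not from any test above):

    %0:_(s64) = COPY $x0                  ; low half fully overwritten: just forwarded
    %1:_(s64) = COPY $x1                  ; high half partially overlapped
    %2:_(s32) = COPY $w2
    %3:_(s64) = G_INSERT %1, %2(s32), 0   ; only the overlapped half needs an insert
    ; the s128 result is then reassembled as G_MERGE_VALUES %0(s64), %3(s64)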
Index: test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir @@ -29,104 +29,104 @@ name: test_sitofp_s32_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sitofp_s32_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s32) = G_SITOFP %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_uitofp_s32_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_uitofp_s32_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s32) = G_UITOFP %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_sitofp_s32_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_sitofp_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s64) - %0:_(s64) = COPY %x0 + %0:_(s64) = COPY $x0 %1:_(s32) = G_SITOFP %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_uitofp_s32_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_uitofp_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY]](s64) - %0:_(s64) = COPY %x0 + %0:_(s64) = COPY $x0 %1:_(s32) = G_UITOFP %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_sitofp_s64_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sitofp_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s64) = G_SITOFP %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- name: test_uitofp_s64_s32 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_uitofp_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[COPY]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s64) = G_UITOFP %0 - %x0 = COPY %1 + $x0 = COPY %1 ... --- name: test_sitofp_s64_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_sitofp_s64_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s64) - %0:_(s64) = COPY %x0 + %0:_(s64) = COPY $x0 %1:_(s64) = G_SITOFP %0 - %x0 = COPY %1 + $x0 = COPY %1 ... --- name: test_uitofp_s64_s64 body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_uitofp_s64_s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[COPY]](s64) - %0:_(s64) = COPY %x0 + %0:_(s64) = COPY $x0 %1:_(s64) = G_UITOFP %0 - %x0 = COPY %1 + $x0 = COPY %1 ... 
@@ -134,103 +134,103 @@ name: test_sitofp_s32_s1 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sitofp_s32_s1 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]] ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]] ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s1) = G_TRUNC %0 %2:_(s32) = G_SITOFP %1 - %w0 = COPY %2 + $w0 = COPY %2 ... --- name: test_uitofp_s32_s1 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_uitofp_s32_s1 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[AND]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s1) = G_TRUNC %0 %2:_(s32) = G_UITOFP %1 - %w0 = COPY %2 + $w0 = COPY %2 ... --- name: test_sitofp_s64_s8 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sitofp_s64_s8 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]] ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]] ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s8) = G_TRUNC %0 %2:_(s64) = G_SITOFP %1 - %x0 = COPY %2 + $x0 = COPY %2 ... --- name: test_uitofp_s64_s8 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_uitofp_s64_s8 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] ; CHECK: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[AND]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s8) = G_TRUNC %0 %2:_(s64) = G_UITOFP %1 - %x0 = COPY %2 + $x0 = COPY %2 ... --- name: test_sitofp_s32_s16 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sitofp_s32_s16 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]] ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]] ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s16) = G_TRUNC %0 %2:_(s32) = G_SITOFP %1 - %w0 = COPY %2 + $w0 = COPY %2 ... --- name: test_uitofp_s32_s16 body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_uitofp_s32_s16 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[AND]](s32) - %0:_(s32) = COPY %w0 + %0:_(s32) = COPY $w0 %1:_(s16) = G_TRUNC %0 %2:_(s32) = G_UITOFP %1 - %w0 = COPY %2 + $w0 = COPY %2 ... 
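As the narrow-source tests above show, the int-to-fp conversions only operate on 32- and 64-bit integer inputs, so an s1/s8/s16 source is first widened in a 32-bit register: a shift pair for signed conversions, a mask for unsigned ones. Condensed sketch for an s8 source (vreg numbers illustrative):

    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 24
    %2:_(s32) = G_SHL %0, %1
    %3:_(s32) = G_ASHR %2, %1            ; (x << 24) >> 24 sign-extends the low byte
    %4:_(s64) = G_SITOFP %3(s32)
    ; the unsigned form masks instead: G_AND with 255 (65535 for s16, 1 for s1)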
Index: test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir @@ -27,40 +27,40 @@ - { id: 8, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_load - %0(p0) = COPY %x0 + %0(p0) = COPY $x0 %1(s1) = G_LOAD %0 :: (load 1 from %ir.addr) %9:_(s32) = G_ANYEXT %1 - %w0 = COPY %9 + $w0 = COPY %9 ; CHECK: %2:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr) %2(s8) = G_LOAD %0 :: (load 1 from %ir.addr) %10:_(s32) = G_ANYEXT %2 - %w0 = COPY %10 + $w0 = COPY %10 ; CHECK: %3:_(s16) = G_LOAD %0(p0) :: (load 2 from %ir.addr) %3(s16) = G_LOAD %0 :: (load 2 from %ir.addr) %11:_(s32) = G_ANYEXT %3 - %w0 = COPY %11 + $w0 = COPY %11 ; CHECK: %4:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.addr) %4(s32) = G_LOAD %0 :: (load 4 from %ir.addr) - %w0 = COPY %4 + $w0 = COPY %4 ; CHECK: %5:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.addr) %5(s64) = G_LOAD %0 :: (load 8 from %ir.addr) - %x0 = COPY %5 + $x0 = COPY %5 %6(p0) = G_LOAD %0(p0) :: (load 8 from %ir.addr) %12:_(s64) = G_PTRTOINT %6 - %x0 = COPY %12 + $x0 = COPY %12 ; CHECK: %7:_(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr) %7(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr) %13:_(s64) = G_BITCAST %7 - %x0 = COPY %13 + $x0 = COPY %13 ; CHECK: [[LOAD0:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 16 from %ir.addr) ; CHECK: [[OFFSET1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 @@ -69,7 +69,7 @@ ; CHECK: %8:_(s128) = G_MERGE_VALUES [[LOAD0]](s64), [[LOAD1]](s64) %8(s128) = G_LOAD %0(p0) :: (load 16 from %ir.addr) %14:_(s64) = G_TRUNC %8 - %x0 = COPY %14 + $x0 = COPY %14 ... --- @@ -85,11 +85,11 @@ - { id: 7, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_store - %0(p0) = COPY %x0 - %1(s32) = COPY %w1 + %0(p0) = COPY $x0 + %1(s32) = COPY $w1 ; CHECK: [[C1:%.*]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[B:%.*]]:_(s32) = COPY %1(s32) Index: test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir @@ -26,5 +26,5 @@ %2(s8) = G_MERGE_VALUES %1(s4), %1(s4) %3(s8) = COPY %2(s8) %4(s64) = G_ANYEXT %3(s8) - %x0 = COPY %4(s64) + $x0 = COPY %4(s64) ... 
Index: test/CodeGen/AArch64/GlobalISel/legalize-mul.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-mul.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-mul.mir @@ -27,23 +27,23 @@ - { id: 5, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_scalar_mul_small - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]] ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[MUL]](s32) - ; CHECK: %x0 = COPY [[ANYEXT]](s64) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ANYEXT]](s64) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 %4(s8) = G_MUL %2, %3 %5(s64) = G_ANYEXT %4 - %x0 = COPY %5 + $x0 = COPY %5 ... @@ -51,25 +51,25 @@ name: test_smul_overflow body: | bb.0: - liveins: %x0, %x1, %w2, %w3 + liveins: $x0, $x1, $w2, $w3 ; CHECK-LABEL: name: test_smul_overflow - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]] ; CHECK: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]] ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]] ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]] - ; CHECK: %x0 = COPY [[MUL]](s64) + ; CHECK: $x0 = COPY [[MUL]](s64) ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) - ; CHECK: %w0 = COPY [[COPY2]](s32) - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 + ; CHECK: $w0 = COPY [[COPY2]](s32) + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 %2:_(s64), %3:_(s1) = G_SMULO %0, %1 - %x0 = COPY %2 + $x0 = COPY %2 %4:_(s32) = G_ANYEXT %3 - %w0 = COPY %4 + $w0 = COPY %4 ... @@ -78,23 +78,23 @@ name: test_umul_overflow body: | bb.0: - liveins: %x0, %x1, %w2, %w3 + liveins: $x0, $x1, $w2, $w3 ; CHECK-LABEL: name: test_umul_overflow - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]] ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY1]] ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]] - ; CHECK: %x0 = COPY [[MUL]](s64) + ; CHECK: $x0 = COPY [[MUL]](s64) ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) - ; CHECK: %w0 = COPY [[COPY2]](s32) - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 + ; CHECK: $w0 = COPY [[COPY2]](s32) + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 %2:_(s64), %3:_(s1) = G_UMULO %0, %1 - %x0 = COPY %2 + $x0 = COPY %2 %4:_(s32) = G_ANYEXT %3 - %w0 = COPY %4 + $w0 = COPY %4 ... 
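The overflow-multiply tests above follow one recipe: G_UMULO/G_SMULO have no direct lowering here, so the legalizer computes the low half of the product with G_MUL, the high half with G_UMULH/G_SMULH, and derives the overflow bit with a compare. Condensed from the unsigned CHECK lines above (vreg numbers illustrative):

    %2:_(s64) = G_MUL %0, %1
    %3:_(s64) = G_UMULH %0, %1
    %4:_(s64) = G_CONSTANT i64 0
    %5:_(s32) = G_ICMP intpred(ne), %3(s64), %4   ; nonzero high half => unsigned overflow
    ; the signed form compares G_SMULH against G_ASHR of the product by 63 (its sign spread)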
Index: test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir @@ -19,16 +19,16 @@ - { id: 5, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_legalize_merge_v3s64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[MV:%[0-9]+]]:_(<3 x s64>) = G_MERGE_VALUES [[COPY]](s64), [[COPY]](s64), [[COPY]](s64) ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s64>) = COPY [[MV]](<3 x s64>) ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<3 x s64>) - ; CHECK: %x0 = COPY [[UV]](s64) - %0(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[UV]](s64) + %0(s64) = COPY $x0 %1(<3 x s64>) = G_MERGE_VALUES %0(s64), %0(s64), %0(s64) %2(<3 x s64>) = COPY %1(<3 x s64>) %3(s64), %4(s64), %5(s64) = G_UNMERGE_VALUES %2(<3 x s64>) - %x0 = COPY %3(s64) + $x0 = COPY %3(s64) ... Index: test/CodeGen/AArch64/GlobalISel/legalize-or.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-or.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-or.mir @@ -5,51 +5,51 @@ name: test_scalar_or_small body: | bb.0: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_scalar_or_small - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC]], [[TRUNC1]] ; CHECK: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[OR]](s32) - ; CHECK: %x0 = COPY [[TRUNC2]](s8) - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[TRUNC2]](s8) + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 %2:_(s8) = G_TRUNC %0 %3:_(s8) = G_TRUNC %1 %4:_(s8) = G_OR %2, %3 - %x0 = COPY %4 + $x0 = COPY %4 ... --- name: test_big_scalar_power_of_2 body: | bb.0: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; We have a temporary G_MERGE_VALUES in the legalizer that gets ; cleaned up with the G_UNMERGE_VALUES, so we end up directly ; copying the results of the G_OR ops. 
; CHECK-LABEL: name: test_big_scalar_power_of_2 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 - ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY %x2 - ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY %x3 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2 + ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3 ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY2]] ; CHECK: [[OR1:%[0-9]+]]:_(s64) = G_OR [[COPY1]], [[COPY3]] - ; CHECK: %x0 = COPY [[OR]](s64) - ; CHECK: %x1 = COPY [[OR1]](s64) - ; CHECK: RET_ReallyLR implicit %x0, implicit %x1 - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 - %2:_(s64) = COPY %x2 - %3:_(s64) = COPY %x3 + ; CHECK: $x0 = COPY [[OR]](s64) + ; CHECK: $x1 = COPY [[OR1]](s64) + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1 + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 + %2:_(s64) = COPY $x2 + %3:_(s64) = COPY $x3 %4:_(s128) = G_MERGE_VALUES %0, %1 %5:_(s128) = G_MERGE_VALUES %2, %3 %6:_(s128) = G_OR %4, %5 %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6 - %x0 = COPY %7 - %x1 = COPY %8 - RET_ReallyLR implicit %x0, implicit %x1 + $x0 = COPY %7 + $x1 = COPY %8 + RET_ReallyLR implicit $x0, implicit $x1 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-phi.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-phi.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-phi.mir @@ -66,8 +66,8 @@ ; CHECK-LABEL: name: legalize_phi ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: %w0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 @@ -89,17 +89,17 @@ ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]] - ; CHECK: %w0 = COPY [[AND]](s32) - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[AND]](s32) + ; CHECK: RET_ReallyLR implicit $w0 bb.0: ; Test that we insert legalization artifacts(Truncs here) into the correct BBs ; while legalizing the G_PHI to s16. successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %w0 + liveins: $w0 - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 1 %6(s32) = G_CONSTANT i32 2 @@ -123,8 +123,8 @@ bb.3: %9(s1) = G_PHI %5(s1), %bb.1, %8(s1), %bb.2 %10(s32) = G_ZEXT %9(s1) - %w0 = COPY %10(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %10(s32) + RET_ReallyLR implicit $w0 ... 
--- @@ -147,10 +147,10 @@ ; CHECK-LABEL: name: legalize_phi_ptr ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: %w2, %x0, %x1 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY %x1 - ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY %w2 + ; CHECK: liveins: $w2, $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32) ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1 ; CHECK: G_BR %bb.2 @@ -158,16 +158,16 @@ ; CHECK: successors: %bb.2(0x80000000) ; CHECK: bb.2: ; CHECK: [[PHI:%[0-9]+]]:_(p0) = G_PHI [[COPY]](p0), %bb.0, [[COPY1]](p0), %bb.1 - ; CHECK: %x0 = COPY [[PHI]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[PHI]](p0) + ; CHECK: RET_ReallyLR implicit $x0 bb.1: successors: %bb.2, %bb.3 - liveins: %w2, %x0, %x1 + liveins: $w2, $x0, $x1 - %0(p0) = COPY %x0 - %1(p0) = COPY %x1 - %4(s32) = COPY %w2 + %0(p0) = COPY $x0 + %1(p0) = COPY $x1 + %4(s32) = COPY $w2 %2(s1) = G_TRUNC %4(s32) G_BRCOND %2(s1), %bb.2 G_BR %bb.3 @@ -177,8 +177,8 @@ bb.3: %3(p0) = G_PHI %0(p0), %bb.1, %1(p0), %bb.2 - %x0 = COPY %3(p0) - RET_ReallyLR implicit %x0 + $x0 = COPY %3(p0) + RET_ReallyLR implicit $x0 ... --- @@ -206,8 +206,8 @@ ; CHECK-LABEL: name: legalize_phi_empty ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: %w0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 @@ -229,17 +229,17 @@ ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]] - ; CHECK: %w0 = COPY [[AND]](s32) - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[AND]](s32) + ; CHECK: RET_ReallyLR implicit $w0 bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %w0 + liveins: $w0 ; Test that we properly legalize a phi with a predecessor that's empty - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 3 %6(s32) = G_CONSTANT i32 1 @@ -263,8 +263,8 @@ bb.3: %9(s1) = G_PHI %8(s1), %bb.1, %5(s1), %bb.2 %10(s32) = G_ZEXT %9(s1) - %w0 = COPY %10(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %10(s32) + RET_ReallyLR implicit $w0 ... 
--- @@ -289,8 +289,8 @@ ; CHECK-LABEL: name: legalize_phi_loop ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: liveins: %w0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32) @@ -312,14 +312,14 @@ ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]] - ; CHECK: %w0 = COPY [[AND1]](s32) - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[AND1]](s32) + ; CHECK: RET_ReallyLR implicit $w0 bb.0: successors: %bb.1(0x80000000) - liveins: %w0 + liveins: $w0 ; Test that we properly legalize a phi that uses a value from the same BB - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %2(s8) = G_CONSTANT i8 1 %7(s8) = G_CONSTANT i8 0 @@ -334,8 +334,8 @@ bb.3: %6(s32) = G_ZEXT %3(s8) - %w0 = COPY %6(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %6(s32) + RET_ReallyLR implicit $w0 ... --- @@ -357,8 +357,8 @@ ; CHECK-LABEL: name: legalize_phi_cycle ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: liveins: %w0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32) ; CHECK: bb.1: @@ -373,15 +373,15 @@ ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY [[PHI]](s16) ; CHECK: G_BRCOND [[TRUNC2]](s1), %bb.1 ; CHECK: bb.2: - ; CHECK: %w0 = COPY [[AND]](s32) - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[AND]](s32) + ; CHECK: RET_ReallyLR implicit $w0 bb.0: successors: %bb.1(0x80000000) - liveins: %w0 + liveins: $w0 ; Test that we properly legalize a phi that uses itself - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %4(s8) = G_CONSTANT i8 0 bb.1: @@ -393,8 +393,8 @@ G_BRCOND %3(s1), %bb.1 bb.3: - %w0 = COPY %2(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %2(s32) + RET_ReallyLR implicit $w0 ... --- @@ -426,8 +426,8 @@ ; CHECK-LABEL: name: legalize_phi_same_bb ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: %w0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 @@ -457,18 +457,18 @@ ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16) ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C5]] ; CHECK: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[C]]1, [[C]]2 - ; CHECK: %w0 = COPY [[C]]3(s32) - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[C]]3(s32) + ; CHECK: RET_ReallyLR implicit $w0 bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %w0 + liveins: $w0 ; Make sure that we correctly insert the new legalized G_PHI at the ; correct location (ie make sure G_PHIs are the first insts in the BB). - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 3 %6(s32) = G_CONSTANT i32 1 @@ -496,8 +496,8 @@ %11(s32) = G_ZEXT %9(s8) %12(s32) = G_ZEXT %10(s8) %13(s32) = G_ADD %11, %12 - %w0 = COPY %13(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %13(s32) + RET_ReallyLR implicit $w0 ... 
--- @@ -530,8 +530,8 @@ ; CHECK-LABEL: name: legalize_phi_diff_bb ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: %w0, %w1 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0 + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 @@ -563,18 +563,18 @@ ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16) ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[C]]8, [[C]]7 - ; CHECK: %w0 = COPY [[AND1]](s32) - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[AND1]](s32) + ; CHECK: RET_ReallyLR implicit $w0 bb.0: successors: %bb.1(0x40000000), %bb.3(0x40000000) - liveins: %w0, %w1 + liveins: $w0, $w1 ; Make sure that we correctly legalize PHIs sharing common defs ; in different BBs. - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_CONSTANT i32 0 %4(s32) = G_CONSTANT i32 3 %9(s32) = G_CONSTANT i32 1 @@ -599,7 +599,7 @@ bb.3: %13(s8) = G_PHI %7(s8), %bb.1, %6(s8), %bb.0 %14(s32) = G_ZEXT %13(s8) - %w0 = COPY %14(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %14(s32) + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-pow.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-pow.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-pow.mir @@ -13,28 +13,28 @@ name: test_pow body: | bb.0.entry: - liveins: %d0, %d1, %s2, %s3 + liveins: $d0, $d1, $s2, $s3 ; CHECK-LABEL: name: test_pow ; CHECK: hasCalls: true - %0:_(s64) = COPY %d0 - %1:_(s64) = COPY %d1 - %2:_(s32) = COPY %s2 - %3:_(s32) = COPY %s3 + %0:_(s64) = COPY $d0 + %1:_(s64) = COPY $d1 + %2:_(s32) = COPY $s2 + %3:_(s32) = COPY $s3 - ; CHECK: %d0 = COPY %0 - ; CHECK: %d1 = COPY %1 - ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0 - ; CHECK: %4:_(s64) = COPY %d0 + ; CHECK: $d0 = COPY %0 + ; CHECK: $d1 = COPY %1 + ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0 + ; CHECK: %4:_(s64) = COPY $d0 %4:_(s64) = G_FPOW %0, %1 - %x0 = COPY %4 + $x0 = COPY %4 - ; CHECK: %s0 = COPY %2 - ; CHECK: %s1 = COPY %3 - ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0 - ; CHECK: %5:_(s32) = COPY %s0 + ; CHECK: $s0 = COPY %2 + ; CHECK: $s1 = COPY %3 + ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0 + ; CHECK: %5:_(s32) = COPY $s0 %5:_(s32) = G_FPOW %2, %3 - %w0 = COPY %5 + $w0 = COPY %5 ... 
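G_FPOW has no instruction to select, so the pow test above checks the libcall path: operands are marshalled into the AAPCS floating-point argument registers, a BL to the runtime function is emitted, and the result is read back from the return register (the fmod tests below show the same pattern, with the ADJCALLSTACKDOWN/UP bracketing visible). Condensed from the CHECK lines (vreg numbers illustrative):

    $d0 = COPY %0(s64)
    $d1 = COPY %1(s64)
    BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
    %4:_(s64) = COPY $d0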
Index: test/CodeGen/AArch64/GlobalISel/legalize-rem.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-rem.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-rem.mir @@ -30,19 +30,19 @@ - { id: 2, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_urem_64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]] ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UDIV]], [[COPY1]] ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[MUL]] - ; CHECK: %x0 = COPY [[SUB]](s64) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[SUB]](s64) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_UREM %0, %1 - %x0 = COPY %2 + $x0 = COPY %2 ... @@ -56,23 +56,23 @@ - { id: 5, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_srem_32 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[TRUNC]], [[TRUNC1]] ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SDIV]], [[TRUNC1]] ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[MUL]] - ; CHECK: %w0 = COPY [[SUB]](s32) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $w0 = COPY [[SUB]](s32) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %3(s32) = G_TRUNC %0 %4(s32) = G_TRUNC %1 %5(s32) = G_SREM %3, %4 - %w0 = COPY %5 + $w0 = COPY %5 ... --- @@ -85,12 +85,12 @@ - { id: 8, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_srem_8 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]] @@ -107,14 +107,14 @@ ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[MUL]](s32) ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[COPY3]] ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32) - ; CHECK: %w0 = COPY [[COPY4]](s32) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $w0 = COPY [[COPY4]](s32) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %6(s8) = G_TRUNC %0 %7(s8) = G_TRUNC %1 %8(s8) = G_SREM %6, %7 %9:_(s32) = G_ANYEXT %8 - %w0 = COPY %9 + $w0 = COPY %9 ... 
--- name: test_frem @@ -127,33 +127,33 @@ - { id: 5, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_frem - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 - ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp - ; CHECK: %d0 = COPY [[COPY]](s64) - ; CHECK: %d1 = COPY [[COPY1]](s64) - ; CHECK: BL &fmod, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0 - ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY %d0 - ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp - ; CHECK: %x0 = COPY [[COPY2]](s64) + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 + ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $d0 = COPY [[COPY]](s64) + ; CHECK: $d1 = COPY [[COPY1]](s64) + ; CHECK: BL &fmod, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0 + ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0 + ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $x0 = COPY [[COPY2]](s64) ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) - ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp - ; CHECK: %s0 = COPY [[TRUNC]](s32) - ; CHECK: %s1 = COPY [[TRUNC1]](s32) - ; CHECK: BL &fmodf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0 - ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY %s0 - ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp - ; CHECK: %w0 = COPY [[COPY3]](s32) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $s0 = COPY [[TRUNC]](s32) + ; CHECK: $s1 = COPY [[TRUNC1]](s32) + ; CHECK: BL &fmodf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0 + ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0 + ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $w0 = COPY [[COPY3]](s32) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_FREM %0, %1 - %x0 = COPY %2 + $x0 = COPY %2 %3(s32) = G_TRUNC %0 %4(s32) = G_TRUNC %1 %5(s32) = G_FREM %3, %4 - %w0 = COPY %5 + $w0 = COPY %5 Index: test/CodeGen/AArch64/GlobalISel/legalize-shift.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-shift.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-shift.mir @@ -22,10 +22,10 @@ - { id: 6, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_shift - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]] @@ -36,7 +36,7 @@ ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]] ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[ASHR1]] ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR2]](s32) - ; CHECK: %w0 = COPY [[COPY2]](s32) + ; CHECK: $w0 = COPY [[COPY2]](s32) ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]] @@ -45,27 +45,27 @@ ; CHECK: 
[[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]] ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[AND1]] ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; CHECK: %w0 = COPY [[COPY3]](s32) + ; CHECK: $w0 = COPY [[COPY3]](s32) ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY1]]0, [[COPY1]]1 ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY1]]2(s32) - ; CHECK: %w0 = COPY [[COPY4]](s32) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $w0 = COPY [[COPY4]](s32) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 %4(s8) = G_ASHR %2, %3 %7:_(s32) = G_ANYEXT %4 - %w0 = COPY %7 + $w0 = COPY %7 %5(s8) = G_LSHR %2, %3 %8:_(s32) = G_ANYEXT %5 - %w0 = COPY %8 + $w0 = COPY %8 %6(s8) = G_SHL %2, %3 %9:_(s32) = G_ANYEXT %6 - %w0 = COPY %9 + $w0 = COPY %9 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-simple.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-simple.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-simple.mir @@ -45,46 +45,46 @@ ; CHECK-LABEL: name: test_simple ; CHECK: bb.0.{{[a-zA-Z0-9]+}}: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64) ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[INTTOPTR]](p0) - ; CHECK: %x0 = COPY [[PTRTOINT]](s64) + ; CHECK: $x0 = COPY [[PTRTOINT]](s64) ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1 ; CHECK: bb.1.{{[a-zA-Z0-9]+}}: ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC2]], [[TRUNC3]] ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32) - ; CHECK: %w0 = COPY [[COPY1]](s32) + ; CHECK: $w0 = COPY [[COPY1]](s32) ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC4]], [[TRUNC5]] ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SELECT1]](s32) - ; CHECK: %w0 = COPY [[COPY2]](s32) + ; CHECK: $w0 = COPY [[COPY2]](s32) ; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC6]], [[TRUNC7]] ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SELECT2]](s32) - ; CHECK: %w0 = COPY [[COPY3]](s32) + ; CHECK: $w0 = COPY [[COPY3]](s32) ; CHECK: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC1]], [[TRUNC1]] ; CHECK: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY]] - ; CHECK: %x0 = COPY [[SELECT4]](s64) + ; CHECK: $x0 = COPY [[SELECT4]](s64) ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]](s64) ; CHECK: [[BITCAST1:%[0-9]+]]:_(s64) = G_BITCAST [[BITCAST]](<2 x s32>) - ; CHECK: %x0 = COPY [[BITCAST1]](s64) + ; CHECK: $x0 = COPY [[BITCAST1]](s64) ; CHECK: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[SELECT3]](s32) - ; CHECK: %w0 = COPY [[BITCAST2]](s32) + ; CHECK: $w0 = COPY [[BITCAST2]](s32) ; CHECK: [[BITCAST3:%[0-9]+]]:_(<4 x s8>) = G_BITCAST [[COPY]](s64) ; CHECK: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST3]](<4 x s8>) - ; CHECK: %w0 = COPY 
[[BITCAST4]](s32) + ; CHECK: $w0 = COPY [[BITCAST4]](s32) ; CHECK: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY]](s64) ; CHECK: [[BITCAST6:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST5]](<2 x s16>) - ; CHECK: %w0 = COPY [[BITCAST6]](s32) + ; CHECK: $w0 = COPY [[BITCAST6]](s32) bb.0.entry: - liveins: %x0, %x1, %x2, %x3 - %0(s64) = COPY %x0 + liveins: $x0, $x1, $x2, $x3 + %0(s64) = COPY $x0 %1(s1) = G_TRUNC %0 %2(s8) = G_TRUNC %0 @@ -93,7 +93,7 @@ %5(p0) = G_INTTOPTR %0 %6(s64) = G_PTRTOINT %5 - %x0 = COPY %6 + $x0 = COPY %6 G_BRCOND %1, %bb.1 @@ -101,31 +101,31 @@ %7(s1) = G_SELECT %1, %1, %1 %21:_(s32) = G_ANYEXT %7 - %w0 = COPY %21 + $w0 = COPY %21 %8(s8) = G_SELECT %1, %2, %2 %20:_(s32) = G_ANYEXT %8 - %w0 = COPY %20 + $w0 = COPY %20 %9(s16) = G_SELECT %1, %3, %3 %19:_(s32) = G_ANYEXT %9 - %w0 = COPY %19 + $w0 = COPY %19 %10(s32) = G_SELECT %1, %4, %4 %11(s64) = G_SELECT %1, %0, %0 - %x0 = COPY %11 + $x0 = COPY %11 %12(<2 x s32>) = G_BITCAST %0 %13(s64) = G_BITCAST %12 - %x0 = COPY %13 + $x0 = COPY %13 %14(s32) = G_BITCAST %10 - %w0 = COPY %14 + $w0 = COPY %14 %15(<4 x s8>) = G_BITCAST %0 %17:_(s32) = G_BITCAST %15 - %w0 = COPY %17 + $w0 = COPY %17 %16(<2 x s16>) = G_BITCAST %0 %18:_(s32) = G_BITCAST %16 - %w0 = COPY %18 + $w0 = COPY %18 ... --- @@ -138,22 +138,22 @@ - { id: 3, class: _} body: | bb.1: - liveins: %x0, %x1 + liveins: $x0, $x1 ; This is legal and shouldn't be changed. ; CHECK-LABEL: name: bitcast128 - ; CHECK: liveins: %x0, %x1 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64) ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[MV]](s128) - ; CHECK: %q0 = COPY [[BITCAST]](<2 x s64>) - ; CHECK: RET_ReallyLR implicit %q0 - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $q0 = COPY [[BITCAST]](<2 x s64>) + ; CHECK: RET_ReallyLR implicit $q0 + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %3(s128) = G_MERGE_VALUES %0(s64), %1(s64) %2(<2 x s64>) = G_BITCAST %3(s128) - %q0 = COPY %2(<2 x s64>) - RET_ReallyLR implicit %q0 + $q0 = COPY %2(<2 x s64>) + RET_ReallyLR implicit $q0 ... --- @@ -166,19 +166,19 @@ - { id: 3, class: _} body: | bb.1: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: testExtOfCopyOfTrunc - ; CHECK: liveins: %x0 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: liveins: $x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) - ; CHECK: %x0 = COPY [[COPY1]](s64) - ; CHECK: RET_ReallyLR implicit %x0 - %0(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[COPY1]](s64) + ; CHECK: RET_ReallyLR implicit $x0 + %0(s64) = COPY $x0 %1(s1) = G_TRUNC %0 %2(s1) = COPY %1 %3(s64) = G_ANYEXT %2 - %x0 = COPY %3 - RET_ReallyLR implicit %x0 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 ... 
--- @@ -191,19 +191,19 @@ - { id: 3, class: _} body: | bb.1: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: testExtOf2CopyOfTrunc - ; CHECK: liveins: %x0 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 + ; CHECK: liveins: $x0 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) - ; CHECK: %x0 = COPY [[COPY1]](s64) - ; CHECK: RET_ReallyLR implicit %x0 - %0(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[COPY1]](s64) + ; CHECK: RET_ReallyLR implicit $x0 + %0(s64) = COPY $x0 %1(s1) = G_TRUNC %0 %2(s1) = COPY %1 %4:_(s1) = COPY %2 %3(s64) = G_ANYEXT %4 - %x0 = COPY %3 - RET_ReallyLR implicit %x0 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-sub.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-sub.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-sub.mir @@ -21,21 +21,21 @@ - { id: 5, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_scalar_sub_small - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]] ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SUB]](s32) - ; CHECK: %x0 = COPY [[ANYEXT]](s64) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ANYEXT]](s64) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 %4(s8) = G_SUB %2, %3 %5(s64) = G_ANYEXT %4 - %x0 = COPY %5 + $x0 = COPY %5 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-undef.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-undef.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-undef.mir @@ -14,5 +14,5 @@ ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[DEF]](s64), [[DEF1]](s64) %0:_(s128) = G_IMPLICIT_DEF %1:_(s64) = G_TRUNC %0 - %x0 = COPY %1 + $x0 = COPY %1 ... Index: test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir @@ -23,6 +23,6 @@ ; CHECK: unable to legalize instruction: {{.*}} G_UNMERGE_VALUES %1(s4), %2(s4)= G_UNMERGE_VALUES %0(s8) %3(s64) = G_ANYEXT %1(s4) - %x0 = COPY %3(s64) + $x0 = COPY %3(s64) ... 
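On test_scalar_sub_small above: an s8 G_SUB is not legal on AArch64, so it is widened — both inputs are truncated to s32, the subtraction happens at s32, and the result flows out through G_ANYEXT, whose high bits are unspecified. A small self-contained model of that recipe follows; sub_s8_via_s32 is an illustrative name, not LLVM code, and it assumes nothing beyond what the CHECK lines show.

  #include <cstdint>
  // Model of the widening the CHECK lines describe: compute the s8
  // subtraction at 32 bits; only the low 8 bits of the result are
  // meaningful (G_ANYEXT leaves the remaining bits unspecified).
  uint8_t sub_s8_via_s32(uint64_t a, uint64_t b) {
    uint32_t wa = static_cast<uint32_t>(a); // G_TRUNC s64 -> s32
    uint32_t wb = static_cast<uint32_t>(b); // G_TRUNC s64 -> s32
    uint32_t ws = wa - wb;                  // G_SUB at s32
    return static_cast<uint8_t>(ws);        // low 8 bits = the s8 result
  }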
Index: test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir @@ -12,7 +12,7 @@ body: | bb.0: ; CHECK-LABEL: name: test_vaarg - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[LOAD]], [[C]](s64) @@ -28,7 +28,7 @@ ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[PTR_MASK]], [[C3]](s64) ; CHECK: G_STORE [[GEP3]](p0), [[COPY]](p0) :: (store 8) - %0:_(p0) = COPY %x0 + %0:_(p0) = COPY $x0 %1:_(s8) = G_VAARG %0(p0), 1 Index: test/CodeGen/AArch64/GlobalISel/legalize-xor.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-xor.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-xor.mir @@ -21,21 +21,21 @@ - { id: 5, class: _ } body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 ; CHECK-LABEL: name: test_scalar_xor_small - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]] ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32) - ; CHECK: %x0 = COPY [[ANYEXT]](s64) - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ANYEXT]](s64) + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 %4(s8) = G_XOR %2, %3 %5(s64) = G_ANYEXT %4 - %x0 = COPY %5 + $x0 = COPY %5 ... Index: test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir +++ test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir @@ -75,10 +75,10 @@ # CHECK-NEXT: G_FADD %0, %2 body: | bb.0 (%ir-block.0): - liveins: %s0, %w0 + liveins: $s0, $w0 - %0(s32) = COPY %s0 - %6(s32) = COPY %w0 + %0(s32) = COPY $s0 + %6(s32) = COPY $w0 %1(s1) = G_TRUNC %6 %4(s32) = G_FCONSTANT float 1.000000e+00 %5(s32) = G_FCONSTANT float 2.000000e+00 @@ -93,7 +93,7 @@ bb.3.end: %2(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2 %3(s32) = G_FADD %0, %2 - %s0 = COPY %3(s32) - RET_ReallyLR implicit %s0 + $s0 = COPY %3(s32) + RET_ReallyLR implicit $s0 ... Index: test/CodeGen/AArch64/GlobalISel/localizer.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/localizer.mir +++ test/CodeGen/AArch64/GlobalISel/localizer.mir @@ -274,8 +274,8 @@ ; CHECK-LABEL: name: non_local_label ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: liveins: %s0 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: liveins: $s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00 ; CHECK: bb.1: ; CHECK: successors: %bb.1(0x80000000) @@ -288,10 +288,10 @@ ; The newly created reg should be on the same regbank/regclass as its origin. 
bb.0: - liveins: %s0 + liveins: $s0 successors: %bb.1 - %0:fpr(s32) = COPY %s0 + %0:fpr(s32) = COPY $s0 %1:fpr(s32) = G_FCONSTANT float 1.0 bb.1: Index: test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir +++ test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir @@ -9,15 +9,15 @@ ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-NEXT: %[[TWO:[0-9]+]]:_(s32) = G_ADD %[[ONE]], %[[ONE]] ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]] - ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) - ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]] + ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) + ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]] bb.0: %0:_(s32) = G_CONSTANT i32 1 %1:_(s32) = G_ADD %0, %0 %2:_(s32) = G_ADD %0, %0 %3:_(s32) = G_ADD %1, %2 - %w0 = COPY %3(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %3(s32) + RET_ReallyLR implicit $w0 ... --- name: regbankselected @@ -29,15 +29,15 @@ ; CHECK: %[[ONE:[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]] ; CHECK-NEXT: %[[SUM:[0-9]+]]:gpr(s32) = G_ADD %[[TWO]], %[[TWO]] - ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) - ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]] + ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) + ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]] bb.0: %0:gpr(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 %2:gpr(s32) = G_ADD %0, %0 %3:gpr(s32) = G_ADD %1, %2 - %w0 = COPY %3(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %3(s32) + RET_ReallyLR implicit $w0 ... --- name: legalized @@ -49,15 +49,15 @@ ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]] ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]] - ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) - ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]] + ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) + ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]] bb.0: %0:_(s32) = G_CONSTANT i32 1 %1:_(s32) = G_ADD %0, %0 %2:gpr(s32) = G_ADD %0, %0 %3:_(s32) = G_ADD %1, %2 - %w0 = COPY %3(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %3(s32) + RET_ReallyLR implicit $w0 ... --- name: legalized_sym @@ -69,15 +69,15 @@ ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]] ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]] - ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) - ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]] + ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32) + ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]] bb.0: %0:_(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 %2:_(s32) = G_ADD %0, %0 %3:_(s32) = G_ADD %1, %2 - %w0 = COPY %3(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %3(s32) + RET_ReallyLR implicit $w0 ... 
--- name: int_extensions @@ -93,8 +93,8 @@ ; CHECK-NEXT: %[[S16_Z64:[0-9]+]]:_(s64) = G_ZEXT %[[S16]](s16) ; CHECK-NEXT: %[[S32_Z64:[0-9]+]]:_(s64) = G_ZEXT %[[S32]](s32) ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s64) = G_ADD %[[S16_Z64]], %[[S32_Z64]] - ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s64) - ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]] + ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s64) + ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]] bb.0.entry: %0:_(s8) = G_CONSTANT i8 1 %1:_(s16) = G_SEXT %0(s8) @@ -102,8 +102,8 @@ %3:_(s64) = G_ZEXT %1(s16) %4:_(s64) = G_ZEXT %2(s32) %5:_(s64) = G_ADD %3, %4 - %x0 = COPY %5(s64) - RET_ReallyLR implicit %x0 + $x0 = COPY %5(s64) + RET_ReallyLR implicit $x0 ... --- name: generic @@ -115,13 +115,13 @@ ; CHECK: %[[SG:[0-9]+]]:_(s32) = G_ADD %{{[0-9]+}}, %{{[0-9]+}} ; CHECK-NEXT: %{{[0-9]+}}:_(s32) = G_ADD %[[SG]], %[[SG]] bb.0: - %0:_(s32) = COPY %w0 - %1:_(s32) = COPY %w1 + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w1 %2:_(s32) = G_ADD %0, %1 %3:_(s32) = COPY %2(s32) %4:_(s32) = G_ADD %3, %3 - %w0 = COPY %4(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %4(s32) + RET_ReallyLR implicit $w0 ... --- name: generic_to_concrete_copy @@ -134,13 +134,13 @@ ; CHECK-NEXT: %[[S2:[0-9]+]]:gpr32 = COPY %[[S1]](s32) ; CHECK-NEXT: %{{[0-9]+}}:gpr32 = ADDWrr %[[S2]], %[[S2]] bb.0: - %0:_(s32) = COPY %w0 - %1:_(s32) = COPY %w1 + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w1 %2:_(s32) = G_ADD %0, %1 %3:gpr32 = COPY %2(s32) %4:gpr32 = ADDWrr %3, %3 - %w0 = COPY %4 - RET_ReallyLR implicit %w0 + $w0 = COPY %4 + RET_ReallyLR implicit $w0 ... --- name: concrete_to_generic_copy @@ -153,13 +153,13 @@ ; CHECK-NEXT: %[[S2:[0-9]+]]:_(s32) = COPY %[[S1]] ; CHECK-NEXT: %{{[0-9]+}}:_(s32) = G_ADD %[[S2]], %[[S2]] bb.0: - %0:gpr32 = COPY %w0 - %1:gpr32 = COPY %w1 + %0:gpr32 = COPY $w0 + %1:gpr32 = COPY $w1 %2:gpr32 = ADDWrr %0, %1 %3:_(s32) = COPY %2 %4:_(s32) = G_ADD %3, %3 - %w0 = COPY %4(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %4(s32) + RET_ReallyLR implicit $w0 ... --- name: concrete @@ -171,11 +171,11 @@ ; CHECK: %[[SC:[0-9]+]]:gpr32 = ADDWrr %{{[0-9]+}}, %{{[0-9]+}} ; CHECK-NEXT: %{{[0-9]+}}:gpr32 = ADDWrr %[[SC]], %[[SC]] bb.0: - %0:gpr32 = COPY %w0 - %1:gpr32 = COPY %w1 + %0:gpr32 = COPY $w0 + %1:gpr32 = COPY $w1 %2:gpr32 = ADDWrr %0, %1 %3:gpr32 = COPY %2 %4:gpr32 = ADDWrr %3, %3 - %w0 = COPY %4 - RET_ReallyLR implicit %w0 + $w0 = COPY %4 + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/AArch64/GlobalISel/no-regclass.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/no-regclass.mir +++ test/CodeGen/AArch64/GlobalISel/no-regclass.mir @@ -19,13 +19,13 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: unused_reg - ; CHECK: liveins: %w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0 - ; CHECK: %w0 = COPY [[COPY]] - %0:gpr(s32) = COPY %w0 + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0 + ; CHECK: $w0 = COPY [[COPY]] + %0:gpr(s32) = COPY $w0 %1:gpr(s64) = G_MERGE_VALUES %0(s32), %0(s32) %2:gpr(s32), %3:gpr(s32) = G_UNMERGE_VALUES %1(s64) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... 
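One detail worth calling out in the machine-cse hunks above: where a FileCheck variable captures a physical return register, the sigil moves outside the capture, i.e. %[[RET:[wx][0-9]+]] becomes $[[RET:[wx][0-9]+]], so RET still binds only the bare register name (w0 or x0). A quick plain-std::regex check of that pattern shape, purely for illustration:

  #include <cassert>
  #include <regex>
  #include <string>
  // The capture group matches only the register name; the '$' sigil is
  // a literal prefix, exactly as in the updated CHECK lines.
  int main() {
    std::regex Ret(R"(\$([wx][0-9]+) = COPY)");
    std::smatch M;
    std::string Line = "$w0 = COPY %3(s32)";
    assert(std::regex_search(Line, M, Ret) && M[1] == "w0");
    return 0;
  }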
Index: test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir +++ test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir @@ -5,7 +5,7 @@ registers: body: | bb.0.entry: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 ; CHECK-LABEL: name: test_large_merge ; CHECK: registers: @@ -13,10 +13,10 @@ ; CHECK: - { id: 1, class: gpr ; CHECK: - { id: 2, class: gpr ; CHECK: - { id: 3, class: fpr - %0:_(s64) = COPY %x0 - %1:_(s64) = COPY %x1 - %2:_(p0) = COPY %x2 + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 + %2:_(p0) = COPY $x2 %3:_(s128) = G_MERGE_VALUES %0, %1 %4:_(s64) = G_TRUNC %3 - %d0 = COPY %4 + $d0 = COPY %4 ... Index: test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir +++ test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir @@ -34,11 +34,11 @@ # CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' } body: | bb.0: - liveins: %w0 - %0:_(s32) = COPY %w0 - ; CHECK: DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9 - DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9 + liveins: $w0 + %0:_(s32) = COPY $w0 + ; CHECK: DBG_VALUE debug-use %0(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9 + DBG_VALUE debug-use %0(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9 - ; CHECK: DBG_VALUE %noreg, 0, !7, !DIExpression(), debug-location !9 - DBG_VALUE %noreg, 0, !7, !DIExpression(), debug-location !9 + ; CHECK: DBG_VALUE $noreg, 0, !7, !DIExpression(), debug-location !9 + DBG_VALUE $noreg, 0, !7, !DIExpression(), debug-location !9 ... Index: test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir +++ test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir @@ -80,11 +80,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_add_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_ADD %0, %0 ... @@ -96,11 +96,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_add_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[ADD:%[0-9]+]]:fpr(<4 x s32>) = G_ADD [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_ADD %0, %0 ... @@ -112,11 +112,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sub_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[SUB:%[0-9]+]]:gpr(s32) = G_SUB [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_SUB %0, %0 ... @@ -128,11 +128,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_sub_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[SUB:%[0-9]+]]:fpr(<4 x s32>) = G_SUB [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_SUB %0, %0 ... 
@@ -144,11 +144,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_mul_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[MUL:%[0-9]+]]:gpr(s32) = G_MUL [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_MUL %0, %0 ... @@ -160,11 +160,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_mul_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[MUL:%[0-9]+]]:fpr(<4 x s32>) = G_MUL [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_MUL %0, %0 ... @@ -176,11 +176,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_and_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_AND %0, %0 ... @@ -192,11 +192,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_and_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[AND:%[0-9]+]]:fpr(<4 x s32>) = G_AND [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_AND %0, %0 ... @@ -208,11 +208,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_or_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[OR:%[0-9]+]]:gpr(s32) = G_OR [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_OR %0, %0 ... @@ -224,11 +224,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_or_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[OR:%[0-9]+]]:fpr(<4 x s32>) = G_OR [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_OR %0, %0 ... @@ -240,11 +240,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_xor_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[XOR:%[0-9]+]]:gpr(s32) = G_XOR [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_XOR %0, %0 ... @@ -256,11 +256,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_xor_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[XOR:%[0-9]+]]:fpr(<4 x s32>) = G_XOR [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_XOR %0, %0 ... @@ -272,11 +272,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_shl_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_SHL %0, %0 ... 
@@ -288,11 +288,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %q0 + liveins: $q0 ; CHECK-LABEL: name: test_shl_v4s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0 ; CHECK: [[SHL:%[0-9]+]]:fpr(<4 x s32>) = G_SHL [[COPY]], [[COPY]] - %0(<4 x s32>) = COPY %q0 + %0(<4 x s32>) = COPY $q0 %1(<4 x s32>) = G_SHL %0, %0 ... @@ -304,11 +304,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_lshr_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_LSHR %0, %0 ... @@ -320,11 +320,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_ashr_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_ASHR %0, %0 ... @@ -336,11 +336,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sdiv_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[SDIV:%[0-9]+]]:gpr(s32) = G_SDIV [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_SDIV %0, %0 ... @@ -352,11 +352,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_udiv_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[UDIV:%[0-9]+]]:gpr(s32) = G_UDIV [[COPY]], [[COPY]] - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_UDIV %0, %0 ... @@ -368,11 +368,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_anyext_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[ANYEXT:%[0-9]+]]:gpr(s64) = G_ANYEXT [[COPY]](s32) - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s64) = G_ANYEXT %0 ... @@ -384,11 +384,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sext_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[SEXT:%[0-9]+]]:gpr(s64) = G_SEXT [[COPY]](s32) - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s64) = G_SEXT %0 ... @@ -400,11 +400,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_zext_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[ZEXT:%[0-9]+]]:gpr(s64) = G_ZEXT [[COPY]](s32) - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s64) = G_ZEXT %0 ... @@ -416,11 +416,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_trunc_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0 ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s32) = G_TRUNC [[COPY]](s64) - %0(s64) = COPY %x0 + %0(s64) = COPY $x0 %1(s32) = G_TRUNC %0 ... 
@@ -457,12 +457,12 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_icmp_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY]] ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32) - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_ICMP intpred(ne), %0, %0 %2(s1) = G_TRUNC %1(s32) ... @@ -476,12 +476,12 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_icmp_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0 ; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](p0), [[COPY]] ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32) - %0(p0) = COPY %x0 + %0(p0) = COPY $x0 %1(s32) = G_ICMP intpred(ne), %0, %0 %2(s1) = G_TRUNC %1(s32) ... @@ -508,11 +508,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_ptrtoint_s64_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0 ; CHECK: [[PTRTOINT:%[0-9]+]]:gpr(s64) = G_PTRTOINT [[COPY]](p0) - %0(p0) = COPY %x0 + %0(p0) = COPY $x0 %1(s64) = G_PTRTOINT %0 ... @@ -524,11 +524,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_inttoptr_p0_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0 ; CHECK: [[INTTOPTR:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[COPY]](s64) - %0(s64) = COPY %x0 + %0(s64) = COPY $x0 %1(p0) = G_INTTOPTR %0 ... @@ -540,11 +540,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_load_s32_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0 ; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4) - %0(p0) = COPY %x0 + %0(p0) = COPY $x0 %1(s32) = G_LOAD %0 :: (load 4) ... @@ -556,13 +556,13 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: test_store_s32_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1 ; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4) - %0(p0) = COPY %x0 - %1(s32) = COPY %w1 + %0(p0) = COPY $x0 + %1(s32) = COPY $w1 G_STORE %1, %0 :: (store 4) ... @@ -574,11 +574,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fadd_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[COPY]], [[COPY]] - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FADD %0, %0 ... @@ -590,11 +590,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fsub_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FSUB:%[0-9]+]]:fpr(s32) = G_FSUB [[COPY]], [[COPY]] - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FSUB %0, %0 ... 
@@ -606,11 +606,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fmul_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FMUL:%[0-9]+]]:fpr(s32) = G_FMUL [[COPY]], [[COPY]] - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FMUL %0, %0 ... @@ -622,11 +622,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fdiv_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FDIV:%[0-9]+]]:fpr(s32) = G_FDIV [[COPY]], [[COPY]] - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FDIV %0, %0 ... @@ -638,11 +638,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fpext_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FPEXT:%[0-9]+]]:fpr(s64) = G_FPEXT [[COPY]](s32) - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s64) = G_FPEXT %0 ... @@ -654,11 +654,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: test_fptrunc_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0 ; CHECK: [[FPTRUNC:%[0-9]+]]:fpr(s32) = G_FPTRUNC [[COPY]](s64) - %0(s64) = COPY %d0 + %0(s64) = COPY $d0 %1(s32) = G_FPTRUNC %0 ... @@ -683,12 +683,12 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fcmp_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FCMP:%[0-9]+]]:gpr(s32) = G_FCMP floatpred(olt), [[COPY]](s32), [[COPY]] ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[FCMP]](s32) - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FCMP floatpred(olt), %0, %0 %2(s1) = G_TRUNC %1(s32) ... @@ -701,11 +701,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_sitofp_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0 ; CHECK: [[SITOFP:%[0-9]+]]:fpr(s64) = G_SITOFP [[COPY]](s32) - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s64) = G_SITOFP %0 ... @@ -717,11 +717,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: test_uitofp_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0 ; CHECK: [[UITOFP:%[0-9]+]]:fpr(s32) = G_UITOFP [[COPY]](s64) - %0(s64) = COPY %x0 + %0(s64) = COPY $x0 %1(s32) = G_UITOFP %0 ... @@ -733,11 +733,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: test_fptosi_s64_s32 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0 ; CHECK: [[FPTOSI:%[0-9]+]]:gpr(s64) = G_FPTOSI [[COPY]](s32) - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s64) = G_FPTOSI %0 ... @@ -749,11 +749,11 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: test_fptoui_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0 ; CHECK: [[FPTOUI:%[0-9]+]]:gpr(s32) = G_FPTOUI [[COPY]](s64) - %0(s64) = COPY %d0 + %0(s64) = COPY $d0 %1(s32) = G_FPTOUI %0 ... 
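Beyond the sigil change, this long run of regbankselect-default.mir updates checks the default bank assignment: integer, pointer, and compare results map to the gpr bank; FP and vector values map to fpr; and conversions such as G_SITOFP and G_FPTOSI split banks between source and destination. A toy table of the result-bank mapping, using a hypothetical enum rather than the RegisterBankInfo API:

  enum class Bank { GPR, FPR };
  enum class Op { Add, FAdd, SIToFP, FPToSI, FCmp };
  // Toy mapping of result banks reflecting the CHECK lines above.
  Bank resultBank(Op O) {
    switch (O) {
    case Op::Add:    return Bank::GPR; // scalar G_ADD -> gpr
    case Op::FAdd:   return Bank::FPR; // G_FADD -> fpr
    case Op::SIToFP: return Bank::FPR; // gpr source, fpr result
    case Op::FPToSI: return Bank::GPR; // fpr source, gpr result
    case Op::FCmp:   return Bank::GPR; // compare result is an integer
    }
    return Bank::GPR;
  }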
@@ -772,10 +772,10 @@ ; CHECK-LABEL: name: test_gphi_ptr ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: %w2, %x0, %x1 - ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %x1 - ; CHECK: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %w2 + ; CHECK: liveins: $w2, $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $w2 ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY2]](s32) ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1 ; CHECK: G_BR %bb.2 @@ -783,15 +783,15 @@ ; CHECK: successors: %bb.2(0x80000000) ; CHECK: bb.2: ; CHECK: [[PHI:%[0-9]+]]:gpr(p0) = G_PHI [[COPY]](p0), %bb.0, [[COPY1]](p0), %bb.1 - ; CHECK: %x0 = COPY [[PHI]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[PHI]](p0) + ; CHECK: RET_ReallyLR implicit $x0 bb.0: successors: %bb.1, %bb.2 - liveins: %w2, %x0, %x1 + liveins: $w2, $x0, $x1 - %0(p0) = COPY %x0 - %1(p0) = COPY %x1 - %4(s32) = COPY %w2 + %0(p0) = COPY $x0 + %1(p0) = COPY $x1 + %4(s32) = COPY $w2 %2(s1) = G_TRUNC %4(s32) G_BRCOND %2(s1), %bb.1 G_BR %bb.2 @@ -802,7 +802,7 @@ bb.2: %3(p0) = G_PHI %0(p0), %bb.0, %1(p0), %bb.1 - %x0 = COPY %3(p0) - RET_ReallyLR implicit %x0 + $x0 = COPY %3(p0) + RET_ReallyLR implicit $x0 ... Index: test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir +++ test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir @@ -18,8 +18,8 @@ - { id: 0, class: dd } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0 = REG_SEQUENCE %d0, %subreg.dsub0, %d1, %subreg.dsub1 + %0 = REG_SEQUENCE $d0, %subreg.dsub0, $d1, %subreg.dsub1 ... Index: test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir +++ test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir @@ -25,17 +25,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_xchg_i64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr64 = SWPX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: %x0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $x0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 1 %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic 8 on %ir.addr) - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- name: atomicrmw_add_i64 @@ -44,17 +44,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_add_i64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr64 = LDADDX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: %x0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $x0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 1 %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr) - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... 
--- name: atomicrmw_add_i32 @@ -63,17 +63,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_add_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -83,17 +83,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_sub_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -103,18 +103,18 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_and_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[CST]] + ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[CST]] ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDCLRAW [[CST2]], [[COPY]] :: (load store acquire 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -124,17 +124,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_or_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSETLW [[CST]], [[COPY]] :: (load store release 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -144,17 +144,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_xor_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... 
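The atomicrmw_and_i32 hunk above is the one non-mechanical case in this file: LSE has no atomic AND, so G_ATOMICRMW_AND is selected as a bit-clear — the operand is complemented with ORNWrr against $wzr and fed to LDCLRAW. A small self-contained model of that identity (and_via_clr is an illustrative name, not LLVM code):

  #include <cassert>
  #include <cstdint>
  // mem AND v == mem CLR (~v): clearing the complement's bits keeps
  // exactly v's bits, which is why ORN + LDCLR implements atomic AND.
  uint32_t and_via_clr(uint32_t mem, uint32_t v) {
    uint32_t mask = ~v;  // ORNWrr $wzr, v computes ~v
    return mem & ~mask;  // LDCLRAW clears mask bits: equals mem & v
  }
  int main() {
    assert(and_via_clr(0b1100u, 0b1010u) == 0b1000u);
    return 0;
  }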
--- @@ -164,17 +164,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_min_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -184,17 +184,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_max_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -204,17 +204,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_umin_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -224,15 +224,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: atomicrmw_umax_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 8 on %ir.addr) - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... Index: test/CodeGen/AArch64/GlobalISel/select-binop.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-binop.mir +++ test/CodeGen/AArch64/GlobalISel/select-binop.mir @@ -70,17 +70,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: add_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[ADDWrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[ADDWrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_ADD %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... 
--- @@ -96,17 +96,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: add_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[ADDXrr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ADDXrr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_ADD %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -121,16 +121,16 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: add_imm_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 ; CHECK: [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 1, 0 - ; CHECK: %w0 = COPY [[ADDWri]] - %0(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[ADDWri]] + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 1 %2(s32) = G_ADD %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -145,16 +145,16 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: add_imm_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 1, 0 - ; CHECK: %x0 = COPY [[ADDXri]] - %0(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[ADDXri]] + %0(s64) = COPY $x0 %1(s64) = G_CONSTANT i32 1 %2(s64) = G_ADD %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -171,22 +171,22 @@ ; CHECK-LABEL: name: add_imm_s32_gpr_bb ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 ; CHECK: B %bb.1 ; CHECK: bb.1: ; CHECK: [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 1, 0 - ; CHECK: %w0 = COPY [[ADDWri]] + ; CHECK: $w0 = COPY [[ADDWri]] bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 successors: %bb.1 - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 1 G_BR %bb.1 bb.1: %2(s32) = G_ADD %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -202,17 +202,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: sub_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 - ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def %nzcv - ; CHECK: %w0 = COPY [[SUBSWrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def $nzcv + ; CHECK: $w0 = COPY [[SUBSWrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_SUB %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -228,17 +228,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: sub_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 - ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def %nzcv - ; CHECK: %x0 = COPY [[SUBSXrr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv + ; CHECK: $x0 = COPY [[SUBSXrr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_SUB %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... 
--- @@ -254,17 +254,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: or_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[ORRWrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[ORRWrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_OR %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -280,17 +280,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: or_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[ORRXrr:%[0-9]+]]:gpr64 = ORRXrr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[ORRXrr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ORRXrr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_OR %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -308,17 +308,17 @@ # on 64-bit width vector. body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: or_v2s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY]], [[COPY1]] - ; CHECK: %d0 = COPY [[ORRv8i8_]] - %0(<2 x s32>) = COPY %d0 - %1(<2 x s32>) = COPY %d1 + ; CHECK: $d0 = COPY [[ORRv8i8_]] + %0(<2 x s32>) = COPY $d0 + %1(<2 x s32>) = COPY $d1 %2(<2 x s32>) = G_OR %0, %1 - %d0 = COPY %2(<2 x s32>) + $d0 = COPY %2(<2 x s32>) ... --- @@ -334,17 +334,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: and_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[ANDWrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[ANDWrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_AND %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -360,17 +360,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: and_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[ANDXrr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ANDXrr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_AND %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -386,17 +386,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: shl_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[LSLVWr:%[0-9]+]]:gpr32 = LSLVWr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[LSLVWr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[LSLVWr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_SHL %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... 
--- @@ -412,17 +412,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: shl_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[LSLVXr:%[0-9]+]]:gpr64 = LSLVXr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[LSLVXr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[LSLVXr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_SHL %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -438,17 +438,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: lshr_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[LSRVWr:%[0-9]+]]:gpr32 = LSRVWr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[LSRVWr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[LSRVWr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_LSHR %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -464,17 +464,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: lshr_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[LSRVXr:%[0-9]+]]:gpr64 = LSRVXr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[LSRVXr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[LSRVXr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_LSHR %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -490,17 +490,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: ashr_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[ASRVWr:%[0-9]+]]:gpr32 = ASRVWr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[ASRVWr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[ASRVWr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_ASHR %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -516,17 +516,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: ashr_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[ASRVXr:%[0-9]+]]:gpr64 = ASRVXr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[ASRVXr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[ASRVXr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_ASHR %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... 
--- @@ -543,17 +543,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: mul_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 - ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[COPY1]], %wzr - ; CHECK: %w0 = COPY [[MADDWrrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[COPY1]], $wzr + ; CHECK: $w0 = COPY [[MADDWrrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_MUL %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -569,17 +569,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: mul_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 - ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[COPY]], [[COPY1]], %xzr - ; CHECK: %x0 = COPY [[MADDXrrr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[COPY]], [[COPY1]], $xzr + ; CHECK: $x0 = COPY [[MADDXrrr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_MUL %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -591,21 +591,21 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: mulh_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[SMULHrr:%[0-9]+]]:gpr64 = SMULHrr [[COPY]], [[COPY1]] ; CHECK: [[UMULHrr:%[0-9]+]]:gpr64 = UMULHrr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[SMULHrr]] - ; CHECK: %x0 = COPY [[UMULHrr]] - %0:gpr(s64) = COPY %x0 - %1:gpr(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[SMULHrr]] + ; CHECK: $x0 = COPY [[UMULHrr]] + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = COPY $x1 %2:gpr(s64) = G_SMULH %0, %1 %3:gpr(s64) = G_UMULH %0, %1 - %x0 = COPY %2(s64) - %x0 = COPY %3(s64) + $x0 = COPY %2(s64) + $x0 = COPY %3(s64) ... --- @@ -621,17 +621,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: sdiv_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[SDIVWr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[SDIVWr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_SDIV %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -647,17 +647,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: sdiv_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[SDIVXr:%[0-9]+]]:gpr64 = SDIVXr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[SDIVXr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[SDIVXr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_SDIV %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... 
--- @@ -673,17 +673,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: udiv_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[UDIVWr:%[0-9]+]]:gpr32 = UDIVWr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[UDIVWr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[UDIVWr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_UDIV %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -699,17 +699,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: udiv_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[UDIVXr:%[0-9]+]]:gpr64 = UDIVXr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[UDIVXr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[UDIVXr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_UDIV %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -725,17 +725,17 @@ body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: fadd_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 ; CHECK: [[FADDSrr:%[0-9]+]]:fpr32 = FADDSrr [[COPY]], [[COPY1]] - ; CHECK: %s0 = COPY [[FADDSrr]] - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: $s0 = COPY [[FADDSrr]] + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FADD %0, %1 - %s0 = COPY %2(s32) + $s0 = COPY %2(s32) ... --- @@ -750,17 +750,17 @@ body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: fadd_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: [[FADDDrr:%[0-9]+]]:fpr64 = FADDDrr [[COPY]], [[COPY1]] - ; CHECK: %d0 = COPY [[FADDDrr]] - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: $d0 = COPY [[FADDDrr]] + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FADD %0, %1 - %d0 = COPY %2(s64) + $d0 = COPY %2(s64) ... --- @@ -775,17 +775,17 @@ body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: fsub_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 ; CHECK: [[FSUBSrr:%[0-9]+]]:fpr32 = FSUBSrr [[COPY]], [[COPY1]] - ; CHECK: %s0 = COPY [[FSUBSrr]] - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: $s0 = COPY [[FSUBSrr]] + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FSUB %0, %1 - %s0 = COPY %2(s32) + $s0 = COPY %2(s32) ... --- @@ -800,17 +800,17 @@ body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: fsub_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: [[FSUBDrr:%[0-9]+]]:fpr64 = FSUBDrr [[COPY]], [[COPY1]] - ; CHECK: %d0 = COPY [[FSUBDrr]] - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: $d0 = COPY [[FSUBDrr]] + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FSUB %0, %1 - %d0 = COPY %2(s64) + $d0 = COPY %2(s64) ... 
--- @@ -825,17 +825,17 @@ body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: fmul_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 ; CHECK: [[FMULSrr:%[0-9]+]]:fpr32 = FMULSrr [[COPY]], [[COPY1]] - ; CHECK: %s0 = COPY [[FMULSrr]] - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: $s0 = COPY [[FMULSrr]] + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FMUL %0, %1 - %s0 = COPY %2(s32) + $s0 = COPY %2(s32) ... --- @@ -850,17 +850,17 @@ body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: fmul_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: [[FMULDrr:%[0-9]+]]:fpr64 = FMULDrr [[COPY]], [[COPY1]] - ; CHECK: %d0 = COPY [[FMULDrr]] - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: $d0 = COPY [[FMULDrr]] + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FMUL %0, %1 - %d0 = COPY %2(s64) + $d0 = COPY %2(s64) ... --- @@ -875,17 +875,17 @@ body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: fdiv_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 ; CHECK: [[FDIVSrr:%[0-9]+]]:fpr32 = FDIVSrr [[COPY]], [[COPY1]] - ; CHECK: %s0 = COPY [[FDIVSrr]] - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: $s0 = COPY [[FDIVSrr]] + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FDIV %0, %1 - %s0 = COPY %2(s32) + $s0 = COPY %2(s32) ... --- @@ -900,15 +900,15 @@ body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: fdiv_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: [[FDIVDrr:%[0-9]+]]:fpr64 = FDIVDrr [[COPY]], [[COPY1]] - ; CHECK: %d0 = COPY [[FDIVDrr]] - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: $d0 = COPY [[FDIVDrr]] + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FDIV %0, %1 - %d0 = COPY %2(s64) + $d0 = COPY %2(s64) ... Index: test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir +++ test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir @@ -6,13 +6,13 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: bitcast_v2f32_to_s64 - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $x0 ; CHECK: [[REV:%[0-9]+]]:fpr64 = REV64v2i32 [[COPY]] - ; CHECK: %x0 = COPY [[REV]] - %0:fpr(<2 x s32>) = COPY %x0 + ; CHECK: $x0 = COPY [[REV]] + %0:fpr(<2 x s32>) = COPY $x0 %1:fpr(s64) = G_BITCAST %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... 
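The test-file hunks in this patch are one mechanical substitution: the '%' sigil becomes '$' on physical registers (liveins lists and COPY operands such as %w0, %d0, %wzr, %noreg), while virtual registers (%0, %2:gpr), basic blocks (%bb.1), stack objects (%stack.0.retval), subregister indices (%subreg.sub_32), and IR values (%ir.addr) keep '%'. A rewrite of this size is better scripted than hand-edited. Below is a minimal sketch of such a script (hypothetical, not the tool actually used for this change), under the assumption that in these tests every '%' followed by a lowercase identifier outside the bb/stack/subreg/ir namespaces names a physical register:

#!/usr/bin/env python3
# Hypothetical sigil-rewrite helper (a sketch, not the upstream script):
# turns "%w0" into "$w0" for physical registers in MIR tests, while leaving
# virtual registers (%0, %2:gpr), blocks (%bb.1), stack objects (%stack.0),
# subregister indices (%subreg.sub_32), and IR values (%ir.addr) untouched.
import re
import sys

# A '%' starts a physical-register name here iff it is followed by a
# lowercase identifier that is not one of the MIR pseudo-namespaces
# (bb., stack., subreg., ir.). Virtual registers start with a digit and
# FileCheck captures start with '[' or '{', so neither can match.
PHYSREG = re.compile(r'%(?!(?:bb|stack|subreg|ir)\.)([a-z][a-z0-9]*)\b')

def rewrite(text):
    return PHYSREG.sub(r'$\1', text)

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as f:
            text = f.read()
        with open(path, 'w') as f:
            f.write(rewrite(text))

Run in place over the affected tests, e.g. python rewrite_sigils.py test/CodeGen/AArch64/GlobalISel/*.mir. Anything outside the heuristic (for instance %ir-block.<name>, which does not occur in these tests) would still need a manual pass.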
Index: test/CodeGen/AArch64/GlobalISel/select-bitcast.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-bitcast.mir +++ test/CodeGen/AArch64/GlobalISel/select-bitcast.mir @@ -26,14 +26,14 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: bitcast_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0 - ; CHECK: %w0 = COPY [[COPY]] - %0(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0 + ; CHECK: $w0 = COPY [[COPY]] + %0(s32) = COPY $w0 %1(s32) = G_BITCAST %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -47,14 +47,14 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: bitcast_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 - ; CHECK: %s0 = COPY [[COPY]] - %0(s32) = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 + ; CHECK: $s0 = COPY [[COPY]] + %0(s32) = COPY $s0 %1(s32) = G_BITCAST %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -68,15 +68,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: bitcast_s32_gpr_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]] - ; CHECK: %s0 = COPY [[COPY1]] - %0(s32) = COPY %w0 + ; CHECK: $s0 = COPY [[COPY1]] + %0(s32) = COPY $w0 %1(s32) = G_BITCAST %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -90,15 +90,15 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: bitcast_s32_fpr_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]] - ; CHECK: %w0 = COPY [[COPY1]] - %0(s32) = COPY %s0 + ; CHECK: $w0 = COPY [[COPY1]] + %0(s32) = COPY $s0 %1(s32) = G_BITCAST %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -112,14 +112,14 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: bitcast_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %x0 - ; CHECK: %x0 = COPY [[COPY]] - %0(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0 + ; CHECK: $x0 = COPY [[COPY]] + %0(s64) = COPY $x0 %1(s64) = G_BITCAST %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -133,14 +133,14 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: bitcast_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: %d0 = COPY [[COPY]] - %0(s64) = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: $d0 = COPY [[COPY]] + %0(s64) = COPY $d0 %1(s64) = G_BITCAST %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -153,15 +153,15 @@ - { id: 1, class: fpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: bitcast_s64_gpr_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]] - ; CHECK: %d0 = COPY [[COPY1]] - %0(s64) = COPY %x0 + ; CHECK: $d0 = COPY [[COPY1]] + %0(s64) = COPY $x0 %1(s64) = G_BITCAST %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -175,15 +175,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: bitcast_s64_fpr_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]] - ; CHECK: %x0 = COPY [[COPY1]] - %0(s64) = COPY %d0 + ; CHECK: $x0 = COPY [[COPY1]] + %0(s64) = COPY $d0 %1(s64) = G_BITCAST %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... 
--- @@ -197,14 +197,14 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: bitcast_s64_v2f32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: %x0 = COPY [[COPY]] - %0(s64) = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: $x0 = COPY [[COPY]] + %0(s64) = COPY $d0 %1(<2 x s32>) = G_BITCAST %0 - %x0 = COPY %1(<2 x s32>) + $x0 = COPY %1(<2 x s32>) ... --- @@ -218,12 +218,12 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: bitcast_s64_v8i8_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 - ; CHECK: %x0 = COPY [[COPY]] - %0(s64) = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: $x0 = COPY [[COPY]] + %0(s64) = COPY $d0 %1(<8 x s8>) = G_BITCAST %0 - %x0 = COPY %1(<8 x s8>) + $x0 = COPY %1(<8 x s8>) ... Index: test/CodeGen/AArch64/GlobalISel/select-br.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-br.mir +++ test/CodeGen/AArch64/GlobalISel/select-br.mir @@ -42,7 +42,7 @@ body: | bb.0: successors: %bb.0, %bb.1 - %1(s32) = COPY %w0 + %1(s32) = COPY $w0 %0(s1) = G_TRUNC %1 G_BRCOND %0(s1), %bb.1 G_BR %bb.0 @@ -61,12 +61,12 @@ # CHECK: body: # CHECK: bb.0: -# CHECK: %0:gpr64 = COPY %x0 +# CHECK: %0:gpr64 = COPY $x0 # CHECK: BR %0 body: | bb.0: successors: %bb.0, %bb.1 - %0(p0) = COPY %x0 + %0(p0) = COPY $x0 G_BRINDIRECT %0(p0) bb.1: Index: test/CodeGen/AArch64/GlobalISel/select-bswap.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-bswap.mir +++ test/CodeGen/AArch64/GlobalISel/select-bswap.mir @@ -19,15 +19,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: bswap_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[REVWr:%[0-9]+]]:gpr32 = REVWr [[COPY]] - ; CHECK: %w0 = COPY [[REVWr]] - %0(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[REVWr]] + %0(s32) = COPY $w0 %1(s32) = G_BSWAP %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- @@ -41,13 +41,13 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: bswap_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[REVXr:%[0-9]+]]:gpr64 = REVXr [[COPY]] - ; CHECK: %x0 = COPY [[REVXr]] - %0(s64) = COPY %x0 + ; CHECK: $x0 = COPY [[REVXr]] + %0(s64) = COPY $x0 %1(s64) = G_BSWAP %0 - %x0 = COPY %1 + $x0 = COPY %1 ... 
Index: test/CodeGen/AArch64/GlobalISel/select-cbz.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-cbz.mir +++ test/CodeGen/AArch64/GlobalISel/select-cbz.mir @@ -15,15 +15,15 @@ # CHECK: body: # CHECK: bb.0: -# CHECK: %0:gpr32 = COPY %w0 +# CHECK: %0:gpr32 = COPY $w0 # CHECK: CBZW %0, %bb.1 # CHECK: B %bb.0 body: | bb.0: - liveins: %w0 + liveins: $w0 successors: %bb.0, %bb.1 - %0:gpr(s32) = COPY %w0 + %0:gpr(s32) = COPY $w0 %1:gpr(s32) = G_CONSTANT i32 0 %2:gpr(s32) = G_ICMP intpred(eq), %0, %1 %3:gpr(s1) = G_TRUNC %2(s32) @@ -41,15 +41,15 @@ # CHECK: body: # CHECK: bb.0: -# CHECK: %0:gpr64 = COPY %x0 +# CHECK: %0:gpr64 = COPY $x0 # CHECK: CBZX %0, %bb.1 # CHECK: B %bb.0 body: | bb.0: - liveins: %x0 + liveins: $x0 successors: %bb.0, %bb.1 - %0:gpr(s64) = COPY %x0 + %0:gpr(s64) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 0 %2:gpr(s32) = G_ICMP intpred(eq), %0, %1 %3:gpr(s1) = G_TRUNC %2(s32) @@ -67,15 +67,15 @@ # CHECK: body: # CHECK: bb.0: -# CHECK: %0:gpr32 = COPY %w0 +# CHECK: %0:gpr32 = COPY $w0 # CHECK: CBNZW %0, %bb.1 # CHECK: B %bb.0 body: | bb.0: - liveins: %w0 + liveins: $w0 successors: %bb.0, %bb.1 - %0:gpr(s32) = COPY %w0 + %0:gpr(s32) = COPY $w0 %1:gpr(s32) = G_CONSTANT i32 0 %2:gpr(s32) = G_ICMP intpred(ne), %0, %1 %3:gpr(s1) = G_TRUNC %2(s32) @@ -93,15 +93,15 @@ # CHECK: body: # CHECK: bb.0: -# CHECK: %0:gpr64 = COPY %x0 +# CHECK: %0:gpr64 = COPY $x0 # CHECK: CBNZX %0, %bb.1 # CHECK: B %bb.0 body: | bb.0: - liveins: %x0 + liveins: $x0 successors: %bb.0, %bb.1 - %0:gpr(s64) = COPY %x0 + %0:gpr(s64) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 0 %2:gpr(s32) = G_ICMP intpred(ne), %0, %1 %3:gpr(s1) = G_TRUNC %2(s32) Index: test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir +++ test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir @@ -15,19 +15,19 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: cmpxchg_i32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CMP:%[0-9]+]]:gpr32 = MOVi32imm 0 ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr32 = CASW [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: %w0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 0 %2:gpr(s32) = G_CONSTANT i32 1 %3:gpr(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr) - %w0 = COPY %3(s32) + $w0 = COPY %3(s32) ... --- @@ -37,17 +37,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: cmpxchg_i64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[CMP:%[0-9]+]]:gpr64 = MOVi64imm 0 ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1 ; CHECK: [[RES:%[0-9]+]]:gpr64 = CASX [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: %x0 = COPY [[RES]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $x0 = COPY [[RES]] + %0:gpr(p0) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 0 %2:gpr(s64) = G_CONSTANT i64 1 %3:gpr(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr) - %x0 = COPY %3(s64) + $x0 = COPY %3(s64) ... 
Index: test/CodeGen/AArch64/GlobalISel/select-constant.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-constant.mir +++ test/CodeGen/AArch64/GlobalISel/select-constant.mir @@ -24,9 +24,9 @@ bb.0: ; CHECK-LABEL: name: const_s32 ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42 - ; CHECK: %w0 = COPY [[MOVi32imm]] + ; CHECK: $w0 = COPY [[MOVi32imm]] %0(s32) = G_CONSTANT i32 42 - %w0 = COPY %0(s32) + $w0 = COPY %0(s32) ... --- @@ -40,9 +40,9 @@ bb.0: ; CHECK-LABEL: name: const_s64 ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1234567890123 - ; CHECK: %x0 = COPY [[MOVi64imm]] + ; CHECK: $x0 = COPY [[MOVi64imm]] %0(s64) = G_CONSTANT i64 1234567890123 - %x0 = COPY %0(s64) + $x0 = COPY %0(s64) ... --- @@ -57,9 +57,9 @@ ; CHECK-LABEL: name: fconst_s32 ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1080033280 ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY [[MOVi32imm]] - ; CHECK: %s0 = COPY [[COPY]] + ; CHECK: $s0 = COPY [[COPY]] %0(s32) = G_FCONSTANT float 3.5 - %s0 = COPY %0(s32) + $s0 = COPY %0(s32) ... --- @@ -74,9 +74,9 @@ ; CHECK-LABEL: name: fconst_s64 ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 4607182418800017408 ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVi64imm]] - ; CHECK: %d0 = COPY [[COPY]] + ; CHECK: $d0 = COPY [[COPY]] %0(s64) = G_FCONSTANT double 1.0 - %d0 = COPY %0(s64) + $d0 = COPY %0(s64) ... --- @@ -90,9 +90,9 @@ bb.0: ; CHECK-LABEL: name: fconst_s32_0 ; CHECK: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0 - ; CHECK: %s0 = COPY [[FMOVS0_]] + ; CHECK: $s0 = COPY [[FMOVS0_]] %0(s32) = G_FCONSTANT float 0.0 - %s0 = COPY %0(s32) + $s0 = COPY %0(s32) ... --- @@ -106,7 +106,7 @@ bb.0: ; CHECK-LABEL: name: fconst_s64_0 ; CHECK: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0 - ; CHECK: %x0 = COPY [[FMOVD0_]] + ; CHECK: $x0 = COPY [[FMOVD0_]] %0(s64) = G_FCONSTANT double 0.0 - %x0 = COPY %0(s64) + $x0 = COPY %0(s64) ... Index: test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir +++ test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir @@ -41,16 +41,16 @@ regBankSelected: true body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_dbg_value - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY]], [[COPY]] - ; CHECK: %w0 = COPY [[ADDWrr]] - ; CHECK: DBG_VALUE debug-use [[ADDWrr]], debug-use %noreg, !7, !DIExpression(), debug-location !9 - %0:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[ADDWrr]] + ; CHECK: DBG_VALUE debug-use [[ADDWrr]], debug-use $noreg, !7, !DIExpression(), debug-location !9 + %0:gpr(s32) = COPY $w0 %1:gpr(s32) = G_ADD %0, %0 - %w0 = COPY %1(s32) - DBG_VALUE debug-use %1(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9 + $w0 = COPY %1(s32) + DBG_VALUE debug-use %1(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9 ... --- @@ -59,10 +59,10 @@ regBankSelected: true body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_dbg_value_dead ; CHECK-NOT: COPY - ; CHECK: DBG_VALUE debug-use %noreg, debug-use %noreg, !7, !DIExpression(), debug-location !9 - %0:gpr(s32) = COPY %w0 - DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9 + ; CHECK: DBG_VALUE debug-use $noreg, debug-use $noreg, !7, !DIExpression(), debug-location !9 + %0:gpr(s32) = COPY $w0 + DBG_VALUE debug-use %0(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9 ... 
Index: test/CodeGen/AArch64/GlobalISel/select-fma.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-fma.mir +++ test/CodeGen/AArch64/GlobalISel/select-fma.mir @@ -20,17 +20,17 @@ body: | bb.0: - liveins: %w0, %w1, %w2 + liveins: $w0, $w1, $w2 ; CHECK-LABEL: name: FMADDSrrr_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %w1 - ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY %w2 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $w1 + ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $w2 ; CHECK: [[FMADDSrrr:%[0-9]+]]:fpr32 = FMADDSrrr [[COPY]], [[COPY1]], [[COPY2]] - ; CHECK: %w0 = COPY [[FMADDSrrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 - %2(s32) = COPY %w2 + ; CHECK: $w0 = COPY [[FMADDSrrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 + %2(s32) = COPY $w2 %3(s32) = G_FMA %0, %1, %2 - %w0 = COPY %3 + $w0 = COPY %3 ... Index: test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir +++ test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir @@ -44,15 +44,15 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: fptrunc_s16_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[FCVTHSr:%[0-9]+]]:fpr16 = FCVTHSr [[COPY]] - ; CHECK: %h0 = COPY [[FCVTHSr]] - %0(s32) = COPY %s0 + ; CHECK: $h0 = COPY [[FCVTHSr]] + %0(s32) = COPY $s0 %1(s16) = G_FPTRUNC %0 - %h0 = COPY %1(s16) + $h0 = COPY %1(s16) ... --- @@ -66,15 +66,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fptrunc_s16_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[FCVTHDr:%[0-9]+]]:fpr16 = FCVTHDr [[COPY]] - ; CHECK: %h0 = COPY [[FCVTHDr]] - %0(s64) = COPY %d0 + ; CHECK: $h0 = COPY [[FCVTHDr]] + %0(s64) = COPY $d0 %1(s16) = G_FPTRUNC %0 - %h0 = COPY %1(s16) + $h0 = COPY %1(s16) ... --- @@ -88,15 +88,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fptrunc_s32_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[FCVTSDr:%[0-9]+]]:fpr32 = FCVTSDr [[COPY]] - ; CHECK: %s0 = COPY [[FCVTSDr]] - %0(s64) = COPY %d0 + ; CHECK: $s0 = COPY [[FCVTSDr]] + %0(s64) = COPY $d0 %1(s32) = G_FPTRUNC %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -110,15 +110,15 @@ body: | bb.0: - liveins: %h0 + liveins: $h0 ; CHECK-LABEL: name: fpext_s32_s16_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY %h0 + ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0 ; CHECK: [[FCVTSHr:%[0-9]+]]:fpr32 = FCVTSHr [[COPY]] - ; CHECK: %s0 = COPY [[FCVTSHr]] - %0(s16) = COPY %h0 + ; CHECK: $s0 = COPY [[FCVTSHr]] + %0(s16) = COPY $h0 %1(s32) = G_FPEXT %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -132,15 +132,15 @@ body: | bb.0: - liveins: %h0 + liveins: $h0 ; CHECK-LABEL: name: fpext_s64_s16_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY %h0 + ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0 ; CHECK: [[FCVTDHr:%[0-9]+]]:fpr64 = FCVTDHr [[COPY]] - ; CHECK: %d0 = COPY [[FCVTDHr]] - %0(s16) = COPY %h0 + ; CHECK: $d0 = COPY [[FCVTDHr]] + %0(s16) = COPY $h0 %1(s64) = G_FPEXT %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... 
--- @@ -154,15 +154,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fpext_s64_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[FCVTDSr:%[0-9]+]]:fpr64 = FCVTDSr [[COPY]] - ; CHECK: %d0 = COPY [[FCVTDSr]] - %0(s32) = COPY %s0 + ; CHECK: $d0 = COPY [[FCVTDSr]] + %0(s32) = COPY $s0 %1(s64) = G_FPEXT %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -176,15 +176,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sitofp_s32_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SCVTFUWSri:%[0-9]+]]:fpr32 = SCVTFUWSri [[COPY]] - ; CHECK: %s0 = COPY [[SCVTFUWSri]] - %0(s32) = COPY %w0 + ; CHECK: $s0 = COPY [[SCVTFUWSri]] + %0(s32) = COPY $w0 %1(s32) = G_SITOFP %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -198,15 +198,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: sitofp_s32_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[SCVTFUXSri:%[0-9]+]]:fpr32 = SCVTFUXSri [[COPY]] - ; CHECK: %s0 = COPY [[SCVTFUXSri]] - %0(s64) = COPY %x0 + ; CHECK: $s0 = COPY [[SCVTFUXSri]] + %0(s64) = COPY $x0 %1(s32) = G_SITOFP %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -220,15 +220,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sitofp_s64_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SCVTFUWDri:%[0-9]+]]:fpr64 = SCVTFUWDri [[COPY]] - ; CHECK: %d0 = COPY [[SCVTFUWDri]] - %0(s32) = COPY %w0 + ; CHECK: $d0 = COPY [[SCVTFUWDri]] + %0(s32) = COPY $w0 %1(s64) = G_SITOFP %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -242,15 +242,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: sitofp_s64_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[SCVTFUXDri:%[0-9]+]]:fpr64 = SCVTFUXDri [[COPY]] - ; CHECK: %d0 = COPY [[SCVTFUXDri]] - %0(s64) = COPY %x0 + ; CHECK: $d0 = COPY [[SCVTFUXDri]] + %0(s64) = COPY $x0 %1(s64) = G_SITOFP %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -264,15 +264,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: uitofp_s32_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[UCVTFUWSri:%[0-9]+]]:fpr32 = UCVTFUWSri [[COPY]] - ; CHECK: %s0 = COPY [[UCVTFUWSri]] - %0(s32) = COPY %w0 + ; CHECK: $s0 = COPY [[UCVTFUWSri]] + %0(s32) = COPY $w0 %1(s32) = G_UITOFP %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -286,15 +286,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: uitofp_s32_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[UCVTFUXSri:%[0-9]+]]:fpr32 = UCVTFUXSri [[COPY]] - ; CHECK: %s0 = COPY [[UCVTFUXSri]] - %0(s64) = COPY %x0 + ; CHECK: $s0 = COPY [[UCVTFUXSri]] + %0(s64) = COPY $x0 %1(s32) = G_UITOFP %0 - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -308,15 +308,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: uitofp_s64_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[UCVTFUWDri:%[0-9]+]]:fpr64 = UCVTFUWDri [[COPY]] - ; CHECK: %d0 = COPY [[UCVTFUWDri]] - %0(s32) = COPY %w0 + ; CHECK: $d0 = COPY [[UCVTFUWDri]] + %0(s32) = COPY $w0 %1(s64) = G_UITOFP %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... 
--- @@ -330,15 +330,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: uitofp_s64_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[UCVTFUXDri:%[0-9]+]]:fpr64 = UCVTFUXDri [[COPY]] - ; CHECK: %d0 = COPY [[UCVTFUXDri]] - %0(s64) = COPY %x0 + ; CHECK: $d0 = COPY [[UCVTFUXDri]] + %0(s64) = COPY $x0 %1(s64) = G_UITOFP %0 - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -352,15 +352,15 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: fptosi_s32_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[FCVTZSUWSr:%[0-9]+]]:gpr32 = FCVTZSUWSr [[COPY]] - ; CHECK: %w0 = COPY [[FCVTZSUWSr]] - %0(s32) = COPY %s0 + ; CHECK: $w0 = COPY [[FCVTZSUWSr]] + %0(s32) = COPY $s0 %1(s32) = G_FPTOSI %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -374,15 +374,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fptosi_s32_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[FCVTZSUWDr:%[0-9]+]]:gpr32 = FCVTZSUWDr [[COPY]] - ; CHECK: %w0 = COPY [[FCVTZSUWDr]] - %0(s64) = COPY %d0 + ; CHECK: $w0 = COPY [[FCVTZSUWDr]] + %0(s64) = COPY $d0 %1(s32) = G_FPTOSI %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -396,15 +396,15 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: fptosi_s64_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[FCVTZSUXSr:%[0-9]+]]:gpr64 = FCVTZSUXSr [[COPY]] - ; CHECK: %x0 = COPY [[FCVTZSUXSr]] - %0(s32) = COPY %s0 + ; CHECK: $x0 = COPY [[FCVTZSUXSr]] + %0(s32) = COPY $s0 %1(s64) = G_FPTOSI %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -418,15 +418,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fptosi_s64_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[FCVTZSUXDr:%[0-9]+]]:gpr64 = FCVTZSUXDr [[COPY]] - ; CHECK: %x0 = COPY [[FCVTZSUXDr]] - %0(s64) = COPY %d0 + ; CHECK: $x0 = COPY [[FCVTZSUXDr]] + %0(s64) = COPY $d0 %1(s64) = G_FPTOSI %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -440,15 +440,15 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: fptoui_s32_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[FCVTZUUWSr:%[0-9]+]]:gpr32 = FCVTZUUWSr [[COPY]] - ; CHECK: %w0 = COPY [[FCVTZUUWSr]] - %0(s32) = COPY %s0 + ; CHECK: $w0 = COPY [[FCVTZUUWSr]] + %0(s32) = COPY $s0 %1(s32) = G_FPTOUI %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -462,15 +462,15 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fptoui_s32_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[FCVTZUUWDr:%[0-9]+]]:gpr32 = FCVTZUUWDr [[COPY]] - ; CHECK: %w0 = COPY [[FCVTZUUWDr]] - %0(s64) = COPY %d0 + ; CHECK: $w0 = COPY [[FCVTZUUWDr]] + %0(s64) = COPY $d0 %1(s32) = G_FPTOUI %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -484,15 +484,15 @@ body: | bb.0: - liveins: %s0 + liveins: $s0 ; CHECK-LABEL: name: fptoui_s64_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 ; CHECK: [[FCVTZUUXSr:%[0-9]+]]:gpr64 = FCVTZUUXSr [[COPY]] - ; CHECK: %x0 = COPY [[FCVTZUUXSr]] - %0(s32) = COPY %s0 + ; CHECK: $x0 = COPY [[FCVTZUUXSr]] + %0(s32) = COPY $s0 %1(s64) = G_FPTOUI %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... 
--- @@ -506,13 +506,13 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: fptoui_s64_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[FCVTZUUXDr:%[0-9]+]]:gpr64 = FCVTZUUXDr [[COPY]] - ; CHECK: %x0 = COPY [[FCVTZUUXDr]] - %0(s64) = COPY %d0 + ; CHECK: $x0 = COPY [[FCVTZUUXDr]] + %0(s64) = COPY $d0 %1(s64) = G_FPTOUI %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... Index: test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir +++ test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir @@ -39,12 +39,12 @@ ; CHECK: [[MOVKXi4:%[0-9]+]]:gpr64 = MOVKXi [[MOVKXi3]], target-flags(aarch64-g2, aarch64-nc) @foo2, 32 ; CHECK: [[MOVKXi5:%[0-9]+]]:gpr64 = MOVKXi [[MOVKXi4]], target-flags(aarch64-g3) @foo2, 48 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[MOVKXi5]] - ; CHECK: STRWui %wzr, %stack.0.retval, 0 :: (store 4 into %ir.retval) + ; CHECK: STRWui $wzr, %stack.0.retval, 0 :: (store 4 into %ir.retval) ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0)`) ; CHECK: [[LDRWui1:%[0-9]+]]:gpr32 = LDRWui [[COPY1]], 0 :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0)`) ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRWui]], [[LDRWui1]] - ; CHECK: %w0 = COPY [[ADDWrr]] - ; CHECK: RET_ReallyLR implicit %w0 + ; CHECK: $w0 = COPY [[ADDWrr]] + ; CHECK: RET_ReallyLR implicit $w0 %1:gpr(s32) = G_CONSTANT i32 0 %4:gpr(p0) = G_GLOBAL_VALUE @foo1 %3:gpr(p0) = COPY %4(p0) @@ -55,7 +55,7 @@ %2:gpr(s32) = G_LOAD %3(p0) :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0)`) %5:gpr(s32) = G_LOAD %6(p0) :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0)`) %8:gpr(s32) = G_ADD %2, %5 - %w0 = COPY %8(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %8(s32) + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/AArch64/GlobalISel/select-imm.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-imm.mir +++ test/CodeGen/AArch64/GlobalISel/select-imm.mir @@ -20,13 +20,13 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: imm_s32_gpr ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -1234 - ; CHECK: %w0 = COPY [[MOVi32imm]] + ; CHECK: $w0 = COPY [[MOVi32imm]] %0(s32) = G_CONSTANT i32 -1234 - %w0 = COPY %0(s32) + $w0 = COPY %0(s32) ... --- @@ -40,11 +40,11 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: imm_s64_gpr ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1234 - ; CHECK: %x0 = COPY [[MOVi64imm]] + ; CHECK: $x0 = COPY [[MOVi64imm]] %0(s64) = G_CONSTANT i64 1234 - %x0 = COPY %0(s64) + $x0 = COPY %0(s64) ... 
Index: test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir +++ test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir @@ -20,8 +20,8 @@ ; CHECK-LABEL: name: implicit_def ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[DEF]], [[DEF]] - ; CHECK: %w0 = COPY [[ADDWrr]] + ; CHECK: $w0 = COPY [[ADDWrr]] %0(s32) = G_IMPLICIT_DEF %1(s32) = G_ADD %0, %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... Index: test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir +++ test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir @@ -8,9 +8,9 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 - %0:gpr(s32) = COPY %w0 + %0:gpr(s32) = COPY $w0 %1:gpr(s64) = G_IMPLICIT_DEF @@ -23,8 +23,8 @@ ; CHECK: %3:gpr64 = BFMXri %1, [[TMP]], 51, 31 %3:gpr(s64) = G_INSERT %1, %0, 13 - %x0 = COPY %2 - %x1 = COPY %3 + $x0 = COPY %2 + $x1 = COPY %3 ... @@ -36,9 +36,9 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 - %0:gpr(s64) = COPY %x0 + %0:gpr(s64) = COPY $x0 ; CHECK: body: ; CHECK: [[TMP:%[0-9]+]]:gpr64 = UBFMXri %0, 0, 31 @@ -49,6 +49,6 @@ ; CHECK: %2:gpr32 = COPY [[TMP]].sub_32 %2:gpr(s32) = G_EXTRACT %0, 13 - %w0 = COPY %1 - %w1 = COPY %2 + $w0 = COPY %1 + $w1 = COPY %2 ... Index: test/CodeGen/AArch64/GlobalISel/select-int-ext.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-int-ext.mir +++ test/CodeGen/AArch64/GlobalISel/select-int-ext.mir @@ -29,15 +29,15 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: anyext_s64_from_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32 - ; CHECK: %x0 = COPY [[SUBREG_TO_REG]] - %0(s32) = COPY %w0 + ; CHECK: $x0 = COPY [[SUBREG_TO_REG]] + %0(s32) = COPY $w0 %1(s64) = G_ANYEXT %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -51,16 +51,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: anyext_s32_from_s8 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY]] - ; CHECK: %w0 = COPY [[COPY2]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY2]] + %2:gpr(s32) = COPY $w0 %0(s8) = G_TRUNC %2 %1(s32) = G_ANYEXT %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -74,16 +74,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: zext_s64_from_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32 ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31 - ; CHECK: %x0 = COPY [[UBFMXri]] - %0(s32) = COPY %w0 + ; CHECK: $x0 = COPY [[UBFMXri]] + %0(s32) = COPY $w0 %1(s64) = G_ZEXT %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... 
--- @@ -97,16 +97,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: zext_s32_from_s16 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15 - ; CHECK: %w0 = COPY [[UBFMWri]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[UBFMWri]] + %2:gpr(s32) = COPY $w0 %0(s16) = G_TRUNC %2 %1(s32) = G_ZEXT %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- @@ -120,16 +120,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: zext_s32_from_s8 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15 - ; CHECK: %w0 = COPY [[UBFMWri]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[UBFMWri]] + %2:gpr(s32) = COPY $w0 %0(s16) = G_TRUNC %2 %1(s32) = G_ZEXT %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -143,18 +143,18 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: zext_s16_from_s8 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 7 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[UBFMWri]] - ; CHECK: %w0 = COPY [[COPY2]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY2]] + %2:gpr(s32) = COPY $w0 %0(s8) = G_TRUNC %2 %1(s16) = G_ZEXT %0 %3:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %3(s32) + $w0 = COPY %3(s32) ... --- @@ -168,16 +168,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sext_s64_from_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32 ; CHECK: [[SBFMXri:%[0-9]+]]:gpr64 = SBFMXri [[SUBREG_TO_REG]], 0, 31 - ; CHECK: %x0 = COPY [[SBFMXri]] - %0(s32) = COPY %w0 + ; CHECK: $x0 = COPY [[SBFMXri]] + %0(s32) = COPY $w0 %1(s64) = G_SEXT %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -191,16 +191,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sext_s32_from_s16 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 15 - ; CHECK: %w0 = COPY [[SBFMWri]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[SBFMWri]] + %2:gpr(s32) = COPY $w0 %0(s16) = G_TRUNC %2 %1(s32) = G_SEXT %0 - %w0 = COPY %1 + $w0 = COPY %1 ... --- @@ -214,16 +214,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sext_s32_from_s8 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7 - ; CHECK: %w0 = COPY [[SBFMWri]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[SBFMWri]] + %2:gpr(s32) = COPY $w0 %0(s8) = G_TRUNC %2 %1(s32) = G_SEXT %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -237,16 +237,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sext_s16_from_s8 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SBFMWri]] - ; CHECK: %w0 = COPY [[COPY2]] - %2:gpr(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY2]] + %2:gpr(s32) = COPY $w0 %0(s8) = G_TRUNC %2 %1(s16) = G_SEXT %0 %3:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %3(s32) + $w0 = COPY %3(s32) ... 
Index: test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir +++ test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir @@ -22,13 +22,13 @@ - { id: 1, class: gpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: inttoptr_p0_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %x0 - ; CHECK: %x0 = COPY [[COPY]] - %0(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0 + ; CHECK: $x0 = COPY [[COPY]] + %0(s64) = COPY $x0 %1(p0) = G_INTTOPTR %0 - %x0 = COPY %1(p0) + $x0 = COPY %1(p0) ... --- @@ -41,13 +41,13 @@ - { id: 1, class: gpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: ptrtoint_s64_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: %x0 = COPY [[COPY]] - %0(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: $x0 = COPY [[COPY]] + %0(p0) = COPY $x0 %1(s64) = G_PTRTOINT %0 - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -60,14 +60,14 @@ - { id: 1, class: gpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: ptrtoint_s32_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 - ; CHECK: %w0 = COPY [[COPY1]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY1]] + %0(p0) = COPY $x0 %1(s32) = G_PTRTOINT %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -80,16 +80,16 @@ - { id: 1, class: gpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: ptrtoint_s16_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]] - ; CHECK: %w0 = COPY [[COPY2]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY2]] + %0(p0) = COPY $x0 %1(s16) = G_PTRTOINT %0 %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -102,16 +102,16 @@ - { id: 1, class: gpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: ptrtoint_s8_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]] - ; CHECK: %w0 = COPY [[COPY2]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY2]] + %0(p0) = COPY $x0 %1(s8) = G_PTRTOINT %0 %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -124,14 +124,14 @@ - { id: 1, class: gpr } body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: ptrtoint_s1_p0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]] - ; CHECK: %w0 = COPY [[COPY2]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY2]] + %0(p0) = COPY $x0 %1(s1) = G_PTRTOINT %0 %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... 
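Separately, most of the CHECK lines in these files follow the [[COPY:%[0-9]+]] capture style produced by llvm/utils/update_mir_test_checks.py. For tests that were originally autogenerated that way, an alternative to patching the expectations by hand (or with the sketch above) is to rerun that update script against an llc built with the new register printing; it regenerates the CHECK lines with '$' sigils in one pass, assuming the test still selects the same instructions.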
Index: test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir +++ test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir @@ -22,7 +22,7 @@ # CHECK: HINT 1 body: | bb.0: - liveins: %w0 + liveins: $w0 %0(s32) = G_CONSTANT i32 1 G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.hint), %0 Index: test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir +++ test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir @@ -21,15 +21,15 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: sdiv_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[SDIVWr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[SDIVWr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.sdiv.i32), %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... Index: test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir +++ test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir @@ -10,17 +10,17 @@ body: | bb.0: - liveins: %q0, %q1 + liveins: $q0, $q1 ; CHECK-LABEL: name: aesmc_aese - ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY %q0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY %q1 + ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1 ; CHECK: [[T0:%[0-9]+]]:fpr128 = AESErr [[COPY]], [[COPY1]] ; CHECK: [[T1:%[0-9]+]]:fpr128 = AESMCrrTied [[T0]] - ; CHECK: %q0 = COPY [[T1]] - %0:fpr(<16 x s8>) = COPY %q0 - %1:fpr(<16 x s8>) = COPY %q1 + ; CHECK: $q0 = COPY [[T1]] + %0:fpr(<16 x s8>) = COPY $q0 + %1:fpr(<16 x s8>) = COPY $q1 %2:fpr(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.aarch64.crypto.aese), %0, %1 %3:fpr(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.aarch64.crypto.aesmc), %2 - %q0 = COPY %3(<16 x s8>) + $q0 = COPY %3(<16 x s8>) ... Index: test/CodeGen/AArch64/GlobalISel/select-load.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-load.mir +++ test/CodeGen/AArch64/GlobalISel/select-load.mir @@ -47,15 +47,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 0 :: (load 8 from %ir.addr) - ; CHECK: %x0 = COPY [[LDRXui]] - %0(p0) = COPY %x0 + ; CHECK: $x0 = COPY [[LDRXui]] + %0(p0) = COPY $x0 %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr) - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... 
--- @@ -69,15 +69,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load 4 from %ir.addr) - ; CHECK: %w0 = COPY [[LDRWui]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[LDRWui]] + %0(p0) = COPY $x0 %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr) - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -91,16 +91,16 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s16_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr) - ; CHECK: %w0 = COPY [[LDRHHui]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[LDRHHui]] + %0(p0) = COPY $x0 %1(s16) = G_LOAD %0 :: (load 2 from %ir.addr) %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -114,16 +114,16 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s8_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1 from %ir.addr) - ; CHECK: %w0 = COPY [[LDRBBui]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[LDRBBui]] + %0(p0) = COPY $x0 %1(s8) = G_LOAD %0 :: (load 1 from %ir.addr) %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -140,14 +140,14 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_fi_s64_gpr ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui %stack.0.ptr0, 0 :: (load 8) - ; CHECK: %x0 = COPY [[LDRXui]] + ; CHECK: $x0 = COPY [[LDRXui]] %0(p0) = G_FRAME_INDEX %stack.0.ptr0 %1(s64) = G_LOAD %0 :: (load 8) - %x0 = COPY %1(s64) + $x0 = COPY %1(s64) ... --- @@ -163,17 +163,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_128_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 16 :: (load 8 from %ir.addr) - ; CHECK: %x0 = COPY [[LDRXui]] - %0(p0) = COPY %x0 + ; CHECK: $x0 = COPY [[LDRXui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 128 %2(p0) = G_GEP %0, %1 %3(s64) = G_LOAD %2 :: (load 8 from %ir.addr) - %x0 = COPY %3 + $x0 = COPY %3 ... --- @@ -189,17 +189,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_512_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 128 :: (load 4 from %ir.addr) - ; CHECK: %w0 = COPY [[LDRWui]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[LDRWui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 512 %2(p0) = G_GEP %0, %1 %3(s32) = G_LOAD %2 :: (load 4 from %ir.addr) - %w0 = COPY %3 + $w0 = COPY %3 ... --- @@ -215,18 +215,18 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_64_s16_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 32 :: (load 2 from %ir.addr) - ; CHECK: %w0 = COPY [[LDRHHui]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[LDRHHui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 64 %2(p0) = G_GEP %0, %1 %3(s16) = G_LOAD %2 :: (load 2 from %ir.addr) %4:gpr(s32) = G_ANYEXT %3 - %w0 = COPY %4 + $w0 = COPY %4 ... 
--- @@ -242,18 +242,18 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_1_s8_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 1 :: (load 1 from %ir.addr) - ; CHECK: %w0 = COPY [[LDRBBui]] - %0(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[LDRBBui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 1 %2(p0) = G_GEP %0, %1 %3(s8) = G_LOAD %2 :: (load 1 from %ir.addr) %4:gpr(s32) = G_ANYEXT %3 - %w0 = COPY %4 + $w0 = COPY %4 ... --- @@ -267,15 +267,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 0 :: (load 8 from %ir.addr) - ; CHECK: %d0 = COPY [[LDRDui]] - %0(p0) = COPY %x0 + ; CHECK: $d0 = COPY [[LDRDui]] + %0(p0) = COPY $x0 %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr) - %d0 = COPY %1(s64) + $d0 = COPY %1(s64) ... --- @@ -289,15 +289,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRSui:%[0-9]+]]:fpr32 = LDRSui [[COPY]], 0 :: (load 4 from %ir.addr) - ; CHECK: %s0 = COPY [[LDRSui]] - %0(p0) = COPY %x0 + ; CHECK: $s0 = COPY [[LDRSui]] + %0(p0) = COPY $x0 %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr) - %s0 = COPY %1(s32) + $s0 = COPY %1(s32) ... --- @@ -311,15 +311,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s16_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRHui:%[0-9]+]]:fpr16 = LDRHui [[COPY]], 0 :: (load 2 from %ir.addr) - ; CHECK: %h0 = COPY [[LDRHui]] - %0(p0) = COPY %x0 + ; CHECK: $h0 = COPY [[LDRHui]] + %0(p0) = COPY $x0 %1(s16) = G_LOAD %0 :: (load 2 from %ir.addr) - %h0 = COPY %1(s16) + $h0 = COPY %1(s16) ... --- @@ -333,15 +333,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_s8_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRBui:%[0-9]+]]:fpr8 = LDRBui [[COPY]], 0 :: (load 1 from %ir.addr) - ; CHECK: %b0 = COPY [[LDRBui]] - %0(p0) = COPY %x0 + ; CHECK: $b0 = COPY [[LDRBui]] + %0(p0) = COPY $x0 %1(s8) = G_LOAD %0 :: (load 1 from %ir.addr) - %b0 = COPY %1(s8) + $b0 = COPY %1(s8) ... --- @@ -357,17 +357,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_8_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 1 :: (load 8 from %ir.addr) - ; CHECK: %d0 = COPY [[LDRDui]] - %0(p0) = COPY %x0 + ; CHECK: $d0 = COPY [[LDRDui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 8 %2(p0) = G_GEP %0, %1 %3(s64) = G_LOAD %2 :: (load 8 from %ir.addr) - %d0 = COPY %3 + $d0 = COPY %3 ... --- @@ -383,17 +383,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_16_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRSui:%[0-9]+]]:fpr32 = LDRSui [[COPY]], 4 :: (load 4 from %ir.addr) - ; CHECK: %s0 = COPY [[LDRSui]] - %0(p0) = COPY %x0 + ; CHECK: $s0 = COPY [[LDRSui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 16 %2(p0) = G_GEP %0, %1 %3(s32) = G_LOAD %2 :: (load 4 from %ir.addr) - %s0 = COPY %3 + $s0 = COPY %3 ... 
--- @@ -409,17 +409,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_64_s16_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRHui:%[0-9]+]]:fpr16 = LDRHui [[COPY]], 32 :: (load 2 from %ir.addr) - ; CHECK: %h0 = COPY [[LDRHui]] - %0(p0) = COPY %x0 + ; CHECK: $h0 = COPY [[LDRHui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 64 %2(p0) = G_GEP %0, %1 %3(s16) = G_LOAD %2 :: (load 2 from %ir.addr) - %h0 = COPY %3 + $h0 = COPY %3 ... --- @@ -435,17 +435,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_gep_32_s8_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRBui:%[0-9]+]]:fpr8 = LDRBui [[COPY]], 32 :: (load 1 from %ir.addr) - ; CHECK: %b0 = COPY [[LDRBui]] - %0(p0) = COPY %x0 + ; CHECK: $b0 = COPY [[LDRBui]] + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 32 %2(p0) = G_GEP %0, %1 %3(s8) = G_LOAD %2 :: (load 1 from %ir.addr) - %b0 = COPY %3 + $b0 = COPY %3 ... --- name: load_v2s32 @@ -458,15 +458,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: load_v2s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 0 :: (load 8 from %ir.addr) - ; CHECK: %d0 = COPY [[LDRDui]] - %0(p0) = COPY %x0 + ; CHECK: $d0 = COPY [[LDRDui]] + %0(p0) = COPY $x0 %1(<2 x s32>) = G_LOAD %0 :: (load 8 from %ir.addr) - %d0 = COPY %1(<2 x s32>) + $d0 = COPY %1(<2 x s32>) ... --- name: sextload_s32_from_s16 @@ -475,16 +475,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: sextload_s32_from_s16 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRSHWui [[COPY]], 0 :: (load 2 from %ir.addr) - ; CHECK: %w0 = COPY [[T0]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[T0]] + %0:gpr(p0) = COPY $x0 %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr) %2:gpr(s32) = G_SEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -494,16 +494,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: zextload_s32_from_s16 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr) - ; CHECK: %w0 = COPY [[T0]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[T0]] + %0:gpr(p0) = COPY $x0 %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr) %2:gpr(s32) = G_ZEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -513,14 +513,14 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: aextload_s32_from_s16 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr) - ; CHECK: %w0 = COPY [[T0]] - %0:gpr(p0) = COPY %x0 + ; CHECK: $w0 = COPY [[T0]] + %0:gpr(p0) = COPY $x0 %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr) %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... Index: test/CodeGen/AArch64/GlobalISel/select-mul.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-mul.mir +++ test/CodeGen/AArch64/GlobalISel/select-mul.mir @@ -13,22 +13,22 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; Make sure InstructionSelector is able to match a pattern ; with an SDNodeXForm, trunc_imm. 
; def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))), ; (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>; ; CHECK-LABEL: name: mul_i64_sext_imm32 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 3 - ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY]], [[MOVi32imm]], %xzr - ; CHECK: %x0 = COPY [[SMADDLrrr]] - %0:gpr(s32) = COPY %w0 + ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY]], [[MOVi32imm]], $xzr + ; CHECK: $x0 = COPY [[SMADDLrrr]] + %0:gpr(s32) = COPY $w0 %1:gpr(s64) = G_SEXT %0(s32) %2:gpr(s64) = G_CONSTANT i64 3 %3:gpr(s64) = G_MUL %1, %2 - %x0 = COPY %3(s64) + $x0 = COPY %3(s64) ... Index: test/CodeGen/AArch64/GlobalISel/select-muladd.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-muladd.mir +++ test/CodeGen/AArch64/GlobalISel/select-muladd.mir @@ -23,21 +23,21 @@ body: | bb.0: - liveins: %x0, %w1, %w2 + liveins: $x0, $w1, $w2 ; CHECK-LABEL: name: SMADDLrrr_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 - ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY %w2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2 ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY1]], [[COPY2]], [[COPY]] - ; CHECK: %x0 = COPY [[SMADDLrrr]] - %0(s64) = COPY %x0 - %1(s32) = COPY %w1 - %2(s32) = COPY %w2 + ; CHECK: $x0 = COPY [[SMADDLrrr]] + %0(s64) = COPY $x0 + %1(s32) = COPY $w1 + %2(s32) = COPY $w2 %3(s64) = G_SEXT %1 %4(s64) = G_SEXT %2 %5(s64) = G_MUL %3, %4 %6(s64) = G_ADD %0, %5 - %x0 = COPY %6 + $x0 = COPY %6 ... Index: test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir +++ test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir @@ -20,14 +20,14 @@ body: | bb.0: - liveins: %d0 + liveins: $d0 ; CHECK-LABEL: name: vcvtfxu2fp_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 ; CHECK: [[UCVTFd:%[0-9]+]]:fpr64 = UCVTFd [[COPY]], 12 - ; CHECK: %d1 = COPY [[UCVTFd]] - %0(s64) = COPY %d0 + ; CHECK: $d1 = COPY [[UCVTFd]] + %0(s64) = COPY $d0 %1(s32) = G_CONSTANT i32 12 %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, %1 - %d1 = COPY %2(s64) + $d1 = COPY %2(s64) ... Index: test/CodeGen/AArch64/GlobalISel/select-phi.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-phi.mir +++ test/CodeGen/AArch64/GlobalISel/select-phi.mir @@ -51,11 +51,11 @@ body: | bb.1.entry: successors: %bb.2.case1(0x40000000), %bb.3.case2(0x40000000) - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: test_phi ; CHECK: [[RES:%.*]]:gpr32 = PHI - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 1 %5(s32) = G_CONSTANT i32 2 @@ -77,8 +77,8 @@ bb.4.return: %7(s32) = G_PHI %4(s32), %bb.2.case1, %6(s32), %bb.3.case2 - %w0 = COPY %7(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %7(s32) + RET_ReallyLR implicit $w0 ... 
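
For orientation, the shape the test_phi CHECK above pins down: a generic G_PHI whose inputs live on the gpr bank should survive selection as a plain target PHI over gpr32 registers, with only the register prefixes changing in this patch. A condensed sketch:

  ; generic MIR, before instruction selection:
  %7(s32) = G_PHI %4(s32), %bb.2.case1, %6(s32), %bb.3.case2
  ; after selection, as the CHECK expects:
  ; [[RES:%.*]]:gpr32 = PHI
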
@@ -101,12 +101,12 @@ body: | bb.0: successors: %bb.1, %bb.2 - liveins: %w2, %x0, %x1 + liveins: $w2, $x0, $x1 ; CHECK-LABEL: name: test_phi_ptr - %0(p0) = COPY %x0 - %1(p0) = COPY %x1 - %6:gpr(s32) = COPY %w2 + %0(p0) = COPY $x0 + %1(p0) = COPY $x1 + %6:gpr(s32) = COPY $w2 %2(s1) = G_TRUNC %6 G_BRCOND %2(s1), %bb.1 G_BR %bb.2 @@ -118,7 +118,7 @@ bb.2: ; CHECK: %{{[0-9]+}}:gpr64 = PHI %{{[0-9]+}}, %bb.0, %{{[0-9]+}}, %bb.1 %3(p0) = G_PHI %0(p0), %bb.0, %1(p0), %bb.1 - %x0 = COPY %3(p0) - RET_ReallyLR implicit %x0 + $x0 = COPY %3(p0) + RET_ReallyLR implicit $x0 ... Index: test/CodeGen/AArch64/GlobalISel/select-pr32733.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-pr32733.mir +++ test/CodeGen/AArch64/GlobalISel/select-pr32733.mir @@ -50,17 +50,17 @@ hasMustTailInVarArgFunc: false body: | bb.1.entry: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: main - ; CHECK: liveins: %w0 + ; CHECK: liveins: $w0 ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[EONWrr:%[0-9]+]]:gpr32 = EONWrr [[COPY]], [[MOVi32imm]] - ; CHECK: %w0 = COPY [[EONWrr]] + ; CHECK: $w0 = COPY [[EONWrr]] %0(s32) = G_CONSTANT i32 -1 %3(s32) = G_CONSTANT i32 1 - %1(s32) = COPY %w0 + %1(s32) = COPY $w0 %2(s32) = G_XOR %1, %0 %4(s32) = G_XOR %2, %3 - %w0 = COPY %4(s32) + $w0 = COPY %4(s32) ... Index: test/CodeGen/AArch64/GlobalISel/select-store.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-store.mir +++ test/CodeGen/AArch64/GlobalISel/select-store.mir @@ -42,14 +42,14 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: store_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: STRXui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr) - %0(p0) = COPY %x0 - %1(s64) = COPY %x1 + %0(p0) = COPY $x0 + %1(s64) = COPY $x1 G_STORE %1, %0 :: (store 8 into %ir.addr) ... @@ -65,14 +65,14 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: store_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: STRWui [[COPY1]], [[COPY]], 0 :: (store 4 into %ir.addr) - %0(p0) = COPY %x0 - %1(s32) = COPY %w1 + %0(p0) = COPY $x0 + %1(s32) = COPY $w1 G_STORE %1, %0 :: (store 4 into %ir.addr) ... 
@@ -88,14 +88,14 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: store_s16_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: STRHHui [[COPY1]], [[COPY]], 0 :: (store 2 into %ir.addr) - %0(p0) = COPY %x0 - %2:gpr(s32) = COPY %w1 + %0(p0) = COPY $x0 + %2:gpr(s32) = COPY $w1 %1(s16) = G_TRUNC %2 G_STORE %1, %0 :: (store 2 into %ir.addr) @@ -112,14 +112,14 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: store_s8_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: STRBBui [[COPY1]], [[COPY]], 0 :: (store 1 into %ir.addr) - %0(p0) = COPY %x0 - %2:gpr(s32) = COPY %w1 + %0(p0) = COPY $x0 + %2:gpr(s32) = COPY $w1 %1(s8) = G_TRUNC %2 G_STORE %1, %0 :: (store 1 into %ir.addr) @@ -136,12 +136,12 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: store_zero_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: STRXui %xzr, [[COPY]], 0 :: (store 8 into %ir.addr) - %0(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: STRXui $xzr, [[COPY]], 0 :: (store 8 into %ir.addr) + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 0 G_STORE %1, %0 :: (store 8 into %ir.addr) @@ -158,12 +158,12 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: store_zero_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: STRWui %wzr, [[COPY]], 0 :: (store 4 into %ir.addr) - %0(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: STRWui $wzr, [[COPY]], 0 :: (store 4 into %ir.addr) + %0(p0) = COPY $x0 %1(s32) = G_CONSTANT i32 0 G_STORE %1, %0 :: (store 4 into %ir.addr) @@ -183,12 +183,12 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: store_fi_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: STRXui [[COPY]], %stack.0.ptr0, 0 :: (store 8) - %0(p0) = COPY %x0 + %0(p0) = COPY $x0 %1(p0) = G_FRAME_INDEX %stack.0.ptr0 G_STORE %0, %1 :: (store 8) ... 
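
The two store_zero tests above rely on the selector sourcing a stored constant zero straight from the zero register instead of materializing it, which is why those CHECK lines now read $xzr/$wzr rather than %xzr/%wzr. A condensed sketch of the 64-bit case:

  %0(p0) = COPY $x0
  %1(s64) = G_CONSTANT i64 0
  G_STORE %1, %0 :: (store 8 into %ir.addr)
  ; expected selection, per the CHECK line above:
  ; STRXui $xzr, [[COPY]], 0 :: (store 8 into %ir.addr)
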
@@ -206,14 +206,14 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: store_gep_128_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: STRXui [[COPY1]], [[COPY]], 16 :: (store 8 into %ir.addr) - %0(p0) = COPY %x0 - %1(s64) = COPY %x1 + %0(p0) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_CONSTANT i64 128 %3(p0) = G_GEP %0, %2 G_STORE %1, %3 :: (store 8 into %ir.addr) @@ -232,14 +232,14 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: store_gep_512_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: STRWui [[COPY1]], [[COPY]], 128 :: (store 4 into %ir.addr) - %0(p0) = COPY %x0 - %1(s32) = COPY %w1 + %0(p0) = COPY $x0 + %1(s32) = COPY $w1 %2(s64) = G_CONSTANT i64 512 %3(p0) = G_GEP %0, %2 G_STORE %1, %3 :: (store 4 into %ir.addr) @@ -258,14 +258,14 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: store_gep_64_s16_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: STRHHui [[COPY1]], [[COPY]], 32 :: (store 2 into %ir.addr) - %0(p0) = COPY %x0 - %4:gpr(s32) = COPY %w1 + %0(p0) = COPY $x0 + %4:gpr(s32) = COPY $w1 %1(s16) = G_TRUNC %4 %2(s64) = G_CONSTANT i64 64 %3(p0) = G_GEP %0, %2 @@ -285,14 +285,14 @@ body: | bb.0: - liveins: %x0, %w1 + liveins: $x0, $w1 ; CHECK-LABEL: name: store_gep_1_s8_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: STRBBui [[COPY1]], [[COPY]], 1 :: (store 1 into %ir.addr) - %0(p0) = COPY %x0 - %4:gpr(s32) = COPY %w1 + %0(p0) = COPY $x0 + %4:gpr(s32) = COPY $w1 %1(s8) = G_TRUNC %4 %2(s64) = G_CONSTANT i64 1 %3(p0) = G_GEP %0, %2 @@ -310,14 +310,14 @@ body: | bb.0: - liveins: %x0, %d1 + liveins: $x0, $d1 ; CHECK-LABEL: name: store_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr) - %0(p0) = COPY %x0 - %1(s64) = COPY %d1 + %0(p0) = COPY $x0 + %1(s64) = COPY $d1 G_STORE %1, %0 :: (store 8 into %ir.addr) ... @@ -333,14 +333,14 @@ body: | bb.0: - liveins: %x0, %s1 + liveins: $x0, $s1 ; CHECK-LABEL: name: store_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 ; CHECK: STRSui [[COPY1]], [[COPY]], 0 :: (store 4 into %ir.addr) - %0(p0) = COPY %x0 - %1(s32) = COPY %s1 + %0(p0) = COPY $x0 + %1(s32) = COPY $s1 G_STORE %1, %0 :: (store 4 into %ir.addr) ... 
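
Comparing store_s32_gpr further up with store_s32_fpr above shows the register bank alone deciding the opcode for an otherwise identical G_STORE: a gpr source selects STRWui, an fpr source selects STRSui. The two fragments below are independent sketches of that contrast, not a single block of MIR:

  %1(s32) = COPY $w1   ; gpr bank
  G_STORE %1, %0 :: (store 4 into %ir.addr)   ; => STRWui

  %1(s32) = COPY $s1   ; fpr bank
  G_STORE %1, %0 :: (store 4 into %ir.addr)   ; => STRSui
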
@@ -358,14 +358,14 @@ body: | bb.0: - liveins: %x0, %d1 + liveins: $x0, $d1 ; CHECK-LABEL: name: store_gep_8_s64_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: STRDui [[COPY1]], [[COPY]], 1 :: (store 8 into %ir.addr) - %0(p0) = COPY %x0 - %1(s64) = COPY %d1 + %0(p0) = COPY $x0 + %1(s64) = COPY $d1 %2(s64) = G_CONSTANT i64 8 %3(p0) = G_GEP %0, %2 G_STORE %1, %3 :: (store 8 into %ir.addr) @@ -384,14 +384,14 @@ body: | bb.0: - liveins: %x0, %s1 + liveins: $x0, $s1 ; CHECK-LABEL: name: store_gep_8_s32_fpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 ; CHECK: STRSui [[COPY1]], [[COPY]], 2 :: (store 4 into %ir.addr) - %0(p0) = COPY %x0 - %1(s32) = COPY %s1 + %0(p0) = COPY $x0 + %1(s32) = COPY $s1 %2(s64) = G_CONSTANT i64 8 %3(p0) = G_GEP %0, %2 G_STORE %1, %3 :: (store 4 into %ir.addr) @@ -407,14 +407,14 @@ body: | bb.0: - liveins: %x0, %d1 + liveins: $x0, $d1 ; CHECK-LABEL: name: store_v2s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 ; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr) - %0(p0) = COPY %x0 - %1(<2 x s32>) = COPY %d1 + %0(p0) = COPY $x0 + %1(<2 x s32>) = COPY $d1 G_STORE %1, %0 :: (store 8 into %ir.addr) ... Index: test/CodeGen/AArch64/GlobalISel/select-trunc.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-trunc.mir +++ test/CodeGen/AArch64/GlobalISel/select-trunc.mir @@ -20,15 +20,15 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: trunc_s32_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32sp = COPY [[COPY]].sub_32 - ; CHECK: %w0 = COPY [[COPY1]] - %0(s64) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY1]] + %0(s64) = COPY $x0 %1(s32) = G_TRUNC %0 - %w0 = COPY %1(s32) + $w0 = COPY %1(s32) ... --- @@ -42,17 +42,17 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: trunc_s8_s64 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]] - ; CHECK: %w0 = COPY [[COPY2]] - %0(s64) = COPY %x0 + ; CHECK: $w0 = COPY [[COPY2]] + %0(s64) = COPY $x0 %1(s8) = G_TRUNC %0 %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -66,14 +66,14 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: trunc_s1_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY]] - ; CHECK: %w0 = COPY [[COPY2]] - %0(s32) = COPY %w0 + ; CHECK: $w0 = COPY [[COPY2]] + %0(s32) = COPY $w0 %1(s1) = G_TRUNC %0 %2:gpr(s32) = G_ANYEXT %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... 
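
The trunc tests above encode that a 64-to-32-bit G_TRUNC is free on AArch64: selection lowers it to a copy of the sub_32 subregister rather than to any instruction. Sketch:

  %0(s64) = COPY $x0
  %1(s32) = G_TRUNC %0
  ; expected selection, per the CHECK lines above:
  ; [[COPY1:%[0-9]+]]:gpr32sp = COPY [[COPY]].sub_32
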
Index: test/CodeGen/AArch64/GlobalISel/select-xor.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-xor.mir +++ test/CodeGen/AArch64/GlobalISel/select-xor.mir @@ -26,17 +26,17 @@ body: | bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 ; CHECK-LABEL: name: xor_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[COPY]], [[COPY1]] - ; CHECK: %w0 = COPY [[EORWrr]] - %0(s32) = COPY %w0 - %1(s32) = COPY %w1 + ; CHECK: $w0 = COPY [[EORWrr]] + %0(s32) = COPY $w0 + %1(s32) = COPY $w1 %2(s32) = G_XOR %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -52,17 +52,17 @@ body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 ; CHECK-LABEL: name: xor_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[EORXrr:%[0-9]+]]:gpr64 = EORXrr [[COPY]], [[COPY1]] - ; CHECK: %x0 = COPY [[EORXrr]] - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 + ; CHECK: $x0 = COPY [[EORXrr]] + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 %2(s64) = G_XOR %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -79,16 +79,16 @@ body: | bb.0: - liveins: %w0 + liveins: $w0 ; CHECK-LABEL: name: xor_constant_n1_s32_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[COPY]] - ; CHECK: %w0 = COPY [[ORNWrr]] - %0(s32) = COPY %w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]] + ; CHECK: $w0 = COPY [[ORNWrr]] + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 -1 %2(s32) = G_XOR %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... --- @@ -104,16 +104,16 @@ body: | bb.0: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: name: xor_constant_n1_s64_gpr - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0 - ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr %xzr, [[COPY]] - ; CHECK: %x0 = COPY [[ORNXrr]] - %0(s64) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY]] + ; CHECK: $x0 = COPY [[ORNXrr]] + %0(s64) = COPY $x0 %1(s64) = G_CONSTANT i64 -1 %2(s64) = G_XOR %0, %1 - %x0 = COPY %2(s64) + $x0 = COPY %2(s64) ... --- @@ -134,16 +134,16 @@ ; CHECK: successors: %bb.1(0x80000000) ; CHECK: B %bb.1 ; CHECK: bb.1: - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0 - ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[COPY]] - ; CHECK: %w0 = COPY [[ORNWrr]] + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]] + ; CHECK: $w0 = COPY [[ORNWrr]] bb.0: - liveins: %w0, %w1 + liveins: $w0, $w1 successors: %bb.1 %1(s32) = G_CONSTANT i32 -1 G_BR %bb.1 bb.1: - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %2(s32) = G_XOR %0, %1 - %w0 = COPY %2(s32) + $w0 = COPY %2(s32) ... Index: test/CodeGen/AArch64/GlobalISel/select.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select.mir +++ test/CodeGen/AArch64/GlobalISel/select.mir @@ -47,7 +47,7 @@ body: | bb.0: %0(p0) = G_FRAME_INDEX %stack.0.ptr0 - %x0 = COPY %0(p0) + $x0 = COPY %0(p0) ... 
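
One idiom from the select-xor.mir tests above worth spelling out: XOR with an all-ones constant is selected as ORN against the zero register, i.e. a plain bitwise NOT, and the zero register is one of the operands that switches from %wzr to $wzr in this patch. Sketch of the 32-bit case:

  %0(s32) = COPY $w0
  %1(s32) = G_CONSTANT i32 -1
  %2(s32) = G_XOR %0, %1
  ; expected selection, per the CHECK lines above:
  ; [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
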
--- @@ -65,11 +65,11 @@ # CHECK: %2:gpr64 = ADDXrr %0, %1 body: | bb.0: - liveins: %x0 - %0(p0) = COPY %x0 + liveins: $x0 + %0(p0) = COPY $x0 %1(s64) = G_CONSTANT i64 42 %2(p0) = G_GEP %0, %1(s64) - %x0 = COPY %2(p0) + $x0 = COPY %2(p0) ... --- @@ -82,10 +82,10 @@ # CHECK: %1:gpr64sp = ANDXri %0, 8060 body: | bb.0: - liveins: %x0 - %0:gpr(p0) = COPY %x0 + liveins: $x0 + %0:gpr(p0) = COPY $x0 %1:gpr(p0) = G_PTR_MASK %0, 3 - %x0 = COPY %1(p0) + $x0 = COPY %1(p0) ... --- @@ -104,7 +104,7 @@ body: | bb.0: %0(p0) = G_GLOBAL_VALUE @var_local - %x0 = COPY %0(p0) + $x0 = COPY %0(p0) ... --- @@ -122,7 +122,7 @@ body: | bb.0: %0(p0) = G_GLOBAL_VALUE @var_got - %x0 = COPY %0(p0) + $x0 = COPY %0(p0) ... --- @@ -153,36 +153,36 @@ - { id: 11, class: gpr } # CHECK: body: -# CHECK: %wzr = SUBSWrr %0, %0, implicit-def %nzcv -# CHECK: %1:gpr32 = CSINCWr %wzr, %wzr, 1, implicit %nzcv +# CHECK: $wzr = SUBSWrr %0, %0, implicit-def $nzcv +# CHECK: %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv -# CHECK: %xzr = SUBSXrr %2, %2, implicit-def %nzcv -# CHECK: %3:gpr32 = CSINCWr %wzr, %wzr, 3, implicit %nzcv +# CHECK: $xzr = SUBSXrr %2, %2, implicit-def $nzcv +# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv -# CHECK: %xzr = SUBSXrr %4, %4, implicit-def %nzcv -# CHECK: %5:gpr32 = CSINCWr %wzr, %wzr, 0, implicit %nzcv +# CHECK: $xzr = SUBSXrr %4, %4, implicit-def $nzcv +# CHECK: %5:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv body: | bb.0: - liveins: %w0, %x0 + liveins: $w0, $x0 - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_ICMP intpred(eq), %0, %0 %6(s1) = G_TRUNC %1(s32) %9(s32) = G_ANYEXT %6 - %w0 = COPY %9(s32) + $w0 = COPY %9(s32) - %2(s64) = COPY %x0 + %2(s64) = COPY $x0 %3(s32) = G_ICMP intpred(uge), %2, %2 %7(s1) = G_TRUNC %3(s32) %10(s32) = G_ANYEXT %7 - %w0 = COPY %10(s32) + $w0 = COPY %10(s32) - %4(p0) = COPY %x0 + %4(p0) = COPY $x0 %5(s32) = G_ICMP intpred(ne), %4, %4 %8(s1) = G_TRUNC %5(s32) %11(s32) = G_ANYEXT %8 - %w0 = COPY %11(s32) + $w0 = COPY %11(s32) ... --- @@ -209,29 +209,29 @@ - { id: 7, class: gpr } # CHECK: body: -# CHECK: FCMPSrr %0, %0, implicit-def %nzcv -# CHECK: [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr %wzr, %wzr, 5, implicit %nzcv -# CHECK: [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr %wzr, %wzr, 13, implicit %nzcv +# CHECK: FCMPSrr %0, %0, implicit-def $nzcv +# CHECK: [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv +# CHECK: [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv # CHECK: %1:gpr32 = ORRWrr [[TST_MI]], [[TST_GT]] -# CHECK: FCMPDrr %2, %2, implicit-def %nzcv -# CHECK: %3:gpr32 = CSINCWr %wzr, %wzr, 4, implicit %nzcv +# CHECK: FCMPDrr %2, %2, implicit-def $nzcv +# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv body: | bb.0: - liveins: %w0, %x0 + liveins: $w0, $x0 - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FCMP floatpred(one), %0, %0 %4(s1) = G_TRUNC %1(s32) %6(s32) = G_ANYEXT %4 - %w0 = COPY %6(s32) + $w0 = COPY %6(s32) - %2(s64) = COPY %d0 + %2(s64) = COPY $d0 %3(s32) = G_FCMP floatpred(uge), %2, %2 %5(s1) = G_TRUNC %3(s32) %7(s32) = G_ANYEXT %5 - %w0 = COPY %7(s32) + $w0 = COPY %7(s32) ... @@ -257,10 +257,10 @@ body: | bb.0: - liveins: %s0, %w0 + liveins: $s0, $w0 successors: %bb.1 - %0(s32) = COPY %s0 - %3:gpr(s32) = COPY %w0 + %0(s32) = COPY $s0 + %3:gpr(s32) = COPY $w0 %1(s1) = G_TRUNC %3 bb.1: @@ -269,8 +269,8 @@ G_BRCOND %1, %bb.1 bb.2: - %s0 = COPY %2 - RET_ReallyLR implicit %s0 + $s0 = COPY %2 + RET_ReallyLR implicit $s0 ... 
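
The icmp checks above all share one lowering: the compare is a subtract whose result is discarded into the zero register, defining only $nzcv, and the boolean is then materialized by CSINCWr from two $wzr operands under the appropriate condition code. Sketch for the intpred(eq) case, mirroring the CHECK lines:

  %1(s32) = G_ICMP intpred(eq), %0, %0
  ; expected selection:
  ; $wzr = SUBSWrr %0, %0, implicit-def $nzcv
  ; %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
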
--- @@ -304,30 +304,30 @@ - { id: 9, class: gpr } # CHECK: body: -# CHECK: %wzr = ANDSWri %10, 0, implicit-def %nzcv -# CHECK: %3:gpr32 = CSELWr %1, %2, 1, implicit %nzcv -# CHECK: %wzr = ANDSWri %10, 0, implicit-def %nzcv -# CHECK: %6:gpr64 = CSELXr %4, %5, 1, implicit %nzcv -# CHECK: %wzr = ANDSWri %10, 0, implicit-def %nzcv -# CHECK: %9:gpr64 = CSELXr %7, %8, 1, implicit %nzcv +# CHECK: $wzr = ANDSWri %10, 0, implicit-def $nzcv +# CHECK: %3:gpr32 = CSELWr %1, %2, 1, implicit $nzcv +# CHECK: $wzr = ANDSWri %10, 0, implicit-def $nzcv +# CHECK: %6:gpr64 = CSELXr %4, %5, 1, implicit $nzcv +# CHECK: $wzr = ANDSWri %10, 0, implicit-def $nzcv +# CHECK: %9:gpr64 = CSELXr %7, %8, 1, implicit $nzcv body: | bb.0: - liveins: %w0, %w1, %w2 - %10:gpr(s32) = COPY %w0 + liveins: $w0, $w1, $w2 + %10:gpr(s32) = COPY $w0 %0(s1) = G_TRUNC %10 - %1(s32) = COPY %w1 - %2(s32) = COPY %w2 + %1(s32) = COPY $w1 + %2(s32) = COPY $w2 %3(s32) = G_SELECT %0, %1, %2 - %w0 = COPY %3(s32) + $w0 = COPY %3(s32) - %4(s64) = COPY %x0 - %5(s64) = COPY %x1 + %4(s64) = COPY $x0 + %5(s64) = COPY $x1 %6(s64) = G_SELECT %0, %4, %5 - %x0 = COPY %6(s64) + $x0 = COPY %6(s64) - %7(p0) = COPY %x0 - %8(p0) = COPY %x1 + %7(p0) = COPY $x0 + %8(p0) = COPY $x1 %9(p0) = G_SELECT %0, %7, %8 - %x0 = COPY %9(p0) + $x0 = COPY %9(p0) ... Index: test/CodeGen/AArch64/GlobalISel/translate-gep.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/translate-gep.ll +++ test/CodeGen/AArch64/GlobalISel/translate-gep.ll @@ -5,7 +5,7 @@ define i8* @translate_element_size1(i64 %arg) { ; CHECK-LABEL: name: translate_element_size1 -; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = COPY %x0 +; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = COPY $x0 ; CHECK: [[BASE:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[OFFSET]] %tmp = getelementptr i8, i8* null, i64 %arg @@ -16,12 +16,12 @@ ; CHECK-LABEL: name: first_offset_const ; CHECK: bb.1 (%ir-block.0): - ; CHECK: liveins: %x0 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 + ; CHECK: liveins: $x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64) - ; CHECK: %x0 = COPY [[GEP]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[GEP]](p0) + ; CHECK: RET_ReallyLR implicit $x0 %res = getelementptr %type, %type* %addr, i32 1 ret %type* %res } @@ -30,11 +30,11 @@ ; CHECK-LABEL: name: first_offset_trivial ; CHECK: bb.1 (%ir-block.0): - ; CHECK: liveins: %x0 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 + ; CHECK: liveins: $x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0) - ; CHECK: %x0 = COPY [[COPY1]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[COPY1]](p0) + ; CHECK: RET_ReallyLR implicit $x0 %res = getelementptr %type, %type* %addr, i32 0 ret %type* %res } @@ -43,15 +43,15 @@ ; CHECK-LABEL: name: first_offset_variable ; CHECK: bb.1 (%ir-block.0): - ; CHECK: liveins: %x0, %x1 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]] ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64) ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0) - ; CHECK: %x0 = COPY [[COPY2]](p0) - ; CHECK: RET_ReallyLR implicit 
%x0 + ; CHECK: $x0 = COPY [[COPY2]](p0) + ; CHECK: RET_ReallyLR implicit $x0 %res = getelementptr %type, %type* %addr, i64 %idx ret %type* %res } @@ -60,16 +60,16 @@ ; CHECK-LABEL: name: first_offset_ext ; CHECK: bb.1 (%ir-block.0): - ; CHECK: liveins: %w1, %x0 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %w1 + ; CHECK: liveins: $w1, $x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]] ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64) ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0) - ; CHECK: %x0 = COPY [[COPY2]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[COPY2]](p0) + ; CHECK: RET_ReallyLR implicit $x0 %res = getelementptr %type, %type* %addr, i32 %idx ret %type* %res } @@ -79,17 +79,17 @@ ; CHECK-LABEL: name: const_then_var ; CHECK: bb.1 (%ir-block.0): - ; CHECK: liveins: %x0, %x1 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 272 ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64) ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C1]], [[COPY1]] ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[MUL]](s64) ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP1]](p0) - ; CHECK: %x0 = COPY [[COPY2]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[COPY2]](p0) + ; CHECK: RET_ReallyLR implicit $x0 %res = getelementptr %type1, %type1* %addr, i32 4, i32 1, i64 %idx ret i32* %res } @@ -98,16 +98,16 @@ ; CHECK-LABEL: name: var_then_const ; CHECK: bb.1 (%ir-block.0): - ; CHECK: liveins: %x0, %x1 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1 + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40 ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]] ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64) ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[C1]](s64) - ; CHECK: %x0 = COPY [[GEP1]](p0) - ; CHECK: RET_ReallyLR implicit %x0 + ; CHECK: $x0 = COPY [[GEP1]](p0) + ; CHECK: RET_ReallyLR implicit $x0 %res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2 ret i32* %res } Index: test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll +++ test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll @@ -6,7 +6,7 @@ ; CHECK: fixedStack: ; CHECK: - { id: [[VARARGS_SLOT:[0-9]+]], type: default, offset: 8 ; CHECK: body: -; CHECK: [[LIST:%[0-9]+]]:gpr64sp = COPY %x0 +; CHECK: [[LIST:%[0-9]+]]:gpr64sp = COPY $x0 ; CHECK: [[VARARGS_AREA:%[0-9]+]]:gpr64common = ADDXri %fixed-stack.[[VARARGS_SLOT]], 0, 0 ; CHECK: STRXui [[VARARGS_AREA]], [[LIST]], 0 :: (store 8 into %ir.list, align 0) call void @llvm.va_start(i8* %list) Index: test/CodeGen/AArch64/GlobalISel/vastart.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/vastart.ll +++ 
test/CodeGen/AArch64/GlobalISel/vastart.ll @@ -5,7 +5,7 @@ declare void @llvm.va_start(i8*) define void @test_va_start(i8* %list) { ; CHECK-LABEL: name: test_va_start -; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY %x0 +; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-IOS: G_VASTART [[LIST]](p0) :: (store 8 into %ir.list, align 0) ; CHECK-LINUX: G_VASTART [[LIST]](p0) :: (store 32 into %ir.list, align 0) call void @llvm.va_start(i8* %list) Index: test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir +++ test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir @@ -17,6 +17,6 @@ - { id: 0, class: _ } body: | bb.0: - liveins: %x0 - %0(s64) = COPY %x0 + liveins: $x0 + %0(s64) = COPY $x0 ... Index: test/CodeGen/AArch64/GlobalISel/verify-selected.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/verify-selected.mir +++ test/CodeGen/AArch64/GlobalISel/verify-selected.mir @@ -18,8 +18,8 @@ - { id: 2, class: gpr } body: | bb.0: - liveins: %x0 - %0 = COPY %x0 + liveins: $x0 + %0 = COPY $x0 ; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function *** ; CHECK: instruction: %1:gpr64 = G_ADD @@ -28,5 +28,5 @@ ; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function *** ; CHECK: instruction: %2:gpr(s64) = COPY ; CHECK: operand 0: %2 - %2(s64) = COPY %x0 + %2(s64) = COPY $x0 ... Index: test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir =================================================================== --- test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir +++ test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir @@ -12,13 +12,13 @@ - { id: 4, class: fpr64 } body: | bb.0.entry: - %2:fpr64 = COPY %d2 - %1:fpr64 = COPY %d1 - %0:fpr64 = COPY %d0 + %2:fpr64 = COPY $d2 + %1:fpr64 = COPY $d1 + %0:fpr64 = COPY $d0 %3:fpr64 = FMULv2f32 %0, %1 %4:fpr64 = FSUBv2f32 killed %3, %2 - %d0 = COPY %4 - RET_ReallyLR implicit %d0 + $d0 = COPY %4 + RET_ReallyLR implicit $d0 ... # UNPROFITABLE-LABEL: name: f1_2s @@ -38,13 +38,13 @@ - { id: 4, class: fpr128 } body: | bb.0.entry: - %2:fpr128 = COPY %q2 - %1:fpr128 = COPY %q1 - %0:fpr128 = COPY %q0 + %2:fpr128 = COPY $q2 + %1:fpr128 = COPY $q1 + %0:fpr128 = COPY $q0 %3:fpr128 = FMULv4f32 %0, %1 %4:fpr128 = FSUBv4f32 killed %3, %2 - %q0 = COPY %4 - RET_ReallyLR implicit %q0 + $q0 = COPY %4 + RET_ReallyLR implicit $q0 ... # UNPROFITABLE-LABEL: name: f1_4s @@ -64,13 +64,13 @@ - { id: 4, class: fpr128 } body: | bb.0.entry: - %2:fpr128 = COPY %q2 - %1:fpr128 = COPY %q1 - %0:fpr128 = COPY %q0 + %2:fpr128 = COPY $q2 + %1:fpr128 = COPY $q1 + %0:fpr128 = COPY $q0 %3:fpr128 = FMULv2f64 %0, %1 %4:fpr128 = FSUBv2f64 killed %3, %2 - %q0 = COPY %4 - RET_ReallyLR implicit %q0 + $q0 = COPY %4 + RET_ReallyLR implicit $q0 ... # UNPROFITABLE-LABEL: name: f1_2d @@ -92,15 +92,15 @@ - { id: 6, class: fpr64 } body: | bb.0.entry: - %3:fpr64 = COPY %q3 - %2:fpr64 = COPY %q2 - %1:fpr64 = COPY %q1 - %0:fpr64 = COPY %q0 + %3:fpr64 = COPY $q3 + %2:fpr64 = COPY $q2 + %1:fpr64 = COPY $q1 + %0:fpr64 = COPY $q0 %4:fpr64 = FMULv2f32 %0, %1 %5:fpr64 = FMULv2f32 %2, %3 %6:fpr64 = FSUBv2f32 killed %4, %5 - %q0 = COPY %6 - RET_ReallyLR implicit %q0 + $q0 = COPY %6 + RET_ReallyLR implicit $q0 ... 
# ALL-LABEL: name: f1_both_fmul_2s @@ -118,15 +118,15 @@ - { id: 6, class: fpr128 } body: | bb.0.entry: - %3:fpr128 = COPY %q3 - %2:fpr128 = COPY %q2 - %1:fpr128 = COPY %q1 - %0:fpr128 = COPY %q0 + %3:fpr128 = COPY $q3 + %2:fpr128 = COPY $q2 + %1:fpr128 = COPY $q1 + %0:fpr128 = COPY $q0 %4:fpr128 = FMULv4f32 %0, %1 %5:fpr128 = FMULv4f32 %2, %3 %6:fpr128 = FSUBv4f32 killed %4, %5 - %q0 = COPY %6 - RET_ReallyLR implicit %q0 + $q0 = COPY %6 + RET_ReallyLR implicit $q0 ... # ALL-LABEL: name: f1_both_fmul_4s @@ -144,15 +144,15 @@ - { id: 6, class: fpr128 } body: | bb.0.entry: - %3:fpr128 = COPY %q3 - %2:fpr128 = COPY %q2 - %1:fpr128 = COPY %q1 - %0:fpr128 = COPY %q0 + %3:fpr128 = COPY $q3 + %2:fpr128 = COPY $q2 + %1:fpr128 = COPY $q1 + %0:fpr128 = COPY $q0 %4:fpr128 = FMULv2f64 %0, %1 %5:fpr128 = FMULv2f64 %2, %3 %6:fpr128 = FSUBv2f64 killed %4, %5 - %q0 = COPY %6 - RET_ReallyLR implicit %q0 + $q0 = COPY %6 + RET_ReallyLR implicit $q0 ... # ALL-LABEL: name: f1_both_fmul_2d Index: test/CodeGen/AArch64/arm64-csldst-mmo.ll =================================================================== --- test/CodeGen/AArch64/arm64-csldst-mmo.ll +++ test/CodeGen/AArch64/arm64-csldst-mmo.ll @@ -10,8 +10,8 @@ ; ; CHECK: Before post-MI-sched: ; CHECK-LABEL: # Machine code for function test1: -; CHECK: SU(2): STRWui %wzr -; CHECK: SU(3): %x21, %x20 = frame-destroy LDPXi %sp, 2 +; CHECK: SU(2): STRWui $wzr +; CHECK: SU(3): $x21, $x20 = frame-destroy LDPXi $sp, 2 ; CHECK: Predecessors: ; CHECK-NEXT: SU(0): Out ; CHECK-NEXT: SU(0): Out Index: test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir =================================================================== --- test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir +++ test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir @@ -1,115 +1,115 @@ # RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass aarch64-ldst-opt -verify-machineinstrs -o - %s | FileCheck %s --- # CHECK-LABEL: name: test_LDURSi_post -# CHECK: LDRSpost %x0, -4 +# CHECK: LDRSpost $x0, -4 name: test_LDURSi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %s0 = LDURSi %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $s0 = LDURSi $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_LDURDi_post -# CHECK: LDRDpost %x0, -4 +# CHECK: LDRDpost $x0, -4 name: test_LDURDi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %d0 = LDURDi %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $d0 = LDURDi $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_LDURQi_post -# CHECK: LDRQpost %x0, -4 +# CHECK: LDRQpost $x0, -4 name: test_LDURQi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %q0 = LDURQi %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $q0 = LDURQi $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_LDURWi_post -# CHECK: LDRWpost %x0, -4 +# CHECK: LDRWpost $x0, -4 name: test_LDURWi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %w1 = LDURWi %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $w1 = LDURWi $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_LDURXi_post -# CHECK: %x1 = LDRXpost %x0, -4 +# CHECK: $x1 = LDRXpost $x0, -4 name: test_LDURXi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %x1 = LDURXi %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $x1 = LDURXi $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... 
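
The LDUR tests above and the STUR tests below follow the same recipe: an unscaled load or store at offset 0 followed by a SUBXri of the base should be merged by the load/store optimizer into one post-indexed instruction with a -4 writeback. Sketch of the X-register load case (the post-indexed form also redefines the base register; the CHECK lines only match the data def):

  $x1 = LDURXi $x0, 0
  $x0 = SUBXri $x0, 4, 0
  ; after aarch64-ldst-opt, per the CHECK line above:
  ; $x1 = LDRXpost $x0, -4
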
# CHECK-LABEL: name: test_STURSi_post -# CHECK: STRSpost %s0, %x0, -4 +# CHECK: STRSpost $s0, $x0, -4 name: test_STURSi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %s0 = FMOVS0 - STURSi %s0, %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $s0 = FMOVS0 + STURSi $s0, $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_STURDi_post -# CHECK: STRDpost %d0, %x0, -4 +# CHECK: STRDpost $d0, $x0, -4 name: test_STURDi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %d0 = FMOVD0 - STURDi %d0, %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $d0 = FMOVD0 + STURDi $d0, $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_STURQi_post -# CHECK: STRQpost %q0, %x0, -4 +# CHECK: STRQpost $q0, $x0, -4 name: test_STURQi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - %q0 = MOVIv4i32 0, 0 - STURQi %q0, %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + $q0 = MOVIv4i32 0, 0 + STURQi $q0, $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_STURWi_post -# CHECK: STRWpost %wzr, %x0, -4 +# CHECK: STRWpost $wzr, $x0, -4 name: test_STURWi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - STURWi %wzr, %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + STURWi $wzr, $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... # CHECK-LABEL: name: test_STURXi_post -# CHECK: STRXpost %xzr, %x0, -4 +# CHECK: STRXpost $xzr, $x0, -4 name: test_STURXi_post body: | bb.0.entry: - liveins: %x0 + liveins: $x0 - STURXi %xzr, %x0, 0 - %x0 = SUBXri %x0, 4, 0 - RET_ReallyLR implicit %x0 + STURXi $xzr, $x0, 0 + $x0 = SUBXri $x0, 4, 0 + RET_ReallyLR implicit $x0 ... Index: test/CodeGen/AArch64/arm64-misched-memdep-bug.ll =================================================================== --- test/CodeGen/AArch64/arm64-misched-memdep-bug.ll +++ test/CodeGen/AArch64/arm64-misched-memdep-bug.ll @@ -9,11 +9,11 @@ ; CHECK: Successors: ; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2 ; CHECK-NEXT: SU(4): Ord Latency=0 -; CHECK: SU(3): STRWui %wzr, %0:gpr64common, 0; mem:ST4[%ptr1] +; CHECK: SU(3): STRWui $wzr, %0:gpr64common, 0; mem:ST4[%ptr1] ; CHECK: Successors: ; CHECK: SU(4): Ord Latency=0 -; CHECK: SU(4): STRWui %wzr, %1:gpr64common, 0; mem:ST4[%ptr2] -; CHECK: SU(5): %w0 = COPY %2 +; CHECK: SU(4): STRWui $wzr, %1:gpr64common, 0; mem:ST4[%ptr2] +; CHECK: SU(5): $w0 = COPY %2 ; CHECK: ** ScheduleDAGMI::schedule picking next node define i32 @misched_bug(i32* %ptr1, i32* %ptr2) { entry: Index: test/CodeGen/AArch64/arm64-misched-multimmo.ll =================================================================== --- test/CodeGen/AArch64/arm64-misched-multimmo.ll +++ test/CodeGen/AArch64/arm64-misched-multimmo.ll @@ -8,11 +8,11 @@ ; Check that no scheduling dependencies are created between the paired loads and the store during post-RA MI scheduling. 
; ; CHECK-LABEL: # Machine code for function foo: -; CHECK: SU(2): renamable %w{{[0-9]+}}, renamable %w{{[0-9]+}} = LDPWi +; CHECK: SU(2): renamable $w{{[0-9]+}}, renamable $w{{[0-9]+}} = LDPWi ; CHECK: Successors: ; CHECK-NOT: ch SU(4) ; CHECK: SU(3) -; CHECK: SU(4): STRWui %wzr, renamable %x{{[0-9]+}} +; CHECK: SU(4): STRWui $wzr, renamable $x{{[0-9]+}} define i32 @foo() { entry: %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4 Index: test/CodeGen/AArch64/arm64-regress-opt-cmp.mir =================================================================== --- test/CodeGen/AArch64/arm64-regress-opt-cmp.mir +++ test/CodeGen/AArch64/arm64-regress-opt-cmp.mir @@ -1,6 +1,6 @@ # RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s # CHECK: %1:gpr32common = ANDWri {{.*}} -# CHECK-NEXT: %wzr = SUBSWri {{.*}} +# CHECK-NEXT: $wzr = SUBSWri {{.*}} --- | define i32 @test01() nounwind { entry: @@ -27,15 +27,15 @@ %0 = MOVi32imm 1 %1 = ANDWri killed %1, 15 - %wzr = SUBSWri killed %1, 0, 0, implicit-def %nzcv - Bcc 9, %bb.2.if.end, implicit %nzcv + $wzr = SUBSWri killed %1, 0, 0, implicit-def $nzcv + Bcc 9, %bb.2.if.end, implicit $nzcv bb.1.if.then: - %w0 = MOVi32imm 1 - RET_ReallyLR implicit %w0 + $w0 = MOVi32imm 1 + RET_ReallyLR implicit $w0 bb.2.if.end: - %w0 = MOVi32imm 0 - RET_ReallyLR implicit %w0 + $w0 = MOVi32imm 0 + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/AArch64/ccmp-successor-probs.mir =================================================================== --- test/CodeGen/AArch64/ccmp-successor-probs.mir +++ test/CodeGen/AArch64/ccmp-successor-probs.mir @@ -6,7 +6,7 @@ # CHECK-LABEL: name: aarch64-ccmp-successor-probs # CHECK: bb.0: # CHECK-NEXT: successors: %bb.2(0x04000000), %bb.3(0x7c000000) -# CHECK: CCMPXr %5, %4, 0, 10, implicit-def %nzcv, implicit %nzcv +# CHECK: CCMPXr %5, %4, 0, 10, implicit-def $nzcv, implicit $nzcv # name: aarch64-ccmp-successor-probs registers: @@ -22,21 +22,21 @@ bb.0: successors: %bb.1(0x7e000000), %bb.2(0x02000000) - %0 = LDRXui killed %x0, 69 - %1 = COPY %xzr - %2 = SUBSXrr %1, %0, implicit-def dead %nzcv - %3 = SUBSXri %x1, 1, 0, implicit-def dead %nzcv + %0 = LDRXui killed $x0, 69 + %1 = COPY $xzr + %2 = SUBSXrr %1, %0, implicit-def dead $nzcv + %3 = SUBSXri $x1, 1, 0, implicit-def dead $nzcv %4 = COPY %0 %5 = COPY %3 - %6 = SUBSXrr %x1, killed %2, implicit-def %nzcv - Bcc 11, %bb.2, implicit %nzcv + %6 = SUBSXrr $x1, killed %2, implicit-def $nzcv + Bcc 11, %bb.2, implicit $nzcv B %bb.1 bb.1: successors: %bb.2(0x02082082), %bb.3(0x7df7df7e) - %7 = SUBSXrr %5, %4, implicit-def %nzcv - Bcc 12, %bb.2, implicit %nzcv + %7 = SUBSXrr %5, %4, implicit-def $nzcv + Bcc 12, %bb.2, implicit $nzcv B %bb.3 bb.2: Index: test/CodeGen/AArch64/cfi_restore.mir =================================================================== --- test/CodeGen/AArch64/cfi_restore.mir +++ test/CodeGen/AArch64/cfi_restore.mir @@ -9,29 +9,29 @@ hasCalls: true stack: - { id: 0, type: spill-slot, offset: -8, size: 8, alignment: 8, stack-id: 0, - callee-saved-register: '%lr' } + callee-saved-register: '$lr' } - { id: 1, type: spill-slot, offset: -16, size: 8, alignment: 8, stack-id: 0, - callee-saved-register: '%fp' } + callee-saved-register: '$fp' } body: | bb.0: - liveins: %fp, %lr + liveins: $fp, $lr - %sp = frame-setup SUBXri %sp, 16, 0 - frame-setup STRXui killed %fp, %sp, 0 :: (store 8 into %stack.1) - frame-setup CFI_INSTRUCTION offset %w29, -16 + $sp = frame-setup SUBXri $sp, 16, 0 + frame-setup STRXui killed $fp, 
$sp, 0 :: (store 8 into %stack.1) + frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK: .cfi_offset w29, -16 - frame-setup STRXui killed %lr, %sp, 1 :: (store 8 into %stack.0) - frame-setup CFI_INSTRUCTION offset %w30, -8 + frame-setup STRXui killed $lr, $sp, 1 :: (store 8 into %stack.0) + frame-setup CFI_INSTRUCTION offset $w30, -8 ; CHECK: .cfi_offset w30, -8 - %fp = frame-setup ADDXri %sp, 0, 0 - frame-setup CFI_INSTRUCTION def_cfa %w29, 16 - %lr = LDRXui %sp, 1 :: (load 8 from %stack.0) - CFI_INSTRUCTION restore %w30 + $fp = frame-setup ADDXri $sp, 0, 0 + frame-setup CFI_INSTRUCTION def_cfa $w29, 16 + $lr = LDRXui $sp, 1 :: (load 8 from %stack.0) + CFI_INSTRUCTION restore $w30 ; CHECK: .cfi_restore w30 - %fp = LDRXui %sp, 0 :: (load 8 from %stack.1) - CFI_INSTRUCTION restore %w29 + $fp = LDRXui $sp, 0 :: (load 8 from %stack.1) + CFI_INSTRUCTION restore $w29 ; CHECK: .cfi_restore w29 - %sp = ADDXri %sp, 16, 0 + $sp = ADDXri $sp, 16, 0 RET_ReallyLR ; CHECK: .cfi_endproc ... Index: test/CodeGen/AArch64/falkor-hwpf-fix.mir =================================================================== --- test/CodeGen/AArch64/falkor-hwpf-fix.mir +++ test/CodeGen/AArch64/falkor-hwpf-fix.mir @@ -3,147 +3,147 @@ # Verify that the tag collision between the loads is resolved for various load opcodes. # CHECK-LABEL: name: hwpf1 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LDRWui %[[BASE]], 0 -# CHECK: LDRWui %x1, 1 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LDRWui $[[BASE]], 0 +# CHECK: LDRWui $x1, 1 name: hwpf1 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1 + liveins: $w0, $x1 - %w2 = LDRWui %x1, 0 :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 1 + $w2 = LDRWui $x1, 0 :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 1 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpf2 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1i64 %q2, 0, %[[BASE]] -# CHECK: LDRWui %x1, 0 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1i64 $q2, 0, $[[BASE]] +# CHECK: LDRWui $x1, 0 name: hwpf2 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1, %q2 + liveins: $w0, $x1, $q2 - %q2 = LD1i64 %q2, 0, %x1 :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 0 + $q2 = LD1i64 $q2, 0, $x1 :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 0 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpf3 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1i8 %q2, 0, %[[BASE]] -# CHECK: LDRWui %x1, 0 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1i8 $q2, 0, $[[BASE]] +# CHECK: LDRWui $x1, 0 name: hwpf3 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1, %q2 + liveins: $w0, $x1, $q2 - %q2 = LD1i8 %q2, 0, %x1 :: ("aarch64-strided-access" load 4) - %w0 = LDRWui %x1, 0 + $q2 = LD1i8 $q2, 0, $x1 :: ("aarch64-strided-access" load 4) + $w0 = LDRWui $x1, 0 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... 
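
The hwpf1-hwpf3 cases above (and hwpf4-hwpf7 below, for more opcodes) all get the same repair: when two strided loads would collide in the Falkor hardware prefetcher's tag, the pass copies the base register through an ORRXrs $xzr (a register MOV) so the second access uses a distinct base and the tags diverge. The [[BASE]] capture in the CHECKs exists because the scratch register is whatever the pass picks. Condensed before/after sketch:

  ; before falkor-hwpf-fix:
  $w2 = LDRWui $x1, 0 :: ("aarch64-strided-access" load 4)
  $w2 = LDRWui $x1, 1
  ; after, per the CHECK lines above:
  ; $[[BASE]] = ORRXrs $xzr, $x1, 0
  ; LDRWui $[[BASE]], 0
  ; LDRWui $x1, 1
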
--- # CHECK-LABEL: name: hwpf4 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1Onev1d %[[BASE]] -# CHECK: LDRWui %x1, 0 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1Onev1d $[[BASE]] +# CHECK: LDRWui $x1, 0 name: hwpf4 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1 + liveins: $w0, $x1 - %d2 = LD1Onev1d %x1 :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 0 + $d2 = LD1Onev1d $x1 :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 0 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpf5 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1Twov1d %[[BASE]] -# CHECK: LDRWui %x1, 0 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1Twov1d $[[BASE]] +# CHECK: LDRWui $x1, 0 name: hwpf5 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1 + liveins: $w0, $x1 - %d2_d3 = LD1Twov1d %x1 :: ("aarch64-strided-access" load 4) - %w0 = LDRWui %x1, 0 + $d2_d3 = LD1Twov1d $x1 :: ("aarch64-strided-access" load 4) + $w0 = LDRWui $x1, 0 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpf6 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LDPQi %[[BASE]] -# CHECK: LDRWui %x1, 3 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LDPQi $[[BASE]] +# CHECK: LDRWui $x1, 3 name: hwpf6 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1 + liveins: $w0, $x1 - %q2, %q3 = LDPQi %x1, 3 :: ("aarch64-strided-access" load 4) - %w0 = LDRWui %x1, 3 + $q2, $q3 = LDPQi $x1, 3 :: ("aarch64-strided-access" load 4) + $w0 = LDRWui $x1, 3 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpf7 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LDPXi %[[BASE]] -# CHECK: LDRWui %x1, 2 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LDPXi $[[BASE]] +# CHECK: LDRWui $x1, 2 name: hwpf7 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1 + liveins: $w0, $x1 - %x2, %x3 = LDPXi %x1, 3 :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 2 + $x2, $x3 = LDPXi $x1, 3 :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 2 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR @@ -153,154 +153,154 @@ # for post increment addressing for various load opcodes. 
# CHECK-LABEL: name: hwpfinc1 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LDRWpost %[[BASE]], 0 -# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0 -# CHECK: LDRWui %x1, 1 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LDRWpost $[[BASE]], 0 +# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0 +# CHECK: LDRWui $x1, 1 name: hwpfinc1 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1 + liveins: $w0, $x1 - %x1, %w2 = LDRWpost %x1, 0 :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 1 + $x1, $w2 = LDRWpost $x1, 0 :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 1 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpfinc2 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1i64_POST %q2, 0, %[[BASE]] -# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0 -# CHECK: LDRWui %x1, 1 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1i64_POST $q2, 0, $[[BASE]] +# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0 +# CHECK: LDRWui $x1, 1 name: hwpfinc2 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1, %q2 + liveins: $w0, $x1, $q2 - %x1, %q2 = LD1i64_POST %q2, 0, %x1, %x1 :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 132 + $x1, $q2 = LD1i64_POST $q2, 0, $x1, $x1 :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 132 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpfinc3 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1i8_POST %q2, 0, %[[BASE]] -# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0 -# CHECK: LDRWui %x1, 132 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1i8_POST $q2, 0, $[[BASE]] +# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0 +# CHECK: LDRWui $x1, 132 name: hwpfinc3 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1, %q2 + liveins: $w0, $x1, $q2 - %x1, %q2 = LD1i8_POST %q2, 0, %x1, %x1 :: ("aarch64-strided-access" load 4) - %w0 = LDRWui %x1, 132 + $x1, $q2 = LD1i8_POST $q2, 0, $x1, $x1 :: ("aarch64-strided-access" load 4) + $w0 = LDRWui $x1, 132 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... --- # CHECK-LABEL: name: hwpfinc4 -# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0 -# CHECK: LD1Rv1d_POST %[[BASE]] -# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0 -# CHECK: LDRWui %x1, 252 +# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0 +# CHECK: LD1Rv1d_POST $[[BASE]] +# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0 +# CHECK: LDRWui $x1, 252 name: hwpfinc4 tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1, %q2 + liveins: $w0, $x1, $q2 - %x1, %d2 = LD1Rv1d_POST %x1, %xzr :: ("aarch64-strided-access" load 4) - %w2 = LDRWui %x1, 252 + $x1, $d2 = LD1Rv1d_POST $x1, $xzr :: ("aarch64-strided-access" load 4) + $w2 = LDRWui $x1, 252 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR ... 
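
The hwpfinc cases (hwpfinc1-hwpfinc4 above, three more below) add one wrinkle to that rewrite: the post-incrementing _POST loads write the base back, so after retargeting the load onto the scratch register the pass must also copy the updated base back out, which is the second ORRXrs in each CHECK block. Sketch from the hwpfinc1 shape:

  ; after falkor-hwpf-fix, per the CHECK lines above:
  ; $[[BASE]] = ORRXrs $xzr, $x1, 0
  ; LDRWpost $[[BASE]], 0
  ; $x1 = ORRXrs $xzr, $[[BASE]], 0   ; restore the written-back base
  ; LDRWui $x1, 1
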
 ---
 # CHECK-LABEL: name: hwpfinc5
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD3Threev2s_POST %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWroX %x17, %x0
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1Onev1d $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWroX $x17, $x0
 name: hwpfinc5
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17, %q2
+    liveins: $w0, $x1, $x17, $q2

-    %x1, %d2_d3_d4 = LD3Threev2s_POST %x1, %x0 :: ("aarch64-strided-access" load 4)
-    %w0 = LDRWroX %x17, %x0, 0, 0
+    $x1, $d2_d3_d4 = LD3Threev2s_POST $x1, $x0 :: ("aarch64-strided-access" load 4)
+    $w0 = LDRWroX $x17, $x0, 0, 0

-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv

   bb.1:
     RET_ReallyLR

 ...
 ---
 # CHECK-LABEL: name: hwpfinc6
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDPDpost %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x17, 2
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDPDpost $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x17, 2
 name: hwpfinc6
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17, %q2
+    liveins: $w0, $x1, $x17, $q2

-    %x1, %d2, %d3 = LDPDpost %x1, 3 :: ("aarch64-strided-access" load 4)
-    %w16 = LDRWui %x17, 2
+    $x1, $d2, $d3 = LDPDpost $x1, 3 :: ("aarch64-strided-access" load 4)
+    $w16 = LDRWui $x17, 2

-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv

   bb.1:
     RET_ReallyLR

 ...
 ---
 # CHECK-LABEL: name: hwpfinc7
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDPXpost %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x17, 2
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDPXpost $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x17, 2
 name: hwpfinc7
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17, %q2
+    liveins: $w0, $x1, $x17, $q2

-    %x1, %x2, %x3 = LDPXpost %x1, 3 :: ("aarch64-strided-access" load 4)
-    %w18 = LDRWui %x17, 2
+    $x1, $x2, $x3 = LDPXpost $x1, 3 :: ("aarch64-strided-access" load 4)
+    $w18 = LDRWui $x17, 2

-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv

   bb.1:
     RET_ReallyLR

@@ -309,23 +309,23 @@
 # Check that we handle the case of a strided load with no HW prefetcher tag correctly.
# CHECK-LABEL: name: hwpf_notagbug -# CHECK-NOT: ORRXrs %xzr -# CHECK: LDARW %x1 -# CHECK-NOT: ORRXrs %xzr -# CHECK: LDRWui %x1 +# CHECK-NOT: ORRXrs $xzr +# CHECK: LDARW $x1 +# CHECK-NOT: ORRXrs $xzr +# CHECK: LDRWui $x1 name: hwpf_notagbug tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x1, %x17 + liveins: $w0, $x1, $x17 - %w1 = LDARW %x1 :: ("aarch64-strided-access" load 4) - %w1 = LDRWui %x1, 0 :: ("aarch64-strided-access" load 4) - %w17 = LDRWui %x17, 0 :: ("aarch64-strided-access" load 4) + $w1 = LDARW $x1 :: ("aarch64-strided-access" load 4) + $w1 = LDRWui $x1, 0 :: ("aarch64-strided-access" load 4) + $w17 = LDRWui $x17, 0 :: ("aarch64-strided-access" load 4) - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR @@ -334,21 +334,21 @@ # Check that we treat sp based loads as non-prefetching. # CHECK-LABEL: name: hwpf_spbase -# CHECK-NOT: ORRXrs %xzr -# CHECK: LDRWui %x15 -# CHECK: LDRWui %sp +# CHECK-NOT: ORRXrs $xzr +# CHECK: LDRWui $x15 +# CHECK: LDRWui $sp name: hwpf_spbase tracksRegLiveness: true body: | bb.0: - liveins: %w0, %x15 + liveins: $w0, $x15 - %w1 = LDRWui %x15, 0 :: ("aarch64-strided-access" load 4) - %w17 = LDRWui %sp, 0 + $w1 = LDRWui $x15, 0 :: ("aarch64-strided-access" load 4) + $w17 = LDRWui $sp, 0 - %w0 = SUBWri %w0, 1, 0 - %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv - Bcc 9, %bb.0, implicit %nzcv + $w0 = SUBWri $w0, 1, 0 + $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv + Bcc 9, %bb.0, implicit $nzcv bb.1: RET_ReallyLR Index: test/CodeGen/AArch64/ldst-opt-aa.mir =================================================================== --- test/CodeGen/AArch64/ldst-opt-aa.mir +++ test/CodeGen/AArch64/ldst-opt-aa.mir @@ -14,17 +14,17 @@ ... --- # CHECK-LABEL: name: ldr_str_aa -# CHECK: %w8, %w9 = LDPWi %x1, 0 -# CHECK: STPWi %w8, %w9, %x0, 0 +# CHECK: $w8, $w9 = LDPWi $x1, 0 +# CHECK: STPWi $w8, $w9, $x0, 0 name: ldr_str_aa tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1 + liveins: $x0, $x1 - %w8 = LDRWui %x1, 0 :: (load 4 from %ir.y) - STRWui killed %w8, %x0, 0 :: (store 4 into %ir.x) - %w9 = LDRWui killed %x1, 1 :: (load 4 from %ir.arrayidx2) - STRWui killed %w9, killed %x0, 1 :: (store 4 into %ir.arrayidx3) - RET undef %lr + $w8 = LDRWui $x1, 0 :: (load 4 from %ir.y) + STRWui killed $w8, $x0, 0 :: (store 4 into %ir.x) + $w9 = LDRWui killed $x1, 1 :: (load 4 from %ir.arrayidx2) + STRWui killed $w9, killed $x0, 1 :: (store 4 into %ir.arrayidx3) + RET undef $lr Index: test/CodeGen/AArch64/ldst-opt-zr-clobber.mir =================================================================== --- test/CodeGen/AArch64/ldst-opt-zr-clobber.mir +++ test/CodeGen/AArch64/ldst-opt-zr-clobber.mir @@ -10,17 +10,17 @@ # schedulers reordering instructions such that SUBS doesn't appear # between stores. 
# CHECK-LABEL: name: no-clobber-zr -# CHECK: STPXi %xzr, %xzr, %x0, 0 +# CHECK: STPXi $xzr, $xzr, $x0, 0 name: no-clobber-zr body: | bb.0: - liveins: %x0, %x1 - STRXui %xzr, %x0, 0 :: (store 8 into %ir.p) - dead %xzr = SUBSXri killed %x1, 0, 0, implicit-def %nzcv - %w8 = CSINCWr %wzr, %wzr, 1, implicit killed %nzcv - STRXui %xzr, killed %x0, 1 :: (store 8 into %ir.p) - %w0 = ORRWrs %wzr, killed %w8, 0 - RET %lr, implicit %w0 + liveins: $x0, $x1 + STRXui $xzr, $x0, 0 :: (store 8 into %ir.p) + dead $xzr = SUBSXri killed $x1, 0, 0, implicit-def $nzcv + $w8 = CSINCWr $wzr, $wzr, 1, implicit killed $nzcv + STRXui $xzr, killed $x0, 1 :: (store 8 into %ir.p) + $w0 = ORRWrs $wzr, killed $w8, 0 + RET $lr, implicit $w0 ... Index: test/CodeGen/AArch64/ldst-opt.mir =================================================================== --- test/CodeGen/AArch64/ldst-opt.mir +++ test/CodeGen/AArch64/ldst-opt.mir @@ -4,9 +4,9 @@ tracksRegLiveness: true body: | bb.0: - liveins: %w1, %x0, %lr + liveins: $w1, $x0, $lr - STRWui killed %w1, %x0, 0 :: (store 4) + STRWui killed $w1, $x0, 0 :: (store 4) CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 @@ -27,22 +27,22 @@ CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 - %w0 = LDRHHui killed %x0, 1 :: (load 2) - RET %lr, implicit %w0 + $w0 = LDRHHui killed $x0, 1 :: (load 2) + RET $lr, implicit $w0 ... # Don't count transient instructions towards search limits. # CHECK-LABEL: name: promote-load-from-store -# CHECK: STRWui %w1 -# CHECK: UBFMWri killed %w1 +# CHECK: STRWui $w1 +# CHECK: UBFMWri killed $w1 --- name: store-pair tracksRegLiveness: true body: | bb.0: - liveins: %w1, %x0, %lr + liveins: $w1, $x0, $lr - STRWui %w1, %x0, 0 :: (store 4) + STRWui $w1, $x0, 0 :: (store 4) CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 @@ -63,8 +63,8 @@ CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 CFI_INSTRUCTION 0 - STRWui killed %w1, killed %x0, 1 :: (store 4) - RET %lr + STRWui killed $w1, killed $x0, 1 :: (store 4) + RET $lr ... # CHECK-LABEL: name: store-pair @@ -74,110 +74,110 @@ tracksRegLiveness: true body: | bb.0: - liveins: %w1, %x0, %lr + liveins: $w1, $x0, $lr - STRWui %w1, %x0, 0 :: (store 4) - %w2 = COPY %w1 - %x3 = COPY %x0 - STRWui killed %w1, killed %x0, 1 :: (store 4) - RET %lr + STRWui $w1, $x0, 0 :: (store 4) + $w2 = COPY $w1 + $x3 = COPY $x0 + STRWui killed $w1, killed $x0, 1 :: (store 4) + RET $lr ... # When merging a lower store with an upper one, we must clear kill flags on # the lower store. # CHECK-LABEL: store-pair-clearkill0 -# CHECK-NOT: STPWi %w1, killed %w1, %x0, 0 :: (store 4) -# CHECK: STPWi %w1, %w1, %x0, 0 :: (store 4) -# CHECK: %w2 = COPY %w1 -# CHECK: RET %lr +# CHECK-NOT: STPWi $w1, killed $w1, $x0, 0 :: (store 4) +# CHECK: STPWi $w1, $w1, $x0, 0 :: (store 4) +# CHECK: $w2 = COPY $w1 +# CHECK: RET $lr --- name: store-pair-clearkill1 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %lr + liveins: $x0, $lr - %w1 = MOVi32imm 13 - %w2 = MOVi32imm 7 - STRWui %w1, %x0, 1 :: (store 4) - %w2 = COPY killed %w1 - STRWui killed %w2, %x0, 0 :: (store 4) + $w1 = MOVi32imm 13 + $w2 = MOVi32imm 7 + STRWui $w1, $x0, 1 :: (store 4) + $w2 = COPY killed $w1 + STRWui killed $w2, $x0, 0 :: (store 4) - %w1 = MOVi32imm 42 - %w2 = MOVi32imm 7 - STRWui %w1, %x0, 0 :: (store 4) - %w2 = COPY killed %w1 - STRWui killed %w2, killed %x0, 1 :: (store 4) + $w1 = MOVi32imm 42 + $w2 = MOVi32imm 7 + STRWui $w1, $x0, 0 :: (store 4) + $w2 = COPY killed $w1 + STRWui killed $w2, killed $x0, 1 :: (store 4) - RET %lr + RET $lr ... 
# When merging an upper store with a lower one, kill flags along the way need -# to be removed; In this case the kill flag on %w1. +# to be removed; In this case the kill flag on $w1. # CHECK-LABEL: store-pair-clearkill1 -# CHECK: %w1 = MOVi32imm -# CHECK: %w2 = MOVi32imm -# CHECK-NOT: %w2 = COPY killed %w1 -# CHECK: %w2 = COPY %w1 -# CHECK: STPWi killed %w2, %w1, %x0, 0 +# CHECK: $w1 = MOVi32imm +# CHECK: $w2 = MOVi32imm +# CHECK-NOT: $w2 = COPY killed $w1 +# CHECK: $w2 = COPY $w1 +# CHECK: STPWi killed $w2, $w1, $x0, 0 -# CHECK: %w1 = MOVi32imm -# CHECK: %w2 = MOVi32imm -# CHECK-NOT: %w2 = COPY killed %w1 -# CHECK: %w2 = COPY %w1 -# CHECK: STPWi %w1, killed %w2, killed %x0, 0 +# CHECK: $w1 = MOVi32imm +# CHECK: $w2 = MOVi32imm +# CHECK-NOT: $w2 = COPY killed $w1 +# CHECK: $w2 = COPY $w1 +# CHECK: STPWi $w1, killed $w2, killed $x0, 0 --- name: store-load-clearkill tracksRegLiveness: true body: | bb.0: - liveins: %w1 + liveins: $w1 - STRWui %w1, %sp, 0 :: (store 4) - %wzr = COPY killed %w1 ; killing use of %w1 - %w11 = LDRWui %sp, 0 :: (load 4) - HINT 0, implicit %w11 ; some use of %w11 + STRWui $w1, $sp, 0 :: (store 4) + $wzr = COPY killed $w1 ; killing use of $w1 + $w11 = LDRWui $sp, 0 :: (load 4) + HINT 0, implicit $w11 ; some use of $w11 ... # When replacing the load of a store-load pair with a copy, the kill flags # along the way need to be cleared. # CHECK-LABEL: name: store-load-clearkill -# CHECK: STRWui %w1, %sp, 0 :: (store 4) -# CHECK-NOT: COPY killed %w1 -# CHECK: %wzr = COPY %w1 -# CHECK: %w11 = ORRWrs %wzr, %w1, 0 -# CHECK: HINT 0, implicit %w11 +# CHECK: STRWui $w1, $sp, 0 :: (store 4) +# CHECK-NOT: COPY killed $w1 +# CHECK: $wzr = COPY $w1 +# CHECK: $w11 = ORRWrs $wzr, $w1, 0 +# CHECK: HINT 0, implicit $w11 --- name: promote-load-from-store-undef tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x2, %lr + liveins: $x0, $x2, $lr - STRWui undef %w1, %x0, 0 :: (store 4) - %w0 = LDRBBui %x0, 1 :: (load 2) - STRHHui undef %w3, %x2, 0 :: (store 4) - %w1 = LDRBBui %x2, 0 :: (load 4) - RET %lr, implicit %w0 + STRWui undef $w1, $x0, 0 :: (store 4) + $w0 = LDRBBui $x0, 1 :: (load 2) + STRHHui undef $w3, $x2, 0 :: (store 4) + $w1 = LDRBBui $x2, 0 :: (load 4) + RET $lr, implicit $w0 ... # CHECK-LABEL: name: promote-load-from-store-undef -# CHECK: STRWui undef %w1 -# CHECK: UBFMWri undef %w1 -# CHECK: STRHHui undef %w3 -# CHECK: ANDWri undef %w3 +# CHECK: STRWui undef $w1 +# CHECK: UBFMWri undef $w1 +# CHECK: STRHHui undef $w3 +# CHECK: ANDWri undef $w3 --- name: promote-load-from-store-trivial-kills tracksRegLiveness: true body: | bb.0: - liveins: %x0, %lr + liveins: $x0, $lr - STRXui %x0, %sp, 0 :: (store 8) - STRXui killed %x0, %sp, 2 :: (store 8) - %x0 = LDRXui %sp, 0 :: (load 8) - BL &bar, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit-def %sp - RET %lr + STRXui $x0, $sp, 0 :: (store 8) + STRXui killed $x0, $sp, 2 :: (store 8) + $x0 = LDRXui $sp, 0 :: (load 8) + BL &bar, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit-def $sp + RET $lr ...
# CHECK-LABEL: name: promote-load-from-store-trivial-kills -# CHECK: STRXui %x0, %sp, 0 -# CHECK: STRXui %x0, %sp, 2 +# CHECK: STRXui $x0, $sp, 0 +# CHECK: STRXui $x0, $sp, 2 # CHECK-NOT: LDRXui # CHECK-NOT: ORR -# CHECK: BL &bar, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit-def %sp +# CHECK: BL &bar, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit-def $sp Index: test/CodeGen/AArch64/live-interval-analysis.mir =================================================================== --- test/CodeGen/AArch64/live-interval-analysis.mir +++ test/CodeGen/AArch64/live-interval-analysis.mir @@ -14,9 +14,9 @@ tracksRegLiveness: true body: | bb.0: - liveins: %x28 - %6 : xseqpairsclass = COPY %x28_fp - %x28_fp = COPY %6 - %x28 = COPY %x28 - %fp = COPY %fp + liveins: $x28 + %6 : xseqpairsclass = COPY $x28_fp + $x28_fp = COPY %6 + $x28 = COPY $x28 + $fp = COPY $fp ... Index: test/CodeGen/AArch64/loh.mir =================================================================== --- test/CodeGen/AArch64/loh.mir +++ test/CodeGen/AArch64/loh.mir @@ -22,171 +22,171 @@ body: | bb.0: ; CHECK: Adding MCLOH_AdrpAdrp: - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g3 - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g4 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g4 ; CHECK-NEXT: Adding MCLOH_AdrpAdrp: - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g2 - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g2 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g3 ; CHECK-NEXT: Adding MCLOH_AdrpAdrp: - ; CHECK-NEXT: %x0 = ADRP target-flags(aarch64-page) @g0 - ; CHECK-NEXT: %x0 = ADRP target-flags(aarch64-page) @g1 - %x0 = ADRP target-flags(aarch64-page) @g0 - %x0 = ADRP target-flags(aarch64-page) @g1 - %x1 = ADRP target-flags(aarch64-page) @g2 - %x1 = ADRP target-flags(aarch64-page) @g3 - %x1 = ADRP target-flags(aarch64-page) @g4 + ; CHECK-NEXT: $x0 = ADRP target-flags(aarch64-page) @g0 + ; CHECK-NEXT: $x0 = ADRP target-flags(aarch64-page) @g1 + $x0 = ADRP target-flags(aarch64-page) @g0 + $x0 = ADRP target-flags(aarch64-page) @g1 + $x1 = ADRP target-flags(aarch64-page) @g2 + $x1 = ADRP target-flags(aarch64-page) @g3 + $x1 = ADRP target-flags(aarch64-page) @g4 bb.1: ; CHECK-NEXT: Adding MCLOH_AdrpAdd: - ; CHECK-NEXT: %x20 = ADRP target-flags(aarch64-page) @g0 - ; CHECK-NEXT: %x3 = ADDXri %x20, target-flags(aarch64-pageoff) @g0 + ; CHECK-NEXT: $x20 = ADRP target-flags(aarch64-page) @g0 + ; CHECK-NEXT: $x3 = ADDXri $x20, target-flags(aarch64-pageoff) @g0 ; CHECK-NEXT: Adding MCLOH_AdrpAdd: - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g0 - ; CHECK-NEXT: %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g0 - %x1 = ADRP target-flags(aarch64-page) @g0 - %x9 = SUBXri undef %x11, 5, 0 ; should not affect MCLOH formation - %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g0, 0 - %x20 = ADRP target-flags(aarch64-page) @g0 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g0 + ; CHECK-NEXT: $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g0 + $x1 = ADRP target-flags(aarch64-page) @g0 + $x9 = SUBXri undef $x11, 5, 0 ; should not affect MCLOH formation + $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g0, 0 + $x20 = ADRP target-flags(aarch64-page) @g0 BL @extfunc, csr_aarch64_aapcs ; should not clobber X20 - %x3 = ADDXri %x20, target-flags(aarch64-pageoff) @g0, 0 + $x3 = ADDXri $x20, target-flags(aarch64-pageoff) @g0, 0 
bb.2: ; CHECK-NOT: MCLOH_AdrpAdd - %x9 = ADRP target-flags(aarch64-page) @g0 + $x9 = ADRP target-flags(aarch64-page) @g0 BL @extfunc, csr_aarch64_aapcs ; clobbers x9 - ; Verification requires the use of 'undef' in front of the clobbered %x9 - %x9 = ADDXri undef %x9, target-flags(aarch64-pageoff) @g0, 0 + ; Verification requires the use of 'undef' in front of the clobbered $x9 + $x9 = ADDXri undef $x9, target-flags(aarch64-pageoff) @g0, 0 bb.3: ; CHECK-NOT: MCLOH_AdrpAdd - %x10 = ADRP target-flags(aarch64-page) @g0 - HINT 0, implicit def %x10 ; clobbers x10 - %x10 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0 + $x10 = ADRP target-flags(aarch64-page) @g0 + HINT 0, implicit def $x10 ; clobbers x10 + $x10 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 bb.4: ; Cannot produce a LOH for multiple users ; CHECK-NOT: MCLOH_AdrpAdd - %x10 = ADRP target-flags(aarch64-page) @g0 - HINT 0, implicit def %x10 ; clobbers x10 - %x11 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0 - %x12 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0 + $x10 = ADRP target-flags(aarch64-page) @g0 + HINT 0, implicit def $x10 ; clobbers x10 + $x11 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 + $x12 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 bb.5: ; CHECK-NEXT: Adding MCLOH_AdrpLdr: - ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page) @g2 - ; CHECK-NEXT: %s6 = LDRSui %x5, target-flags(aarch64-pageoff) @g2 + ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page) @g2 + ; CHECK-NEXT: $s6 = LDRSui $x5, target-flags(aarch64-pageoff) @g2 ; CHECK-NEXT: Adding MCLOH_AdrpLdr: - ; CHECK-NEXT: %x4 = ADRP target-flags(aarch64-page) @g2 - ; CHECK-NEXT: %x4 = LDRXui %x4, target-flags(aarch64-pageoff) @g2 - %x4 = ADRP target-flags(aarch64-page) @g2 - %x4 = LDRXui %x4, target-flags(aarch64-pageoff) @g2 - %x5 = ADRP target-flags(aarch64-page) @g2 - %s6 = LDRSui %x5, target-flags(aarch64-pageoff) @g2 + ; CHECK-NEXT: $x4 = ADRP target-flags(aarch64-page) @g2 + ; CHECK-NEXT: $x4 = LDRXui $x4, target-flags(aarch64-pageoff) @g2 + $x4 = ADRP target-flags(aarch64-page) @g2 + $x4 = LDRXui $x4, target-flags(aarch64-pageoff) @g2 + $x5 = ADRP target-flags(aarch64-page) @g2 + $s6 = LDRSui $x5, target-flags(aarch64-pageoff) @g2 bb.6: ; CHECK-NEXT: Adding MCLOH_AdrpLdrGot: - ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2 - ; CHECK-NEXT: %x6 = LDRXui %x5, target-flags(aarch64-pageoff, aarch64-got) @g2 + ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2 + ; CHECK-NEXT: $x6 = LDRXui $x5, target-flags(aarch64-pageoff, aarch64-got) @g2 ; CHECK-NEXT: Adding MCLOH_AdrpLdrGot: - ; CHECK-NEXT: %x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2 - ; CHECK-NEXT: %x4 = LDRXui %x4, target-flags(aarch64-pageoff, aarch64-got) @g2 - %x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2 - %x4 = LDRXui %x4, target-flags(aarch64-pageoff, aarch64-got) @g2 - %x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2 - %x6 = LDRXui %x5, target-flags(aarch64-pageoff, aarch64-got) @g2 + ; CHECK-NEXT: $x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2 + ; CHECK-NEXT: $x4 = LDRXui $x4, target-flags(aarch64-pageoff, aarch64-got) @g2 + $x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2 + $x4 = LDRXui $x4, target-flags(aarch64-pageoff, aarch64-got) @g2 + $x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2 + $x6 = LDRXui $x5, target-flags(aarch64-pageoff, aarch64-got) @g2 bb.7: ; CHECK-NOT: Adding MCLOH_AdrpLdrGot: ; Loading a float value from a GOT table makes no sense so this should not ; produce an LOH. 
- %x11 = ADRP target-flags(aarch64-page, aarch64-got) @g5 - %s11 = LDRSui %x11, target-flags(aarch64-pageoff, aarch64-got) @g5 + $x11 = ADRP target-flags(aarch64-page, aarch64-got) @g5 + $s11 = LDRSui $x11, target-flags(aarch64-pageoff, aarch64-got) @g5 bb.8: ; CHECK-NEXT: Adding MCLOH_AdrpAddLdr: - ; CHECK-NEXT: %x7 = ADRP target-flags(aarch64-page) @g3 - ; CHECK-NEXT: %x8 = ADDXri %x7, target-flags(aarch64-pageoff) @g3 - ; CHECK-NEXT: %d1 = LDRDui %x8, 8 - %x7 = ADRP target-flags(aarch64-page) @g3 - %x8 = ADDXri %x7, target-flags(aarch64-pageoff) @g3, 0 - %d1 = LDRDui %x8, 8 + ; CHECK-NEXT: $x7 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x8 = ADDXri $x7, target-flags(aarch64-pageoff) @g3 + ; CHECK-NEXT: $d1 = LDRDui $x8, 8 + $x7 = ADRP target-flags(aarch64-page) @g3 + $x8 = ADDXri $x7, target-flags(aarch64-pageoff) @g3, 0 + $d1 = LDRDui $x8, 8 bb.9: ; CHECK-NEXT: Adding MCLOH_AdrpAdd: - ; CHECK-NEXT: %x3 = ADRP target-flags(aarch64-page) @g3 - ; CHECK-NEXT: %x3 = ADDXri %x3, target-flags(aarch64-pageoff) @g3 + ; CHECK-NEXT: $x3 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x3 = ADDXri $x3, target-flags(aarch64-pageoff) @g3 ; CHECK-NEXT: Adding MCLOH_AdrpAdd: - ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page) @g3 - ; CHECK-NEXT: %x2 = ADDXri %x5, target-flags(aarch64-pageoff) @g3 + ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x2 = ADDXri $x5, target-flags(aarch64-pageoff) @g3 ; CHECK-NEXT: Adding MCLOH_AdrpAddStr: - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g3 - ; CHECK-NEXT: %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g3 - ; CHECK-NEXT: STRXui %xzr, %x1, 16 - %x1 = ADRP target-flags(aarch64-page) @g3 - %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g3, 0 - STRXui %xzr, %x1, 16 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g3 + ; CHECK-NEXT: STRXui $xzr, $x1, 16 + $x1 = ADRP target-flags(aarch64-page) @g3 + $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g3, 0 + STRXui $xzr, $x1, 16 ; This sequence should just produce an AdrpAdd (not AdrpAddStr) - %x5 = ADRP target-flags(aarch64-page) @g3 - %x2 = ADDXri %x5, target-flags(aarch64-pageoff) @g3, 0 - STRXui %x2, undef %x11, 16 + $x5 = ADRP target-flags(aarch64-page) @g3 + $x2 = ADDXri $x5, target-flags(aarch64-pageoff) @g3, 0 + STRXui $x2, undef $x11, 16 ; This sequence should just produce an AdrpAdd (not AdrpAddStr) - %x3 = ADRP target-flags(aarch64-page) @g3 - %x3 = ADDXri %x3, target-flags(aarch64-pageoff) @g3, 0 - STRXui %x3, %x3, 16 + $x3 = ADRP target-flags(aarch64-page) @g3 + $x3 = ADDXri $x3, target-flags(aarch64-pageoff) @g3, 0 + STRXui $x3, $x3, 16 bb.10: ; CHECK-NEXT: Adding MCLOH_AdrpLdr: - ; CHECK-NEXT: %x2 = ADRP target-flags(aarch64-page) @g3 - ; CHECK-NEXT: %x2 = LDRXui %x2, target-flags(aarch64-pageoff) @g3 + ; CHECK-NEXT: $x2 = ADRP target-flags(aarch64-page) @g3 + ; CHECK-NEXT: $x2 = LDRXui $x2, target-flags(aarch64-pageoff) @g3 ; CHECK-NEXT: Adding MCLOH_AdrpLdrGotLdr: - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 - ; CHECK-NEXT: %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4 - ; CHECK-NEXT: %x1 = LDRXui %x1, 24 - %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 - %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4 - %x1 = LDRXui %x1, 24 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 + ; CHECK-NEXT: $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4 + ; CHECK-NEXT: $x1 = LDRXui 
$x1, 24 + $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 + $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4 + $x1 = LDRXui $x1, 24 ; Should just produce a MCLOH_AdrpLdr (not MCLOH_AdrpLdrGotLdr) - %x2 = ADRP target-flags(aarch64-page) @g3 - %x2 = LDRXui %x2, target-flags(aarch64-pageoff) @g3 - %x2 = LDRXui %x2, 24 + $x2 = ADRP target-flags(aarch64-page) @g3 + $x2 = LDRXui $x2, target-flags(aarch64-pageoff) @g3 + $x2 = LDRXui $x2, 24 bb.11: ; CHECK-NEXT: Adding MCLOH_AdrpLdr - ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page) @g1 - ; CHECK-NEXT: %x5 = LDRXui %x5, target-flags(aarch64-pageoff) @g1 + ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page) @g1 + ; CHECK-NEXT: $x5 = LDRXui $x5, target-flags(aarch64-pageoff) @g1 ; CHECK-NEXT: Adding MCLOH_AdrpLdrGotStr: - ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 - ; CHECK-NEXT: %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4 - ; CHECK-NEXT: STRXui %xzr, %x1, 32 - %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 - %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4 - STRXui %xzr, %x1, 32 + ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 + ; CHECK-NEXT: $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4 + ; CHECK-NEXT: STRXui $xzr, $x1, 32 + $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4 + $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4 + STRXui $xzr, $x1, 32 ; Should just produce a MCLOH_AdrpLdr (not MCLOH_AdrpLdrGotStr) - %x5 = ADRP target-flags(aarch64-page) @g1 - %x5 = LDRXui %x5, target-flags(aarch64-pageoff) @g1 - STRXui undef %x11, %x5, 32 + $x5 = ADRP target-flags(aarch64-page) @g1 + $x5 = LDRXui $x5, target-flags(aarch64-pageoff) @g1 + STRXui undef $x11, $x5, 32 bb.12: ; CHECK-NOT: MCLOH_AdrpAdrp ; CHECK: Adding MCLOH_AdrpAddLdr - ; %x9 = ADRP @g4 - ; %x9 = ADDXri %x9, @g4 - ; %x5 = LDRXui %x9, 0 - %x9 = ADRP target-flags(aarch64-page, aarch64-got) @g4 - %x9 = ADDXri %x9, target-flags(aarch64-pageoff, aarch64-got) @g4, 0 - %x5 = LDRXui %x9, 0 - %x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5 + ; $x9 = ADRP @g4 + ; $x9 = ADDXri $x9, @g4 + ; $x5 = LDRXui $x9, 0 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @g4 + $x9 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @g4, 0 + $x5 = LDRXui $x9, 0 + $x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5 bb.13: ; Cannot produce a LOH for multiple users ; CHECK-NOT: MCLOH_AdrpAdd - %x10 = ADRP target-flags(aarch64-page) @g0 - %x11 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0 + $x10 = ADRP target-flags(aarch64-page) @g0 + $x11 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 B %bb.14 bb.14: - liveins: %x10 - %x12 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0 + liveins: $x10 + $x12 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 ... 
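# Editor's note: a minimal sketch, not part of the test, of what an
# MCLOH_AdrpAddLdr hint conveys, assuming the usual Mach-O linker
# optimization-hint semantics. Given the hinted three-instruction chain
#   $x7 = ADRP target-flags(aarch64-page) @g3
#   $x8 = ADDXri $x7, target-flags(aarch64-pageoff) @g3, 0
#   $d1 = LDRDui $x8, 8
# the linker may relax the address computation into a shorter PC-relative
# sequence when @g3 ends up close enough. That is only sound when the ADRP
# result has a single user and is not clobbered in between, which is exactly
# what the multiple-user and clobber cases above refuse to hint.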
Index: test/CodeGen/AArch64/machine-combiner.mir =================================================================== --- test/CodeGen/AArch64/machine-combiner.mir +++ test/CodeGen/AArch64/machine-combiner.mir @@ -22,27 +22,27 @@ bb.0: successors: %bb.1, %bb.2 - %3 = COPY %w2 - %2 = COPY %w1 - %1 = COPY %w0 - %0 = COPY %d0 - %4 = SUBSWrr %1, %2, implicit-def %nzcv - Bcc 13, %bb.2, implicit %nzcv + %3 = COPY $w2 + %2 = COPY $w1 + %1 = COPY $w0 + %0 = COPY $d0 + %4 = SUBSWrr %1, %2, implicit-def $nzcv + Bcc 13, %bb.2, implicit $nzcv B %bb.1 bb.1: ; CHECK: MADDWrrr %1, %2, %3 - %5 = MADDWrrr %1, %2, %wzr + %5 = MADDWrrr %1, %2, $wzr %6 = ADDWrr %3, killed %5 %7 = SCVTFUWDri killed %6 ; CHECK: FMADDDrrr %7, %7, %0 %8 = FMULDrr %7, %7 %9 = FADDDrr %0, killed %8 - %d0 = COPY %9 - RET_ReallyLR implicit %d0 + $d0 = COPY %9 + RET_ReallyLR implicit $d0 bb.2: - %d0 = COPY %0 - RET_ReallyLR implicit %d0 + $d0 = COPY %0 + RET_ReallyLR implicit $d0 ... Index: test/CodeGen/AArch64/machine-copy-remove.mir =================================================================== --- test/CodeGen/AArch64/machine-copy-remove.mir +++ test/CodeGen/AArch64/machine-copy-remove.mir @@ -2,285 +2,285 @@ --- # Check that bb.0 COPY is seen through to allow the bb.1 COPY of XZR to be removed. # CHECK-LABEL: name: test1 -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test1 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 - %x0 = COPY %x1 - CBNZX %x1, %bb.2 + $x0 = COPY $x1 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Similar to test1, but with reversed COPY. # CHECK-LABEL: name: test2 -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test2 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 - %x1 = COPY %x0 - CBNZX %x1, %bb.2 + $x1 = COPY $x0 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Similar to test1, but with a clobber that prevents removal of the XZR COPY. # CHECK-LABEL: name: test3 -# CHECK: COPY %xzr +# CHECK: COPY $xzr name: test3 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x0 = COPY %x1 - %x1 = LDRXui %x1, 0 - CBNZX %x1, %bb.2 + $x0 = COPY $x1 + $x1 = LDRXui $x1, 0 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Similar to test2, but with a clobber that prevents removal of the XZR COPY. # CHECK-LABEL: name: test4 -# CHECK: COPY %xzr +# CHECK: COPY $xzr name: test4 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x1 = COPY %x0 - %x1 = LDRXui %x1, 0 - CBNZX %x1, %bb.2 + $x1 = COPY $x0 + $x1 = LDRXui $x1, 0 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Similar to test2, but with a clobber that prevents removal of the XZR COPY. 
# CHECK-LABEL: name: test5 -# CHECK: COPY %xzr +# CHECK: COPY $xzr name: test5 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x1 = COPY %x0 - %x0 = LDRXui %x1, 0 - CBNZX %x1, %bb.2 + $x1 = COPY $x0 + $x0 = LDRXui $x1, 0 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Similar to test1, but with two levels of COPYs. # CHECK-LABEL: name: test6 -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test6 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x2 = COPY %x0 - %x1 = COPY %x2 - CBNZX %x1, %bb.2 + $x2 = COPY $x0 + $x1 = COPY $x2 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Similar to test1, but with two levels of COPYs and a clobber preventing COPY of XZR removal. # CHECK-LABEL: name: test7 -# CHECK: COPY %xzr +# CHECK: COPY $xzr name: test7 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x2 = COPY %x0 - %x0 = LDRXui %x1, 0 - %x1 = COPY %x2 - CBNZX %x1, %bb.2 + $x2 = COPY $x0 + $x0 = LDRXui $x1, 0 + $x1 = COPY $x2 + CBNZX $x1, %bb.2 bb.1: - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Check that the TargetRegs vector clobber update loop in # AArch64RedundantCopyElimination::optimizeCopy works correctly. # CHECK-LABEL: name: test8 -# CHECK: x0 = COPY %xzr -# CHECK: x1 = COPY %xzr +# CHECK: x0 = COPY $xzr +# CHECK: x1 = COPY $xzr name: test8 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 - %x1 = COPY %x0 - CBNZX %x1, %bb.2 + $x1 = COPY $x0 + CBNZX $x1, %bb.2 bb.1: - liveins: %x0, %x2 + liveins: $x0, $x2 - %x0, %x1 = LDPXi %x2, 0 - %x0 = COPY %xzr - %x1 = COPY %xzr + $x0, $x1 = LDPXi $x2, 0 + $x0 = COPY $xzr + $x1 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Check that copy isn't removed from a block with multiple predecessors. # CHECK-LABEL: name: test9 -# CHECK: x0 = COPY %xzr +# CHECK: x0 = COPY $xzr # CHECK-NEXT: B %bb.3 name: test9 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 - CBNZX %x0, %bb.2 + CBNZX $x0, %bb.2 bb.1: - liveins: %x0, %x2 + liveins: $x0, $x2 - %x0 = COPY %xzr + $x0 = COPY $xzr B %bb.3 bb.2: - liveins: %x1 + liveins: $x1 - %x0 = LDRXui %x1, 0 + $x0 = LDRXui $x1, 0 - CBNZX %x1, %bb.1 + CBNZX $x1, %bb.1 bb.3: - liveins: %x0 + liveins: $x0 - RET_ReallyLR implicit %x0 + RET_ReallyLR implicit $x0 ... # Eliminate redundant MOVi32imm 7 in bb.1 # Note: 32-bit compare/32-bit move imm # Kill marker should be removed from compare. 
# CHECK-LABEL: name: test10 -# CHECK: SUBSWri %w0, 7, 0, implicit-def %nzcv +# CHECK: SUBSWri $w0, 7, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK-NOT: MOVi32imm name: test10 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 7 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm 7 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -289,24 +289,24 @@ # Note: 64-bit compare/32-bit move imm w/implicit def # Kill marker should be removed from compare. # CHECK-LABEL: name: test11 -# CHECK: SUBSXri %x0, 7, 0, implicit-def %nzcv +# CHECK: SUBSXri $x0, 7, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK-NOT: MOVi32imm name: test11 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1 + liveins: $x0, $x1 - dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $xzr = SUBSXri killed $x0, 7, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 7, implicit-def %x0 - STRXui killed %x0, killed %x1, 0 + $w0 = MOVi32imm 7, implicit-def $x0 + STRXui killed $x0, killed $x1, 0 bb.2: RET_ReallyLR @@ -315,24 +315,24 @@ # Note: 64-bit compare/32-bit move imm # Kill marker should be removed from compare. # CHECK-LABEL: name: test12 -# CHECK: SUBSXri %x0, 7, 0, implicit-def %nzcv +# CHECK: SUBSXri $x0, 7, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK-NOT: MOVi32imm name: test12 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1 + liveins: $x0, $x1 - dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $xzr = SUBSXri killed $x0, 7, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 7 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm 7 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -341,24 +341,24 @@ # Note: 32-bit compare/32-bit move imm w/implicit def # Kill marker should remain on compare. 
# CHECK-LABEL: name: test13 -# CHECK: SUBSWri killed %w0, 7, 0, implicit-def %nzcv +# CHECK: SUBSWri killed $w0, 7, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK: MOVi32imm name: test13 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 7, implicit-def %x0 - STRXui killed %x0, killed %x1, 0 + $w0 = MOVi32imm 7, implicit-def $x0 + STRXui killed $x0, killed $x1, 0 bb.2: RET_ReallyLR @@ -371,19 +371,19 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv - %w0 = LDRWui %x1, 0 - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv + $w0 = LDRWui $x1, 0 + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 7 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm 7 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -396,19 +396,19 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1, %x2 + liveins: $x1, $x2 - %w0 = LDRWui %x1, 0 - STRWui killed %w0, killed %x2, 0 - %w0 = MOVi32imm 7 - STRWui killed %w0, killed %x1, 0 + $w0 = LDRWui $x1, 0 + STRWui killed $w0, killed $x2, 0 + $w0 = MOVi32imm 7 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -421,18 +421,18 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %wzr = SUBSWri %w0, 7, 0, implicit-def %nzcv - %w2 = COPY %w0 - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = SUBSWri $w0, 7, 0, implicit-def $nzcv + $w2 = COPY $w0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w2 = MOVi32imm 7 - STRWui killed %w2, killed %x1, 0 + $w2 = MOVi32imm 7 + STRWui killed $w2, killed $x1, 0 bb.2: RET_ReallyLR @@ -445,17 +445,17 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %w0 = SUBSWri killed %w0, 7, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $w0 = SUBSWri killed $w0, 7, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 7 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm 7 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -470,16 +470,16 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1 + liveins: $x0, $x1 - CBNZX killed %x0, %bb.2 + CBNZX killed $x0, %bb.2 B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %x0 = MOVi64imm 4252017623040 - STRXui killed %x0, killed %x1, 0 + $x0 = MOVi64imm 4252017623040 + STRXui killed $x0, killed $x1, 0 bb.2: RET_ReallyLR @@ -488,24 +488,24 @@ # Note: 32-bit compare/32-bit move imm # Kill marker should be removed from compare. 
# CHECK-LABEL: name: test19 -# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv +# CHECK: ADDSWri $w0, 1, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK-NOT: MOVi32imm name: test19 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = ADDSWri killed $w0, 1, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm -1 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm -1 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -514,24 +514,24 @@ # Note: 64-bit compare/64-bit move imm # Kill marker should be removed from compare. # CHECK-LABEL: name: test20 -# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv +# CHECK: ADDSXri $x0, 1, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK-NOT: MOVi64imm name: test20 tracksRegLiveness: true body: | bb.0: - liveins: %x0, %x1 + liveins: $x0, $x1 - dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $xzr = ADDSXri killed $x0, 1, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %x0 = MOVi64imm -1 - STRXui killed %x0, killed %x1, 0 + $x0 = MOVi64imm -1 + STRXui killed $x0, killed $x1, 0 bb.2: RET_ReallyLR @@ -540,24 +540,24 @@ # Note: 64-bit compare/32-bit move imm # Kill marker should be removed from compare. # CHECK-LABEL: name: test21 -# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv +# CHECK: ADDSXri $x0, 1, 0, implicit-def $nzcv # CHECK: bb.1: # CHECK-NOT: MOVi32imm name: test21 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1 + liveins: $x0, $x1 - dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $xzr = ADDSXri killed $x0, 1, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm -1 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm -1 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR @@ -571,17 +571,17 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = ADDSWri killed $w0, 1, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %x0 = MOVi64imm -1 - STRXui killed %x0, killed %x1, 0 + $x0 = MOVi64imm -1 + STRXui killed $x0, killed $x1, 0 bb.2: RET_ReallyLR @@ -594,17 +594,17 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1 + liveins: $w0, $x1 - dead %wzr = SUBSWri killed %w0, 1, 12, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + dead $wzr = SUBSWri killed $w0, 1, 12, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x1 + liveins: $x1 - %w0 = MOVi32imm 4096 - STRWui killed %w0, killed %x1, 0 + $w0 = MOVi32imm 4096 + STRWui killed $w0, killed $x1, 0 bb.2: RET_ReallyLR Index: test/CodeGen/AArch64/machine-dead-copy.mir =================================================================== --- test/CodeGen/AArch64/machine-dead-copy.mir +++ test/CodeGen/AArch64/machine-dead-copy.mir @@ -12,29 +12,29 @@ # The first copy is a dead copy which is not used.
# CHECK-LABEL: name: copyprop1 # CHECK: bb.0: -# CHECK-NOT: %w20 = COPY +# CHECK-NOT: $w20 = COPY name: copyprop1 body: | bb.0: - liveins: %w0, %w1 - %w20 = COPY %w1 - BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0 - RET_ReallyLR implicit %w0 + liveins: $w0, $w1 + $w20 = COPY $w1 + BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0 + RET_ReallyLR implicit $w0 ... --- # The first copy is not a dead copy, since it is used by the second copy after the # call. # CHECK-LABEL: name: copyprop2 # CHECK: bb.0: -# CHECK: %w20 = COPY +# CHECK: $w20 = COPY name: copyprop2 body: | bb.0: - liveins: %w0, %w1 - %w20 = COPY %w1 - BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0 - %w0 = COPY %w20 - RET_ReallyLR implicit %w0 + liveins: $w0, $w1 + $w20 = COPY $w1 + BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0 + $w0 = COPY $w20 + RET_ReallyLR implicit $w0 ... --- # Both the first and second copy are dead copies which are not used. @@ -44,11 +44,11 @@ name: copyprop3 body: | bb.0: - liveins: %w0, %w1 - %w20 = COPY %w1 - BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0 - %w20 = COPY %w0 - RET_ReallyLR implicit %w0 + liveins: $w0, $w1 + $w20 = COPY $w1 + BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0 + $w20 = COPY $w0 + RET_ReallyLR implicit $w0 ... # The second copy is removed as a NOP copy, after which the first copy becomes # dead and should be removed as well. @@ -58,10 +58,10 @@ name: copyprop4 body: | bb.0: - liveins: %w0, %w1 - %w20 = COPY %w0 - %w0 = COPY %w20 - BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0 - RET_ReallyLR implicit %w0 + liveins: $w0, $w1 + $w20 = COPY $w0 + $w0 = COPY $w20 + BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0 + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/AArch64/machine-outliner.mir =================================================================== --- test/CodeGen/AArch64/machine-outliner.mir +++ test/CodeGen/AArch64/machine-outliner.mir @@ -28,126 +28,126 @@ # CHECK-LABEL: name: main # CHECK: BL @OUTLINED_FUNCTION_[[F0:[0-9]+]] -# CHECK-NEXT: early-clobber %sp, %lr = LDRXpost %sp, 16 -# CHECK-NEXT: %x16 = ADDXri %sp, 48, 0 -# CHECK-NEXT: STRHHroW %w16, %x9, %w30, 1, 1 -# CHECK-NEXT: %lr = ORRXri %xzr, 1 +# CHECK-NEXT: early-clobber $sp, $lr = LDRXpost $sp, 16 +# CHECK-NEXT: $x16 = ADDXri $sp, 48, 0 +# CHECK-NEXT: STRHHroW $w16, $x9, $w30, 1, 1 +# CHECK-NEXT: $lr = ORRXri $xzr, 1 # CHECK: BL @OUTLINED_FUNCTION_[[F0]] -# CHECK-NEXT: early-clobber %sp, %lr = LDRXpost %sp, 16 -# CHECK-NEXT: %x16 = ADDXri %sp, 48, 0 -# CHECK-NEXT: STRHHroW %w16, %x9, %w30, 1, 1 -# CHECK-NEXT: %lr = ORRXri %xzr, 1 +# CHECK-NEXT: early-clobber $sp, $lr = LDRXpost $sp, 16 +# CHECK-NEXT: $x16 = ADDXri $sp, 48, 0 +# CHECK-NEXT: STRHHroW $w16, $x9, $w30, 1, 1 +# CHECK-NEXT: $lr = ORRXri $xzr, 1 # CHECK: BL @OUTLINED_FUNCTION_[[F0]] -# CHECK-NEXT: early-clobber %sp, %lr = LDRXpost %sp, 16 -# CHECK-NEXT: %x16 = ADDXri %sp, 48, 0 -# CHECK-NEXT: STRHHroW %w16, %x9, %w30, 1, 1 -# CHECK-NEXT: %lr = ORRXri %xzr, 1 +# CHECK-NEXT: early-clobber $sp, $lr = LDRXpost $sp, 16 +# CHECK-NEXT: $x16 = ADDXri $sp, 48, 0 +# CHECK-NEXT: STRHHroW $w16, $x9, $w30, 1, 1 +# CHECK-NEXT: $lr = ORRXri $xzr, 1 name: main tracksRegLiveness: true body: | bb.0: - %sp = frame-setup SUBXri %sp, 16, 0 - renamable %x9 = ADRP target-flags(aarch64-page) @bar - %x9 = ORRXri %xzr, 1 - %w16 = ORRWri %wzr, 1 - %w30 = ORRWri %wzr, 1 - %lr = ORRXri %xzr, 1 + $sp = frame-setup SUBXri $sp, 16, 0 + renamable $x9 = ADRP target-flags(aarch64-page) @bar +
$x9 = ORRXri $xzr, 1 + $w16 = ORRWri $wzr, 1 + $w30 = ORRWri $wzr, 1 + $lr = ORRXri $xzr, 1 - %x20, %x19 = LDPXi %sp, 10 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - renamable %x9 = ADRP target-flags(aarch64-page) @x - %x16 = ADDXri %sp, 48, 0; - STRHHroW %w16, %x9, %w30, 1, 1 - %lr = ORRXri %xzr, 1 - %w3 = ORRWri %wzr, 1993 + $x20, $x19 = LDPXi $sp, 10 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + renamable $x9 = ADRP target-flags(aarch64-page) @x + $x16 = ADDXri $sp, 48, 0; + STRHHroW $w16, $x9, $w30, 1, 1 + $lr = ORRXri $xzr, 1 + $w3 = ORRWri $wzr, 1993 - %x20, %x19 = LDPXi %sp, 10 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - renamable %x9 = ADRP target-flags(aarch64-page) @x - %x16 = ADDXri %sp, 48, 0; - STRHHroW %w16, %x9, %w30, 1, 1 - %lr = ORRXri %xzr, 1 + $x20, $x19 = LDPXi $sp, 10 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + renamable $x9 = ADRP target-flags(aarch64-page) @x + $x16 = ADDXri $sp, 48, 0; + STRHHroW $w16, $x9, $w30, 1, 1 + $lr = ORRXri $xzr, 1 - %w4 = ORRWri %wzr, 1994 + $w4 = ORRWri $wzr, 1994 - %x20, %x19 = LDPXi %sp, 10 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - %w16 = ORRWri %wzr, 1 - renamable %x9 = ADRP target-flags(aarch64-page) @x - %x16 = ADDXri %sp, 48, 0; - STRHHroW %w16, %x9, %w30, 1, 1 - %lr = ORRXri %xzr, 1 + $x20, $x19 = LDPXi $sp, 10 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + $w16 = ORRWri $wzr, 1 + renamable $x9 = ADRP target-flags(aarch64-page) @x + $x16 = ADDXri $sp, 48, 0; + STRHHroW $w16, $x9, $w30, 1, 1 + $lr = ORRXri $xzr, 1 - %sp = ADDXri %sp, 16, 0 - RET undef %lr + $sp = ADDXri $sp, 16, 0 + RET undef $lr ... --- # This test ensures that we can avoid saving LR when it's available. 
# CHECK-LABEL: bb.1: -# CHECK-NOT: BL @baz, implicit-def dead %lr, implicit %sp -# CHECK: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]], implicit-def %lr, implicit %sp -# CHECK-NEXT: %w17 = ORRWri %wzr, 2 -# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1]], implicit-def %lr, implicit %sp -# CHECK-NEXT: %w8 = ORRWri %wzr, 0 +# CHECK-NOT: BL @baz, implicit-def dead $lr, implicit $sp +# CHECK: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]], implicit-def $lr, implicit $sp +# CHECK-NEXT: $w17 = ORRWri $wzr, 2 +# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1]], implicit-def $lr, implicit $sp +# CHECK-NEXT: $w8 = ORRWri $wzr, 0 name: bar tracksRegLiveness: true body: | bb.0: - liveins: %w0, %lr, %w8 - %sp = frame-setup SUBXri %sp, 32, 0 - %fp = frame-setup ADDXri %sp, 16, 0 + liveins: $w0, $lr, $w8 + $sp = frame-setup SUBXri $sp, 32, 0 + $fp = frame-setup ADDXri $sp, 16, 0 bb.1: - BL @baz, implicit-def dead %lr, implicit %sp - %w17 = ORRWri %wzr, 1 - %w17 = ORRWri %wzr, 1 - %w17 = ORRWri %wzr, 1 - %w17 = ORRWri %wzr, 1 - BL @baz, implicit-def dead %lr, implicit %sp - %w17 = ORRWri %wzr, 2 - BL @baz, implicit-def dead %lr, implicit %sp - %w17 = ORRWri %wzr, 1 - %w17 = ORRWri %wzr, 1 - %w17 = ORRWri %wzr, 1 - %w17 = ORRWri %wzr, 1 - BL @baz, implicit-def dead %lr, implicit %sp - %w8 = ORRWri %wzr, 0 + BL @baz, implicit-def dead $lr, implicit $sp + $w17 = ORRWri $wzr, 1 + $w17 = ORRWri $wzr, 1 + $w17 = ORRWri $wzr, 1 + $w17 = ORRWri $wzr, 1 + BL @baz, implicit-def dead $lr, implicit $sp + $w17 = ORRWri $wzr, 2 + BL @baz, implicit-def dead $lr, implicit $sp + $w17 = ORRWri $wzr, 1 + $w17 = ORRWri $wzr, 1 + $w17 = ORRWri $wzr, 1 + $w17 = ORRWri $wzr, 1 + BL @baz, implicit-def dead $lr, implicit $sp + $w8 = ORRWri $wzr, 0 bb.2: - %w15 = ORRWri %wzr, 1 - %w15 = ORRWri %wzr, 1 - %w15 = ORRWri %wzr, 1 - %w15 = ORRWri %wzr, 1 - %x15 = ADDXri %sp, 48, 0; - %w9 = ORRWri %wzr, 0 - %w15 = ORRWri %wzr, 1 - %w15 = ORRWri %wzr, 1 - %w15 = ORRWri %wzr, 1 - %w15 = ORRWri %wzr, 1 - %x15 = ADDXri %sp, 48, 0; - %w8 = ORRWri %wzr, 0 + $w15 = ORRWri $wzr, 1 + $w15 = ORRWri $wzr, 1 + $w15 = ORRWri $wzr, 1 + $w15 = ORRWri $wzr, 1 + $x15 = ADDXri $sp, 48, 0; + $w9 = ORRWri $wzr, 0 + $w15 = ORRWri $wzr, 1 + $w15 = ORRWri $wzr, 1 + $w15 = ORRWri $wzr, 1 + $w15 = ORRWri $wzr, 1 + $x15 = ADDXri $sp, 48, 0; + $w8 = ORRWri $wzr, 0 bb.3: - %fp, %lr = LDPXi %sp, 2 - %sp = ADDXri %sp, 32, 0 - RET undef %lr + $fp, $lr = LDPXi $sp, 2 + $sp = ADDXri $sp, 32, 0 + RET undef $lr ... 
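# Editor's note: a schematic sketch, not taken from the test output, of the
# two call sequences the outliner can emit, assuming its usual LR handling
# (the save instruction shown here is illustrative). If LR is live across
# the outlined range, the call must preserve it:
#   early-clobber $sp = STRXpre $lr, $sp, -16
#   BL @OUTLINED_FUNCTION_N
#   early-clobber $sp, $lr = LDRXpost $sp, 16
# If LR is already dead at the call site (as in bb.1 above, where every BL
# clobbers it), the save/restore pair can be dropped, which is what the
# CHECK-NOT/CHECK lines for this function verify.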
--- @@ -155,8 +155,8 @@ tracksRegLiveness: true body: | bb.0: - liveins: %w0, %lr, %w8 - RET undef %lr + liveins: $w0, $lr, $w8 + RET undef $lr # CHECK-LABEL: name: OUTLINED_FUNCTION_{{[0-9]}} # CHECK-LABEL: name: OUTLINED_FUNCTION_{{[1-9]}} Index: test/CodeGen/AArch64/machine-scheduler.mir =================================================================== --- test/CodeGen/AArch64/machine-scheduler.mir +++ test/CodeGen/AArch64/machine-scheduler.mir @@ -18,18 +18,18 @@ --- # CHECK-LABEL: name: load_imp-def # CHECK: bb.0.entry: -# CHECK: LDRWui %x0, 0 -# CHECK: LDRWui %x0, 1 -# CHECK: STRWui %w1, %x0, 2 +# CHECK: LDRWui $x0, 0 +# CHECK: LDRWui $x0, 1 +# CHECK: STRWui $w1, $x0, 2 name: load_imp-def tracksRegLiveness: true body: | bb.0.entry: - liveins: %w1, %x0 - %w8 = LDRWui %x0, 1, implicit-def %x8 :: (load 4 from %ir.0) - STRWui killed %w1, %x0, 2 :: (store 4 into %ir.arrayidx1) - %w9 = LDRWui killed %x0, 0, implicit-def %x9 :: (load 4 from %ir.arrayidx19, align 8) - %x0 = ADDXrr killed %x9, killed %x8 - RET_ReallyLR implicit %x0 + liveins: $w1, $x0 + $w8 = LDRWui $x0, 1, implicit-def $x8 :: (load 4 from %ir.0) + STRWui killed $w1, $x0, 2 :: (store 4 into %ir.arrayidx1) + $w9 = LDRWui killed $x0, 0, implicit-def $x9 :: (load 4 from %ir.arrayidx19, align 8) + $x0 = ADDXrr killed $x9, killed $x8 + RET_ReallyLR implicit $x0 ... Index: test/CodeGen/AArch64/machine-sink-zr.mir =================================================================== --- test/CodeGen/AArch64/machine-sink-zr.mir +++ test/CodeGen/AArch64/machine-sink-zr.mir @@ -15,24 +15,24 @@ ; Check that the WZR copy is sunk into the loop preheader. ; CHECK-LABEL: name: sinkwzr ; CHECK-LABEL: bb.0: - ; CHECK-NOT: COPY %wzr + ; CHECK-NOT: COPY $wzr bb.0: - liveins: %w0 + liveins: $w0 - %0 = COPY %w0 - %1 = COPY %wzr + %0 = COPY $w0 + %1 = COPY $wzr CBZW %0, %bb.3 ; CHECK-LABEL: bb.1: - ; CHECK: COPY %wzr + ; CHECK: COPY $wzr bb.1: B %bb.2 bb.2: %2 = PHI %0, %bb.1, %4, %bb.2 - %w0 = COPY %1 - %3 = SUBSWri %2, 1, 0, implicit-def dead %nzcv + $w0 = COPY %1 + %3 = SUBSWri %2, 1, 0, implicit-def dead $nzcv %4 = COPY %3 CBZW %3, %bb.3 B %bb.2 Index: test/CodeGen/AArch64/machine-zero-copy-remove.mir =================================================================== --- test/CodeGen/AArch64/machine-zero-copy-remove.mir +++ test/CodeGen/AArch64/machine-zero-copy-remove.mir @@ -1,565 +1,565 @@ # RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-copyelim %s -verify-machineinstrs -o - | FileCheck %s --- # CHECK-LABEL: name: test1 -# CHECK: ANDSWri %w0, 1, implicit-def %nzcv +# CHECK: ANDSWri $w0, 1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test1 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - %w0 = ANDSWri %w0, 1, implicit-def %nzcv - STRWui killed %w0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ANDSWri $w0, 1, implicit-def $nzcv + STRWui killed $w0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %w0 = COPY %wzr - STRWui killed %w0, killed %x2, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x2, 0 bb.2: RET_ReallyLR ...
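# Editor's note: a sketch of the reasoning this pass relies on, inferred
# from the tests rather than from the implementation. In test1, taking the
# fall-through edge of
#   $w0 = ANDSWri $w0, 1, implicit-def $nzcv
#   Bcc 1, %bb.2, implicit killed $nzcv   ; cond code 1 = NE, so bb.1 is EQ
# means the ANDS result compared equal to zero, so $w0 is known to hold
# zero on entry to bb.1; the following $w0 = COPY $wzr is then redundant
# and can be erased, provided nothing redefines $w0 between the
# flag-setting def and the copy (the negative tests later in this file
# cover that case).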
# CHECK-LABEL: name: test2 -# CHECK: ANDSXri %x0, 1, implicit-def %nzcv +# CHECK: ANDSXri $x0, 1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test2 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x0 = ANDSXri %x0, 1, implicit-def %nzcv - STRXui killed %x0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = ANDSXri $x0, 1, implicit-def $nzcv + STRXui killed $x0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %x0 = COPY %xzr - STRXui killed %x0, killed %x2, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x2, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test3 -# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv +# CHECK: ADDSWri $w0, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test3 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - %w0 = ADDSWri %w0, 1, 0, implicit-def %nzcv - STRWui killed %w0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ADDSWri $w0, 1, 0, implicit-def $nzcv + STRWui killed $w0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %w0 = COPY %wzr - STRWui killed %w0, killed %x2, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x2, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test4 -# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv +# CHECK: ADDSXri $x0, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test4 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x0 = ADDSXri %x0, 1, 0, implicit-def %nzcv - STRXui killed %x0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = ADDSXri $x0, 1, 0, implicit-def $nzcv + STRXui killed $x0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %x0 = COPY %xzr - STRXui killed %x0, killed %x2, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x2, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test5 -# CHECK: SUBSWri %w0, 1, 0, implicit-def %nzcv +# CHECK: SUBSWri $w0, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test5 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - %w0 = SUBSWri %w0, 1, 0, implicit-def %nzcv - STRWui killed %w0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = SUBSWri $w0, 1, 0, implicit-def $nzcv + STRWui killed $w0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %w0 = COPY %wzr - STRWui killed %w0, killed %x2, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x2, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test6 -# CHECK: SUBSXri %x0, 1, 0, implicit-def %nzcv +# CHECK: SUBSXri $x0, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test6 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x0 = SUBSXri %x0, 1, 0, implicit-def %nzcv - STRXui killed %x0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = SUBSXri $x0, 1, 0, implicit-def $nzcv + STRXui killed $x0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %x0 = COPY %xzr - STRXui killed %x0, killed %x2, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x2, 0 bb.2: RET_ReallyLR ... 
# CHECK-LABEL: name: test7 -# CHECK: ADDSWrr %w0, %w1, implicit-def %nzcv +# CHECK: ADDSWrr $w0, $w1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test7 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = ADDSWrr %w0, %w1, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ADDSWrr $w0, $w1, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test8 -# CHECK: ADDSXrr %x0, %x1, implicit-def %nzcv +# CHECK: ADDSXrr $x0, $x1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test8 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 - %x0 = ADDSXrr %x0, %x1, implicit-def %nzcv - STRXui killed %x0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = ADDSXrr $x0, $x1, implicit-def $nzcv + STRXui killed $x0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %x0 = COPY %xzr - STRXui killed %x0, killed %x3, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test9 -# CHECK: ANDSWrr %w0, %w1, implicit-def %nzcv +# CHECK: ANDSWrr $w0, $w1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test9 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = ANDSWrr %w0, %w1, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ANDSWrr $w0, $w1, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test10 -# CHECK: ANDSXrr %x0, %x1, implicit-def %nzcv +# CHECK: ANDSXrr $x0, $x1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test10 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 - %x0 = ANDSXrr %x0, %x1, implicit-def %nzcv - STRXui killed %x0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = ANDSXrr $x0, $x1, implicit-def $nzcv + STRXui killed $x0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %x0 = COPY %xzr - STRXui killed %x0, killed %x3, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test11 -# CHECK: BICSWrr %w0, %w1, implicit-def %nzcv +# CHECK: BICSWrr $w0, $w1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test11 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = BICSWrr %w0, %w1, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = BICSWrr $w0, $w1, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... 
# CHECK-LABEL: name: test12 -# CHECK: BICSXrr %x0, %x1, implicit-def %nzcv +# CHECK: BICSXrr $x0, $x1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test12 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 - %x0 = BICSXrr %x0, %x1, implicit-def %nzcv - STRXui killed %x0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = BICSXrr $x0, $x1, implicit-def $nzcv + STRXui killed $x0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %x0 = COPY %xzr - STRXui killed %x0, killed %x3, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test13 -# CHECK: SUBSWrr %w0, %w1, implicit-def %nzcv +# CHECK: SUBSWrr $w0, $w1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test13 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = SUBSWrr %w0, %w1, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = SUBSWrr $w0, $w1, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test14 -# CHECK: SUBSXrr %x0, %x1, implicit-def %nzcv +# CHECK: SUBSXrr $x0, $x1, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test14 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 - %x0 = SUBSXrr %x0, %x1, implicit-def %nzcv - STRXui killed %x0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = SUBSXrr $x0, $x1, implicit-def $nzcv + STRXui killed $x0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %x0 = COPY %xzr - STRXui killed %x0, killed %x3, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test15 -# CHECK: ADDSWrs %w0, %w1, 0, implicit-def %nzcv +# CHECK: ADDSWrs $w0, $w1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test15 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = ADDSWrs %w0, %w1, 0, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ADDSWrs $w0, $w1, 0, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... 
# CHECK-LABEL: name: test16 -# CHECK: ADDSXrs %x0, %x1, 0, implicit-def %nzcv +# CHECK: ADDSXrs $x0, $x1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test16 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 - %x0 = ADDSXrs %x0, %x1, 0, implicit-def %nzcv - STRXui killed %x0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = ADDSXrs $x0, $x1, 0, implicit-def $nzcv + STRXui killed $x0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %x0 = COPY %xzr - STRXui killed %x0, killed %x3, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test17 -# CHECK: ANDSWrs %w0, %w1, 0, implicit-def %nzcv +# CHECK: ANDSWrs $w0, $w1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test17 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = ANDSWrs %w0, %w1, 0, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ANDSWrs $w0, $w1, 0, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test18 -# CHECK: ANDSXrs %x0, %x1, 0, implicit-def %nzcv +# CHECK: ANDSXrs $x0, $x1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %xzr +# CHECK-NOT: COPY $xzr name: test18 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2, %x3 + liveins: $x0, $x1, $x2, $x3 - %x0 = ANDSXrs %x0, %x1, 0, implicit-def %nzcv - STRXui killed %x0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = ANDSXrs $x0, $x1, 0, implicit-def $nzcv + STRXui killed $x0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %x0 = COPY %xzr - STRXui killed %x0, killed %x3, 0 + $x0 = COPY $xzr + STRXui killed $x0, killed $x3, 0 bb.2: RET_ReallyLR ... # CHECK-LABEL: name: test19 -# CHECK: BICSWrs %w0, %w1, 0, implicit-def %nzcv +# CHECK: BICSWrs $w0, $w1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: COPY %wzr +# CHECK-NOT: COPY $wzr name: test19 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %w1, %x2, %x3 + liveins: $w0, $w1, $x2, $x3 - %w0 = BICSWrs %w0, %w1, 0, implicit-def %nzcv - STRWui killed %w0, killed %x2, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = BICSWrs $w0, $w1, 0, implicit-def $nzcv + STRWui killed $w0, killed $x2, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x3 + liveins: $x3 - %w0 = COPY %wzr - STRWui killed %w0, killed %x3, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x3, 0 bb.2: RET_ReallyLR ... 
# Unicorn test - we can remove a redundant copy and a redundant mov # CHECK-LABEL: name: test20 -# CHECK: SUBSWri %w1, 1, 0, implicit-def %nzcv +# CHECK: SUBSWri $w1, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK-NOT: %w0 = COPY %wzr -# CHECK-NOT: %w1 = MOVi32imm 1 +# CHECK-NOT: $w0 = COPY $wzr +# CHECK-NOT: $w1 = MOVi32imm 1 name: test20 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w1, %x2 + liveins: $w1, $x2 - %w0 = SUBSWri %w1, 1, 0, implicit-def %nzcv - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = SUBSWri $w1, 1, 0, implicit-def $nzcv + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %w0 = COPY %wzr - %w1 = MOVi32imm 1 - STRWui killed %w0, %x2, 0 - STRWui killed %w1, killed %x2, 1 + $w0 = COPY $wzr + $w1 = MOVi32imm 1 + STRWui killed $w0, $x2, 0 + STRWui killed $w1, killed $x2, 1 bb.2: RET_ReallyLR ... -# Negative test - MOVi32imm clobbers %w0 +# Negative test - MOVi32imm clobbers $w0 # CHECK-LABEL: name: test21 -# CHECK: ANDSWri %w0, 1, implicit-def %nzcv +# CHECK: ANDSWri $w0, 1, implicit-def $nzcv # CHECK: bb.1: -# CHECK: %w0 = COPY %wzr +# CHECK: $w0 = COPY $wzr name: test21 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - %w0 = ANDSWri %w0, 1, implicit-def %nzcv - STRWui killed %w0, %x1, 0 - %w0 = MOVi32imm -1 - STRWui killed %w0, killed %x1, 1 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ANDSWri $w0, 1, implicit-def $nzcv + STRWui killed $w0, $x1, 0 + $w0 = MOVi32imm -1 + STRWui killed $w0, killed $x1, 1 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %w0 = COPY %wzr - STRWui killed %w0, killed %x2, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x2, 0 bb.2: RET_ReallyLR ... # Negative test - SUBSXri self-clobbers x0, so MOVi64imm can't be removed # CHECK-LABEL: name: test22 -# CHECK: SUBSXri %x0, 1, 0, implicit-def %nzcv +# CHECK: SUBSXri $x0, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK: %x0 = MOVi64imm 1 +# CHECK: $x0 = MOVi64imm 1 name: test22 tracksRegLiveness: true body: | bb.0.entry: - liveins: %x0, %x1, %x2 + liveins: $x0, $x1, $x2 - %x0 = SUBSXri %x0, 1, 0, implicit-def %nzcv - STRXui killed %x0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $x0 = SUBSXri $x0, 1, 0, implicit-def $nzcv + STRXui killed $x0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %x0 = MOVi64imm 1 - STRXui killed %x0, killed %x2, 0 + $x0 = MOVi64imm 1 + STRXui killed $x0, killed $x2, 0 bb.2: RET_ReallyLR ... 
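The negative tests pin down when the recorded fact must be dropped: test20 shows the pass can even remove a MOVi32imm whose immediate matches what the compare proved, but test21 and test22 show that any later redefinition of the tracked register (an explicit MOVi32imm, or SUBSXri writing over its own source) invalidates the fact, and test23 just below shows the rewrite is skipped once bb.1 has more than one predecessor. A small companion sketch of those two guards, under the same invented names as above:

// zero_fact_invalidation.cpp - companion sketch to the one above; the
// KnownFact record and both guard names are invented for illustration.
#include <cassert>
#include <string>
#include <vector>

struct KnownFact {
  std::string Reg;  // register the fact is about
  long Value;       // value it holds on the fall-through edge
};

struct Inst {
  std::string Def;
  bool SetsFlags;
};

// A fact recorded at the flags-setting instruction survives to the end of
// the block only if nothing redefines the register afterwards (test21 fails
// this with MOVi32imm, test22 because SUBSXri writes its own source).
bool factSurvives(const KnownFact &F, const std::vector<Inst> &After) {
  for (const Inst &I : After)
    if (I.Def == F.Reg)
      return false;
  return true;
}

// The rewrite is only sound if the fact holds on every path into the block,
// approximated here by requiring a single predecessor (test23 has two).
bool blockQualifies(unsigned NumPreds) { return NumPreds == 1; }

int main() {
  KnownFact F{"w0", 0};
  // test21: "$w0 = MOVi32imm -1" sits between the compare and the branch.
  assert(!factSurvives(F, {{"w0", false}}));
  assert(blockQualifies(1) && !blockQualifies(2));
}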
# Negative test - bb.1 has multiple preds # CHECK-LABEL: name: test23 -# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv +# CHECK: ADDSWri $w0, 1, 0, implicit-def $nzcv # CHECK: bb.1: -# CHECK: COPY %wzr +# CHECK: COPY $wzr name: test23 tracksRegLiveness: true body: | bb.0.entry: - liveins: %w0, %x1, %x2 + liveins: $w0, $x1, $x2 - %w0 = ADDSWri %w0, 1, 0, implicit-def %nzcv - STRWui killed %w0, killed %x1, 0 - Bcc 1, %bb.2, implicit killed %nzcv + $w0 = ADDSWri $w0, 1, 0, implicit-def $nzcv + STRWui killed $w0, killed $x1, 0 + Bcc 1, %bb.2, implicit killed $nzcv B %bb.1 bb.3: B %bb.1 bb.1: - liveins: %x2 + liveins: $x2 - %w0 = COPY %wzr - STRWui killed %w0, killed %x2, 0 + $w0 = COPY $wzr + STRWui killed $w0, killed $x2, 0 bb.2: RET_ReallyLR Index: test/CodeGen/AArch64/movimm-wzr.mir =================================================================== --- test/CodeGen/AArch64/movimm-wzr.mir +++ test/CodeGen/AArch64/movimm-wzr.mir @@ -32,11 +32,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - %wzr = MOVi32imm 42 - %xzr = MOVi64imm 42 - RET_ReallyLR implicit killed %w0 + $wzr = MOVi32imm 42 + $xzr = MOVi64imm 42 + RET_ReallyLR implicit killed $w0 ... # CHECK: bb.0 -# CHECK-NEXT: RET undef %lr +# CHECK-NEXT: RET undef $lr Index: test/CodeGen/AArch64/phi-dbg.ll =================================================================== --- test/CodeGen/AArch64/phi-dbg.ll +++ test/CodeGen/AArch64/phi-dbg.ll @@ -30,7 +30,7 @@ ; CHECK: ldr w[[REG:[0-9]+]], [sp, #8] ; CHECK-NEXT: .Ltmp call void @llvm.dbg.value(metadata i32 %.0, i64 0, metadata !15, metadata !13), !dbg !16 -; CHECK-NEXT: //DEBUG_VALUE: func:c <- %w[[REG]] +; CHECK-NEXT: //DEBUG_VALUE: func:c <- $w[[REG]] %5 = add nsw i32 %.0, %0, !dbg !22 call void @llvm.dbg.value(metadata i32 %5, i64 0, metadata !15, metadata !13), !dbg !16 ret i32 %5, !dbg !23 Index: test/CodeGen/AArch64/reg-scavenge-frame.mir =================================================================== --- test/CodeGen/AArch64/reg-scavenge-frame.mir +++ test/CodeGen/AArch64/reg-scavenge-frame.mir @@ -12,75 +12,75 @@ - { id: 0, type: spill-slot, offset: 0, size: 32, alignment: 8 } body: | bb.0: - liveins: %d16_d17_d18_d19 - %x0 = COPY %xzr - %x1 = COPY %xzr - %x2 = COPY %xzr - %x3 = COPY %xzr - %x4 = COPY %xzr - %x5 = COPY %xzr - %x6 = COPY %xzr - %x7 = COPY %xzr - %x8 = COPY %xzr - %x9 = COPY %xzr - %x10 = COPY %xzr - %x11 = COPY %xzr - %x12 = COPY %xzr - %x13 = COPY %xzr - %x14 = COPY %xzr - %x15 = COPY %xzr - %x16 = COPY %xzr - %x17 = COPY %xzr - %x18 = COPY %xzr - %x19 = COPY %xzr - %x20 = COPY %xzr - %x21 = COPY %xzr - %x22 = COPY %xzr - %x23 = COPY %xzr - %x24 = COPY %xzr - %x25 = COPY %xzr - %x26 = COPY %xzr - %x27 = COPY %xzr - %x28 = COPY %xzr - %fp = COPY %xzr - %lr = COPY %xzr - ST1Fourv1d killed %d16_d17_d18_d19, %stack.0 :: (store 32 into %stack.0, align 8) - ; CHECK: STRXui killed %[[SCAVREG:x[0-9]+|fp|lr]], %sp, [[SPOFFSET:[0-9]+]] :: (store 8 into %stack.1) - ; CHECK-NEXT: %[[SCAVREG]] = ADDXri %sp, {{[0-9]+}}, 0 - ; CHECK-NEXT: ST1Fourv1d killed %d16_d17_d18_d19, killed %[[SCAVREG]] :: (store 32 into %stack.0, align 8) - ; CHECK-NEXT: %[[SCAVREG]] = LDRXui %sp, [[SPOFFSET]] :: (load 8 from %stack.1) + liveins: $d16_d17_d18_d19 + $x0 = COPY $xzr + $x1 = COPY $xzr + $x2 = COPY $xzr + $x3 = COPY $xzr + $x4 = COPY $xzr + $x5 = COPY $xzr + $x6 = COPY $xzr + $x7 = COPY $xzr + $x8 = COPY $xzr + $x9 = COPY $xzr + $x10 = COPY $xzr + $x11 = COPY $xzr + $x12 = COPY $xzr + $x13 = COPY $xzr + $x14 = COPY $xzr + $x15 = COPY $xzr + $x16 = COPY $xzr + $x17 = COPY 
$xzr + $x18 = COPY $xzr + $x19 = COPY $xzr + $x20 = COPY $xzr + $x21 = COPY $xzr + $x22 = COPY $xzr + $x23 = COPY $xzr + $x24 = COPY $xzr + $x25 = COPY $xzr + $x26 = COPY $xzr + $x27 = COPY $xzr + $x28 = COPY $xzr + $fp = COPY $xzr + $lr = COPY $xzr + ST1Fourv1d killed $d16_d17_d18_d19, %stack.0 :: (store 32 into %stack.0, align 8) + ; CHECK: STRXui killed $[[SCAVREG:x[0-9]+|fp|lr]], $sp, [[SPOFFSET:[0-9]+]] :: (store 8 into %stack.1) + ; CHECK-NEXT: $[[SCAVREG]] = ADDXri $sp, {{[0-9]+}}, 0 + ; CHECK-NEXT: ST1Fourv1d killed $d16_d17_d18_d19, killed $[[SCAVREG]] :: (store 32 into %stack.0, align 8) + ; CHECK-NEXT: $[[SCAVREG]] = LDRXui $sp, [[SPOFFSET]] :: (load 8 from %stack.1) - HINT 0, implicit %x0 - HINT 0, implicit %x1 - HINT 0, implicit %x2 - HINT 0, implicit %x3 - HINT 0, implicit %x4 - HINT 0, implicit %x5 - HINT 0, implicit %x6 - HINT 0, implicit %x7 - HINT 0, implicit %x8 - HINT 0, implicit %x9 - HINT 0, implicit %x10 - HINT 0, implicit %x11 - HINT 0, implicit %x12 - HINT 0, implicit %x13 - HINT 0, implicit %x14 - HINT 0, implicit %x15 - HINT 0, implicit %x16 - HINT 0, implicit %x17 - HINT 0, implicit %x18 - HINT 0, implicit %x19 - HINT 0, implicit %x20 - HINT 0, implicit %x21 - HINT 0, implicit %x22 - HINT 0, implicit %x23 - HINT 0, implicit %x24 - HINT 0, implicit %x25 - HINT 0, implicit %x26 - HINT 0, implicit %x27 - HINT 0, implicit %x28 - HINT 0, implicit %fp - HINT 0, implicit %lr + HINT 0, implicit $x0 + HINT 0, implicit $x1 + HINT 0, implicit $x2 + HINT 0, implicit $x3 + HINT 0, implicit $x4 + HINT 0, implicit $x5 + HINT 0, implicit $x6 + HINT 0, implicit $x7 + HINT 0, implicit $x8 + HINT 0, implicit $x9 + HINT 0, implicit $x10 + HINT 0, implicit $x11 + HINT 0, implicit $x12 + HINT 0, implicit $x13 + HINT 0, implicit $x14 + HINT 0, implicit $x15 + HINT 0, implicit $x16 + HINT 0, implicit $x17 + HINT 0, implicit $x18 + HINT 0, implicit $x19 + HINT 0, implicit $x20 + HINT 0, implicit $x21 + HINT 0, implicit $x22 + HINT 0, implicit $x23 + HINT 0, implicit $x24 + HINT 0, implicit $x25 + HINT 0, implicit $x26 + HINT 0, implicit $x27 + HINT 0, implicit $x28 + HINT 0, implicit $fp + HINT 0, implicit $lr RET_ReallyLR ... Index: test/CodeGen/AArch64/regcoal-physreg.mir =================================================================== --- test/CodeGen/AArch64/regcoal-physreg.mir +++ test/CodeGen/AArch64/regcoal-physreg.mir @@ -13,79 +13,79 @@ body: | bb.0: ; We usually should not coalesce copies from allocatable physregs. - ; CHECK: %0:gpr32 = COPY %w7 - ; CHECK: STRWui %0, %x1, 0 - %0 : gpr32 = COPY %w7 - STRWui %0, %x1, 0 + ; CHECK: %0:gpr32 = COPY $w7 + ; CHECK: STRWui %0, $x1, 0 + %0 : gpr32 = COPY $w7 + STRWui %0, $x1, 0 ; It is fine to coalesce copies from reserved physregs ; CHECK-NOT: COPY - ; CHECK: STRXui %fp, %x1, 0 - %1 : gpr64 = COPY %fp - STRXui %1, %x1, 0 + ; CHECK: STRXui $fp, $x1, 0 + %1 : gpr64 = COPY $fp + STRXui %1, $x1, 0 ; It is not fine to coalesce copies from reserved physregs when they are ; clobbered. - ; CHECK: %2:gpr64 = COPY %fp - ; CHECK: STRXui %2, %x1, 0 - %2 : gpr64 = COPY %fp - %fp = SUBXri %fp, 4, 0 - STRXui %2, %x1, 0 + ; CHECK: %2:gpr64 = COPY $fp + ; CHECK: STRXui %2, $x1, 0 + %2 : gpr64 = COPY $fp + $fp = SUBXri $fp, 4, 0 + STRXui %2, $x1, 0 ; It is fine to coalesce copies from constant physregs even when they are ; clobbered.
; CHECK-NOT: COPY - ; CHECK: STRWui %wzr, %x1 - %3 : gpr32 = COPY %wzr - dead %wzr = SUBSWri %w1, 0, 0, implicit-def %nzcv - STRWui %3, %x1, 0 + ; CHECK: STRWui $wzr, $x1 + %3 : gpr32 = COPY $wzr + dead $wzr = SUBSWri $w1, 0, 0, implicit-def $nzcv + STRWui %3, $x1, 0 ; It is fine to coalesce copies from constant physregs even when they are ; clobbered. ; CHECK-NOT: COPY - ; CHECK: STRXui %xzr, %x1 - %4 : gpr64 = COPY %xzr - dead %wzr = SUBSWri %w1, 0, 0, implicit-def %nzcv - STRXui %4, %x1, 0 + ; CHECK: STRXui $xzr, $x1 + %4 : gpr64 = COPY $xzr + dead $wzr = SUBSWri $w1, 0, 0, implicit-def $nzcv + STRXui %4, $x1, 0 ; Coalescing COPYs into constant physregs. - ; CHECK: %wzr = SUBSWri %w1, 0, 0 - %5 : gpr32 = SUBSWri %w1, 0, 0, implicit-def %nzcv - %wzr = COPY %5 + ; CHECK: $wzr = SUBSWri $w1, 0, 0 + %5 : gpr32 = SUBSWri $w1, 0, 0, implicit-def $nzcv + $wzr = COPY %5 ; Only coalesce when the source register is reserved as a whole (this is ; a limitation of the current code which cannot update liveness information ; of the non-reserved part). - ; CHECK: %6:xseqpairsclass = COPY %x28_fp + ; CHECK: %6:xseqpairsclass = COPY $x28_fp ; CHECK: HINT 0, implicit %6 - %6 : xseqpairsclass = COPY %x28_fp + %6 : xseqpairsclass = COPY $x28_fp HINT 0, implicit %6 ; It is not fine to coalesce copies from reserved physregs when they are ; clobbered by the regmask on a call. - ; CHECK: %7:gpr64 = COPY %x18 - ; CHECK: BL @f2, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - ; CHECK: STRXui %7, %x1, 0 + ; CHECK: %7:gpr64 = COPY $x18 + ; CHECK: BL @f2, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK: STRXui %7, $x1, 0 ; Need a def of x18 so that it's not deduced as "constant". - %x18 = COPY %xzr - %7 : gpr64 = COPY %x18 - BL @f2, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - STRXui %7, %x1, 0 + $x18 = COPY $xzr + %7 : gpr64 = COPY $x18 + BL @f2, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + STRXui %7, $x1, 0 ; This can be coalesced. - ; CHECK: %fp = SUBXri %fp, 4, 0 - %8 : gpr64sp = SUBXri %fp, 4, 0 - %fp = COPY %8 + ; CHECK: $fp = SUBXri $fp, 4, 0 + %8 : gpr64sp = SUBXri $fp, 4, 0 + $fp = COPY %8 ; Cannot coalesce when there are reads of the physreg. - ; CHECK-NOT: %fp = SUBXri %fp, 8, 0 - ; CHECK: %9:gpr64sp = SUBXri %fp, 8, 0 - ; CHECK: STRXui %fp, %fp, 0 - ; CHECK: %fp = COPY %9 - %9 : gpr64sp = SUBXri %fp, 8, 0 - STRXui %fp, %fp, 0 - %fp = COPY %9 + ; CHECK-NOT: $fp = SUBXri $fp, 8, 0 + ; CHECK: %9:gpr64sp = SUBXri $fp, 8, 0 + ; CHECK: STRXui $fp, $fp, 0 + ; CHECK: $fp = COPY %9 + %9 : gpr64sp = SUBXri $fp, 8, 0 + STRXui $fp, $fp, 0 + $fp = COPY %9 ... --- # Check coalescing of COPYs from reserved physregs. @@ -95,20 +95,20 @@ bb.0: ; Cannot coalesce physreg because we have reads on other CFG paths (we ; currently abort for any control flow) - ; CHECK-NOT: %fp = SUBXri - ; CHECK: %0:gpr64sp = SUBXri %fp, 12, 0 - ; CHECK: CBZX undef %x0, %bb.1 + ; CHECK-NOT: $fp = SUBXri + ; CHECK: %0:gpr64sp = SUBXri $fp, 12, 0 + ; CHECK: CBZX undef $x0, %bb.1 ; CHECK: B %bb.2 - %0 : gpr64sp = SUBXri %fp, 12, 0 - CBZX undef %x0, %bb.1 + %0 : gpr64sp = SUBXri $fp, 12, 0 + CBZX undef $x0, %bb.1 B %bb.2 bb.1: - %fp = COPY %0 + $fp = COPY %0 RET_ReallyLR bb.2: - STRXui %fp, %fp, 0 + STRXui $fp, $fp, 0 RET_ReallyLR ... --- @@ -118,16 +118,16 @@ bb.0: ; We can coalesce copies from physreg to vreg across multiple blocks.
; CHECK-NOT: COPY - ; CHECK: CBZX undef %x0, %bb.1 + ; CHECK: CBZX undef $x0, %bb.1 ; CHECK-NEXT: B %bb.2 - %0 : gpr64sp = COPY %fp - CBZX undef %x0, %bb.1 + %0 : gpr64sp = COPY $fp + CBZX undef $x0, %bb.1 B %bb.2 bb.1: - ; CHECK: STRXui undef %x0, %fp, 0 + ; CHECK: STRXui undef $x0, $fp, 0 ; CHECK-NEXT: RET_ReallyLR - STRXui undef %x0, %0, 0 + STRXui undef $x0, %0, 0 RET_ReallyLR bb.2: Index: test/CodeGen/AArch64/scheduledag-constreg.mir =================================================================== --- test/CodeGen/AArch64/scheduledag-constreg.mir +++ test/CodeGen/AArch64/scheduledag-constreg.mir @@ -7,23 +7,23 @@ # Check that the instructions are not dependent on each other, even though # they all read/write to the zero register. # CHECK-LABEL: MI Scheduling -# CHECK: SU(0): dead %wzr = SUBSWri %w1, 0, 0, implicit-def dead %nzcv +# CHECK: SU(0): dead $wzr = SUBSWri $w1, 0, 0, implicit-def dead $nzcv # CHECK: # succs left : 0 # CHECK-NOT: Successors: -# CHECK: SU(1): %w2 = COPY %wzr +# CHECK: SU(1): $w2 = COPY $wzr # CHECK: # succs left : 0 # CHECK-NOT: Successors: -# CHECK: SU(2): dead %wzr = SUBSWri %w3, 0, 0, implicit-def dead %nzcv +# CHECK: SU(2): dead $wzr = SUBSWri $w3, 0, 0, implicit-def dead $nzcv # CHECK: # succs left : 0 # CHECK-NOT: Successors: -# CHECK: SU(3): %w4 = COPY %wzr +# CHECK: SU(3): $w4 = COPY $wzr # CHECK: # succs left : 0 # CHECK-NOT: Successors: name: func body: | bb.0: - dead %wzr = SUBSWri %w1, 0, 0, implicit-def dead %nzcv - %w2 = COPY %wzr - dead %wzr = SUBSWri %w3, 0, 0, implicit-def dead %nzcv - %w4 = COPY %wzr + dead $wzr = SUBSWri $w1, 0, 0, implicit-def dead $nzcv + $w2 = COPY $wzr + dead $wzr = SUBSWri $w3, 0, 0, implicit-def dead $nzcv + $w4 = COPY $wzr ... Index: test/CodeGen/AArch64/spill-fold.mir =================================================================== --- test/CodeGen/AArch64/spill-fold.mir +++ test/CodeGen/AArch64/spill-fold.mir @@ -14,11 +14,11 @@ - { id: 0, class: gpr64 } body: | bb.0: - ; CHECK: STRXui %xzr, %stack.0, 0 :: (store 8 into %stack.0) - undef %0.sub_32 = COPY %wzr - INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp - %x0 = COPY %0 - RET_ReallyLR implicit %x0 + ; CHECK: STRXui $xzr, %stack.0, 0 :: (store 8 into %stack.0) + undef %0.sub_32 = COPY $wzr + INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, 
implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp + $x0 = COPY %0 + RET_ReallyLR implicit $x0 ... --- # CHECK-LABEL: name: test_subreg_spill_fold2 @@ -28,11 +28,11 @@ - { id: 0, class: gpr64sp } body: | bb.0: - ; CHECK: STRXui %xzr, %stack.0, 0 :: (store 8 into %stack.0) - undef %0.sub_32 = COPY %wzr - INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp - %x0 = ADDXri %0, 1, 0 - RET_ReallyLR implicit %x0 + ; CHECK: STRXui $xzr, %stack.0, 0 :: (store 8 into %stack.0) + undef %0.sub_32 = COPY $wzr + INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp + $x0 = ADDXri %0, 1, 0 + RET_ReallyLR implicit $x0 ... 
--- # CHECK-LABEL: name: test_subreg_spill_fold3 @@ -42,11 +42,11 @@ - { id: 0, class: fpr64 } body: | bb.0: - ; CHECK: STRXui %xzr, %stack.0, 0 :: (store 8 into %stack.0) - undef %0.ssub = COPY %wzr - INLINEASM &nop, 1, 12, implicit-def dead %d0, 12, implicit-def dead %d1, 12, implicit-def dead %d2, 12, implicit-def dead %d3, 12, implicit-def dead %d4, 12, implicit-def dead %d5, 12, implicit-def dead %d6, 12, implicit-def dead %d7, 12, implicit-def dead %d8, 12, implicit-def dead %d9, 12, implicit-def dead %d10, 12, implicit-def dead %d11, 12, implicit-def dead %d12, 12, implicit-def dead %d13, 12, implicit-def dead %d14, 12, implicit-def dead %d15, 12, implicit-def dead %d16, 12, implicit-def dead %d17, 12, implicit-def dead %d18, 12, implicit-def dead %d19, 12, implicit-def dead %d20, 12, implicit-def dead %d21, 12, implicit-def dead %d22, 12, implicit-def dead %d23, 12, implicit-def dead %d24, 12, implicit-def dead %d25, 12, implicit-def dead %d26, 12, implicit-def dead %d27, 12, implicit-def dead %d28, 12, implicit-def dead %d29, 12, implicit-def dead %d30, 12, implicit-def %d31 - %x0 = COPY %0 - RET_ReallyLR implicit %x0 + ; CHECK: STRXui $xzr, %stack.0, 0 :: (store 8 into %stack.0) + undef %0.ssub = COPY $wzr + INLINEASM &nop, 1, 12, implicit-def dead $d0, 12, implicit-def dead $d1, 12, implicit-def dead $d2, 12, implicit-def dead $d3, 12, implicit-def dead $d4, 12, implicit-def dead $d5, 12, implicit-def dead $d6, 12, implicit-def dead $d7, 12, implicit-def dead $d8, 12, implicit-def dead $d9, 12, implicit-def dead $d10, 12, implicit-def dead $d11, 12, implicit-def dead $d12, 12, implicit-def dead $d13, 12, implicit-def dead $d14, 12, implicit-def dead $d15, 12, implicit-def dead $d16, 12, implicit-def dead $d17, 12, implicit-def dead $d18, 12, implicit-def dead $d19, 12, implicit-def dead $d20, 12, implicit-def dead $d21, 12, implicit-def dead $d22, 12, implicit-def dead $d23, 12, implicit-def dead $d24, 12, implicit-def dead $d25, 12, implicit-def dead $d26, 12, implicit-def dead $d27, 12, implicit-def dead $d28, 12, implicit-def dead $d29, 12, implicit-def dead $d30, 12, implicit-def $d31 + $x0 = COPY %0 + RET_ReallyLR implicit $x0 ... 
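In these spill-fold tests the INLINEASM clobbers every candidate register, which forces %0 to be spilled; the interesting CHECK is that the spill of a value defined by COPY $wzr is emitted as a direct store of the zero register, STRXui $xzr, %stack.0, instead of materializing the copy into a scratch register first. A sketch of that fold follows; it illustrates the idea only, not the actual spill-folding code, and the Def record and helper names are invented.

// spill_zero_fold.cpp - illustrative sketch of the fold the
// test_subreg_spill_fold cases check for; Def and the helpers are invented.
#include <cassert>
#include <string>

// The defining instruction of the value being spilled, reduced to what the
// fold inspects: a COPY and its source register.
struct Def {
  std::string Opcode;
  std::string Src;
};

// A copy of the zero register needs no scratch register before the store:
// the spill can name $xzr directly.
bool canFoldToZeroStore(const Def &D) {
  return D.Opcode == "COPY" && (D.Src == "$wzr" || D.Src == "$xzr");
}

std::string emitSpill(const Def &D, int Slot) {
  std::string Src = canFoldToZeroStore(D) ? "$xzr" : "<scratch>";
  return "STRXui " + Src + ", %stack." + std::to_string(Slot) + ", 0";
}

int main() {
  // Mirrors "undef %0.sub_32 = COPY $wzr" being spilled to slot 0.
  assert(emitSpill({"COPY", "$wzr"}, 0) == "STRXui $xzr, %stack.0, 0");
}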
--- # CHECK-LABEL: name: test_subreg_fill_fold @@ -57,12 +57,12 @@ - { id: 1, class: gpr64 } body: | bb.0: - %0 = COPY %wzr - INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp + %0 = COPY $wzr + INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp ; CHECK: undef %1.sub_32:gpr64 = LDRWui %stack.0, 0 :: (load 4 from %stack.0) undef %1.sub_32 = COPY %0 - %x0 = COPY %1 - RET_ReallyLR implicit %x0 + $x0 = COPY %1 + RET_ReallyLR implicit $x0 ... 
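test_subreg_fill_fold runs the fold in the other direction: instead of reloading the full 64-bit slot and then copying into a subregister, the reload itself is narrowed, so the CHECK expects undef %1.sub_32:gpr64 = LDRWui %stack.0, 0 here, and the LDRSui form for the fpr64 case just below. A sketch of the opcode choice, reduced to the two subregister indices these tests use; the table is invented for illustration.

// fill_subreg_fold.cpp - sketch of choosing a narrowed reload; the table
// is invented and covers only the two subregisters in these tests.
#include <cassert>
#include <string>

// When a reload only feeds "undef %N.<subreg> = COPY %M", the fill can load
// the slot value directly into the subregister instead.
std::string narrowedReload(const std::string &SubIdx) {
  if (SubIdx == "sub_32")  // low 32 bits of a gpr64: 32-bit integer load
    return "LDRWui";
  if (SubIdx == "ssub")    // low 32 bits of an fpr64: 32-bit FP load
    return "LDRSui";
  return "LDRXui";         // otherwise reload the full 64 bits
}

int main() {
  assert(narrowedReload("sub_32") == "LDRWui");
  assert(narrowedReload("ssub") == "LDRSui");
}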
--- # CHECK-LABEL: name: test_subreg_fill_fold2 @@ -73,10 +73,10 @@ - { id: 1, class: fpr64 } body: | bb.0: - %0 = COPY %wzr - INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp + %0 = COPY $wzr + INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp ; CHECK: undef %1.ssub:fpr64 = LDRSui %stack.0, 0 :: (load 4 from %stack.0) undef %1.ssub = COPY %0 - %d0 = COPY %1 - RET_ReallyLR implicit %d0 + $d0 = COPY %1 + RET_ReallyLR implicit $d0 ... Index: test/CodeGen/AArch64/spill-undef.mir =================================================================== --- test/CodeGen/AArch64/spill-undef.mir +++ test/CodeGen/AArch64/spill-undef.mir @@ -30,7 +30,7 @@ - { id: 9, class: gpr64 } body: | bb.0: - liveins: %x0 + liveins: $x0 successors: %bb.1, %bb.2 ; %8 is going to be spilled. @@ -43,25 +43,25 @@ ; %9 is going to be spilled. ; But it is only partially undef.
; Make sure we spill it properly - ; CHECK: [[NINE:%[0-9]+]]:gpr64 = COPY %x0 + ; CHECK: [[NINE:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[NINE]].sub_32:gpr64 = IMPLICIT_DEF ; CHECK-NEXT: STRXui [[NINE]] - %9 = COPY %x0 + %9 = COPY $x0 %9.sub_32 = IMPLICIT_DEF - CBNZW %wzr, %bb.2 + CBNZW $wzr, %bb.2 B %bb.1 bb.1: %4 = ADRP target-flags(aarch64-page) @g %8 = LDRWui %4, target-flags(aarch64-pageoff, aarch64-nc) @g :: (volatile dereferenceable load 4 from @g) - INLINEASM &nop, 1, 12, implicit-def dead early-clobber %x0, 12, implicit-def dead early-clobber %x1, 12, implicit-def dead early-clobber %x2, 12, implicit-def dead early-clobber %x3, 12, implicit-def dead early-clobber %x4, 12, implicit-def dead early-clobber %x5, 12, implicit-def dead early-clobber %x6, 12, implicit-def dead early-clobber %x7, 12, implicit-def dead early-clobber %x8, 12, implicit-def dead early-clobber %x9, 12, implicit-def dead early-clobber %x10, 12, implicit-def dead early-clobber %x11, 12, implicit-def dead early-clobber %x12, 12, implicit-def dead early-clobber %x13, 12, implicit-def dead early-clobber %x14, 12, implicit-def dead early-clobber %x15, 12, implicit-def dead early-clobber %x16, 12, implicit-def dead early-clobber %x17, 12, implicit-def dead early-clobber %x18, 12, implicit-def dead early-clobber %x19, 12, implicit-def dead early-clobber %x20, 12, implicit-def dead early-clobber %x21, 12, implicit-def dead early-clobber %x22, 12, implicit-def dead early-clobber %x23, 12, implicit-def dead early-clobber %x24, 12, implicit-def dead early-clobber %x25, 12, implicit-def dead early-clobber %x26, 12, implicit-def dead early-clobber %x27, 12, implicit-def dead early-clobber %x28, 12, implicit-def dead early-clobber %fp, 12, implicit-def dead early-clobber %lr + INLINEASM &nop, 1, 12, implicit-def dead early-clobber $x0, 12, implicit-def dead early-clobber $x1, 12, implicit-def dead early-clobber $x2, 12, implicit-def dead early-clobber $x3, 12, implicit-def dead early-clobber $x4, 12, implicit-def dead early-clobber $x5, 12, implicit-def dead early-clobber $x6, 12, implicit-def dead early-clobber $x7, 12, implicit-def dead early-clobber $x8, 12, implicit-def dead early-clobber $x9, 12, implicit-def dead early-clobber $x10, 12, implicit-def dead early-clobber $x11, 12, implicit-def dead early-clobber $x12, 12, implicit-def dead early-clobber $x13, 12, implicit-def dead early-clobber $x14, 12, implicit-def dead early-clobber $x15, 12, implicit-def dead early-clobber $x16, 12, implicit-def dead early-clobber $x17, 12, implicit-def dead early-clobber $x18, 12, implicit-def dead early-clobber $x19, 12, implicit-def dead early-clobber $x20, 12, implicit-def dead early-clobber $x21, 12, implicit-def dead early-clobber $x22, 12, implicit-def dead early-clobber $x23, 12, implicit-def dead early-clobber $x24, 12, implicit-def dead early-clobber $x25, 12, implicit-def dead early-clobber $x26, 12, implicit-def dead early-clobber $x27, 12, implicit-def dead early-clobber $x28, 12, implicit-def dead early-clobber $fp, 12, implicit-def dead early-clobber $lr bb.2: - INLINEASM &nop, 1, 12, implicit-def dead early-clobber %x0, 12, implicit-def dead early-clobber %x1, 12, implicit-def dead early-clobber %x2, 12, implicit-def dead early-clobber %x3, 12, implicit-def dead early-clobber %x4, 12, implicit-def dead early-clobber %x5, 12, implicit-def dead early-clobber %x6, 12, implicit-def dead early-clobber %x7, 12, implicit-def dead early-clobber %x8, 12, implicit-def dead early-clobber %x9, 12, implicit-def dead early-clobber %x10, 
12, implicit-def dead early-clobber %x11, 12, implicit-def dead early-clobber %x12, 12, implicit-def dead early-clobber %x13, 12, implicit-def dead early-clobber %x14, 12, implicit-def dead early-clobber %x15, 12, implicit-def dead early-clobber %x16, 12, implicit-def dead early-clobber %x17, 12, implicit-def dead early-clobber %x18, 12, implicit-def dead early-clobber %x19, 12, implicit-def dead early-clobber %x20, 12, implicit-def dead early-clobber %x21, 12, implicit-def dead early-clobber %x22, 12, implicit-def dead early-clobber %x23, 12, implicit-def dead early-clobber %x24, 12, implicit-def dead early-clobber %x25, 12, implicit-def dead early-clobber %x26, 12, implicit-def dead early-clobber %x27, 12, implicit-def dead early-clobber %x28, 12, implicit-def dead early-clobber %fp, 12, implicit-def dead early-clobber %lr + INLINEASM &nop, 1, 12, implicit-def dead early-clobber $x0, 12, implicit-def dead early-clobber $x1, 12, implicit-def dead early-clobber $x2, 12, implicit-def dead early-clobber $x3, 12, implicit-def dead early-clobber $x4, 12, implicit-def dead early-clobber $x5, 12, implicit-def dead early-clobber $x6, 12, implicit-def dead early-clobber $x7, 12, implicit-def dead early-clobber $x8, 12, implicit-def dead early-clobber $x9, 12, implicit-def dead early-clobber $x10, 12, implicit-def dead early-clobber $x11, 12, implicit-def dead early-clobber $x12, 12, implicit-def dead early-clobber $x13, 12, implicit-def dead early-clobber $x14, 12, implicit-def dead early-clobber $x15, 12, implicit-def dead early-clobber $x16, 12, implicit-def dead early-clobber $x17, 12, implicit-def dead early-clobber $x18, 12, implicit-def dead early-clobber $x19, 12, implicit-def dead early-clobber $x20, 12, implicit-def dead early-clobber $x21, 12, implicit-def dead early-clobber $x22, 12, implicit-def dead early-clobber $x23, 12, implicit-def dead early-clobber $x24, 12, implicit-def dead early-clobber $x25, 12, implicit-def dead early-clobber $x26, 12, implicit-def dead early-clobber $x27, 12, implicit-def dead early-clobber $x28, 12, implicit-def dead early-clobber $fp, 12, implicit-def dead early-clobber $lr %6 = ADRP target-flags(aarch64-page) @g - %w0 = MOVi32imm 42 + $w0 = MOVi32imm 42 STRWui %8, %6, target-flags(aarch64-pageoff, aarch64-nc) @g :: (volatile store 4 into @g) STRXui %9, %6, target-flags(aarch64-pageoff, aarch64-nc) @g :: (volatile store 8 into @g) - RET_ReallyLR implicit killed %w0 + RET_ReallyLR implicit killed $w0 ... Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir +++ test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir @@ -13,16 +13,16 @@ regBankSelected: true # GCN: global_addrspace -# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY %vgpr0_vgpr1 +# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 # GCN: FLAT_LOAD_DWORD [[PTR]], 0, 0, 0 body: | bb.0: - liveins: %vgpr0_vgpr1 + liveins: $vgpr0_vgpr1 - %0:vgpr(p1) = COPY %vgpr0_vgpr1 + %0:vgpr(p1) = COPY $vgpr0_vgpr1 %1:vgpr(s32) = G_LOAD %0 :: (load 4 from %ir.global0) - %vgpr0 = COPY %1 + $vgpr0 = COPY %1 ... 
--- Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir +++ test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir @@ -14,7 +14,7 @@ regBankSelected: true # GCN: body: -# GCN: [[PTR:%[0-9]+]]:sreg_64 = COPY %sgpr0_sgpr1 +# GCN: [[PTR:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1 # Immediate offset: # SICI: S_LOAD_DWORD_IMM [[PTR]], 1, 0 @@ -89,54 +89,54 @@ body: | bb.0: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %0:sgpr(p2) = COPY %sgpr0_sgpr1 + %0:sgpr(p2) = COPY $sgpr0_sgpr1 %1:sgpr(s64) = G_CONSTANT i64 4 %2:sgpr(p2) = G_GEP %0, %1 %3:sgpr(s32) = G_LOAD %2 :: (load 4 from %ir.const0) - %sgpr0 = COPY %3 + $sgpr0 = COPY %3 %4:sgpr(s64) = G_CONSTANT i64 1020 %5:sgpr(p2) = G_GEP %0, %4 %6:sgpr(s32) = G_LOAD %5 :: (load 4 from %ir.const0) - %sgpr0 = COPY %6 + $sgpr0 = COPY %6 %7:sgpr(s64) = G_CONSTANT i64 1024 %8:sgpr(p2) = G_GEP %0, %7 %9:sgpr(s32) = G_LOAD %8 :: (load 4 from %ir.const0) - %sgpr0 = COPY %9 + $sgpr0 = COPY %9 %10:sgpr(s64) = G_CONSTANT i64 1048572 %11:sgpr(p2) = G_GEP %0, %10 %12:sgpr(s32) = G_LOAD %11 :: (load 4 from %ir.const0) - %sgpr0 = COPY %12 + $sgpr0 = COPY %12 %13:sgpr(s64) = G_CONSTANT i64 1048576 %14:sgpr(p2) = G_GEP %0, %13 %15:sgpr(s32) = G_LOAD %14 :: (load 4 from %ir.const0) - %sgpr0 = COPY %15 + $sgpr0 = COPY %15 %16:sgpr(s64) = G_CONSTANT i64 17179869180 %17:sgpr(p2) = G_GEP %0, %16 %18:sgpr(s32) = G_LOAD %17 :: (load 4 from %ir.const0) - %sgpr0 = COPY %18 + $sgpr0 = COPY %18 %19:sgpr(s64) = G_CONSTANT i64 17179869184 %20:sgpr(p2) = G_GEP %0, %19 %21:sgpr(s32) = G_LOAD %20 :: (load 4 from %ir.const0) - %sgpr0 = COPY %21 + $sgpr0 = COPY %21 %22:sgpr(s64) = G_CONSTANT i64 4294967292 %23:sgpr(p2) = G_GEP %0, %22 %24:sgpr(s32) = G_LOAD %23 :: (load 4 from %ir.const0) - %sgpr0 = COPY %24 + $sgpr0 = COPY %24 %25:sgpr(s64) = G_CONSTANT i64 4294967296 %26:sgpr(p2) = G_GEP %0, %25 %27:sgpr(s32) = G_LOAD %26 :: (load 4 from %ir.const0) - %sgpr0 = COPY %27 + $sgpr0 = COPY %27 ... --- Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir +++ test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir @@ -13,16 +13,16 @@ regBankSelected: true # GCN: global_addrspace -# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY %vgpr0_vgpr1 -# GCN: [[VAL:%[0-9]+]]:vgpr_32 = COPY %vgpr2 +# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 +# GCN: [[VAL:%[0-9]+]]:vgpr_32 = COPY $vgpr2 # GCN: FLAT_STORE_DWORD [[PTR]], [[VAL]], 0, 0, 0 body: | bb.0: - liveins: %vgpr0_vgpr1, %vgpr2 + liveins: $vgpr0_vgpr1, $vgpr2 - %0:vgpr(p1) = COPY %vgpr0_vgpr1 - %1:vgpr(s32) = COPY %vgpr2 + %0:vgpr(p1) = COPY $vgpr0_vgpr1 + %1:vgpr(s32) = COPY $vgpr2 G_STORE %1, %0 :: (store 4 into %ir.global0) ... 
Index: test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll +++ test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll @@ -2,7 +2,7 @@ ; CHECK-LABEL: name: test_f32_inreg -; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0 +; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[S0]] define amdgpu_vs void @test_f32_inreg(float inreg %arg0) { call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0 @@ -10,7 +10,7 @@ } ; CHECK-LABEL: name: test_f32 -; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY %vgpr0 +; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY $vgpr0 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[V0]] define amdgpu_vs void @test_f32(float %arg0) { call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0 @@ -18,7 +18,7 @@ } ; CHECK-LABEL: name: test_ptr2_byval -; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY %sgpr0_sgpr1 +; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY $sgpr0_sgpr1 ; CHECK: G_LOAD [[S01]] define amdgpu_vs void @test_ptr2_byval(i32 addrspace(2)* byval %arg0) { %tmp0 = load volatile i32, i32 addrspace(2)* %arg0 @@ -26,7 +26,7 @@ } ; CHECK-LABEL: name: test_ptr2_inreg -; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY %sgpr0_sgpr1 +; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY $sgpr0_sgpr1 ; CHECK: G_LOAD [[S01]] define amdgpu_vs void @test_ptr2_inreg(i32 addrspace(2)* inreg %arg0) { %tmp0 = load volatile i32, i32 addrspace(2)* %arg0 @@ -34,8 +34,8 @@ } ; CHECK-LABEL: name: test_sgpr_alignment0 -; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0 -; CHECK: [[S23:%[0-9]+]]:_(p2) = COPY %sgpr2_sgpr3 +; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0 +; CHECK: [[S23:%[0-9]+]]:_(p2) = COPY $sgpr2_sgpr3 ; CHECK: G_LOAD [[S23]] ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[S0]] define amdgpu_vs void @test_sgpr_alignment0(float inreg %arg0, i32 addrspace(2)* inreg %arg1) { @@ -45,10 +45,10 @@ } ; CHECK-LABEL: name: test_order -; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0 -; CHECK: [[S1:%[0-9]+]]:_(s32) = COPY %sgpr1 -; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY %vgpr0 -; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY %vgpr1 +; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0 +; CHECK: [[S1:%[0-9]+]]:_(s32) = COPY $sgpr1 +; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY $vgpr0 +; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY $vgpr1 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[V0]](s32), [[S0]](s32), [[V1]](s32), [[S1]](s32) define amdgpu_vs void @test_order(float inreg %arg0, float inreg %arg1, float %arg2, float %arg3) { call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg2, float %arg0, float %arg3, float %arg1, i1 false, i1 false) #0 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir @@ -13,14 +13,14 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 ; CHECK-LABEL: name: test_add - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = 
COPY $vgpr1 ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] - %0(s32) = COPY %vgpr0 - %1(s32) = COPY %vgpr1 + %0(s32) = COPY $vgpr0 + %1(s32) = COPY $vgpr1 %2(s32) = G_ADD %0, %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir @@ -13,14 +13,14 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 ; CHECK-LABEL: name: test_and - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] - %0(s32) = COPY %vgpr0 - %1(s32) = COPY %vgpr1 + %0(s32) = COPY $vgpr0 + %1(s32) = COPY $vgpr1 %2(s32) = G_AND %0, %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir @@ -13,14 +13,14 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %vgpr0 + liveins: $vgpr0 ; CHECK-LABEL: name: test_bitcast - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY]](s32) ; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST]](<2 x s16>) - %0(s32) = COPY %vgpr0 + %0(s32) = COPY $vgpr0 %1(<2 x s16>) = G_BITCAST %0 %2(s32) = G_BITCAST %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir @@ -47,7 +47,7 @@ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 7.500000e+00 %0(s32) = G_FCONSTANT float 1.0 - %vgpr0 = COPY %0 + $vgpr0 = COPY %0 %1(s32) = G_FCONSTANT float 7.5 - %vgpr0 = COPY %1 + $vgpr0 = COPY %1 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir @@ -16,12 +16,12 @@ - { id: 2, class: _ } body: | bb.0.entry: - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 ; CHECK-LABEL: name: test_fadd ; CHECK: %2:_(s32) = G_FADD %0, %1 - %0(s32) = COPY %vgpr0 - %1(s32) = COPY %vgpr1 + %0(s32) = COPY $vgpr0 + %1(s32) = COPY $vgpr1 %2(s32) = G_FADD %0, %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... 
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir @@ -13,14 +13,14 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 ; CHECK-LABEL: name: test_fmul - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]] - %0(s32) = COPY %vgpr0 - %1(s32) = COPY %vgpr1 + %0(s32) = COPY $vgpr0 + %1(s32) = COPY $vgpr1 %2(s32) = G_FMUL %0, %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir @@ -16,16 +16,16 @@ - { id: 2, class: _ } body: | bb.0.entry: - liveins: %vgpr0 + liveins: $vgpr0 ; CHECK-LABEL: name: test_icmp ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]] ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[COPY]] - ; CHECK: %vgpr0 = COPY [[SELECT]](s32) + ; CHECK: $vgpr0 = COPY [[SELECT]](s32) %0(s32) = G_CONSTANT i32 0 - %1(s32) = COPY %vgpr0 + %1(s32) = COPY $vgpr0 %2(s1) = G_ICMP intpred(ne), %0, %1 %3:_(s32) = G_SELECT %2(s1), %0(s32), %1(s32) - %vgpr0 = COPY %3 + $vgpr0 = COPY %3 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir @@ -12,14 +12,14 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 ; CHECK-LABEL: name: test_or - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] - %0(s32) = COPY %vgpr0 - %1(s32) = COPY %vgpr1 + %0(s32) = COPY $vgpr0 + %1(s32) = COPY $vgpr1 %2(s32) = G_OR %0, %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir @@ -16,21 +16,21 @@ - { id: 5, class: _ } body: | bb.0: - liveins: %vgpr0 + liveins: $vgpr0 ; CHECK-LABEL: name: test_select ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]] ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] %0(s32) = G_CONSTANT i32 0 - %1(s32) = COPY %vgpr0 + %1(s32) = COPY $vgpr0 %2(s1) = G_ICMP intpred(ne), %0, %1 %3(s32) = G_CONSTANT i32 1 %4(s32) = G_CONSTANT i32 2 %5(s32) = G_SELECT %2, %3, %4 - %vgpr0 = COPY %5 + $vgpr0 = COPY %5 ... 
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir @@ -9,14 +9,14 @@ - { id: 2, class: _ } body: | bb.0.entry: - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 ; CHECK-LABEL: name: test_shl - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]] - %0(s32) = COPY %vgpr0 - %1(s32) = COPY %vgpr1 + %0(s32) = COPY $vgpr0 + %1(s32) = COPY $vgpr1 %2(s32) = G_SHL %0, %1 - %vgpr0 = COPY %2 + $vgpr0 = COPY %2 ... Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir @@ -29,8 +29,8 @@ body: | bb.0: - liveins: %sgpr0_sgpr1 - %0:_(p2) = COPY %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 + %0:_(p2) = COPY $sgpr0_sgpr1 %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr0) ... @@ -45,8 +45,8 @@ body: | bb.0: - liveins: %sgpr0_sgpr1 - %0:_(p1) = COPY %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 + %0:_(p1) = COPY $sgpr0_sgpr1 %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr1) ... @@ -63,7 +63,7 @@ body: | bb.0: - liveins: %sgpr0_sgpr1 - %0:_(p1) = COPY %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 + %0:_(p1) = COPY $sgpr0_sgpr1 %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.tmp1) ... Index: test/CodeGen/AMDGPU/break-smem-soft-clauses.mir =================================================================== --- test/CodeGen/AMDGPU/break-smem-soft-clauses.mir +++ test/CodeGen/AMDGPU/break-smem-soft-clauses.mir @@ -8,9 +8,9 @@ body: | bb.0: ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x1 - ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 S_ENDPGM ... --- @@ -20,11 +20,11 @@ body: | bb.0: ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x2 - ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 - ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0 + ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 + ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 - %sgpr1 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0 + $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 + $sgpr1 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0 S_ENDPGM ... --- @@ -34,13 +34,13 @@ body: | bb.0: ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x3 - ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0 - ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr6_sgpr7, 0, 0 - ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0 + ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0 + ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0 + ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0 - %sgpr1 = S_LOAD_DWORD_IMM %sgpr6_sgpr7, 0, 0 - %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0 + $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0 + $sgpr1 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0 + $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0 S_ENDPGM ... 
--- @@ -50,15 +50,15 @@ body: | bb.0: ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x4 - ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0 - ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr8_sgpr9, 0, 0 - ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0 - ; GCN-NEXT: %sgpr3 = S_LOAD_DWORD_IMM %sgpr16_sgpr17, 0, 0 + ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0 + ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr8_sgpr9, 0, 0 + ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0 + ; GCN-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr16_sgpr17, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0 - %sgpr1 = S_LOAD_DWORD_IMM %sgpr8_sgpr9, 0, 0 - %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0 - %sgpr3 = S_LOAD_DWORD_IMM %sgpr16_sgpr17, 0, 0 + $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0 + $sgpr1 = S_LOAD_DWORD_IMM $sgpr8_sgpr9, 0, 0 + $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0 + $sgpr3 = S_LOAD_DWORD_IMM $sgpr16_sgpr17, 0, 0 S_ENDPGM ... --- @@ -67,11 +67,11 @@ body: | bb.0: ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x2_sameptr - ; GCN: %sgpr12 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 - ; GCN-NEXT: %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + ; GCN: $sgpr12 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 + ; GCN-NEXT: $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr12 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 - %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + $sgpr12 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 + $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 S_ENDPGM ... --- @@ -81,9 +81,9 @@ body: | bb.0: ; GCN-LABEL: name: smrd_load4_overwrite_ptr_lo - ; GCN: %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + ; GCN: $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 S_ENDPGM ... --- @@ -93,9 +93,9 @@ body: | bb.0: ; GCN-LABEL: name: smrd_load4_overwrite_ptr_hi - ; GCN: %sgpr11 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + ; GCN: $sgpr11 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr11 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0 + $sgpr11 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0 S_ENDPGM ... --- @@ -105,9 +105,9 @@ body: | bb.0: ; GCN-LABEL: name: smrd_load8_overwrite_ptr - ; GCN: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0 + ; GCN: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0 ; GCN-NEXT: S_ENDPGM - %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0 + $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0 S_ENDPGM ... 
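The trivial_smem_clause cases above are left alone, and the single-load overwrite_ptr cases involve only one scalar load, so there is no clause to break. The break_smem_clause cases that follow expect the run of loads to be split (the XNACK-NEXT: S_NOP 0 lines) when a load's destination overlaps a pointer the clause still relies on, as in break_smem_clause_simple_load_smrd4_lo_ptr, where $sgpr10 is the low half of the $sgpr10_sgpr11 pointer. Below is a sketch of that overlap check, modelling register tuples as index ranges; the Range type is invented for illustration.

// smem_clause_overlap.cpp - sketch of the def/use overlap test these cases
// imply; the Range model of sgpr tuples is invented.
#include <cassert>

// A register tuple such as $sgpr10_sgpr11 modelled as [First, First + Count).
struct Range {
  unsigned First;
  unsigned Count;
};

bool overlaps(Range A, Range B) {
  return A.First < B.First + B.Count && B.First < A.First + A.Count;
}

// Within a run of scalar loads, a load whose destination overlaps a pointer
// of the clause forces a break before the next load on XNACK targets.
bool needsClauseBreak(Range LoadDef, Range ClausePtr) {
  return overlaps(LoadDef, ClausePtr);
}

int main() {
  Range Sgpr10{10, 1}, Ptr10_11{10, 2}, Ptr12_13{12, 2};
  assert(needsClauseBreak(Sgpr10, Ptr10_11));   // $sgpr10 clobbers its pointer
  assert(!needsClauseBreak(Sgpr10, Ptr12_13));  // disjoint: clause may stand
}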
 ---
@@ -119,47 +119,47 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_at_max_smem_clause_size_smrd_load4
-    ; GCN: %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr14 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr15 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr16 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr17 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr18 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr19 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr20 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr21 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr22 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr23 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr24 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr25 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr26 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr27 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr28 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr0 = S_LOAD_DWORD_IMM %sgpr30_sgpr31, 0, 0
-    ; GCN-NEXT: %sgpr0 = S_MOV_B32 %sgpr0, implicit %sgpr13, implicit %sgpr14, implicit %sgpr15, implicit %sgpr16, implicit %sgpr17, implicit %sgpr18, implicit %sgpr19, implicit %sgpr20, implicit %sgpr21, implicit %sgpr22, implicit %sgpr23, implicit %sgpr24, implicit %sgpr25, implicit %sgpr26, implicit %sgpr27, implicit %sgpr28
+    ; GCN: $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr14 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr15 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr16 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr17 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr18 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr19 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr20 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr21 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr22 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr23 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr24 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr25 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr26 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr27 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr28 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr0 = S_LOAD_DWORD_IMM $sgpr30_sgpr31, 0, 0
+    ; GCN-NEXT: $sgpr0 = S_MOV_B32 $sgpr0, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $sgpr16, implicit $sgpr17, implicit $sgpr18, implicit $sgpr19, implicit $sgpr20, implicit $sgpr21, implicit $sgpr22, implicit $sgpr23, implicit $sgpr24, implicit $sgpr25, implicit $sgpr26, implicit $sgpr27, implicit $sgpr28
     ; GCN-NEXT: S_ENDPGM
-    %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr14 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr15 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr16 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr17 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr18 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr19 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr20 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr21 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr22 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr23 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr24 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr25 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr26 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr27 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr28 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr30_sgpr31, 0, 0
-    %sgpr0 = S_MOV_B32 %sgpr0, implicit %sgpr13, implicit %sgpr14, implicit %sgpr15, implicit %sgpr16, implicit %sgpr17, implicit %sgpr18, implicit %sgpr19, implicit %sgpr20, implicit %sgpr21, implicit %sgpr22, implicit %sgpr23, implicit %sgpr24, implicit %sgpr25, implicit %sgpr26, implicit %sgpr27, implicit %sgpr28
+    $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr14 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr15 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr16 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr17 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr18 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr19 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr20 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr21 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr22 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr23 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr24 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr25 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr26 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr27 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr28 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr30_sgpr31, 0, 0
+    $sgpr0 = S_MOV_B32 $sgpr0, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $sgpr16, implicit $sgpr17, implicit $sgpr18, implicit $sgpr19, implicit $sgpr20, implicit $sgpr21, implicit $sgpr22, implicit $sgpr23, implicit $sgpr24, implicit $sgpr25, implicit $sgpr26, implicit $sgpr27, implicit $sgpr28
     S_ENDPGM
 ...
 ---
@@ -169,12 +169,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd4_lo_ptr
-    ; GCN: %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr12 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr12 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -184,11 +184,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd4_hi_ptr
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr3 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr3 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr3 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -198,12 +198,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd8_ptr
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -213,11 +213,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd16_ptr
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM %sgpr6_sgpr7, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM $sgpr6_sgpr7, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM %sgpr6_sgpr7, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM $sgpr6_sgpr7, 0, 0
     S_ENDPGM
 ...
 ---
@@ -228,16 +228,16 @@
     ; GCN-LABEL: name: break_smem_clause_block_boundary_load_smrd8_ptr
     ; GCN: bb.0:
     ; GCN: successors: %bb.1(0x80000000)
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     ; GCN: bb.1:
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
   bb.0:
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0

   bb.1:
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -248,11 +248,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_store_load_into_ptr_smrd4
-    ; GCN: S_STORE_DWORD_IMM %sgpr16, %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr12 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    ; GCN: S_STORE_DWORD_IMM $sgpr16, $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr12 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    S_STORE_DWORD_IMM %sgpr16, %sgpr10_sgpr11, 0, 0
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    S_STORE_DWORD_IMM $sgpr16, $sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     S_ENDPGM
 ...
 ---
@@ -264,11 +264,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_store_load_into_data_smrd4
-    ; GCN: S_STORE_DWORD_IMM %sgpr8, %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr8 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: S_STORE_DWORD_IMM $sgpr8, $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr8 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    S_STORE_DWORD_IMM %sgpr8, %sgpr10_sgpr11, 0, 0
-    %sgpr8 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    S_STORE_DWORD_IMM $sgpr8, $sgpr10_sgpr11, 0, 0
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -278,13 +278,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: valu_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -294,13 +294,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: salu_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr8 = S_MOV_B32 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr8 = S_MOV_B32 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr8 = S_MOV_B32 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr8 = S_MOV_B32 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -309,13 +309,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: ds_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -325,13 +325,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -341,11 +341,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_use_breaks_smem_clause
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0, implicit %sgpr12_sgpr13
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0, implicit $sgpr12_sgpr13
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM %sgpr6_sgpr7, 0, 0
+    ; GCN-NEXT: $sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM $sgpr6_sgpr7, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0, implicit %sgpr12_sgpr13
-    %sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM %sgpr6_sgpr7, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0, implicit $sgpr12_sgpr13
+    $sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM $sgpr6_sgpr7, 0, 0
     S_ENDPGM
 ...
Index: test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
===================================================================
--- test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
+++ test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
@@ -7,10 +7,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x1
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -20,12 +20,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x2
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -35,14 +35,14 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x3
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr5_vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr5_vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -52,16 +52,16 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x4
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr8_vgpr9, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr10_vgpr11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr8_vgpr9, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr10_vgpr11, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr8_vgpr9, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr10_vgpr11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr8_vgpr9, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr10_vgpr11, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -71,12 +71,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x2_sameptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -86,10 +86,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load4_overwrite_ptr_lo
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -99,10 +99,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load4_overwrite_ptr_hi
-    ; GCN: %vgpr1 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr1 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -112,10 +112,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load8_overwrite_ptr
-    ; GCN: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -128,49 +128,49 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_at_max_clause_size_flat_load4
-    ; GCN: %vgpr2 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr4 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr5 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr6 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr7 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr9 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr12 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr13 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr14 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr15 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr16 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr17 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr4 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr5 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr6 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr7 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr9 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr12 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr13 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr14 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr15 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr16 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr17 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr0 = S_MOV_B32 %sgpr0, implicit %vgpr2, implicit %vgpr3, implicit %vgpr4, implicit %vgpr5, implicit %vgpr6, implicit %vgpr7, implicit %vgpr8, implicit %vgpr9, implicit %vgpr10, implicit %vgpr11, implicit %vgpr12, implicit %vgpr13, implicit %vgpr14, implicit %vgpr15, implicit %vgpr16, implicit %vgpr17, implicit %vgpr18
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr0 = S_MOV_B32 $sgpr0, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18
     ; GCN-NEXT: S_ENDPGM
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr4 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr5 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr4 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr5 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr

-    %vgpr6 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr7 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr9 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr6 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr7 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr9 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr

-    %vgpr10 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr12 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr13 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr12 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr13 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr

-    %vgpr14 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr15 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr16 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr17 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr14 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr15 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr16 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr17 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr

-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr0 = S_MOV_B32 %sgpr0, implicit %vgpr2, implicit %vgpr3, implicit %vgpr4, implicit %vgpr5, implicit %vgpr6, implicit %vgpr7, implicit %vgpr8, implicit %vgpr9, implicit %vgpr10, implicit %vgpr11, implicit %vgpr12, implicit %vgpr13, implicit %vgpr14, implicit %vgpr15, implicit %vgpr16, implicit %vgpr17, implicit %vgpr18
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr0 = S_MOV_B32 $sgpr0, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18
     S_ENDPGM
 ...
 ---
@@ -180,13 +180,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat4_lo_ptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -196,13 +196,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat4_hi_ptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -212,13 +212,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat8_ptr
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -229,12 +229,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat16_ptr
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -249,17 +249,17 @@
     ; GCN-LABEL: name: break_clause_block_boundary_load_flat8_ptr
     ; GCN: bb.0:
     ; GCN-NEXT: successors: %bb.1(0x80000000)
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN: bb.1:
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
   bb.0:
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr

   bb.1:
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -270,12 +270,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_store_load_into_ptr_flat4
-    ; GCN: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -287,12 +287,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_store_load_into_data_flat4
-    ; GCN: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -303,15 +303,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: valu_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = V_MOV_B32_e32 0, implicit %exec
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -322,15 +322,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: salu_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr8 = S_MOV_B32 0
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr8 = S_MOV_B32 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr8 = S_MOV_B32 0
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr8 = S_MOV_B32 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -340,15 +340,15 @@
 body: |
   bb.0:
    ; GCN-LABEL: name: ds_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -358,14 +358,14 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr8 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 0, 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr8 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr8 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 0, 0
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -375,13 +375,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_use_breaks_clause
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr, implicit %vgpr4_vgpr5
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr4_vgpr5
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr, implicit %vgpr4_vgpr5
-    %vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr4_vgpr5
+    $vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -390,12 +390,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_mubuf4_x2
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr3 = BUFFER_LOAD_DWORD_OFFEN %vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFEN %vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -404,13 +404,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_mubuf_offen_ptr
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -421,13 +421,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: mubuf_load4_overwrite_ptr
-    ; GCN: %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr1 = V_MOV_B32_e32 0, implicit %exec
-    ; GCN-NEXT: %vgpr2 = V_MOV_B32_e32 %vgpr0, implicit %exec
+    ; GCN: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+    ; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr2 = V_MOV_B32_e32 %vgpr0, implicit %exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -438,29 +438,29 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_flat_load_mubuf_load
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 # Break a clause from interference between mubuf and flat instructions

 # GCN-LABEL: name: break_clause_mubuf_load_flat_load
 # GCN: bb.0:
-# GCN-NEXT: %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4
+# GCN-NEXT: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
 # XNACK-NEXT: S_NOP 0
-# GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3
+# GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3
 # GCN-NEXT: S_ENDPGM
 name: break_clause_mubuf_load_flat_load
 body: |
   bb.0:
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
@@ -471,13 +471,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_rtn_into_ptr_flat4
-    ; GCN: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr4 = FLAT_ATOMIC_ADD_RTN %vgpr5_vgpr6, %vgpr7, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr5_vgpr6, $vgpr7, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr4 = FLAT_ATOMIC_ADD_RTN %vgpr5_vgpr6, %vgpr7, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr5_vgpr6, $vgpr7, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -486,12 +486,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_nortn_ptr_load_flat4
-    ; GCN: FLAT_ATOMIC_ADD %vgpr0_vgpr1, %vgpr2, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_ATOMIC_ADD $vgpr0_vgpr1, $vgpr2, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    FLAT_ATOMIC_ADD %vgpr0_vgpr1, %vgpr2, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_ATOMIC_ADD $vgpr0_vgpr1, $vgpr2, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -501,13 +501,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_rtn_into_ptr_mubuf4
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN %vgpr2, %vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN $vgpr2, $vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN %vgpr2, %vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN $vgpr2, $vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -517,12 +517,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_nortn_ptr_load_mubuf4
-    ; GCN: BUFFER_ATOMIC_ADD_OFFEN %vgpr0, %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: BUFFER_ATOMIC_ADD_OFFEN $vgpr0, $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    BUFFER_ATOMIC_ADD_OFFEN %vgpr0, %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    BUFFER_ATOMIC_ADD_OFFEN $vgpr0, $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -533,11 +533,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: no_break_clause_mubuf_load_novaddr
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr3 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -547,16 +547,16 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: mix_load_store_clause
-    ; GCN: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr

-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -566,15 +566,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: mix_load_store_clause_same_address
-    ; GCN: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr

-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
Index: test/CodeGen/AMDGPU/clamp-omod-special-case.mir
===================================================================
--- test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec

 name: v_max_self_clamp_not_set_f32
 tracksRegLiveness: true
@@ -35,37 +35,37 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: v_clamp_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
 name: v_clamp_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -97,38 +97,38 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
 # Don't fold a mul that looks like an omod if itself has omod set
 # GCN-LABEL: name: v_omod_mul_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
 name: v_omod_mul_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -160,30 +160,30 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
@@ -191,8 +191,8 @@
 # Don't fold a mul that looks like an omod if itself has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
 name: v_omod_mul_clamp_already_set_f32
 tracksRegLiveness: true
@@ -225,30 +225,30 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
@@ -269,8 +269,8 @@ # Don't fold an add that looks like an omod if itself has omod set # GCN-LABEL: name: v_omod_add_omod_already_set_f32 -# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec -# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec +# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec +# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec name: v_omod_add_omod_already_set_f32 tracksRegLiveness: true registers: @@ -302,30 +302,30 @@ - { id: 25, class: vreg_64 } - { id: 26, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 %11 = S_MOV_B32 0 %12 = REG_SEQUENCE killed %11, 1, killed %10, 2 %13 = REG_SEQUENCE killed %5, 17, %12, 18 %14 = S_MOV_B32 2 - %26 = V_LSHL_B64 killed %25, 2, implicit %exec + %26 = V_LSHL_B64 killed %25, 2, implicit $exec %16 = REG_SEQUENCE killed %4, 17, %12, 18 %18 = COPY %26 - %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec - %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec - %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec - BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec + %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec + %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec + %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec + BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... @@ -333,8 +333,8 @@ # Don't fold an add that looks like an omod if itself has clamp set # This might be OK, but would require folding the clamp at the same time.
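# (Aside: V_ADD_F32_e64 ... killed %20, ..., killed %20 in these tests adds
# a value to itself, i.e. a multiply by 2, which is why the add tests
# mirror the mul tests above.)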
# GCN-LABEL: name: v_omod_add_clamp_already_set_f32 -# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec -# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec +# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec +# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec name: v_omod_add_clamp_already_set_f32 tracksRegLiveness: true @@ -367,30 +367,30 @@ - { id: 25, class: vreg_64 } - { id: 26, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 %11 = S_MOV_B32 0 %12 = REG_SEQUENCE killed %11, 1, killed %10, 2 %13 = REG_SEQUENCE killed %5, 17, %12, 18 %14 = S_MOV_B32 2 - %26 = V_LSHL_B64 killed %25, 2, implicit %exec + %26 = V_LSHL_B64 killed %25, 2, implicit $exec %16 = REG_SEQUENCE killed %4, 17, %12, 18 %18 = COPY %26 - %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec - %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec - %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec - BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec + %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec + %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec + %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec + BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... @@ -404,9 +404,9 @@ - { id: 1, class: vgpr_32 } body: | bb.0: - liveins: %vgpr0 + liveins: $vgpr0 - %0 = COPY %vgpr0 - %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit %exec + %0 = COPY $vgpr0 + %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit $exec ... 
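# (Note: the cluster-flat-loads-postra test below runs after register
# allocation, so its body uses physical registers only; under the new
# convention every one of them takes the '$' prefix, while virtual
# registers such as %0 and %1 elsewhere keep '%'.)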
Index: test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir =================================================================== --- test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir +++ test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir @@ -10,22 +10,22 @@ tracksRegLiveness: true registers: liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } body: | bb.0: - liveins: %vgpr0 + liveins: $vgpr0 - %vgpr0_vgpr1 = IMPLICIT_DEF - %vgpr4_vgpr5 = IMPLICIT_DEF - %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4) - %vgpr4 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4) - %vgpr2 = IMPLICIT_DEF - %vgpr3 = IMPLICIT_DEF - %vgpr6 = IMPLICIT_DEF - %vgpr0 = V_ADD_I32_e32 16, %vgpr2, implicit-def %vcc, implicit %exec - %vgpr1 = V_ADDC_U32_e32 %vgpr3, killed %vgpr6, implicit-def dead %vcc, implicit %vcc, implicit %exec - FLAT_STORE_DWORD %vgpr2_vgpr3, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4) - FLAT_STORE_DWORD %vgpr0_vgpr1, killed %vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4) + $vgpr0_vgpr1 = IMPLICIT_DEF + $vgpr4_vgpr5 = IMPLICIT_DEF + $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4) + $vgpr4 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4) + $vgpr2 = IMPLICIT_DEF + $vgpr3 = IMPLICIT_DEF + $vgpr6 = IMPLICIT_DEF + $vgpr0 = V_ADD_I32_e32 16, $vgpr2, implicit-def $vcc, implicit $exec + $vgpr1 = V_ADDC_U32_e32 $vgpr3, killed $vgpr6, implicit-def dead $vcc, implicit $vcc, implicit $exec + FLAT_STORE_DWORD $vgpr2_vgpr3, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4) + FLAT_STORE_DWORD $vgpr0_vgpr1, killed $vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4) S_ENDPGM ... Index: test/CodeGen/AMDGPU/cluster-flat-loads.mir =================================================================== --- test/CodeGen/AMDGPU/cluster-flat-loads.mir +++ test/CodeGen/AMDGPU/cluster-flat-loads.mir @@ -14,7 +14,7 @@ body: | bb.0: %0 = IMPLICIT_DEF - %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4) - %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec - %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, implicit %exec, implicit %flat_scr :: (load 4) + %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4) + %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec + %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, implicit $exec, implicit $flat_scr :: (load 4) ... 
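# (Aside: both cluster-flat-loads tests exercise load clustering in the
# scheduler, which tries to place the two FLAT_LOAD_DWORDs from the same
# base next to each other; in the virtual-register variant above they read
# from %0 at offsets 0 and 4.)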
Index: test/CodeGen/AMDGPU/coalescer-subreg-join.mir =================================================================== --- test/CodeGen/AMDGPU/coalescer-subreg-join.mir +++ test/CodeGen/AMDGPU/coalescer-subreg-join.mir @@ -22,9 +22,9 @@ - { id: 20, class: vreg_512 } - { id: 27, class: vgpr_32 } liveins: - - { reg: '%sgpr2_sgpr3', virtual-reg: '%0' } - - { reg: '%vgpr2', virtual-reg: '%1' } - - { reg: '%vgpr3', virtual-reg: '%2' } + - { reg: '$sgpr2_sgpr3', virtual-reg: '%0' } + - { reg: '$vgpr2', virtual-reg: '%1' } + - { reg: '$vgpr3', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,11 +41,11 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr2_sgpr3, %vgpr2, %vgpr3 + liveins: $sgpr2_sgpr3, $vgpr2, $vgpr3 - %0 = COPY %sgpr2_sgpr3 - %1 = COPY %vgpr2 - %2 = COPY %vgpr3 + %0 = COPY $sgpr2_sgpr3 + %1 = COPY $vgpr2 + %2 = COPY $vgpr3 %3 = S_LOAD_DWORDX8_IMM %0, 0, 0 %4 = S_LOAD_DWORDX4_IMM %0, 12, 0 %5 = S_LOAD_DWORDX8_IMM %0, 16, 0 @@ -61,7 +61,7 @@ %11.sub6 = COPY %1 %11.sub7 = COPY %1 %11.sub8 = COPY %1 - dead %18 = IMAGE_SAMPLE_C_D_O_V1_V16 %11, %3, %4, 1, 0, 0, 0, 0, 0, 0, -1, implicit %exec + dead %18 = IMAGE_SAMPLE_C_D_O_V1_V16 %11, %3, %4, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec %20.sub1 = COPY %2 %20.sub2 = COPY %2 %20.sub3 = COPY %2 @@ -70,6 +70,6 @@ %20.sub6 = COPY %2 %20.sub7 = COPY %2 %20.sub8 = COPY %2 - dead %27 = IMAGE_SAMPLE_C_D_O_V1_V16 %20, %5, %6, 1, 0, 0, 0, 0, 0, 0, -1, implicit %exec + dead %27 = IMAGE_SAMPLE_C_D_O_V1_V16 %20, %5, %6, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec ... Index: test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir =================================================================== --- test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir +++ test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir @@ -2,7 +2,7 @@ ... # GCN-LABEL: name: s_fold_and_imm_regimm_32{{$}} -# GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit %exec +# GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit $exec # GCN: BUFFER_STORE_DWORD_OFFSET killed %10, name: s_fold_and_imm_regimm_32 alignment: 0 @@ -24,7 +24,7 @@ - { id: 9, class: sreg_32_xm0 } - { id: 10, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,9 +41,9 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %0 = COPY %sgpr0_sgpr1 + %0 = COPY $sgpr0_sgpr1 %1 = S_LOAD_DWORDX2_IMM %0, 36, 0 %2 = COPY %1.sub1 %3 = COPY %1.sub0 @@ -52,9 +52,9 @@ %6 = REG_SEQUENCE killed %2, 1, killed %3, 2, killed %4, 3, killed %5, 4 %7 = S_MOV_B32 1234567 %8 = S_MOV_B32 9999 - %9 = S_AND_B32 killed %7, killed %8, implicit-def dead %scc + %9 = S_AND_B32 killed %7, killed %8, implicit-def dead $scc %10 = COPY %9 - BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
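# (A quick arithmetic check of the fold above: the test ANDs the two
# S_MOV_B32 immediates, and 1234567 & 9999 = 0x12D687 & 0x00270F = 0x607 =
# 1543, matching the V_MOV_B32_e32 1543 that the GCN check expects.)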
@@ -62,19 +62,19 @@ # GCN-LABEL: name: v_fold_and_imm_regimm_32{{$}} -# GCN: %9:vgpr_32 = V_MOV_B32_e32 646, implicit %exec +# GCN: %9:vgpr_32 = V_MOV_B32_e32 646, implicit $exec # GCN: FLAT_STORE_DWORD %19, %9, -# GCN: %10:vgpr_32 = V_MOV_B32_e32 646, implicit %exec +# GCN: %10:vgpr_32 = V_MOV_B32_e32 646, implicit $exec # GCN: FLAT_STORE_DWORD %19, %10 -# GCN: %11:vgpr_32 = V_MOV_B32_e32 646, implicit %exec +# GCN: %11:vgpr_32 = V_MOV_B32_e32 646, implicit $exec # GCN: FLAT_STORE_DWORD %19, %11, -# GCN: %12:vgpr_32 = V_MOV_B32_e32 1234567, implicit %exec +# GCN: %12:vgpr_32 = V_MOV_B32_e32 1234567, implicit $exec # GCN: FLAT_STORE_DWORD %19, %12, -# GCN: %13:vgpr_32 = V_MOV_B32_e32 63, implicit %exec +# GCN: %13:vgpr_32 = V_MOV_B32_e32 63, implicit $exec # GCN: FLAT_STORE_DWORD %19, %13, name: v_fold_and_imm_regimm_32 @@ -108,8 +108,8 @@ - { id: 44, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -126,37 +126,37 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 - %31 = V_ASHRREV_I32_e64 31, %3, implicit %exec + %31 = V_ASHRREV_I32_e64 31, %3, implicit $exec %32 = REG_SEQUENCE %3, 1, %31, 2 - %33 = V_LSHLREV_B64 2, killed %32, implicit %exec + %33 = V_LSHLREV_B64 2, killed %32, implicit $exec %20 = COPY %4.sub1 - %44 = V_ADD_I32_e32 %4.sub0, %33.sub0, implicit-def %vcc, implicit %exec + %44 = V_ADD_I32_e32 %4.sub0, %33.sub0, implicit-def $vcc, implicit $exec %36 = COPY killed %20 - %35 = V_ADDC_U32_e32 %33.sub1, %36, implicit-def %vcc, implicit %vcc, implicit %exec + %35 = V_ADDC_U32_e32 %33.sub1, %36, implicit-def $vcc, implicit $vcc, implicit $exec %37 = REG_SEQUENCE %44, 1, killed %35, 2 - %24 = V_MOV_B32_e32 982, implicit %exec + %24 = V_MOV_B32_e32 982, implicit $exec %26 = S_MOV_B32 1234567 - %34 = V_MOV_B32_e32 63, implicit %exec + %34 = V_MOV_B32_e32 63, implicit $exec - %27 = V_AND_B32_e64 %26, %24, implicit %exec - FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr + %27 = V_AND_B32_e64 %26, %24, implicit $exec + FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit $exec, implicit $flat_scr - %28 = V_AND_B32_e64 %24, %26, implicit %exec - FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr + %28 = V_AND_B32_e64 %24, %26, implicit $exec + FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit $exec, implicit $flat_scr - %29 = V_AND_B32_e32 %26, %24, implicit %exec - FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr + %29 = V_AND_B32_e32 %26, %24, implicit $exec + FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit $exec, implicit $flat_scr - %30 = V_AND_B32_e64 %26, %26, implicit %exec - FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr + %30 = V_AND_B32_e64 %26, %26, implicit $exec + FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit $exec, implicit $flat_scr - %31 = V_AND_B32_e64 %34, %34, implicit %exec - FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr + %31 = V_AND_B32_e64 %34, %34, implicit $exec + FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM @@ -164,7 +164,7 @@ --- # GCN-LABEL: name: s_fold_shl_imm_regimm_32{{$}} -# GC1: %13 = V_MOV_B32_e32 4096, implicit %exec +# GC1: 
%13 = V_MOV_B32_e32 4096, implicit $exec # GCN: BUFFER_STORE_DWORD_OFFSET killed %13, name: s_fold_shl_imm_regimm_32 @@ -190,7 +190,7 @@ - { id: 12, class: sreg_32_xm0 } - { id: 13, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -207,9 +207,9 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %0 = COPY %sgpr0_sgpr1 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 %5 = S_MOV_B32 1 %6 = COPY %4.sub1 @@ -217,43 +217,43 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4 - %12 = S_LSHL_B32 killed %5, 12, implicit-def dead %scc + %12 = S_LSHL_B32 killed %5, 12, implicit-def dead $scc %13 = COPY %12 - BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... --- # GCN-LABEL: name: v_fold_shl_imm_regimm_32{{$}} -# GCN: %11:vgpr_32 = V_MOV_B32_e32 40955904, implicit %exec +# GCN: %11:vgpr_32 = V_MOV_B32_e32 40955904, implicit $exec # GCN: FLAT_STORE_DWORD %20, %11, -# GCN: %12:vgpr_32 = V_MOV_B32_e32 24, implicit %exec +# GCN: %12:vgpr_32 = V_MOV_B32_e32 24, implicit $exec # GCN: FLAT_STORE_DWORD %20, %12, -# GCN: %13:vgpr_32 = V_MOV_B32_e32 4096, implicit %exec +# GCN: %13:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec # GCN: FLAT_STORE_DWORD %20, %13, -# GCN: %14:vgpr_32 = V_MOV_B32_e32 24, implicit %exec +# GCN: %14:vgpr_32 = V_MOV_B32_e32 24, implicit $exec # GCN: FLAT_STORE_DWORD %20, %14, -# GCN: %15:vgpr_32 = V_MOV_B32_e32 0, implicit %exec +# GCN: %15:vgpr_32 = V_MOV_B32_e32 0, implicit $exec # GCN: FLAT_STORE_DWORD %20, %15, -# GCN: %22:vgpr_32 = V_MOV_B32_e32 4096, implicit %exec +# GCN: %22:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec # GCN: FLAT_STORE_DWORD %20, %22, -# GCN: %23:vgpr_32 = V_MOV_B32_e32 1, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 1, implicit $exec # GCN: FLAT_STORE_DWORD %20, %23, -# GCN: %25:vgpr_32 = V_MOV_B32_e32 2, implicit %exec +# GCN: %25:vgpr_32 = V_MOV_B32_e32 2, implicit $exec # GCN: FLAT_STORE_DWORD %20, %25, -# GCN: %26:vgpr_32 = V_MOV_B32_e32 7927808, implicit %exec +# GCN: %26:vgpr_32 = V_MOV_B32_e32 7927808, implicit $exec # GCN: FLAT_STORE_DWORD %20, %26, -# GCN: %28:vgpr_32 = V_MOV_B32_e32 -8, implicit %exec +# GCN: %28:vgpr_32 = V_MOV_B32_e32 -8, implicit $exec # GCN: FLAT_STORE_DWORD %20, %28, name: v_fold_shl_imm_regimm_32 @@ -294,8 +294,8 @@ - { id: 27, class: sreg_32_xm0 } - { id: 28, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%2' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -312,54 +312,54 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %2 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %2 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec + %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec %16 = REG_SEQUENCE %2, 1, %15, 2 - %17 = V_LSHLREV_B64 2, killed %16, implicit %exec + %17 = V_LSHLREV_B64 2, killed %16, implicit $exec %9 = COPY %3.sub1 - %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, 
implicit-def %vcc, implicit %exec + %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec %19 = COPY killed %9 - %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec + %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec %20 = REG_SEQUENCE %21, 1, killed %18, 2 - %10 = V_MOV_B32_e32 9999, implicit %exec - %24 = V_MOV_B32_e32 3871, implicit %exec - %6 = V_MOV_B32_e32 1, implicit %exec + %10 = V_MOV_B32_e32 9999, implicit $exec + %24 = V_MOV_B32_e32 3871, implicit $exec + %6 = V_MOV_B32_e32 1, implicit $exec %7 = S_MOV_B32 1 %27 = S_MOV_B32 -4 - %11 = V_LSHLREV_B32_e64 12, %10, implicit %exec - FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr + %11 = V_LSHLREV_B32_e64 12, %10, implicit $exec + FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr - %12 = V_LSHLREV_B32_e64 %7, 12, implicit %exec - FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr + %12 = V_LSHLREV_B32_e64 %7, 12, implicit $exec + FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr - %13 = V_LSHL_B32_e64 %7, 12, implicit %exec - FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr + %13 = V_LSHL_B32_e64 %7, 12, implicit $exec + FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr - %14 = V_LSHL_B32_e64 12, %7, implicit %exec - FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr + %14 = V_LSHL_B32_e64 12, %7, implicit $exec + FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr - %15 = V_LSHL_B32_e64 12, %24, implicit %exec - FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr + %15 = V_LSHL_B32_e64 12, %24, implicit $exec + FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr - %22 = V_LSHL_B32_e64 %6, 12, implicit %exec - FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr + %22 = V_LSHL_B32_e64 %6, 12, implicit $exec + FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr - %23 = V_LSHL_B32_e64 %6, 32, implicit %exec - FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr + %23 = V_LSHL_B32_e64 %6, 32, implicit $exec + FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr - %25 = V_LSHL_B32_e32 %6, %6, implicit %exec - FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr + %25 = V_LSHL_B32_e32 %6, %6, implicit $exec + FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr - %26 = V_LSHLREV_B32_e32 11, %24, implicit %exec - FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr + %26 = V_LSHLREV_B32_e32 11, %24, implicit $exec + FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr - %28 = V_LSHL_B32_e32 %27, %6, implicit %exec - FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr + %28 = V_LSHL_B32_e32 %27, %6, implicit $exec + FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM @@ -367,7 +367,7 @@ --- # GCN-LABEL: name: s_fold_ashr_imm_regimm_32{{$}} -# GCN: %11:vgpr_32 = V_MOV_B32_e32 243, implicit %exec +# GCN: %11:vgpr_32 = V_MOV_B32_e32 243, implicit $exec # GCN: BUFFER_STORE_DWORD_OFFSET killed %11, killed %8, name: s_fold_ashr_imm_regimm_32 alignment: 0 @@ -390,7 +390,7 @@ - { id: 12, class: sreg_32_xm0 } - { id: 13, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: 
false isReturnAddressTaken: false @@ -407,9 +407,9 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %0 = COPY %sgpr0_sgpr1 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 %5 = S_MOV_B32 999123 %6 = COPY %4.sub1 @@ -417,42 +417,42 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4 - %12 = S_ASHR_I32 killed %5, 12, implicit-def dead %scc + %12 = S_ASHR_I32 killed %5, 12, implicit-def dead $scc %13 = COPY %12 - BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... # GCN-LABEL: name: v_fold_ashr_imm_regimm_32{{$}} -# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit %exec +# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit $exec # GCN: FLAT_STORE_DWORD %20, %11, -# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit %exec +# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit $exec # GCN: FLAT_STORE_DWORD %20, %12, -# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit %exec +# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit $exec # GCN: FLAT_STORE_DWORD %20, %13, -# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit %exec +# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit $exec # GCN: FLAT_STORE_DWORD %20, %14, -# GCN: %15:vgpr_32 = V_MOV_B32_e32 -1, implicit %exec +# GCN: %15:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec # GCN: FLAT_STORE_DWORD %20, %15, -# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit %exec +# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit $exec # GCN: FLAT_STORE_DWORD %20, %22, -# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit $exec # GCN: FLAT_STORE_DWORD %20, %23, -# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit %exec +# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit $exec # GCN: FLAT_STORE_DWORD %20, %25, -# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit %exec +# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit $exec # GCN: FLAT_STORE_DWORD %20, %26, -# GCN: %28:vgpr_32 = V_MOV_B32_e32 -1, implicit %exec +# GCN: %28:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec # GCN: FLAT_STORE_DWORD %20, %28, name: v_fold_ashr_imm_regimm_32 @@ -497,8 +497,8 @@ - { id: 34, class: vgpr_32 } - { id: 35, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%2' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -515,59 +515,59 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %2 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %2 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 - %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec + %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec %16 = REG_SEQUENCE %2, 1, %15, 2 - %17 = V_LSHLREV_B64 2, killed %16, implicit %exec + %17 = V_LSHLREV_B64 2, killed %16, implicit $exec %9 = COPY %3.sub1 - %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec + %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec %19 = COPY killed %9 - %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec + %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec %20 = REG_SEQUENCE %21, 1, killed %18, 2 - %10 = V_MOV_B32_e32 999234234, implicit 
%exec - %24 = V_MOV_B32_e32 3871, implicit %exec - %6 = V_MOV_B32_e32 1000000, implicit %exec + %10 = V_MOV_B32_e32 999234234, implicit $exec + %24 = V_MOV_B32_e32 3871, implicit $exec + %6 = V_MOV_B32_e32 1000000, implicit $exec %7 = S_MOV_B32 13424252 %8 = S_MOV_B32 4 %27 = S_MOV_B32 -4 %32 = S_MOV_B32 1 %33 = S_MOV_B32 3841 - %34 = V_MOV_B32_e32 3841, implicit %exec - %35 = V_MOV_B32_e32 2, implicit %exec + %34 = V_MOV_B32_e32 3841, implicit $exec + %35 = V_MOV_B32_e32 2, implicit $exec - %11 = V_ASHRREV_I32_e64 8, %10, implicit %exec - FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr + %11 = V_ASHRREV_I32_e64 8, %10, implicit $exec + FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr - %12 = V_ASHRREV_I32_e64 %8, %10, implicit %exec - FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr + %12 = V_ASHRREV_I32_e64 %8, %10, implicit $exec + FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr - %13 = V_ASHR_I32_e64 %7, 3, implicit %exec - FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr + %13 = V_ASHR_I32_e64 %7, 3, implicit $exec + FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr - %14 = V_ASHR_I32_e64 7, %32, implicit %exec - FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr + %14 = V_ASHR_I32_e64 7, %32, implicit $exec + FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr - %15 = V_ASHR_I32_e64 %27, %24, implicit %exec - FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr + %15 = V_ASHR_I32_e64 %27, %24, implicit $exec + FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr - %22 = V_ASHR_I32_e64 %6, 4, implicit %exec - FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr + %22 = V_ASHR_I32_e64 %6, 4, implicit $exec + FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr - %23 = V_ASHR_I32_e64 %6, %33, implicit %exec - FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr + %23 = V_ASHR_I32_e64 %6, %33, implicit $exec + FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr - %25 = V_ASHR_I32_e32 %34, %34, implicit %exec - FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr + %25 = V_ASHR_I32_e32 %34, %34, implicit $exec + FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr - %26 = V_ASHRREV_I32_e32 11, %10, implicit %exec - FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr + %26 = V_ASHRREV_I32_e32 11, %10, implicit $exec + FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr - %28 = V_ASHR_I32_e32 %27, %35, implicit %exec - FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr + %28 = V_ASHR_I32_e32 %27, %35, implicit $exec + FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM @@ -575,7 +575,7 @@ --- # GCN-LABEL: name: s_fold_lshr_imm_regimm_32{{$}} -# GCN: %11:vgpr_32 = V_MOV_B32_e32 1048332, implicit %exec +# GCN: %11:vgpr_32 = V_MOV_B32_e32 1048332, implicit $exec # GCN: BUFFER_STORE_DWORD_OFFSET killed %11, killed %8, name: s_fold_lshr_imm_regimm_32 alignment: 0 @@ -598,7 +598,7 @@ - { id: 12, class: sreg_32_xm0 } - { id: 13, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -615,9 +615,9 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1 + 
liveins: $sgpr0_sgpr1 - %0 = COPY %sgpr0_sgpr1 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 36, 0 %5 = S_MOV_B32 -999123 %6 = COPY %4.sub1 @@ -625,43 +625,43 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4 - %12 = S_LSHR_B32 killed %5, 12, implicit-def dead %scc + %12 = S_LSHR_B32 killed %5, 12, implicit-def dead $scc %13 = COPY %12 - BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... --- # GCN-LABEL: name: v_fold_lshr_imm_regimm_32{{$}} -# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit %exec +# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit $exec # GCN: FLAT_STORE_DWORD %20, %11, -# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit %exec +# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit $exec # GCN: FLAT_STORE_DWORD %20, %12, -# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit %exec +# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit $exec # GCN: FLAT_STORE_DWORD %20, %13, -# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit %exec +# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit $exec # GCN: FLAT_STORE_DWORD %20, %14, -# GCN: %15:vgpr_32 = V_MOV_B32_e32 1, implicit %exec +# GCN: %15:vgpr_32 = V_MOV_B32_e32 1, implicit $exec # GCN: FLAT_STORE_DWORD %20, %15, -# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit %exec +# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit $exec # GCN: FLAT_STORE_DWORD %20, %22, -# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit $exec # GCN: FLAT_STORE_DWORD %20, %23, -# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit %exec +# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit $exec # GCN: FLAT_STORE_DWORD %20, %25, -# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit %exec +# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit $exec # GCN: FLAT_STORE_DWORD %20, %26, -# GCN: %28:vgpr_32 = V_MOV_B32_e32 1073741823, implicit %exec +# GCN: %28:vgpr_32 = V_MOV_B32_e32 1073741823, implicit $exec # GCN: FLAT_STORE_DWORD %20, %28, name: v_fold_lshr_imm_regimm_32 @@ -706,8 +706,8 @@ - { id: 34, class: vgpr_32 } - { id: 35, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%2' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -724,59 +724,59 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %2 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %2 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 - %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec + %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec %16 = REG_SEQUENCE %2, 1, %15, 2 - %17 = V_LSHLREV_B64 2, killed %16, implicit %exec + %17 = V_LSHLREV_B64 2, killed %16, implicit $exec %9 = COPY %3.sub1 - %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec + %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec %19 = COPY killed %9 - %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec + %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec %20 = REG_SEQUENCE %21, 1, killed %18, 2 - %10 = V_MOV_B32_e32 999234234, implicit %exec - %24 = V_MOV_B32_e32 3871, implicit %exec - %6 = V_MOV_B32_e32 1000000, implicit %exec + %10 = 
V_MOV_B32_e32 999234234, implicit $exec + %24 = V_MOV_B32_e32 3871, implicit $exec + %6 = V_MOV_B32_e32 1000000, implicit $exec %7 = S_MOV_B32 13424252 %8 = S_MOV_B32 4 %27 = S_MOV_B32 -4 %32 = S_MOV_B32 1 %33 = S_MOV_B32 3841 - %34 = V_MOV_B32_e32 3841, implicit %exec - %35 = V_MOV_B32_e32 2, implicit %exec + %34 = V_MOV_B32_e32 3841, implicit $exec + %35 = V_MOV_B32_e32 2, implicit $exec - %11 = V_LSHRREV_B32_e64 8, %10, implicit %exec - FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr + %11 = V_LSHRREV_B32_e64 8, %10, implicit $exec + FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr - %12 = V_LSHRREV_B32_e64 %8, %10, implicit %exec - FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr + %12 = V_LSHRREV_B32_e64 %8, %10, implicit $exec + FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr - %13 = V_LSHR_B32_e64 %7, 3, implicit %exec - FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr + %13 = V_LSHR_B32_e64 %7, 3, implicit $exec + FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr - %14 = V_LSHR_B32_e64 7, %32, implicit %exec - FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr + %14 = V_LSHR_B32_e64 7, %32, implicit $exec + FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr - %15 = V_LSHR_B32_e64 %27, %24, implicit %exec - FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr + %15 = V_LSHR_B32_e64 %27, %24, implicit $exec + FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr - %22 = V_LSHR_B32_e64 %6, 4, implicit %exec - FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr + %22 = V_LSHR_B32_e64 %6, 4, implicit $exec + FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr - %23 = V_LSHR_B32_e64 %6, %33, implicit %exec - FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr + %23 = V_LSHR_B32_e64 %6, %33, implicit $exec + FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr - %25 = V_LSHR_B32_e32 %34, %34, implicit %exec - FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr + %25 = V_LSHR_B32_e32 %34, %34, implicit $exec + FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr - %26 = V_LSHRREV_B32_e32 11, %10, implicit %exec - FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr + %26 = V_LSHRREV_B32_e32 11, %10, implicit $exec + FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr - %28 = V_LSHR_B32_e32 %27, %35, implicit %exec - FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr + %28 = V_LSHR_B32_e32 %27, %35, implicit $exec + FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM @@ -798,9 +798,9 @@ - { id: 3, class: vreg_64, preferred-register: '' } body: | bb.0: - %0 = V_MOV_B32_e32 0, implicit %exec - %2 = V_XOR_B32_e64 killed %0, undef %1, implicit %exec - FLAT_STORE_DWORD undef %3, %2, 0, 0, 0, implicit %exec, implicit %flat_scr + %0 = V_MOV_B32_e32 0, implicit $exec + %2 = V_XOR_B32_e64 killed %0, undef %1, implicit $exec + FLAT_STORE_DWORD undef %3, %2, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM ... 
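# (Spot-checking a few of the expected folds in this file: 1 << 12 = 4096
# for the shl case, 999123 >> 12 = 243 for the arithmetic shift, and
# -999123 viewed as the unsigned value 4293968173 gives 4293968173 >> 12 =
# 1048332 for the logical shift, all matching the V_MOV_B32_e32 immediates
# in the GCN checks above.)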
Index: test/CodeGen/AMDGPU/dead_copy.mir =================================================================== --- test/CodeGen/AMDGPU/dead_copy.mir +++ test/CodeGen/AMDGPU/dead_copy.mir @@ -2,8 +2,8 @@ # GCN-LABEL: dead_copy # GCN: bb.0 -# GCN-NOT: dead %vgpr5 = COPY undef %vgpr11, implicit %exec -# GCN: %vgpr5 = COPY %vgpr11, implicit %exec +# GCN-NOT: dead $vgpr5 = COPY undef $vgpr11, implicit $exec +# GCN: $vgpr5 = COPY $vgpr11, implicit $exec --- name: dead_copy @@ -11,17 +11,17 @@ body: | bb.0: - liveins: %vgpr11, %sgpr0, %sgpr1, %vgpr6, %vgpr7, %vgpr4 + liveins: $vgpr11, $sgpr0, $sgpr1, $vgpr6, $vgpr7, $vgpr4 - dead %vgpr5 = COPY undef %vgpr11, implicit %exec + dead $vgpr5 = COPY undef $vgpr11, implicit $exec - %vgpr5 = COPY %vgpr11, implicit %exec + $vgpr5 = COPY $vgpr11, implicit $exec - %sgpr14 = S_ADD_U32 %sgpr0, target-flags(amdgpu-gotprel) 1136, implicit-def %scc - %sgpr15 = S_ADDC_U32 %sgpr1, target-flags(amdgpu-gotprel32-lo) 0, implicit-def dead %scc, implicit %scc + $sgpr14 = S_ADD_U32 $sgpr0, target-flags(amdgpu-gotprel) 1136, implicit-def $scc + $sgpr15 = S_ADDC_U32 $sgpr1, target-flags(amdgpu-gotprel32-lo) 0, implicit-def dead $scc, implicit $scc - %vgpr10 = COPY killed %sgpr14, implicit %exec - %vgpr11 = COPY killed %sgpr15, implicit %exec + $vgpr10 = COPY killed $sgpr14, implicit $exec + $vgpr11 = COPY killed $sgpr15, implicit $exec - FLAT_STORE_DWORDX4 %vgpr10_vgpr11, %vgpr4_vgpr5_vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORDX4 $vgpr10_vgpr11, $vgpr4_vgpr5_vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr ... Index: test/CodeGen/AMDGPU/debug-value2.ll =================================================================== --- test/CodeGen/AMDGPU/debug-value2.ll +++ test/CodeGen/AMDGPU/debug-value2.ll @@ -10,9 +10,9 @@ define <4 x float> @Scene_transformT(i32 %subshapeIdx, <4 x float> %v, float %time, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets) local_unnamed_addr !dbg !110 { entry: -; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] %vgpr6_vgpr7 +; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr6_vgpr7 call void @llvm.dbg.value(metadata i8 addrspace(1)* %gScene, metadata !120, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !154 -; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] %vgpr8_vgpr9 +; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr8_vgpr9 call void @llvm.dbg.value(metadata i32 addrspace(1)* %gSceneOffsets, metadata !121, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !155 %call = tail call %struct.ShapeData addrspace(1)* @Scene_getSubShapeData(i32 %subshapeIdx, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets) %m_linearMotion = getelementptr inbounds %struct.ShapeData, %struct.ShapeData addrspace(1)* %call, i64 0, i32 2 Index: test/CodeGen/AMDGPU/detect-dead-lanes.mir =================================================================== --- test/CodeGen/AMDGPU/detect-dead-lanes.mir +++ test/CodeGen/AMDGPU/detect-dead-lanes.mir @@ -42,9 +42,9 @@ # Check defined lanes transfer; Includes checking for some special cases like # undef operands or IMPLICIT_DEF definitions. 
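# (Background for the checks that follow, stated informally: an sreg_128
# value has four 32-bit lanes, sub0..sub3. The REG_SEQUENCE in test1 writes
# only sub0 and sub2, so a read of %1.sub1 sees a lane that was never
# defined, which is why the checks expect "implicit undef %1.sub1".)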
# CHECK-LABEL: name: test1 -# CHECK: %0:sreg_128 = REG_SEQUENCE %sgpr0, %subreg.sub0, %sgpr0, %subreg.sub2 -# CHECK: %1:sreg_128 = INSERT_SUBREG %0, %sgpr1, %subreg.sub3 -# CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, %sgpr42, %subreg.sub0 +# CHECK: %0:sreg_128 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2 +# CHECK: %1:sreg_128 = INSERT_SUBREG %0, $sgpr1, %subreg.sub3 +# CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, $sgpr42, %subreg.sub0 # CHECK: S_NOP 0, implicit %1.sub0 # CHECK: S_NOP 0, implicit undef %1.sub1 # CHECK: S_NOP 0, implicit %1.sub2 @@ -87,9 +87,9 @@ - { id: 10, class: sreg_128 } body: | bb.0: - %0 = REG_SEQUENCE %sgpr0, %subreg.sub0, %sgpr0, %subreg.sub2 - %1 = INSERT_SUBREG %0, %sgpr1, %subreg.sub3 - %2 = INSERT_SUBREG %0.sub2_sub3, %sgpr42, %subreg.sub0 + %0 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2 + %1 = INSERT_SUBREG %0, $sgpr1, %subreg.sub3 + %2 = INSERT_SUBREG %0.sub2_sub3, $sgpr42, %subreg.sub0 S_NOP 0, implicit %1.sub0 S_NOP 0, implicit %1.sub1 S_NOP 0, implicit %1.sub2 @@ -204,8 +204,8 @@ # lanes. So we should not get a dead/undef flag here. # CHECK-LABEL: name: test3 # CHECK: S_NOP 0, implicit-def %0 -# CHECK: %vcc = COPY %0 -# CHECK: %1:sreg_64 = COPY %vcc +# CHECK: $vcc = COPY %0 +# CHECK: %1:sreg_64 = COPY $vcc # CHECK: S_NOP 0, implicit %1 name: test3 tracksRegLiveness: true @@ -215,9 +215,9 @@ body: | bb.0: S_NOP 0, implicit-def %0 - %vcc = COPY %0 + $vcc = COPY %0 - %1 = COPY %vcc + %1 = COPY $vcc S_NOP 0, implicit %1 ... --- @@ -296,7 +296,7 @@ ; let's swiffle some lanes around for fun... %5 = REG_SEQUENCE %4.sub0, %subreg.sub0, %4.sub2, %subreg.sub1, %4.sub1, %subreg.sub2, %4.sub3, %subreg.sub3 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.2: @@ -349,7 +349,7 @@ ; rotate lanes, but skip sub2 lane... %6 = REG_SEQUENCE %5.sub1, %subreg.sub0, %5.sub3, %subreg.sub1, %5.sub2, %subreg.sub2, %5.sub0, %subreg.sub3 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.2: @@ -392,7 +392,7 @@ ; rotate subreg lanes, skipping sub1 %3 = REG_SEQUENCE %2.sub3, %subreg.sub0, %2.sub1, %subreg.sub1, %2.sub0, %subreg.sub2, %2.sub2, %subreg.sub3 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.2: Index: test/CodeGen/AMDGPU/endpgm-dce.mir =================================================================== --- test/CodeGen/AMDGPU/endpgm-dce.mir +++ test/CodeGen/AMDGPU/endpgm-dce.mir @@ -13,19 +13,19 @@ - { id: 4, class: sgpr_32 } body: | bb.0: - %vcc = IMPLICIT_DEF + $vcc = IMPLICIT_DEF %0 = IMPLICIT_DEF %3 = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4) - %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec - %4 = S_ADD_U32 %3, 1, implicit-def %scc + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4) + %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec + %4 = S_ADD_U32 %3, 1, implicit-def $scc S_ENDPGM ... 
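# (What endpgm-dce checks, in brief: results computed just before S_ENDPGM
# with no readers, like %1, %2 and %4 above, can be deleted. The following
# cases pin down when a load has to survive anyway, e.g. when it lacks a
# memory operand or is volatile.)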
--- # GCN-LABEL: name: load_without_memoperand -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc -# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc +# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr # GCN-NEXT: S_ENDPGM name: load_without_memoperand tracksRegLiveness: true @@ -37,19 +37,19 @@ - { id: 4, class: sgpr_32 } body: | bb.0: - %vcc = IMPLICIT_DEF + $vcc = IMPLICIT_DEF %0 = IMPLICIT_DEF %3 = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr - %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec - %4 = S_ADD_U32 %3, 1, implicit-def %scc + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr + %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec + %4 = S_ADD_U32 %3, 1, implicit-def $scc S_ENDPGM ... --- # GCN-LABEL: name: load_volatile -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc -# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile load 4) +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc +# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4) # GCN-NEXT: S_ENDPGM name: load_volatile tracksRegLiveness: true @@ -61,19 +61,19 @@ - { id: 4, class: sgpr_32 } body: | bb.0: - %vcc = IMPLICIT_DEF + $vcc = IMPLICIT_DEF %0 = IMPLICIT_DEF %3 = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile load 4) - %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec - %4 = S_ADD_U32 %3, 1, implicit-def %scc + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4) + %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec + %4 = S_ADD_U32 %3, 1, implicit-def $scc S_ENDPGM ... --- # GCN-LABEL: name: store -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc -# GCN-NEXT: FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4) +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc +# GCN-NEXT: FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4) # GCN-NEXT: S_ENDPGM name: store tracksRegLiveness: true @@ -82,45 +82,45 @@ - { id: 1, class: vgpr_32 } body: | bb.0: - %vcc = IMPLICIT_DEF + $vcc = IMPLICIT_DEF %0 = IMPLICIT_DEF %1 = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4) + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4) S_ENDPGM ... 
--- # GCN-LABEL: name: barrier -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc # GCN-NEXT: S_BARRIER # GCN-NEXT: S_ENDPGM name: barrier tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc S_BARRIER S_ENDPGM ... --- # GCN-LABEL: name: call -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc -# GCN-NEXT: %sgpr4_sgpr5 = S_SWAPPC_B64 %sgpr2_sgpr3 +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc +# GCN-NEXT: $sgpr4_sgpr5 = S_SWAPPC_B64 $sgpr2_sgpr3 # GCN-NEXT: S_ENDPGM name: call tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - %sgpr4_sgpr5 = S_SWAPPC_B64 %sgpr2_sgpr3 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + $sgpr4_sgpr5 = S_SWAPPC_B64 $sgpr2_sgpr3 S_ENDPGM ... --- # GCN-LABEL: name: exp -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc -# GCN-NEXT: EXP 32, undef %0:vgpr_32, undef %1:vgpr_32, %2, undef %3:vgpr_32, 0, 0, 15, implicit %exec +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc +# GCN-NEXT: EXP 32, undef %0:vgpr_32, undef %1:vgpr_32, %2, undef %3:vgpr_32, 0, 0, 15, implicit $exec # GCN-NEXT: S_ENDPGM name: exp tracksRegLiveness: true @@ -131,24 +131,24 @@ - { id: 3, class: vgpr_32 } body: | bb.0: - %vcc = IMPLICIT_DEF + $vcc = IMPLICIT_DEF %2 = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - EXP 32, undef %0, undef %1, killed %2, undef %3, 0, 0, 15, implicit %exec + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + EXP 32, undef %0, undef %1, killed %2, undef %3, 0, 0, 15, implicit $exec S_ENDPGM ... --- # GCN-LABEL: name: return_to_epilog -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc -# GCN-NEXT: SI_RETURN_TO_EPILOG killed %vgpr0 +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc +# GCN-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0 name: return_to_epilog tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %vgpr0 = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - SI_RETURN_TO_EPILOG killed %vgpr0 + $vcc = IMPLICIT_DEF + $vgpr0 = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + SI_RETURN_TO_EPILOG killed $vgpr0 ... --- # GCN-LABEL: name: split_block @@ -166,14 +166,14 @@ - { id: 3, class: sgpr_32 } body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc bb.1: %0 = IMPLICIT_DEF %2 = IMPLICIT_DEF - %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit %exec - %3 = S_ADD_U32 %2, 1, implicit-def %scc + %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit $exec + %3 = S_ADD_U32 %2, 1, implicit-def $scc S_ENDPGM ... 
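# (Summary of the preceding cases: S_BARRIER, the S_SWAPPC_B64 call, EXP
# and SI_RETURN_TO_EPILOG all have side effects, so the checks expect them
# to be kept while the dead ALU results around them are dropped.)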
--- @@ -188,8 +188,8 @@ tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc bb.1: @@ -208,8 +208,8 @@ tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc S_BRANCH %bb.1 bb.1: @@ -219,8 +219,8 @@ # GCN-LABEL: name: split_block_cond_branch # GCN: bb.0: # GCN-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, %vcc, implicit-def %scc -# GCN: S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, $vcc, implicit-def $scc +# GCN: S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc # GCN: bb.1: # GCN: bb.2: # GCN-NEXT: S_ENDPGM @@ -228,9 +228,9 @@ tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, %vcc, implicit-def %scc - S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, $vcc, implicit-def $scc + S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc bb.1: @@ -253,13 +253,13 @@ tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc S_BRANCH %bb.2 bb.1: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc S_BRANCH %bb.2 bb.2: @@ -269,7 +269,7 @@ # GCN-LABEL: name: two_preds_one_dead # GCN: bb.0: # GCN-NEXT: successors: %bb.2 -# GCN: %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc +# GCN: $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc # GCN-NEXT: S_BARRIER # GCN-NEXT: S_BRANCH %bb.2 # GCN: bb.1: @@ -282,14 +282,14 @@ tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc S_BARRIER S_BRANCH %bb.2 bb.1: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc S_BRANCH %bb.2 bb.2: Index: test/CodeGen/AMDGPU/fix-vgpr-copies.mir =================================================================== --- test/CodeGen/AMDGPU/fix-vgpr-copies.mir +++ test/CodeGen/AMDGPU/fix-vgpr-copies.mir @@ -1,8 +1,8 @@ # RUN: llc -march=amdgcn -start-after=greedy -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s # Check that we first do all vector instructions and only then change exec -# CHECK-DAG: COPY %vgpr10_vgpr11 -# CHECK-DAG: COPY %vgpr12_vgpr13 -# CHECK: %exec = COPY +# CHECK-DAG: COPY $vgpr10_vgpr11 +# CHECK-DAG: COPY $vgpr12_vgpr13 +# CHECK: $exec = COPY --- name: main @@ -13,9 +13,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%sgpr4_sgpr5' } - - { reg: '%sgpr6' } - - { reg: '%vgpr0' } + - { reg: '$sgpr4_sgpr5' } + - { reg: '$sgpr6' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -32,13 +32,13 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %vgpr3, %vgpr10_vgpr11, %vgpr12_vgpr13 + liveins: $vgpr3, $vgpr10_vgpr11, $vgpr12_vgpr13 - %vcc = V_CMP_NE_U32_e64 0, killed %vgpr3, implicit %exec - %sgpr4_sgpr5 = COPY 
%exec, implicit-def %exec - %sgpr6_sgpr7 = S_AND_B64 %sgpr4_sgpr5, killed %vcc, implicit-def dead %scc - %sgpr4_sgpr5 = S_XOR_B64 %sgpr6_sgpr7, killed %sgpr4_sgpr5, implicit-def dead %scc - %vgpr61_vgpr62 = COPY %vgpr10_vgpr11 - %vgpr155_vgpr156 = COPY %vgpr12_vgpr13 - %exec = S_MOV_B64_term killed %sgpr6_sgpr7 + $vcc = V_CMP_NE_U32_e64 0, killed $vgpr3, implicit $exec + $sgpr4_sgpr5 = COPY $exec, implicit-def $exec + $sgpr6_sgpr7 = S_AND_B64 $sgpr4_sgpr5, killed $vcc, implicit-def dead $scc + $sgpr4_sgpr5 = S_XOR_B64 $sgpr6_sgpr7, killed $sgpr4_sgpr5, implicit-def dead $scc + $vgpr61_vgpr62 = COPY $vgpr10_vgpr11 + $vgpr155_vgpr156 = COPY $vgpr12_vgpr13 + $exec = S_MOV_B64_term killed $sgpr6_sgpr7 ... Index: test/CodeGen/AMDGPU/fix-wwm-liveness.mir =================================================================== --- test/CodeGen/AMDGPU/fix-wwm-liveness.mir +++ test/CodeGen/AMDGPU/fix-wwm-liveness.mir @@ -1,5 +1,5 @@ # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fix-wwm-liveness -o - %s | FileCheck %s -#CHECK: %exec = EXIT_WWM killed %19, implicit %21 +#CHECK: $exec = EXIT_WWM killed %19, implicit %21 --- name: test_wwm_liveness @@ -18,7 +18,7 @@ - { id: 5, class: vgpr_32, preferred-register: '' } - { id: 6, class: vgpr_32, preferred-register: '' } - { id: 7, class: vgpr_32, preferred-register: '' } - - { id: 8, class: sreg_64, preferred-register: '%vcc' } + - { id: 8, class: sreg_64, preferred-register: '$vcc' } - { id: 9, class: sreg_64, preferred-register: '' } - { id: 10, class: sreg_32_xm0, preferred-register: '' } - { id: 11, class: sreg_64, preferred-register: '' } @@ -39,15 +39,15 @@ bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) - %21 = V_MOV_B32_e32 0, implicit %exec - %5 = V_MBCNT_LO_U32_B32_e64 -1, 0, implicit %exec - %6 = V_MBCNT_HI_U32_B32_e32 -1, killed %5, implicit %exec - %8 = V_CMP_GT_U32_e64 32, killed %6, implicit %exec - %22 = COPY %exec, implicit-def %exec - %23 = S_AND_B64 %22, %8, implicit-def dead %scc - %0 = S_XOR_B64 %23, %22, implicit-def dead %scc - %exec = S_MOV_B64_term killed %23 - SI_MASK_BRANCH %bb.2, implicit %exec + %21 = V_MOV_B32_e32 0, implicit $exec + %5 = V_MBCNT_LO_U32_B32_e64 -1, 0, implicit $exec + %6 = V_MBCNT_HI_U32_B32_e32 -1, killed %5, implicit $exec + %8 = V_CMP_GT_U32_e64 32, killed %6, implicit $exec + %22 = COPY $exec, implicit-def $exec + %23 = S_AND_B64 %22, %8, implicit-def dead $scc + %0 = S_XOR_B64 %23, %22, implicit-def dead $scc + $exec = S_MOV_B64_term killed %23 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1: @@ -56,18 +56,18 @@ %13 = S_MOV_B32 61440 %14 = S_MOV_B32 -1 %15 = REG_SEQUENCE undef %12, 1, undef %10, 2, killed %14, 3, killed %13, 4 - %19 = COPY %exec - %exec = S_MOV_B64 -1 - %16 = BUFFER_LOAD_DWORD_OFFSET %15, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4) - %17 = V_ADD_F32_e32 1065353216, killed %16, implicit %exec - %exec = EXIT_WWM killed %19 - %21 = V_MOV_B32_e32 1, implicit %exec - early-clobber %18 = WWM killed %17, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %18, killed %15, 0, 0, 0, 0, 0, implicit %exec :: (store 4) + %19 = COPY $exec + $exec = S_MOV_B64 -1 + %16 = BUFFER_LOAD_DWORD_OFFSET %15, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4) + %17 = V_ADD_F32_e32 1065353216, killed %16, implicit $exec + $exec = EXIT_WWM killed %19 + %21 = V_MOV_B32_e32 1, implicit $exec + early-clobber %18 = WWM killed %17, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed %18, killed %15, 0, 0, 0, 0, 0, implicit $exec :: (store 4) bb.2: - %exec = S_OR_B64 %exec, killed %0, 
implicit-def %scc - %vgpr0 = COPY killed %21 - SI_RETURN_TO_EPILOG killed %vgpr0 + $exec = S_OR_B64 $exec, killed %0, implicit-def $scc + $vgpr0 = COPY killed %21 + SI_RETURN_TO_EPILOG killed $vgpr0 ... Index: test/CodeGen/AMDGPU/flat-load-clustering.mir =================================================================== --- test/CodeGen/AMDGPU/flat-load-clustering.mir +++ test/CodeGen/AMDGPU/flat-load-clustering.mir @@ -46,32 +46,32 @@ - { id: 12, class: vreg_64 } - { id: 13, class: vreg_64 } liveins: - - { reg: '%vgpr0', virtual-reg: '%0' } - - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' } + - { reg: '$vgpr0', virtual-reg: '%0' } + - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' } body: | bb.0.bb: - liveins: %vgpr0, %sgpr4_sgpr5 + liveins: $vgpr0, $sgpr4_sgpr5 - %1 = COPY %sgpr4_sgpr5 - %0 = COPY %vgpr0 + %1 = COPY $sgpr4_sgpr5 + %0 = COPY $vgpr0 %3 = S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %7 = V_LSHLREV_B32_e32 2, %0, implicit %exec - %2 = V_MOV_B32_e32 0, implicit %exec - undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def %vcc, implicit %exec + %7 = V_LSHLREV_B32_e32 2, %0, implicit $exec + %2 = V_MOV_B32_e32 0, implicit $exec + undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def $vcc, implicit $exec %11 = COPY %4.sub1 - %12.sub1 = V_ADDC_U32_e32 %11, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec - %5 = FLAT_LOAD_DWORD %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.gep1) - undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def %vcc, implicit %exec + %12.sub1 = V_ADDC_U32_e32 %11, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec + %5 = FLAT_LOAD_DWORD %12, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.gep1) + undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def $vcc, implicit $exec %8 = COPY %3.sub1 - %9.sub1 = V_ADDC_U32_e32 %8, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec - undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def %vcc, implicit %exec - %13.sub1 = V_ADDC_U32_e32 %12.sub1, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec - %6 = FLAT_LOAD_DWORD %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.gep34) - undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def %vcc, implicit %exec - %10.sub1 = V_ADDC_U32_e32 %9.sub1, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec - FLAT_STORE_DWORD %9, %5, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.gep2) - FLAT_STORE_DWORD %10, %6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.gep4) + %9.sub1 = V_ADDC_U32_e32 %8, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec + undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def $vcc, implicit $exec + %13.sub1 = V_ADDC_U32_e32 %12.sub1, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec + %6 = FLAT_LOAD_DWORD %13, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.gep34) + undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def $vcc, implicit $exec + %10.sub1 = V_ADDC_U32_e32 %9.sub1, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec + FLAT_STORE_DWORD %9, %5, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.gep2) + FLAT_STORE_DWORD %10, %6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.gep4) S_ENDPGM ... 
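# (Aside on the address math above: the 32-bit index in %0 is sign-extended
# with V_ASHRREV_I32 31, shifted left by 2 to form a byte offset of
# index*4, and added to the 64-bit base with V_ADD_I32/V_ADDC_U32, with the
# carry in $vcc.)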
Index: test/CodeGen/AMDGPU/fold-cndmask.mir =================================================================== --- test/CodeGen/AMDGPU/fold-cndmask.mir +++ test/CodeGen/AMDGPU/fold-cndmask.mir @@ -1,10 +1,10 @@ # RUN: llc -march=amdgcn -run-pass si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s -# CHECK: %1:vgpr_32 = V_MOV_B32_e32 0, implicit %exec -# CHECK: %2:vgpr_32 = V_MOV_B32_e32 0, implicit %exec +# CHECK: %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec +# CHECK: %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec # CHECK: %4:vgpr_32 = COPY %3 -# CHECK: %5:vgpr_32 = V_MOV_B32_e32 0, implicit %exec -# CHECK: %6:vgpr_32 = V_MOV_B32_e32 0, implicit %exec +# CHECK: %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec +# CHECK: %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec # CHECK: %7:vgpr_32 = COPY %3 --- @@ -22,13 +22,13 @@ body: | bb.0.entry: %0 = IMPLICIT_DEF - %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit %exec - %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit %exec + %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit $exec + %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit $exec %3 = IMPLICIT_DEF - %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit %exec + %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit $exec %5 = COPY %1 - %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit %exec - %vcc = IMPLICIT_DEF - %7 = V_CNDMASK_B32_e32 %3, %3, implicit %exec, implicit %vcc + %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit $exec + $vcc = IMPLICIT_DEF + %7 = V_CNDMASK_B32_e32 %3, %3, implicit $exec, implicit $vcc ... Index: test/CodeGen/AMDGPU/fold-imm-f16-f32.mir =================================================================== --- test/CodeGen/AMDGPU/fold-imm-f16-f32.mir +++ test/CodeGen/AMDGPU/fold-imm-f16-f32.mir @@ -111,7 +111,7 @@ # literal constant. # CHECK-LABEL: name: add_f32_1.0_one_f16_use -# CHECK: %13:vgpr_32 = V_ADD_F16_e32 1065353216, killed %11, implicit %exec +# CHECK: %13:vgpr_32 = V_ADD_F16_e32 1065353216, killed %11, implicit $exec name: add_f32_1.0_one_f16_use alignment: 0 @@ -158,10 +158,10 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = V_MOV_B32_e32 1065353216, implicit %exec - %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = V_MOV_B32_e32 1065353216, implicit $exec + %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) S_ENDPGM ... 
@@ -170,9 +170,9 @@ # operands # CHECK-LABEL: name: add_f32_1.0_multi_f16_use -# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit %exec -# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit %exec +# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec +# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit $exec name: add_f32_1.0_multi_f16_use @@ -222,13 +222,13 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %13 = V_MOV_B32_e32 1065353216, implicit %exec - %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit %exec - %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %13 = V_MOV_B32_e32 1065353216, implicit $exec + %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec + %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) S_ENDPGM ... 
@@ -238,8 +238,8 @@ # immediate, and folded into the single f16 use as a literal constant # CHECK-LABEL: name: add_f32_1.0_one_f32_use_one_f16_use -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit %exec -# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit $exec +# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec name: add_f32_1.0_one_f32_use_one_f16_use alignment: 0 @@ -289,14 +289,14 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %14 = V_MOV_B32_e32 1065353216, implicit %exec - %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec - %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %14 = V_MOV_B32_e32 1065353216, implicit $exec + %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec + %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`) S_ENDPGM ... 
@@ -306,10 +306,10 @@ # constant, and not folded as a multi-use literal for the f16 cases # CHECK-LABEL: name: add_f32_1.0_one_f32_use_multi_f16_use -# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %11, %14, implicit %exec -# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12, %14, implicit %exec -# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec +# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %11, %14, implicit $exec +# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12, %14, implicit $exec +# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec name: add_f32_1.0_one_f32_use_multi_f16_use alignment: 0 @@ -360,24 +360,24 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %14 = V_MOV_B32_e32 1065353216, implicit %exec - %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec - %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit %exec - %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %14 = V_MOV_B32_e32 1065353216, implicit $exec + %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec + %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec + %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: add_i32_1_multi_f16_use -# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit %exec -# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit %exec +# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit $exec +# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit $exec name: add_i32_1_multi_f16_use @@ -427,23 +427,23 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %13 = V_MOV_B32_e32 1, implicit %exec - %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit %exec - %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %13 = V_MOV_B32_e32 1, implicit $exec + %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec + %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: add_i32_m2_one_f32_use_multi_f16_use -# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit %exec -# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit %exec -# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit %exec +# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit $exec +# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit $exec +# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit $exec name: add_i32_m2_one_f32_use_multi_f16_use alignment: 0 @@ -494,16 +494,16 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %14 = V_MOV_B32_e32 -2, implicit %exec - %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec - %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit %exec - %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %14 = V_MOV_B32_e32 -2, implicit $exec + %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec + %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec + %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`) S_ENDPGM ... 
@@ -513,9 +513,9 @@ # constant, and not folded as a multi-use literal for the f16 cases # CHECK-LABEL: name: add_f16_1.0_multi_f32_use -# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit %exec -# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit %exec +# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit $exec +# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit $exec name: add_f16_1.0_multi_f32_use alignment: 0 @@ -564,13 +564,13 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %13 = V_MOV_B32_e32 15360, implicit %exec - %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit %exec - %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %13 = V_MOV_B32_e32 15360, implicit $exec + %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec + %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`) + BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`) S_ENDPGM ... 
@@ -580,9 +580,9 @@ # FIXME: Should be able to fold this # CHECK-LABEL: name: add_f16_1.0_other_high_bits_multi_f16_use -# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit %exec -# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit %exec +# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit $exec +# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec name: add_f16_1.0_other_high_bits_multi_f16_use alignment: 0 @@ -631,13 +631,13 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %13 = V_MOV_B32_e32 80886784, implicit %exec - %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit %exec - %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit %exec - BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %13 = V_MOV_B32_e32 80886784, implicit $exec + %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $exec + %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec + BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) S_ENDPGM ... @@ -647,9 +647,9 @@ # f32 instruction. 
# CHECK-LABEL: name: add_f16_1.0_other_high_bits_use_f16_f32 -# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit %exec -# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit %exec -# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit %exec +# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit $exec +# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec +# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec name: add_f16_1.0_other_high_bits_use_f16_f32 alignment: 0 exposesReturnsTwice: false @@ -697,13 +697,13 @@ %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`) - %13 = V_MOV_B32_e32 305413120, implicit %exec - %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit %exec - %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`) + %13 = V_MOV_B32_e32 305413120, implicit $exec + %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec + %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`) S_ENDPGM ... Index: test/CodeGen/AMDGPU/fold-immediate-output-mods.mir =================================================================== --- test/CodeGen/AMDGPU/fold-immediate-output-mods.mir +++ test/CodeGen/AMDGPU/fold-immediate-output-mods.mir @@ -1,8 +1,8 @@ # RUN: llc -march=amdgcn -run-pass peephole-opt -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s ... 
# GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32 -# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec -# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec +# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec name: no_fold_imm_madak_mac_clamp_f32 tracksRegLiveness: true @@ -38,42 +38,42 @@ - { id: 28, class: vreg_64 } - { id: 29, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 - %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec %28 = REG_SEQUENCE %3, 1, %27, 2 %11 = S_MOV_B32 61440 %12 = S_MOV_B32 0 %13 = REG_SEQUENCE killed %12, 1, killed %11, 2 %14 = REG_SEQUENCE killed %5, 17, %13, 18 %15 = S_MOV_B32 2 - %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec + %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec %17 = REG_SEQUENCE killed %6, 17, %13, 18 %18 = REG_SEQUENCE killed %4, 17, %13, 18 %20 = COPY %29 - %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec + %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec %22 = COPY %29 - %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec - %23 = V_MOV_B32_e32 1090519040, implicit %exec - %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec + %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec + %23 = V_MOV_B32_e32 1090519040, implicit $exec + %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec %26 = COPY %29 - BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
--- # GCN-LABEL: name: no_fold_imm_madak_mac_omod_f32 -# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec -# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec +# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec name: no_fold_imm_madak_mac_omod_f32 tracksRegLiveness: true @@ -109,42 +109,42 @@ - { id: 28, class: vreg_64 } - { id: 29, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 - %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec %28 = REG_SEQUENCE %3, 1, %27, 2 %11 = S_MOV_B32 61440 %12 = S_MOV_B32 0 %13 = REG_SEQUENCE killed %12, 1, killed %11, 2 %14 = REG_SEQUENCE killed %5, 17, %13, 18 %15 = S_MOV_B32 2 - %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec + %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec %17 = REG_SEQUENCE killed %6, 17, %13, 18 %18 = REG_SEQUENCE killed %4, 17, %13, 18 %20 = COPY %29 - %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec + %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec %22 = COPY %29 - %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec - %23 = V_MOV_B32_e32 1090519040, implicit %exec - %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec + %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec + %23 = V_MOV_B32_e32 1090519040, implicit $exec + %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec %26 = COPY %29 - BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
--- # GCN: name: no_fold_imm_madak_mad_clamp_f32 -# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec -# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec +# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec name: no_fold_imm_madak_mad_clamp_f32 tracksRegLiveness: true @@ -180,42 +180,42 @@ - { id: 28, class: vreg_64 } - { id: 29, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 - %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec %28 = REG_SEQUENCE %3, 1, %27, 2 %11 = S_MOV_B32 61440 %12 = S_MOV_B32 0 %13 = REG_SEQUENCE killed %12, 1, killed %11, 2 %14 = REG_SEQUENCE killed %5, 17, %13, 18 %15 = S_MOV_B32 2 - %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec + %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec %17 = REG_SEQUENCE killed %6, 17, %13, 18 %18 = REG_SEQUENCE killed %4, 17, %13, 18 %20 = COPY %29 - %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec + %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec %22 = COPY %29 - %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec - %23 = V_MOV_B32_e32 1090519040, implicit %exec - %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec + %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec + %23 = V_MOV_B32_e32 1090519040, implicit $exec + %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec %26 = COPY %29 - BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
--- # GCN: name: no_fold_imm_madak_mad_omod_f32 -# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec -# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec +# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec +# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec name: no_fold_imm_madak_mad_omod_f32 tracksRegLiveness: true @@ -251,35 +251,35 @@ - { id: 28, class: vreg_64 } - { id: 29, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 - %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec %28 = REG_SEQUENCE %3, 1, %27, 2 %11 = S_MOV_B32 61440 %12 = S_MOV_B32 0 %13 = REG_SEQUENCE killed %12, 1, killed %11, 2 %14 = REG_SEQUENCE killed %5, 17, %13, 18 %15 = S_MOV_B32 2 - %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec + %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec %17 = REG_SEQUENCE killed %6, 17, %13, 18 %18 = REG_SEQUENCE killed %4, 17, %13, 18 %20 = COPY %29 - %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec + %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec %22 = COPY %29 - %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec - %23 = V_MOV_B32_e32 1090519040, implicit %exec - %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec + %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec + %23 = V_MOV_B32_e32 1090519040, implicit $exec + %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec %26 = COPY %29 - BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... Index: test/CodeGen/AMDGPU/fold-multiple.mir =================================================================== --- test/CodeGen/AMDGPU/fold-multiple.mir +++ test/CodeGen/AMDGPU/fold-multiple.mir @@ -14,8 +14,8 @@ # being processed twice. # CHECK-LABEL: name: test -# CHECK: %2:vgpr_32 = V_LSHLREV_B32_e32 2, killed %0, implicit %exec -# CHECK: %4:vgpr_32 = V_AND_B32_e32 8, killed %2, implicit %exec +# CHECK: %2:vgpr_32 = V_LSHLREV_B32_e32 2, killed %0, implicit $exec +# CHECK: %4:vgpr_32 = V_AND_B32_e32 8, killed %2, implicit $exec name: test tracksRegLiveness: true @@ -30,11 +30,11 @@ bb.0 (%ir-block.0): %0 = IMPLICIT_DEF %1 = S_MOV_B32 2 - %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit %exec - %3 = S_LSHL_B32 %1, killed %1, implicit-def dead %scc - %4 = V_AND_B32_e64 killed %2, killed %3, implicit %exec + %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit $exec + %3 = S_LSHL_B32 %1, killed %1, implicit-def dead $scc + %4 = V_AND_B32_e64 killed %2, killed %3, implicit $exec %5 = IMPLICIT_DEF - BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
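The hunks above repeat the same substitution across hundreds of lines, which is easier to audit against a mechanical rewrite than by eye. Below is a rough sketch of such a rewriter, for illustration only (not the script actually used to regenerate these tests): it turns '%name' into '$name', leaves numbered virtual registers and %bb./%ir./%ir-block./%stack. references alone, and skips lit RUN lines, where '%s' is a path substitution rather than a register.

    #include <fstream>
    #include <iostream>
    #include <regex>
    #include <string>

    int main(int argc, char **argv) {
      if (argc != 2) {
        std::cerr << "usage: " << argv[0] << " file.mir\n";
        return 1;
      }
      // '%' followed by an identifier becomes '$'; MIR references such as
      // %bb.1, %ir.gep1, %ir-block.0 and %stack.0 are excluded up front.
      const std::regex PhysReg(
          "%(?!bb\\.|ir\\.|ir-block\\.|stack\\.)([a-zA-Z_][a-zA-Z0-9_]*)");
      std::ifstream In(argv[1]);
      std::string Line;
      while (std::getline(In, Line)) {
        if (Line.find("RUN:") == std::string::npos)
          Line = std::regex_replace(Line, PhysReg, "$$$1"); // "$$" emits '$'
        std::cout << Line << '\n';
      }
    }

Run over fold-multiple.mir above, this maps '%exec' to '$exec' and '%scc' to '$scc' while leaving '%0'..'%5' and 'bb.0 (%ir-block.0)' untouched, which is exactly the delta the hunk shows.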
Index: test/CodeGen/AMDGPU/fold-operands-order.mir =================================================================== --- test/CodeGen/AMDGPU/fold-operands-order.mir +++ test/CodeGen/AMDGPU/fold-operands-order.mir @@ -6,10 +6,10 @@ # aren't made in users before the def is seen. # GCN-LABEL: name: mov_in_use_list_2x{{$}} -# GCN: %2:vgpr_32 = V_MOV_B32_e32 0, implicit %exec +# GCN: %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec # GCN-NEXT: %3:vgpr_32 = COPY undef %0 -# GCN: %1:vgpr_32 = V_MOV_B32_e32 0, implicit %exec +# GCN: %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec name: mov_in_use_list_2x @@ -30,12 +30,12 @@ successors: %bb.2 %2 = COPY %1 - %3 = V_XOR_B32_e64 killed %2, undef %0, implicit %exec + %3 = V_XOR_B32_e64 killed %2, undef %0, implicit $exec bb.2: successors: %bb.1 - %1 = V_MOV_B32_e32 0, implicit %exec + %1 = V_MOV_B32_e32 0, implicit $exec S_BRANCH %bb.1 ... Index: test/CodeGen/AMDGPU/hazard-inlineasm.mir =================================================================== --- test/CodeGen/AMDGPU/hazard-inlineasm.mir +++ test/CodeGen/AMDGPU/hazard-inlineasm.mir @@ -16,8 +16,8 @@ body: | bb.0: - FLAT_STORE_DWORDX4 %vgpr49_vgpr50, %vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, 0, implicit %exec, implicit %flat_scr - INLINEASM &"v_mad_u64_u32 $0, $1, $2, $3, $4", 0, 2621450, def %vgpr26_vgpr27, 2818058, def dead %sgpr14_sgpr15, 589833, %sgpr12, 327689, killed %vgpr51, 2621449, %vgpr46_vgpr47 + FLAT_STORE_DWORDX4 $vgpr49_vgpr50, $vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, 0, implicit $exec, implicit $flat_scr + INLINEASM &"v_mad_u64_u32 $0, $1, $2, $3, $4", 0, 2621450, def $vgpr26_vgpr27, 2818058, def dead $sgpr14_sgpr15, 589833, $sgpr12, 327689, killed $vgpr51, 2621449, $vgpr46_vgpr47 S_ENDPGM ... Index: test/CodeGen/AMDGPU/hazard.mir =================================================================== --- test/CodeGen/AMDGPU/hazard.mir +++ test/CodeGen/AMDGPU/hazard.mir @@ -3,7 +3,7 @@ # GCN-LABEL: name: hazard_implicit_def # GCN: bb.0.entry: -# GCN: %m0 = S_MOV_B32 +# GCN: $m0 = S_MOV_B32 # GFX9: S_NOP 0 # VI-NOT: S_NOP_0 # GCN: V_INTERP_P1_F32 @@ -18,22 +18,22 @@ tracksRegLiveness: true registers: liveins: - - { reg: '%sgpr7', virtual-reg: '' } - - { reg: '%vgpr4', virtual-reg: '' } + - { reg: '$sgpr7', virtual-reg: '' } + - { reg: '$vgpr4', virtual-reg: '' } body: | bb.0.entry: - liveins: %sgpr7, %vgpr4 + liveins: $sgpr7, $vgpr4 - %m0 = S_MOV_B32 killed %sgpr7 - %vgpr5 = IMPLICIT_DEF - %vgpr0 = V_INTERP_P1_F32 killed %vgpr4, 0, 0, implicit %m0, implicit %exec - SI_RETURN_TO_EPILOG killed %vgpr5, killed %vgpr0 + $m0 = S_MOV_B32 killed $sgpr7 + $vgpr5 = IMPLICIT_DEF + $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec + SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0 ... 
# GCN-LABEL: name: hazard_inlineasm # GCN: bb.0.entry: -# GCN: %m0 = S_MOV_B32 +# GCN: $m0 = S_MOV_B32 # GFX9: S_NOP 0 # VI-NOT: S_NOP_0 # GCN: V_INTERP_P1_F32 @@ -47,14 +47,14 @@ tracksRegLiveness: true registers: liveins: - - { reg: '%sgpr7', virtual-reg: '' } - - { reg: '%vgpr4', virtual-reg: '' } + - { reg: '$sgpr7', virtual-reg: '' } + - { reg: '$vgpr4', virtual-reg: '' } body: | bb.0.entry: - liveins: %sgpr7, %vgpr4 + liveins: $sgpr7, $vgpr4 - %m0 = S_MOV_B32 killed %sgpr7 - INLINEASM &"; no-op", 1, 327690, def %vgpr5 - %vgpr0 = V_INTERP_P1_F32 killed %vgpr4, 0, 0, implicit %m0, implicit %exec - SI_RETURN_TO_EPILOG killed %vgpr5, killed %vgpr0 + $m0 = S_MOV_B32 killed $sgpr7 + INLINEASM &"; no-op", 1, 327690, def $vgpr5 + $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec + SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0 ... Index: test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir =================================================================== --- test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir +++ test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir @@ -10,11 +10,11 @@ # CHECK-LABEL: name: kill_uncond_branch # CHECK: bb.0: -# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit %vcc +# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit $vcc # CHECK: bb.1: # CHECK: V_CMPX_LE_F32_e32 -# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit %exec +# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit $exec # CHECK: bb.3: # CHECK-NEXT: EXP_DONE @@ -28,12 +28,12 @@ body: | bb.0: successors: %bb.1 - S_CBRANCH_VCCNZ %bb.1, implicit %vcc + S_CBRANCH_VCCNZ %bb.1, implicit $vcc bb.1: successors: %bb.2 - %vgpr0 = V_MOV_B32_e32 0, implicit %exec - SI_KILL_F32_COND_IMM_TERMINATOR %vgpr0, 0, 3, implicit-def %exec, implicit-def %vcc, implicit %exec + $vgpr0 = V_MOV_B32_e32 0, implicit $exec + SI_KILL_F32_COND_IMM_TERMINATOR $vgpr0, 0, 3, implicit-def $exec, implicit-def $vcc, implicit $exec S_BRANCH %bb.2 bb.2: Index: test/CodeGen/AMDGPU/insert-waits-callee.mir =================================================================== --- test/CodeGen/AMDGPU/insert-waits-callee.mir +++ test/CodeGen/AMDGPU/insert-waits-callee.mir @@ -13,13 +13,13 @@ # CHECK-NEXT: V_ADD_F32 # CHECK-NEXT: S_SETPC_B64 liveins: - - { reg: '%sgpr0_sgpr1' } - - { reg: '%vgpr0' } + - { reg: '$sgpr0_sgpr1' } + - { reg: '$vgpr0' } name: entry_callee_wait body: | bb.0: - %vgpr0 = V_ADD_F32_e32 %vgpr0, %vgpr0, implicit %exec - S_SETPC_B64 killed %sgpr0_sgpr1 + $vgpr0 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $exec + S_SETPC_B64 killed $sgpr0_sgpr1 ... 
Index: test/CodeGen/AMDGPU/insert-waits-exp.mir =================================================================== --- test/CodeGen/AMDGPU/insert-waits-exp.mir +++ test/CodeGen/AMDGPU/insert-waits-exp.mir @@ -20,10 +20,10 @@ # CHECK-LABEL: name: exp_done_waitcnt{{$}} # CHECK: EXP_DONE # CHECK-NEXT: S_WAITCNT 3855 -# CHECK: %vgpr0 = V_MOV_B32 -# CHECK: %vgpr1 = V_MOV_B32 -# CHECK: %vgpr2 = V_MOV_B32 -# CHECK: %vgpr3 = V_MOV_B32 +# CHECK: $vgpr0 = V_MOV_B32 +# CHECK: $vgpr1 = V_MOV_B32 +# CHECK: $vgpr2 = V_MOV_B32 +# CHECK: $vgpr3 = V_MOV_B32 name: exp_done_waitcnt alignment: 0 exposesReturnsTwice: false @@ -47,17 +47,17 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.2): - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %vgpr2 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - %vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`) - EXP_DONE 0, killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3, -1, -1, 15, implicit %exec - %vgpr0 = V_MOV_B32_e32 1056964608, implicit %exec - %vgpr1 = V_MOV_B32_e32 1065353216, implicit %exec - %vgpr2 = V_MOV_B32_e32 1073741824, implicit %exec - %vgpr3 = V_MOV_B32_e32 1082130432, implicit %exec - SI_RETURN_TO_EPILOG killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3 + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`) + EXP_DONE 0, killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3, -1, -1, 15, implicit $exec + $vgpr0 = V_MOV_B32_e32 1056964608, implicit $exec + $vgpr1 = V_MOV_B32_e32 1065353216, implicit $exec + $vgpr2 = V_MOV_B32_e32 1073741824, implicit $exec + $vgpr3 = V_MOV_B32_e32 1082130432, implicit $exec + SI_RETURN_TO_EPILOG killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3 ... 
Index: test/CodeGen/AMDGPU/inserted-wait-states.mir =================================================================== --- test/CodeGen/AMDGPU/inserted-wait-states.mir +++ test/CodeGen/AMDGPU/inserted-wait-states.mir @@ -78,23 +78,23 @@ body: | bb.0: - %vcc = S_MOV_B64 0 - %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec + $vcc = S_MOV_B64 0 + $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec S_BRANCH %bb.1 bb.1: - implicit %vcc = V_CMP_EQ_I32_e32 %vgpr1, %vgpr2, implicit %exec - %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec + implicit $vcc = V_CMP_EQ_I32_e32 $vgpr1, $vgpr2, implicit $exec + $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec S_BRANCH %bb.2 bb.2: - %vcc = V_CMP_EQ_I32_e64 %vgpr1, %vgpr2, implicit %exec - %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec + $vcc = V_CMP_EQ_I32_e64 $vgpr1, $vgpr2, implicit $exec + $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec S_BRANCH %bb.3 bb.3: - %vgpr4, %vcc = V_DIV_SCALE_F32 %vgpr1, %vgpr1, %vgpr3, implicit %exec - %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec + $vgpr4, $vcc = V_DIV_SCALE_F32 $vgpr1, $vgpr1, $vgpr3, implicit $exec + $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec S_ENDPGM ... @@ -128,24 +128,24 @@ body: | bb.0: - S_SETREG_B32 %sgpr0, 1 - %sgpr1 = S_GETREG_B32 1 + S_SETREG_B32 $sgpr0, 1 + $sgpr1 = S_GETREG_B32 1 S_BRANCH %bb.1 bb.1: S_SETREG_IMM32_B32 0, 1 - %sgpr1 = S_GETREG_B32 1 + $sgpr1 = S_GETREG_B32 1 S_BRANCH %bb.2 bb.2: - S_SETREG_B32 %sgpr0, 1 - %sgpr1 = S_MOV_B32 0 - %sgpr2 = S_GETREG_B32 1 + S_SETREG_B32 $sgpr0, 1 + $sgpr1 = S_MOV_B32 0 + $sgpr2 = S_GETREG_B32 1 S_BRANCH %bb.3 bb.3: - S_SETREG_B32 %sgpr0, 0 - %sgpr1 = S_GETREG_B32 1 + S_SETREG_B32 $sgpr0, 0 + $sgpr1 = S_GETREG_B32 1 S_ENDPGM ... @@ -173,18 +173,18 @@ body: | bb.0: - S_SETREG_B32 %sgpr0, 1 - S_SETREG_B32 %sgpr1, 1 + S_SETREG_B32 $sgpr0, 1 + S_SETREG_B32 $sgpr1, 1 S_BRANCH %bb.1 bb.1: - S_SETREG_B32 %sgpr0, 64 - S_SETREG_B32 %sgpr1, 128 + S_SETREG_B32 $sgpr0, 64 + S_SETREG_B32 $sgpr1, 128 S_BRANCH %bb.2 bb.2: - S_SETREG_B32 %sgpr0, 1 - S_SETREG_B32 %sgpr1, 0 + S_SETREG_B32 $sgpr0, 1 + S_SETREG_B32 $sgpr1, 0 S_ENDPGM ... 
@@ -230,33 +230,33 @@ body: | bb.0: - BUFFER_STORE_DWORD_OFFSET %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - BUFFER_STORE_DWORDX3_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - BUFFER_STORE_DWORDX4_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - BUFFER_STORE_DWORDX4_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - BUFFER_STORE_FORMAT_XYZ_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - BUFFER_STORE_FORMAT_XYZW_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - BUFFER_ATOMIC_CMPSWAP_X2_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit %exec - %vgpr3 = V_MOV_B32_e32 0, implicit %exec + BUFFER_STORE_DWORD_OFFSET $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + BUFFER_STORE_DWORDX3_OFFSET $vgpr2_vgpr3_vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + BUFFER_STORE_DWORDX4_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + BUFFER_STORE_DWORDX4_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + BUFFER_STORE_FORMAT_XYZ_OFFSET $vgpr2_vgpr3_vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + BUFFER_STORE_FORMAT_XYZW_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + BUFFER_ATOMIC_CMPSWAP_X2_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit $exec + $vgpr3 = V_MOV_B32_e32 0, implicit $exec S_BRANCH %bb.1 bb.1: - FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - FLAT_STORE_DWORDX4 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - FLAT_ATOMIC_CMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr - %vgpr3 = V_MOV_B32_e32 0, implicit %exec - FLAT_ATOMIC_FCMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr - %vgpr3 = V_MOV_B32_e32 0, implicit %exec + FLAT_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + FLAT_STORE_DWORDX3 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + FLAT_STORE_DWORDX4 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + FLAT_ATOMIC_CMPSWAP_X2 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = V_MOV_B32_e32 0, implicit $exec + FLAT_ATOMIC_FCMPSWAP_X2 $vgpr0_vgpr1, 
$vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = V_MOV_B32_e32 0, implicit $exec S_ENDPGM ... @@ -302,23 +302,23 @@ body: | bb.0: - %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec - %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0 + $vgpr0,$sgpr0_sgpr1 = V_ADD_I32_e64 $vgpr1, $vgpr2, implicit $vcc, implicit $exec + $sgpr4 = V_READLANE_B32 $vgpr4, $sgpr0 S_BRANCH %bb.1 bb.1: - %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec - %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0 + $vgpr0,$sgpr0_sgpr1 = V_ADD_I32_e64 $vgpr1, $vgpr2, implicit $vcc, implicit $exec + $vgpr4 = V_WRITELANE_B32 $sgpr0, $sgpr0 S_BRANCH %bb.2 bb.2: - %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec - %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo + $vgpr0,implicit $vcc = V_ADD_I32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec + $sgpr4 = V_READLANE_B32 $vgpr4, $vcc_lo S_BRANCH %bb.3 bb.3: - %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec - %vgpr4 = V_WRITELANE_B32 %sgpr4, %vcc_lo + $vgpr0,implicit $vcc = V_ADD_I32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec + $vgpr4 = V_WRITELANE_B32 $sgpr4, $vcc_lo S_ENDPGM ... @@ -341,13 +341,13 @@ body: | bb.0: - S_SETREG_B32 %sgpr0, 3 - S_RFE_B64 %sgpr2_sgpr3 + S_SETREG_B32 $sgpr0, 3 + S_RFE_B64 $sgpr2_sgpr3 S_BRANCH %bb.1 bb.1: - S_SETREG_B32 %sgpr0, 0 - S_RFE_B64 %sgpr2_sgpr3 + S_SETREG_B32 $sgpr0, 0 + S_RFE_B64 $sgpr2_sgpr3 S_ENDPGM ... @@ -370,13 +370,13 @@ body: | bb.0: - %sgpr0 = S_MOV_FED_B32 %sgpr0 - %sgpr0 = S_MOV_B32 %sgpr0 + $sgpr0 = S_MOV_FED_B32 $sgpr0 + $sgpr0 = S_MOV_B32 $sgpr0 S_BRANCH %bb.1 bb.1: - %sgpr0 = S_MOV_FED_B32 %sgpr0 - %vgpr0 = V_MOV_B32_e32 %sgpr0, implicit %exec + $sgpr0 = S_MOV_FED_B32 $sgpr0 + $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec S_ENDPGM ... @@ -410,23 +410,23 @@ body: | bb.0: - %m0 = S_MOV_B32 0 - %sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0 + $m0 = S_MOV_B32 0 + $sgpr0 = S_MOVRELS_B32 $sgpr0, implicit $m0 S_BRANCH %bb.1 bb.1: - %m0 = S_MOV_B32 0 - %sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0 + $m0 = S_MOV_B32 0 + $sgpr0_sgpr1 = S_MOVRELS_B64 $sgpr0_sgpr1, implicit $m0 S_BRANCH %bb.2 bb.2: - %m0 = S_MOV_B32 0 - %sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0 + $m0 = S_MOV_B32 0 + $sgpr0 = S_MOVRELD_B32 $sgpr0, implicit $m0 S_BRANCH %bb.3 bb.3: - %m0 = S_MOV_B32 0 - %sgpr0_sgpr1 = S_MOVRELD_B64 %sgpr0_sgpr1, implicit %m0 + $m0 = S_MOV_B32 0 + $sgpr0_sgpr1 = S_MOVRELD_B64 $sgpr0_sgpr1, implicit $m0 S_ENDPGM ... @@ -459,23 +459,23 @@ body: | bb.0: - %m0 = S_MOV_B32 0 - %vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec + $m0 = S_MOV_B32 0 + $vgpr0 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $m0, implicit $exec S_BRANCH %bb.1 bb.1: - %m0 = S_MOV_B32 0 - %vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec + $m0 = S_MOV_B32 0 + $vgpr0 = V_INTERP_P2_F32 $vgpr0, $vgpr1, 0, 0, implicit $m0, implicit $exec S_BRANCH %bb.2 bb.2: - %m0 = S_MOV_B32 0 - %vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec + $m0 = S_MOV_B32 0 + $vgpr0 = V_INTERP_P1_F32_16bank $vgpr0, 0, 0, implicit $m0, implicit $exec S_BRANCH %bb.3 bb.3: - %m0 = S_MOV_B32 0 - %vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit %m0, implicit %exec + $m0 = S_MOV_B32 0 + $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $m0, implicit $exec S_ENDPGM ... 
@@ -503,13 +503,13 @@ body: | bb.0: - %vgpr0 = V_MOV_B32_e32 0, implicit %exec - %vgpr1 = V_MOV_B32_dpp %vgpr1, %vgpr0, 0, 15, 15, 0, implicit %exec + $vgpr0 = V_MOV_B32_e32 0, implicit $exec + $vgpr1 = V_MOV_B32_dpp $vgpr1, $vgpr0, 0, 15, 15, 0, implicit $exec S_BRANCH %bb.1 bb.1: - implicit %exec, implicit %vcc = V_CMPX_EQ_I32_e32 %vgpr0, %vgpr1, implicit %exec - %vgpr3 = V_MOV_B32_dpp %vgpr3, %vgpr0, 0, 15, 15, 0, implicit %exec + implicit $exec, implicit $vcc = V_CMPX_EQ_I32_e32 $vgpr0, $vgpr1, implicit $exec + $vgpr3 = V_MOV_B32_dpp $vgpr3, $vgpr0, 0, 15, 15, 0, implicit $exec S_ENDPGM ... --- @@ -521,10 +521,10 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%sgpr4_sgpr5' } - - { reg: '%sgpr6_sgpr7' } - - { reg: '%sgpr9' } - - { reg: '%sgpr0_sgpr1_sgpr2_sgpr3' } + - { reg: '$sgpr4_sgpr5' } + - { reg: '$sgpr6_sgpr7' } + - { reg: '$sgpr9' } + - { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -544,19 +544,19 @@ - { id: 1, offset: 8, size: 4, alignment: 4 } body: | bb.0.entry: - liveins: %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr9, %sgpr0_sgpr1_sgpr2_sgpr3 - - %flat_scr_lo = S_ADD_U32 %sgpr6, %sgpr9, implicit-def %scc - %flat_scr_hi = S_ADDC_U32 %sgpr7, 0, implicit-def %scc, implicit %scc - DBG_VALUE %noreg, 2, !5, !11, debug-location !12 - %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - dead %sgpr6_sgpr7 = KILL %sgpr4_sgpr5 - %sgpr8 = S_MOV_B32 %sgpr5 - %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec - BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr + 4) - %sgpr8 = S_MOV_B32 %sgpr4, implicit killed %sgpr4_sgpr5 - %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec - BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr) + liveins: $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr9, $sgpr0_sgpr1_sgpr2_sgpr3 + + $flat_scr_lo = S_ADD_U32 $sgpr6, $sgpr9, implicit-def $scc + $flat_scr_hi = S_ADDC_U32 $sgpr7, 0, implicit-def $scc, implicit $scc + DBG_VALUE $noreg, 2, !5, !11, debug-location !12 + $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + dead $sgpr6_sgpr7 = KILL $sgpr4_sgpr5 + $sgpr8 = S_MOV_B32 $sgpr5 + $vgpr0 = V_MOV_B32_e32 killed $sgpr8, implicit $exec + BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr9, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.A.addr + 4) + $sgpr8 = S_MOV_B32 $sgpr4, implicit killed $sgpr4_sgpr5 + $vgpr0 = V_MOV_B32_e32 killed $sgpr8, implicit $exec + BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr9, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.A.addr) S_ENDPGM ... Index: test/CodeGen/AMDGPU/invert-br-undef-vcc.mir =================================================================== --- test/CodeGen/AMDGPU/invert-br-undef-vcc.mir +++ test/CodeGen/AMDGPU/invert-br-undef-vcc.mir @@ -26,7 +26,7 @@ ... 
--- # CHECK-LABEL: name: invert_br_undef_vcc -# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc +# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef $vcc name: invert_br_undef_vcc alignment: 0 @@ -36,7 +36,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -53,34 +53,34 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc bb.1.else: - liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %vgpr0 = V_MOV_B32_e32 100, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) - %vgpr0 = V_MOV_B32_e32 1, implicit %exec + $vgpr0 = V_MOV_B32_e32 100, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + $vgpr0 = V_MOV_B32_e32 1, implicit $exec S_BRANCH %bb.3 bb.2.if: - liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %vgpr0 = V_MOV_B32_e32 9, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) - %vgpr0 = V_MOV_B32_e32 0, implicit %exec + $vgpr0 = V_MOV_B32_e32 9, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + $vgpr0 = V_MOV_B32_e32 0, implicit $exec bb.3.done: - liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out) S_ENDPGM ... 
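One more review aid, in the same spirit as the rewriter sketch earlier (hypothetical, not part of this patch): after a rewrite this large, a leftover '%vcc' or '%sgpr...' in a CHECK line would only surface later as a confusing test failure, so it is worth grepping for '%name' tokens that still look like physical registers.

    #include <fstream>
    #include <iostream>
    #include <regex>
    #include <string>

    int main(int argc, char **argv) {
      if (argc != 2) {
        std::cerr << "usage: " << argv[0] << " file.mir\n";
        return 1;
      }
      // Anything '%name'-shaped is suspect; %<digits> virtual registers and
      // %bb./%ir./%ir-block./%stack. references are legitimate survivors.
      const std::regex Leftover(
          "%(?!bb\\.|ir\\.|ir-block\\.|stack\\.)[a-zA-Z_][a-zA-Z0-9_]*");
      std::ifstream In(argv[1]);
      std::string Line;
      unsigned LineNo = 0;
      while (std::getline(In, Line)) {
        ++LineNo;
        if (Line.find("RUN:") != std::string::npos)
          continue; // lit substitutions such as %s are expected here
        if (std::regex_search(Line, Leftover))
          std::cout << argv[1] << ':' << LineNo << ": " << Line << '\n';
      }
    }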
Index: test/CodeGen/AMDGPU/limit-coalesce.mir =================================================================== --- test/CodeGen/AMDGPU/limit-coalesce.mir +++ test/CodeGen/AMDGPU/limit-coalesce.mir @@ -11,8 +11,8 @@ # CHECK: - { id: 8, class: vreg_128, preferred-register: '' } # No more registers shall be defined # CHECK-NEXT: liveins: -# CHECK: FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %4, -# CHECK: FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %6, +# CHECK: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4, +# CHECK: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6, --- name: main @@ -33,7 +33,7 @@ - { id: 8, class: vreg_128 } - { id: 9, class: vreg_128 } liveins: - - { reg: '%sgpr6', virtual-reg: '%1' } + - { reg: '$sgpr6', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -50,22 +50,22 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %sgpr0, %vgpr0_vgpr1 + liveins: $sgpr0, $vgpr0_vgpr1 %3 = IMPLICIT_DEF - undef %4.sub0 = COPY %sgpr0 + undef %4.sub0 = COPY $sgpr0 %4.sub1 = COPY %3.sub0 undef %5.sub0 = COPY %4.sub1 %5.sub1 = COPY %4.sub0 - FLAT_STORE_DWORDX2 %vgpr0_vgpr1, killed %5, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORDX2 $vgpr0_vgpr1, killed %5, 0, 0, 0, implicit $exec, implicit $flat_scr %6 = IMPLICIT_DEF undef %7.sub0_sub1 = COPY %6 %7.sub2 = COPY %3.sub0 - FLAT_STORE_DWORDX3 %vgpr0_vgpr1, killed %7, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORDX3 $vgpr0_vgpr1, killed %7, 0, 0, 0, implicit $exec, implicit $flat_scr %8 = IMPLICIT_DEF undef %9.sub0_sub1_sub2 = COPY %8 %9.sub3 = COPY %3.sub0 - FLAT_STORE_DWORDX4 %vgpr0_vgpr1, killed %9, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORDX4 $vgpr0_vgpr1, killed %9, 0, 0, 0, implicit $exec, implicit $flat_scr ... Index: test/CodeGen/AMDGPU/liveness.mir =================================================================== --- test/CodeGen/AMDGPU/liveness.mir +++ test/CodeGen/AMDGPU/liveness.mir @@ -17,7 +17,7 @@ body: | bb.0: S_NOP 0, implicit-def undef %0.sub0 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.1: Index: test/CodeGen/AMDGPU/llvm.dbg.value.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.dbg.value.ll +++ test/CodeGen/AMDGPU/llvm.dbg.value.ll @@ -5,7 +5,7 @@ ; NOOPT: s_load_dwordx2 s[4:5] ; FIXME: Why is the SGPR4_SGPR5 reference being removed from DBG_VALUE? 
-; NOOPT: ; kill: def %sgpr8_sgpr9 killed %sgpr4_sgpr5
+; NOOPT: ; kill: def $sgpr8_sgpr9 killed $sgpr4_sgpr5
 ; NOOPT-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- undef
 
 ; GCN: flat_store_dword
Index: test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
===================================================================
--- test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
+++ test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
@@ -1,9 +1,9 @@
 # RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -run-pass machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: cluster_add_addc
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, implicit $exec
 name: cluster_add_addc
 registers:
   - { id: 0, class: vgpr_32 }
@@ -17,20 +17,20 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2, %3 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: interleave_add64s
-# GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
-# GCN-NEXT: dead %10:vgpr_32, %11:sreg_64_xexec = V_ADD_I32_e64 %2, %3, implicit %exec
-# GCN-NEXT: dead %14:vgpr_32, dead %15:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+# GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, implicit $exec
+# GCN-NEXT: dead %10:vgpr_32, %11:sreg_64_xexec = V_ADD_I32_e64 %2, %3, implicit $exec
+# GCN-NEXT: dead %14:vgpr_32, dead %15:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %11, implicit $exec
 name: interleave_add64s
 registers:
   - { id: 0, class: vgpr_32 }
@@ -52,27 +52,27 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-    %4 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_MOV_B32_e32 0, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+    %4 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_MOV_B32_e32 0, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
 
-    %8, %9 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %10, %11 = V_ADD_I32_e64 %2, %3, implicit %exec
+    %8, %9 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %10, %11 = V_ADD_I32_e64 %2, %3, implicit $exec
 
-    %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
-    %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+    %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit $exec
+    %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_mov_addc
-# GCN: S_NOP 0, implicit-def %vcc
+# GCN: S_NOP 0, implicit-def $vcc
 # GCN-NEXT: %2:sreg_64_xexec = S_MOV_B64 0
-# GCN-NEXT: dead %3:vgpr_32, dead %4:sreg_64_xexec = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+# GCN-NEXT: dead %3:vgpr_32, dead %4:sreg_64_xexec = V_ADDC_U32_e64 %0, %1, %2, implicit $exec
 name: cluster_mov_addc
 registers:
   - { id: 0, class: vgpr_32 }
@@ -85,20 +85,20 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     %2 = S_MOV_B64 0
-    S_NOP 0, implicit def %vcc
-    %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+    S_NOP 0, implicit def $vcc
+    %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit $exec
 ...
 
 # GCN-LABEL: name: no_cluster_add_addc_diff_sgpr
-# GCN: dead %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: %6:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: %7:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: S_NOP 0, implicit-def %vcc
+# GCN: dead %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: %7:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: S_NOP 0, implicit-def $vcc
 # GCN-NEXT: %8:sreg_64_xexec = S_MOV_B64 0
-# GCN-NEXT: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+# GCN-NEXT: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %8, implicit $exec
 name: no_cluster_add_addc_diff_sgpr
 registers:
   - { id: 0, class: vgpr_32 }
@@ -112,19 +112,19 @@
   - { id: 8, class: sreg_64_xexec }
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     %8 = S_MOV_B64 0
-    %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+    %2, %3 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_sub_subb
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_SUB_I32_e64 %0, %1, implicit %exec
-# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_SUB_I32_e64 %0, %1, implicit $exec
+# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_SUBB_U32_e64 %6, %7, %3, implicit $exec
 name: cluster_sub_subb
 registers:
   - { id: 0, class: vgpr_32 }
@@ -138,19 +138,19 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2, %3 = V_SUB_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2, %3 = V_SUB_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit $exec
 ...
 
# GCN-LABEL: name: cluster_cmp_cndmask
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN-NEXT: %3:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %4:vgpr_32 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN-NEXT: %3:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %4:vgpr_32 = V_CNDMASK_B32_e64 %0, %1, %3, implicit $exec
 name: cluster_cmp_cndmask
 registers:
   - { id: 0, class: vgpr_32 }
@@ -164,17 +164,17 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_multi_use_cmp_cndmask
-# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 name: cluster_multi_use_cmp_cndmask
 registers:
   - { id: 0, class: vgpr_32 }
@@ -188,22 +188,22 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-
-    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+
+    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_multi_use_cmp_cndmask2
-# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-# GCN-NEXT: %3:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+# GCN-NEXT: %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 name: cluster_multi_use_cmp_cndmask2
 registers:
   - { id: 0, class: vgpr_32 }
@@ -217,11 +217,11 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 ...
Index: test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
===================================================================
--- test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
+++ test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
@@ -65,8 +65,8 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr0_sgpr1' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -84,38 +84,38 @@
 body: |
   bb.0 (%ir-block.0):
     successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %vgpr1 = V_ASHRREV_I32_e32 31, %vgpr0, implicit %exec
-    %vgpr1_vgpr2 = V_LSHL_B64 %vgpr0_vgpr1, 3, implicit %exec
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 0
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec
+    $vgpr1_vgpr2 = V_LSHL_B64 $vgpr0_vgpr1, 3, implicit $exec
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 0
     S_WAITCNT 127
-    %vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed %vgpr1_vgpr2, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 8 from %ir.tid.gep)
-    %vgpr0 = V_XOR_B32_e32 1, killed %vgpr0, implicit %exec
-    V_CMP_NE_U32_e32 0, killed %vgpr0, implicit-def %vcc, implicit %exec
-    %sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-    %sgpr2_sgpr3 = S_XOR_B64 %exec, killed %sgpr2_sgpr3, implicit-def dead %scc
-    SI_MASK_BRANCH %bb.2.exit, implicit %exec
+    $vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed $vgpr1_vgpr2, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 8 from %ir.tid.gep)
+    $vgpr0 = V_XOR_B32_e32 1, killed $vgpr0, implicit $exec
+    V_CMP_NE_U32_e32 0, killed $vgpr0, implicit-def $vcc, implicit $exec
+    $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+    $sgpr2_sgpr3 = S_XOR_B64 $exec, killed $sgpr2_sgpr3, implicit-def dead $scc
+    SI_MASK_BRANCH %bb.2.exit, implicit $exec
 
   bb.1.atomic:
     successors: %bb.2.exit(0x80000000)
-    liveins: %sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, %sgpr0_sgpr1, %sgpr2_sgpr3, %vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
+    liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    dead %vgpr0 = V_MOV_B32_e32 -1, implicit %exec
-    dead %vgpr0 = V_MOV_B32_e32 61440, implicit %exec
-    %sgpr4_sgpr5 = S_MOV_B64 0
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
+    dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
+    $sgpr4_sgpr5 = S_MOV_B64 0
     S_WAITCNT 127
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr0, implicit %exec, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
     S_WAITCNT 3952
-    BUFFER_ATOMIC_SMAX_ADDR64 killed %vgpr0, killed %vgpr1_vgpr2, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit %exec :: (volatile load seq_cst 4 from %ir.gep)
+    BUFFER_ATOMIC_SMAX_ADDR64 killed $vgpr0, killed $vgpr1_vgpr2, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit $exec :: (volatile load seq_cst 4 from %ir.gep)
 
   bb.2.exit:
-    liveins: %sgpr2_sgpr3
+    liveins: $sgpr2_sgpr3
 
-    %exec = S_OR_B64 %exec, killed %sgpr2_sgpr3, implicit-def %scc
+    $exec = S_OR_B64 $exec, killed $sgpr2_sgpr3, implicit-def $scc
     S_ENDPGM
 
 ...
Index: test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
===================================================================
--- test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
+++ test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
@@ -79,8 +79,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -112,52 +112,52 @@
 body: |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
-
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    liveins: $sgpr0_sgpr1, $sgpr3
+
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
Index: test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
===================================================================
--- test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
+++ test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
@@ -66,7 +66,7 @@
 
 # CHECK-LABEL: name: multiple_mem_operands
 # CHECK-LABEL: bb.3.done:
-# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 1, 1, 0
+# CHECK: BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 1, 1, 0
 
 name: multiple_mem_operands
 alignment: 0
@@ -77,8 +77,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -110,52 +110,52 @@
 body: |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
-
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    liveins: $sgpr0_sgpr1, $sgpr3
+
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
  bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
Index: test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
===================================================================
--- test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
+++ test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
@@ -66,7 +66,7 @@
 
 # CHECK-LABEL: name: multiple_mem_operands
 # CHECK-LABEL: bb.3.done:
-# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0
+# CHECK: BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0
 
 name: multiple_mem_operands
 alignment: 0
@@ -77,8 +77,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -110,52 +110,52 @@
 body: |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
-
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    liveins: $sgpr0_sgpr1, $sgpr3
+
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
    S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
Index: test/CodeGen/AMDGPU/merge-load-store-vreg.mir
===================================================================
--- test/CodeGen/AMDGPU/merge-load-store-vreg.mir
+++ test/CodeGen/AMDGPU/merge-load-store-vreg.mir
@@ -3,7 +3,7 @@
 
 # If there's a base offset, check that SILoadStoreOptimizer creates
 # V_ADD_{I|U}32_e64 for that offset; _e64 uses a vreg for the carry (rather than
-# %vcc, which is used in _e32); this ensures that %vcc is not inadvertently
+# $vcc, which is used in _e32); this ensures that $vcc is not inadvertently
 # clobbered.
 
 # GCN-LABEL: name: kernel
@@ -46,15 +46,15 @@
     S_ENDPGM
 
   bb.2:
-    %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0, 0, implicit %exec
-    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %1, implicit %exec
-    V_CMP_NE_U32_e32 1, %2, implicit-def %vcc, implicit %exec
-    DS_WRITE_B32 %0, %0, 1024, 0, implicit %m0, implicit %exec :: (store 4 into %ir.tmp)
-    %3:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-    DS_WRITE_B32 %0, %3, 1056, 0, implicit %m0, implicit %exec :: (store 4 into %ir.tmp1)
-    %4:vgpr_32 = DS_READ_B32 %3, 1088, 0, implicit %m0, implicit %exec :: (load 4 from %ir.tmp2)
-    %5:vgpr_32 = DS_READ_B32 %3, 1120, 0, implicit %m0, implicit %exec :: (load 4 from %ir.tmp3)
-    %vcc = S_AND_B64 %exec, %vcc, implicit-def %scc
-    S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+    %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0, 0, implicit $exec
+    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %1, implicit $exec
+    V_CMP_NE_U32_e32 1, %2, implicit-def $vcc, implicit $exec
+    DS_WRITE_B32 %0, %0, 1024, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp)
+    %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    DS_WRITE_B32 %0, %3, 1056, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp1)
+    %4:vgpr_32 = DS_READ_B32 %3, 1088, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp2)
+    %5:vgpr_32 = DS_READ_B32 %3, 1120, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp3)
+    $vcc = S_AND_B64 $exec, $vcc, implicit-def $scc
+    S_CBRANCH_VCCNZ %bb.1, implicit $vcc
     S_BRANCH %bb.1
 ...
Index: test/CodeGen/AMDGPU/merge-load-store.mir
===================================================================
--- test/CodeGen/AMDGPU/merge-load-store.mir
+++ test/CodeGen/AMDGPU/merge-load-store.mir
@@ -34,7 +34,7 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%1' }
+  - { reg: '$vgpr0', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -51,20 +51,20 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %1:vgpr_32 = COPY %vgpr0
-    %m0 = S_MOV_B32 -1
-    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.0)
-    DS_WRITE_B32 %1, killed %2, 64, 0, implicit %m0, implicit %exec :: (store 4 into %ir.ptr.64)
+    %1:vgpr_32 = COPY $vgpr0
+    $m0 = S_MOV_B32 -1
+    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.0)
+    DS_WRITE_B32 %1, killed %2, 64, 0, implicit $m0, implicit $exec :: (store 4 into %ir.ptr.64)
     ; Make this load unmergeable, to tempt SILoadStoreOptimizer into merging the
     ; other two loads.
-    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit %m0, implicit %exec :: (load 8 from %ir.ptr.64, align 4)
+    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit $m0, implicit $exec :: (load 8 from %ir.ptr.64, align 4)
     %3:vgpr_32 = COPY %6.sub0
-    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.4)
-    %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def %vcc, implicit %exec
-    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed %m0, implicit %exec :: (store 4 into %ir.ptr.0)
+    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.4)
+    %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def $vcc, implicit $exec
+    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed $m0, implicit $exec :: (store 4 into %ir.ptr.0)
     S_ENDPGM
 
 ...
Index: test/CodeGen/AMDGPU/merge-m0.mir
===================================================================
--- test/CodeGen/AMDGPU/merge-m0.mir
+++ test/CodeGen/AMDGPU/merge-m0.mir
@@ -64,68 +64,68 @@
 
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    S_CBRANCH_VCCZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
     successors: %bb.2
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
     successors: %bb.3
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
    successors: %bb.4, %bb.5
-    S_CBRANCH_VCCZ %bb.4, implicit undef %vcc
+    S_CBRANCH_VCCZ %bb.4, implicit undef $vcc
    S_BRANCH %bb.5
 
   bb.4:
     successors: %bb.6
-    SI_INIT_M0 3, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 4, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 3, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 4, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.6
 
   bb.5:
     successors: %bb.6
-    SI_INIT_M0 3, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 4, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 3, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 4, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.6
 
   bb.6:
     successors: %bb.0.entry, %bb.6
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     %2 = IMPLICIT_DEF
-    SI_INIT_M0 %2, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 %2, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    S_CBRANCH_VCCZ %bb.6, implicit undef %vcc
+    SI_INIT_M0 %2, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 %2, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    S_CBRANCH_VCCZ %bb.6, implicit undef $vcc
     S_BRANCH %bb.0.entry
 ...
Index: test/CodeGen/AMDGPU/misched-killflags.mir
===================================================================
--- test/CodeGen/AMDGPU/misched-killflags.mir
+++ test/CodeGen/AMDGPU/misched-killflags.mir
@@ -5,41 +5,41 @@
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3
+    liveins: $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3
 
-    %sgpr33 = S_MOV_B32 %sgpr7
-    %sgpr32 = S_MOV_B32 %sgpr33
-    %sgpr10 = S_MOV_B32 5
-    %sgpr9 = S_MOV_B32 4
-    %sgpr8 = S_MOV_B32 3
-    BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
-      %sgpr6_sgpr7 = S_GETPC_B64
-      %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
-      %sgpr7 = S_ADDC_U32 internal %sgpr7,0, implicit-def %scc, implicit internal %scc
+    $sgpr33 = S_MOV_B32 $sgpr7
+    $sgpr32 = S_MOV_B32 $sgpr33
+    $sgpr10 = S_MOV_B32 5
+    $sgpr9 = S_MOV_B32 4
+    $sgpr8 = S_MOV_B32 3
+    BUNDLE implicit-def $sgpr6_sgpr7, implicit-def $sgpr6, implicit-def $sgpr7, implicit-def $scc {
+      $sgpr6_sgpr7 = S_GETPC_B64
+      $sgpr6 = S_ADD_U32 internal $sgpr6, 0, implicit-def $scc
+      $sgpr7 = S_ADDC_U32 internal $sgpr7,0, implicit-def $scc, implicit internal $scc
     }
-    %sgpr4 = S_MOV_B32 %sgpr33
-    %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr3 = V_MOV_B32_e32 %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
-    S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+    $sgpr4 = S_MOV_B32 $sgpr33
+    $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr2 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr3 = V_MOV_B32_e32 $sgpr11, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec
+    S_NOP 0, implicit killed $sgpr6_sgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3
     S_ENDPGM
 ...
 
# CHECK-LABEL: name: func0
-# CHECK: %sgpr10 = S_MOV_B32 5
-# CHECK: %sgpr9 = S_MOV_B32 4
-# CHECK: %sgpr8 = S_MOV_B32 3
-# CHECK: %sgpr33 = S_MOV_B32 killed %sgpr7
-# CHECK: %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
-# CHECK:   %sgpr6_sgpr7 = S_GETPC_B64
-# CHECK:   %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
-# CHECK:   %sgpr7 = S_ADDC_U32 internal %sgpr7, 0, implicit-def %scc, implicit internal %scc
+# CHECK: $sgpr10 = S_MOV_B32 5
+# CHECK: $sgpr9 = S_MOV_B32 4
+# CHECK: $sgpr8 = S_MOV_B32 3
+# CHECK: $sgpr33 = S_MOV_B32 killed $sgpr7
+# CHECK: $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: BUNDLE implicit-def $sgpr6_sgpr7, implicit-def $sgpr6, implicit-def $sgpr7, implicit-def $scc {
+# CHECK:   $sgpr6_sgpr7 = S_GETPC_B64
+# CHECK:   $sgpr6 = S_ADD_U32 internal $sgpr6, 0, implicit-def $scc
+# CHECK:   $sgpr7 = S_ADDC_U32 internal $sgpr7, 0, implicit-def $scc, implicit internal $scc
 # CHECK: }
-# CHECK: %sgpr4 = S_MOV_B32 %sgpr33
-# CHECK: %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: %vgpr3 = V_MOV_B32_e32 killed %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
-# CHECK: %sgpr32 = S_MOV_B32 killed %sgpr33
-# CHECK: S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+# CHECK: $sgpr4 = S_MOV_B32 $sgpr33
+# CHECK: $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: $vgpr2 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: $vgpr3 = V_MOV_B32_e32 killed $sgpr11, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec
+# CHECK: $sgpr32 = S_MOV_B32 killed $sgpr33
+# CHECK: S_NOP 0, implicit killed $sgpr6_sgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3
 # CHECK: S_ENDPGM
Index: test/CodeGen/AMDGPU/movrels-bug.mir
===================================================================
--- test/CodeGen/AMDGPU/movrels-bug.mir
+++ test/CodeGen/AMDGPU/movrels-bug.mir
@@ -20,12 +20,12 @@
 tracksRegLiveness: true
 body: |
   bb.0:
-    %m0 = S_MOV_B32 undef %sgpr0
-    V_MOVRELD_B32_e32 undef %vgpr2, 0, implicit %m0, implicit %exec, implicit-def %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
-    %m0 = S_MOV_B32 undef %sgpr0
-    %vgpr1 = V_MOVRELS_B32_e32 undef %vgpr1, implicit %m0, implicit %exec, implicit killed %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-    %vgpr4 = V_MAC_F32_e32 undef %vgpr0, undef %vgpr0, undef %vgpr4, implicit %exec
-    EXP_DONE 15, undef %vgpr0, killed %vgpr1, killed %vgpr4, undef %vgpr0, 0, 0, 12, implicit %exec
+    $m0 = S_MOV_B32 undef $sgpr0
+    V_MOVRELD_B32_e32 undef $vgpr2, 0, implicit $m0, implicit $exec, implicit-def $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
+    $m0 = S_MOV_B32 undef $sgpr0
+    $vgpr1 = V_MOVRELS_B32_e32 undef $vgpr1, implicit $m0, implicit $exec, implicit killed $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
+    $vgpr4 = V_MAC_F32_e32 undef $vgpr0, undef $vgpr0, undef $vgpr4, implicit $exec
+    EXP_DONE 15, undef $vgpr0, killed $vgpr1, killed $vgpr4, undef $vgpr0, 0, 0, 12, implicit $exec
     S_ENDPGM
 ...
Index: test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
===================================================================
--- test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
+++ test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
@@ -6,19 +6,19 @@
 # GCN: %[[HI:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
 # GCN-NEXT: %[[LO:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
 # GCN-NEXT: %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[HI]], %subreg.sub1
-# GCN-NEXT: V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
+# GCN-NEXT: V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
 
 # GCN-LABEL: {{^}}name: const_to_sgpr_multiple_use{{$}}
 # GCN: %[[HI:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
 # GCN-NEXT: %[[LO:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
 # GCN-NEXT: %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[HI]], %subreg.sub1
-# GCN-NEXT: V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
-# GCN-NEXT: V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
+# GCN-NEXT: V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
+# GCN-NEXT: V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
 
 # GCN-LABEL: {{^}}name: const_to_sgpr_subreg{{$}}
 # GCN: %[[OP0:[0-9]+]]:vreg_64 = REG_SEQUENCE killed %{{[0-9]+}}, %subreg.sub0, killed %{{[0-9]+}}, %subreg.sub1
-# GCN-NEXT: V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit %exec
+# GCN-NEXT: V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit $exec
 
 --- |
   define amdgpu_kernel void @const_to_sgpr(i32 addrspace(1)* nocapture %arg, i64 %id) {
@@ -96,15 +96,15 @@
   - { id: 29, class: vgpr_32 }
   - { id: 30, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body: |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %6 = COPY %7
@@ -115,32 +115,32 @@
     %12 = COPY %10.sub1
     %13 = COPY %8.sub0
     %14 = COPY %8.sub1
-    %15 = S_ADD_U32 killed %11, killed %13, implicit-def %scc
-    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead %scc, implicit %scc
+    %15 = S_ADD_U32 killed %11, killed %13, implicit-def $scc
+    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead $scc, implicit $scc
     %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
     %18 = S_MOV_B32 0
     %19 = S_MOV_B32 1048576
     %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
     %22 = COPY killed %20
-    %21 = V_CMP_LT_U64_e64 killed %17, %22, implicit %exec
-    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %21 = V_CMP_LT_U64_e64 killed %17, %22, implicit $exec
+    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %23 = S_MOV_B32 2
-    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead %scc
+    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead $scc
     %25 = S_MOV_B32 61440
     %26 = S_MOV_B32 0
     %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
     %28 = REG_SEQUENCE %6, 17, killed %27, 18
-    %29 = V_MOV_B32_e32 0, implicit %exec
+    %29 = V_MOV_B32_e32 0, implicit $exec
     %30 = COPY %24
-    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
@@ -194,15 +194,15 @@
   - { id: 38, class: vgpr_32 }
   - { id: 39, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body: |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %9 = S_LOAD_DWORDX2_IMM %3, 13, 0
@@ -214,39 +214,39 @@
     %13 = COPY %11.sub1
     %14 = COPY %8.sub0
     %15 = COPY %8.sub1
-    %16 = S_ADD_U32 %12, killed %14, implicit-def %scc
-    %17 = S_ADDC_U32 %13, killed %15, implicit-def dead %scc, implicit %scc
+    %16 = S_ADD_U32 %12, killed %14, implicit-def $scc
+    %17 = S_ADDC_U32 %13, killed %15, implicit-def dead $scc, implicit $scc
     %18 = REG_SEQUENCE killed %16, %subreg.sub0, killed %17, %subreg.sub1
     %19 = COPY %9.sub0
     %20 = COPY %9.sub1
-    %21 = S_ADD_U32 %12, killed %19, implicit-def %scc
-    %22 = S_ADDC_U32 %13, killed %20, implicit-def dead %scc, implicit %scc
+    %21 = S_ADD_U32 %12, killed %19, implicit-def $scc
+    %22 = S_ADDC_U32 %13, killed %20, implicit-def dead $scc, implicit $scc
     %23 = REG_SEQUENCE killed %21, %subreg.sub0, killed %22, %subreg.sub1
     %24 = S_MOV_B32 0
     %25 = S_MOV_B32 1048576
     %26 = REG_SEQUENCE killed %25, %subreg.sub0, killed %24, %subreg.sub1
     %28 = COPY %26
-    %27 = V_CMP_LT_U64_e64 killed %18, %28, implicit %exec
-    %29 = V_CMP_LT_U64_e64 killed %23, %28, implicit %exec
-    %31 = S_AND_B64 killed %27, killed %29, implicit-def dead %scc
-    %1 = SI_IF killed %31, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %27 = V_CMP_LT_U64_e64 killed %18, %28, implicit $exec
+    %29 = V_CMP_LT_U64_e64 killed %23, %28, implicit $exec
+    %31 = S_AND_B64 killed %27, killed %29, implicit-def dead $scc
+    %1 = SI_IF killed %31, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
  bb.1.bb1:
    successors: %bb.2.bb2(0x80000000)
 
     %32 = S_MOV_B32 2
-    %33 = S_LSHL_B64 %0, killed %32, implicit-def dead %scc
+    %33 = S_LSHL_B64 %0, killed %32, implicit-def dead $scc
     %34 = S_MOV_B32 61440
     %35 = S_MOV_B32 0
     %36 = REG_SEQUENCE killed %35, %subreg.sub0, killed %34, %subreg.sub1
     %37 = REG_SEQUENCE %6, 17, killed %36, 18
-    %38 = V_MOV_B32_e32 0, implicit %exec
+    %38 = V_MOV_B32_e32 0, implicit $exec
     %39 = COPY %33
-    BUFFER_STORE_DWORD_ADDR64 killed %38, killed %39, killed %37, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %38, killed %39, killed %37, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
@@ -291,15 +291,15 @@
   - { id: 29, class: vgpr_32 }
   - { id: 30, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body: |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %6 = COPY %7
@@ -310,32 +310,32 @@
     %12 = COPY %10.sub1
     %13 = COPY %8.sub0
     %14 = COPY %8.sub1
-    %15 = S_ADD_U32 killed %11, killed %13, implicit-def %scc
-    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead %scc, implicit %scc
+    %15 = S_ADD_U32 killed %11, killed %13, implicit-def $scc
+    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead $scc, implicit $scc
     %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
     %18 = S_MOV_B32 12
     %19 = S_MOV_B32 1048576
     %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
     %22 = COPY killed %20.sub1
-    %21 = V_CMP_LT_U32_e64 killed %17.sub0, %22, implicit %exec
-    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %21 = V_CMP_LT_U32_e64 killed %17.sub0, %22, implicit $exec
+    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
  bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %23 = S_MOV_B32 2
-    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead %scc
+    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead $scc
     %25 = S_MOV_B32 61440
     %26 = S_MOV_B32 0
     %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
     %28 = REG_SEQUENCE %6, 17, killed %27, 18
-    %29 = V_MOV_B32_e32 0, implicit %exec
+    %29 = V_MOV_B32_e32 0, implicit $exec
     %30 = COPY %24
-    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
Index: test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
===================================================================
--- test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -147,8 +147,8 @@
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name: optimize_if_and_saveexec_xor
@@ -159,7 +159,7 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -176,37 +176,37 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0.main_body:
-    liveins: %vgpr0
-
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    liveins: $vgpr0
+
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
--- # CHECK-LABEL: name: optimize_if_and_saveexec{{$}} -# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec +# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec # CHECK-NEXT: SI_MASK_BRANCH name: optimize_if_and_saveexec @@ -217,7 +217,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -234,36 +234,36 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_or_saveexec{{$}} -# CHECK: %sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec +# CHECK: $sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec # CHECK-NEXT: SI_MASK_BRANCH name: optimize_if_or_saveexec @@ -274,7 +274,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -291,39 +291,39 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_OR_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle -# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc -# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1) -# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc -# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 +# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc +# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1) +# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc +# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3 # CHECK-NEXT: SI_MASK_BRANCH name: optimize_if_and_saveexec_xor_valu_middle alignment: 0 @@ -333,7 +333,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -350,41 +350,41 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) - %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) + $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}} -# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc -# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc -# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec +# CHECK: $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc +# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc +# CHECK-NEXT: $exec = COPY $sgpr0_sgpr1 +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec name: optimize_if_and_saveexec_xor_wrong_reg alignment: 0 exposesReturnsTwice: false @@ -393,7 +393,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -410,40 +410,40 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr6 = S_MOV_B32 -1 - %sgpr7 = S_MOV_B32 61440 - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc - %exec = S_MOV_B64_term %sgpr0_sgpr1 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr6 = S_MOV_B32 -1 + $sgpr7 = S_MOV_B32 61440 + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc + $exec = S_MOV_B64_term $sgpr0_sgpr1 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + liveins: $sgpr0_sgpr1 , $sgpr4_sgpr5_sgpr6_sgpr7 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7 + liveins: $vgpr0, $sgpr0_sgpr1, $sgpr4_sgpr5_sgpr6_sgpr7 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_and_saveexec_xor_modify_copy_to_exec{{$}} -# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc -# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc -# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc -# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec +# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc +# CHECK-NEXT: $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc +# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc +# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3 +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec name: optimize_if_and_saveexec_xor_modify_copy_to_exec alignment: 0 @@ -453,7 +453,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -470,42 +470,42 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc - %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc + $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 - - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr0 = S_MOV_B32 0 - %sgpr1 = S_MOV_B32 1 - %sgpr2 = S_MOV_B32 -1 - %sgpr3 = S_MOV_B32 61440 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + liveins: $vgpr0, $sgpr0_sgpr1 + + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 -1 + $sgpr3 = S_MOV_B32 61440 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}} -# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc -# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc -# CHECK-NEXT: %exec = COPY %sgpr2_sgpr3 +# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc +# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc +# CHECK-NEXT: $exec = COPY $sgpr2_sgpr3 # CHECK-NEXT: SI_MASK_BRANCH name: optimize_if_and_saveexec_xor_live_out_setexec alignment: 0 @@ -515,7 +515,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -532,40 +532,40 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc - %exec = S_MOV_B64_term %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc + $exec = S_MOV_B64_term $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1, %sgpr2_sgpr3 - S_SLEEP 0, implicit %sgpr2_sgpr3 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 + S_SLEEP 0, implicit $sgpr2_sgpr3 + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
---
# CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}} -# CHECK: %sgpr0_sgpr1 = COPY %exec -# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc -# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec +# CHECK: $sgpr0_sgpr1 = COPY $exec +# CHECK: $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc +# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3 +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec name: optimize_if_unknown_saveexec alignment: 0 @@ -575,7 +575,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -592,36 +592,36 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}} -# CHECK: %sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec +# CHECK: $sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec # CHECK-NEXT: SI_MASK_BRANCH name: optimize_if_andn2_saveexec @@ -632,7 +632,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -649,38 +649,38 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_ANDN2_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
--- # CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}} -# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc -# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec +# CHECK: $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc +# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3 +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec name: optimize_if_andn2_saveexec_no_commute alignment: 0 exposesReturnsTwice: false @@ -689,7 +689,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0' } + - { reg: '$vgpr0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -706,30 +706,30 @@ hasMustTailInVarArgFunc: false body: | bb.0.main_body: - liveins: %vgpr0 - - %sgpr0_sgpr1 = COPY %exec - %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec - %vgpr0 = V_MOV_B32_e32 4, implicit %exec - %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0 + + $sgpr0_sgpr1 = COPY $exec + $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec + $vgpr0 = V_MOV_B32_e32 4, implicit $exec + $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1.if: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`) bb.2.end: - liveins: %vgpr0, %sgpr0_sgpr1 + liveins: $vgpr0, $sgpr0_sgpr1 - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`) S_ENDPGM ... 
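The test updates above and below all apply one mechanical rule, visible in every hunk: physical registers and the null register (%sgpr0_sgpr1, %exec, %vcc, %m0, %noreg, ...) switch their sigil from '%' to '$', while virtual registers (%0, %21), block references (%bb.1), IR values (%ir.out), stack objects (%stack.0.tmp5), and subregister indices (%subreg.sub0) keep '%'. The sketch below illustrates that substitution in Python; it is only an illustration of the rule as it appears in these hunks, not the script actually used to regenerate the tests, and the names (retag.py, KEEP, to_dollar) are invented for the example.

#!/usr/bin/env python3
# retag.py -- illustrative only: prefix physical registers with '$'.
# Virtual registers (%0, %21), blocks (%bb.1), IR values (%ir.out),
# stack objects (%stack.0.tmp5), and subreg indices (%subreg.sub0)
# keep the '%' sigil, matching the hunks in this patch.
import re
import sys

# Letter-initial entities that nevertheless keep '%'.
KEEP = ('bb.', 'ir.', 'stack.', 'subreg.')

def to_dollar(line):
    def repl(m):
        name = m.group(1)
        if name.startswith(KEEP):
            return m.group(0)   # e.g. %bb.1, %stack.0.tmp5 stay as-is
        return '$' + name       # e.g. %sgpr0_sgpr1 -> $sgpr0_sgpr1
    # '%' followed by a letter or '_' starts a named (physical) register;
    # '%' followed by a digit is a virtual register and never matches.
    return re.sub(r'%([A-Za-z_][A-Za-z0-9_.]*)', repl, line)

if __name__ == '__main__':
    for line in sys.stdin:
        sys.stdout.write(to_dollar(line))

Used as a filter (python3 retag.py < test.mir > test.mir.new), this also leaves FileCheck captures such as %[[REG0]] and %{{[0-9]+}} untouched, since the character after '%' is not a letter there; the hunks in this patch preserve those patterns in the same way.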
Index: test/CodeGen/AMDGPU/readlane_exec0.mir =================================================================== --- test/CodeGen/AMDGPU/readlane_exec0.mir +++ test/CodeGen/AMDGPU/readlane_exec0.mir @@ -10,23 +10,23 @@ body: | bb.0: successors: %bb.1, %bb.2 - liveins: %vgpr1_vgpr2:0x00000001, %vgpr2_vgpr3:0x00000003 + liveins: $vgpr1_vgpr2:0x00000001, $vgpr2_vgpr3:0x00000003 - %vgpr4 = V_AND_B32_e32 1, %vgpr1, implicit %exec - V_CMP_EQ_U32_e32 1, killed %vgpr4, implicit-def %vcc, implicit %exec - %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 killed %vcc, implicit-def %exec, implicit-def %scc, implicit %exec - SI_MASK_BRANCH %bb.2, implicit %exec + $vgpr4 = V_AND_B32_e32 1, $vgpr1, implicit $exec + V_CMP_EQ_U32_e32 1, killed $vgpr4, implicit-def $vcc, implicit $exec + $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1: - %sgpr10 = V_READFIRSTLANE_B32 %vgpr2, implicit %exec - %sgpr11 = V_READFIRSTLANE_B32 %vgpr3, implicit %exec - %sgpr10 = S_LOAD_DWORD_IMM killed %sgpr10_sgpr11, 0, 0 + $sgpr10 = V_READFIRSTLANE_B32 $vgpr2, implicit $exec + $sgpr11 = V_READFIRSTLANE_B32 $vgpr3, implicit $exec + $sgpr10 = S_LOAD_DWORD_IMM killed $sgpr10_sgpr11, 0, 0 S_WAITCNT 127 - %vgpr0 = V_XOR_B32_e32 killed %sgpr10, killed %vgpr0, implicit %exec + $vgpr0 = V_XOR_B32_e32 killed $sgpr10, killed $vgpr0, implicit $exec bb.2: - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc ... Index: test/CodeGen/AMDGPU/reduce-saveexec.mir =================================================================== --- test/CodeGen/AMDGPU/reduce-saveexec.mir +++ test/CodeGen/AMDGPU/reduce-saveexec.mir @@ -2,146 +2,146 @@ --- # GCN-LABEL: name: reduce_and_saveexec -# GCN: %exec = S_AND_B64 %exec, killed %vcc +# GCN: $exec = S_AND_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_and_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_and_saveexec_commuted -# GCN: %exec = S_AND_B64 killed %vcc, %exec +# GCN: $exec = S_AND_B64 killed $vcc, $exec # GCN-NEXT: S_ENDPGM name: reduce_and_saveexec_commuted tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_AND_B64 killed %vcc, %exec, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_AND_B64 killed $vcc, $exec, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_and_saveexec_liveout -# GCN: %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc -# GCN-NEXT: %exec = COPY +# GCN: $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc +# GCN-NEXT: $exec = COPY name: reduce_and_saveexec_liveout tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY $sgpr0_sgpr1 S_ENDPGM ... 
--- # GCN-LABEL: name: and_saveexec -# GCN: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc +# GCN: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc # GCN-NEXT: S_ENDPGM name: and_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = COPY %exec - %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc - %exec = S_MOV_B64_term %sgpr2_sgpr3 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = COPY $exec + $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc + $exec = S_MOV_B64_term $sgpr2_sgpr3 S_ENDPGM ... --- # GCN-LABEL: name: reduce_or_saveexec -# GCN: %exec = S_OR_B64 %exec, killed %vcc +# GCN: $exec = S_OR_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_or_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_xor_saveexec -# GCN: %exec = S_XOR_B64 %exec, killed %vcc +# GCN: $exec = S_XOR_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_xor_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_andn2_saveexec -# GCN: %exec = S_ANDN2_B64 %exec, killed %vcc +# GCN: $exec = S_ANDN2_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_andn2_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_ANDN2_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_ANDN2_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_orn2_saveexec -# GCN: %exec = S_ORN2_B64 %exec, killed %vcc +# GCN: $exec = S_ORN2_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_orn2_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_ORN2_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_ORN2_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_nand_saveexec -# GCN: %exec = S_NAND_B64 %exec, killed %vcc +# GCN: $exec = S_NAND_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_nand_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_NAND_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_NAND_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- # GCN-LABEL: name: reduce_nor_saveexec -# GCN: %exec = S_NOR_B64 %exec, killed %vcc +# GCN: $exec = S_NOR_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_nor_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_NOR_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_NOR_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... 
--- # GCN-LABEL: name: reduce_xnor_saveexec -# GCN: %exec = S_XNOR_B64 %exec, killed %vcc +# GCN: $exec = S_XNOR_B64 $exec, killed $vcc # GCN-NEXT: S_ENDPGM name: reduce_xnor_saveexec tracksRegLiveness: true body: | bb.0: - %vcc = IMPLICIT_DEF - %sgpr0_sgpr1 = S_XNOR_B64 %exec, killed %vcc, implicit-def %scc - %exec = COPY killed %sgpr0_sgpr1 + $vcc = IMPLICIT_DEF + $sgpr0_sgpr1 = S_XNOR_B64 $exec, killed $vcc, implicit-def $scc + $exec = COPY killed $sgpr0_sgpr1 S_ENDPGM ... --- Index: test/CodeGen/AMDGPU/regcoal-subrange-join.mir =================================================================== --- test/CodeGen/AMDGPU/regcoal-subrange-join.mir +++ test/CodeGen/AMDGPU/regcoal-subrange-join.mir @@ -4,8 +4,8 @@ # This test will provoke a subrange join (see annotations below) during simple register coalescing # Without a fix for PR33524 this causes an unreachable in SubRange Join # -# GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY %sgpr5 -# GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY %sgpr2 +# GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY $sgpr5 +# GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY $sgpr2 # GCN-DAG: %[[REG0]].sub1:sgpr_64 = S_MOV_B32 1 # GCN-DAG: %[[REG1]].sub1:sgpr_64 = S_MOV_B32 1 @@ -82,14 +82,14 @@ - { id: 60, class: sreg_32_xm0 } - { id: 61, class: vreg_128 } liveins: - - { reg: '%sgpr2', virtual-reg: '%12' } - - { reg: '%sgpr5', virtual-reg: '%15' } + - { reg: '$sgpr2', virtual-reg: '%12' } + - { reg: '$sgpr5', virtual-reg: '%15' } body: | bb.0: - liveins: %sgpr2, %sgpr5 + liveins: $sgpr2, $sgpr5 - %15 = COPY killed %sgpr5 - %12 = COPY killed %sgpr2 + %15 = COPY killed $sgpr5 + %12 = COPY killed $sgpr2 %17 = S_MOV_B32 1 undef %18.sub1 = COPY %17 %0 = COPY %18 @@ -104,7 +104,7 @@ %1 = COPY killed %25 %26 = S_LOAD_DWORDX2_IMM %0, 2, 0 dead %27 = S_LOAD_DWORD_IMM killed %26, 0, 0 - S_CBRANCH_SCC0 %bb.1, implicit undef %scc + S_CBRANCH_SCC0 %bb.1, implicit undef $scc bb.5: %58 = COPY killed %1 @@ -112,11 +112,11 @@ S_BRANCH %bb.2 bb.1: - %30 = V_MOV_B32_e32 1036831949, implicit %exec - %31 = V_ADD_F32_e32 %30, %1.sub3, implicit %exec - %33 = V_ADD_F32_e32 %30, %1.sub2, implicit %exec - %35 = V_ADD_F32_e32 %30, %1.sub1, implicit %exec - %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit %exec + %30 = V_MOV_B32_e32 1036831949, implicit $exec + %31 = V_ADD_F32_e32 %30, %1.sub3, implicit $exec + %33 = V_ADD_F32_e32 %30, %1.sub2, implicit $exec + %35 = V_ADD_F32_e32 %30, %1.sub1, implicit $exec + %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit $exec undef %56.sub0 = COPY killed %37 %56.sub1 = COPY killed %35 %56.sub2 = COPY killed %33 @@ -131,7 +131,7 @@ %3 = COPY killed %58 %39 = S_LOAD_DWORDX2_IMM killed %0, 6, 0 %40 = S_LOAD_DWORD_IMM killed %39, 0, 0 - %43 = V_MOV_B32_e32 -1102263091, implicit %exec + %43 = V_MOV_B32_e32 -1102263091, implicit $exec %60 = COPY killed %4 %61 = COPY killed %3 @@ -140,23 +140,23 @@ %7 = COPY killed %61 %6 = COPY killed %60 - %8 = S_ADD_I32 killed %6, 1, implicit-def dead %scc - %44 = V_ADD_F32_e32 %43, %7.sub3, implicit %exec - %46 = V_ADD_F32_e32 %43, %7.sub2, implicit %exec - %48 = V_ADD_F32_e32 %43, %7.sub1, implicit %exec - %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit %exec + %8 = S_ADD_I32 killed %6, 1, implicit-def dead $scc + %44 = V_ADD_F32_e32 %43, %7.sub3, implicit $exec + %46 = V_ADD_F32_e32 %43, %7.sub2, implicit $exec + %48 = V_ADD_F32_e32 %43, %7.sub1, implicit $exec + %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit $exec undef %57.sub0 = COPY killed %50 %57.sub1 = COPY killed %48 
%57.sub2 = COPY %46 %57.sub3 = COPY killed %44 - S_CMP_LT_I32 %8, %40, implicit-def %scc + S_CMP_LT_I32 %8, %40, implicit-def $scc %60 = COPY killed %8 %61 = COPY killed %57 - S_CBRANCH_SCC1 %bb.3, implicit killed %scc + S_CBRANCH_SCC1 %bb.3, implicit killed $scc S_BRANCH %bb.4 bb.4: - EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit %exec + EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit $exec S_ENDPGM ... Index: test/CodeGen/AMDGPU/regcoalesce-dbg.mir =================================================================== --- test/CodeGen/AMDGPU/regcoalesce-dbg.mir +++ test/CodeGen/AMDGPU/regcoalesce-dbg.mir @@ -48,29 +48,29 @@ - { id: 19, class: vreg_64 } - { id: 20, class: vreg_64 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY killed %vgpr0 - %0 = COPY killed %sgpr0_sgpr1 + %3 = COPY killed $vgpr0 + %0 = COPY killed $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) - %18 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %18 = V_ASHRREV_I32_e32 31, %3, implicit $exec undef %19.sub0 = COPY killed %3 %19.sub1 = COPY killed %18 %10 = S_MOV_B32 61440 %11 = S_MOV_B32 0 - DBG_VALUE debug-use %11, debug-use %noreg, !1, !8, debug-location !9 + DBG_VALUE debug-use %11, debug-use $noreg, !1, !8, debug-location !9 undef %12.sub0 = COPY killed %11 %12.sub1 = COPY killed %10 undef %13.sub0_sub1 = COPY killed %4 %13.sub2_sub3 = COPY killed %12 - %20 = V_LSHL_B64 killed %19, 2, implicit %exec + %20 = V_LSHL_B64 killed %19, 2, implicit $exec %16 = COPY killed %5 - BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out) S_ENDPGM ... Index: test/CodeGen/AMDGPU/regcoalesce-prune.mir =================================================================== --- test/CodeGen/AMDGPU/regcoalesce-prune.mir +++ test/CodeGen/AMDGPU/regcoalesce-prune.mir @@ -10,9 +10,9 @@ tracksRegLiveness: true body: | bb.0: - undef %5.sub1 = V_MOV_B32_e32 0, implicit %exec + undef %5.sub1 = V_MOV_B32_e32 0, implicit $exec %6 = COPY %5 - S_CBRANCH_VCCZ %bb.2, implicit undef %vcc + S_CBRANCH_VCCZ %bb.2, implicit undef $vcc bb.1: %1 : sreg_32_xm0 = S_MOV_B32 0 @@ -23,9 +23,9 @@ %6 : vreg_64 = COPY killed %4 bb.2: - %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit %exec + %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit $exec bb.3: - %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit %exec + %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit $exec S_ENDPGM ... 
Index: test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir =================================================================== --- test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir +++ test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir @@ -2,7 +2,7 @@ --- # GCN-LABEL: name: mac_invalid_operands -# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit %exec +# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit $exec name: mac_invalid_operands alignment: 0 @@ -34,14 +34,14 @@ bb.0: successors: %bb.2, %bb.1 - %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit %exec - %vcc = COPY killed %7 - S_CBRANCH_VCCZ %bb.2, implicit killed %vcc + %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit $exec + $vcc = COPY killed %7 + S_CBRANCH_VCCZ %bb.2, implicit killed $vcc bb.1: successors: %bb.3 - %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec + %4 = V_ADD_F32_e32 undef %6, undef %5, implicit $exec undef %12.sub0 = COPY killed %4 %17 = COPY killed %12 S_BRANCH %bb.3 @@ -49,7 +49,7 @@ bb.2: successors: %bb.3 - %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec + %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit $exec undef %13.sub0 = COPY %8 %13.sub1 = COPY %8 %13.sub2 = COPY killed %8 @@ -58,12 +58,12 @@ bb.3: %1 = COPY killed %17 - FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit $exec, implicit $flat_scr %14 = COPY %1.sub1 %16 = COPY killed %1.sub0 undef %15.sub0 = COPY killed %16 %15.sub1 = COPY killed %14 - FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM ... 
@@ -73,13 +73,13 @@ # GCN-LABEL: name: vreg_does_not_dominate -# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit %exec -# GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit %exec +# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit $exec +# GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec # GCN: undef %9.sub2:vreg_128 = COPY %7.sub0 -# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit %exec -# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec -# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit %exec +# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit $exec +# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec +# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit $exec # GCN: BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, # GCN: BUFFER_STORE_DWORD_OFFEN %9.sub2, %0, @@ -101,43 +101,43 @@ - { id: 5, class: sreg_64, preferred-register: '' } - { id: 6, class: vreg_128, preferred-register: '' } liveins: - - { reg: '%vgpr0', virtual-reg: '%0' } - - { reg: '%sgpr30_sgpr31', virtual-reg: '%5' } + - { reg: '$vgpr0', virtual-reg: '%0' } + - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' } body: | bb.0: successors: %bb.2, %bb.1 - liveins: %vgpr0, %sgpr30_sgpr31, %sgpr5 + liveins: $vgpr0, $sgpr30_sgpr31, $sgpr5 - %5 = COPY %sgpr30_sgpr31 - %0 = COPY %vgpr0 - undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit %exec - %6.sub0 = V_MOV_B32_e32 0, implicit %exec + %5 = COPY $sgpr30_sgpr31 + %0 = COPY $vgpr0 + undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit $exec + %6.sub0 = V_MOV_B32_e32 0, implicit $exec %6.sub2 = COPY %6.sub0 - S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc S_BRANCH %bb.1 bb.1: successors: %bb.2 - %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec - %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec - %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit %exec + %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit $exec + %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec + %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit $exec %6.sub2 = COPY %6.sub0 bb.2: - BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 12, 0, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 8, 0, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 4, 0, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec - %sgpr30_sgpr31 = COPY %5 - %sgpr5 = COPY %sgpr5 - S_SETPC_B64_return %sgpr30_sgpr31, implicit %sgpr5 + BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 12, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec + $sgpr30_sgpr31 = COPY %5 + $sgpr5 = COPY $sgpr5 + S_SETPC_B64_return $sgpr30_sgpr31, implicit $sgpr5 ... 
# GCN-LABEL: name: inf_loop_tied_operand # GCN: bb.0: -# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit %exec +# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit $exec # GCN-NEXT: dead undef %3.sub1:vreg_128 = COPY %2.sub0 name: inf_loop_tied_operand @@ -148,7 +148,7 @@ - { id: 2, class: vreg_128, preferred-register: '' } body: | bb.0: - %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit %exec + %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit $exec undef %2.sub0 = COPY %1 %2.sub1 = COPY %1 Index: test/CodeGen/AMDGPU/rename-independent-subregs.mir =================================================================== --- test/CodeGen/AMDGPU/rename-independent-subregs.mir +++ test/CodeGen/AMDGPU/rename-independent-subregs.mir @@ -50,7 +50,7 @@ body: | bb.0: S_NOP 0, implicit-def undef %0.sub2 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.1: Index: test/CodeGen/AMDGPU/scalar-store-cache-flush.mir =================================================================== --- test/CodeGen/AMDGPU/scalar-store-cache-flush.mir +++ test/CodeGen/AMDGPU/scalar-store-cache-flush.mir @@ -56,7 +56,7 @@ body: | bb.0: - S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0 + S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0 S_ENDPGM ... --- @@ -72,7 +72,7 @@ body: | bb.0: - S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0 + S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0 S_DCACHE_WB S_ENDPGM ... @@ -91,7 +91,7 @@ body: | bb.0: S_DCACHE_WB - S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0 + S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0 S_ENDPGM ... --- @@ -122,11 +122,11 @@ body: | bb.0: - S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0 + S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0 S_ENDPGM bb.1: - S_STORE_DWORD_SGPR undef %sgpr4, undef %sgpr6_sgpr7, undef %m0, 0 + S_STORE_DWORD_SGPR undef $sgpr4, undef $sgpr6_sgpr7, undef $m0, 0 S_ENDPGM ... ... @@ -152,7 +152,7 @@ S_ENDPGM bb.1: - S_STORE_DWORD_SGPR undef %sgpr4, undef %sgpr6_sgpr7, undef %m0, 0 + S_STORE_DWORD_SGPR undef $sgpr4, undef $sgpr6_sgpr7, undef $m0, 0 S_ENDPGM ... --- @@ -168,6 +168,6 @@ body: | bb.0: - S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0 - SI_RETURN_TO_EPILOG undef %vgpr0 + S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0 + SI_RETURN_TO_EPILOG undef $vgpr0 ... 
Index: test/CodeGen/AMDGPU/sched-crash-dbg-value.mir =================================================================== --- test/CodeGen/AMDGPU/sched-crash-dbg-value.mir +++ test/CodeGen/AMDGPU/sched-crash-dbg-value.mir @@ -169,7 +169,7 @@ --- # CHECK: name: sched_dbg_value_crash -# CHECK: DBG_VALUE debug-use %99, debug-use %noreg, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8 +# CHECK: DBG_VALUE debug-use %99, debug-use $noreg, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8 name: sched_dbg_value_crash alignment: 0 @@ -179,11 +179,11 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%vgpr0', virtual-reg: '%0' } - - { reg: '%vgpr1', virtual-reg: '%1' } - - { reg: '%vgpr2', virtual-reg: '%2' } - - { reg: '%sgpr4_sgpr5', virtual-reg: '%3' } - - { reg: '%sgpr6_sgpr7', virtual-reg: '%4' } + - { reg: '$vgpr0', virtual-reg: '%0' } + - { reg: '$vgpr1', virtual-reg: '%1' } + - { reg: '$vgpr2', virtual-reg: '%2' } + - { reg: '$sgpr4_sgpr5', virtual-reg: '%3' } + - { reg: '$sgpr6_sgpr7', virtual-reg: '%4' } fixedStack: stack: - { id: 0, name: tmp5, type: default, offset: 0, size: 128, alignment: 16, @@ -192,104 +192,104 @@ constants: body: | bb.0.bb: - liveins: %vgpr0, %vgpr1, %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr32, %sgpr101 + liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr32, $sgpr101 - %4:sgpr_64 = COPY %sgpr6_sgpr7 - %3:sgpr_64 = COPY %sgpr4_sgpr5 - %2:vgpr_32 = COPY %vgpr2 - %1:vgpr_32 = COPY %vgpr1 - %0:vgpr_32 = COPY %vgpr0 + %4:sgpr_64 = COPY $sgpr6_sgpr7 + %3:sgpr_64 = COPY $sgpr4_sgpr5 + %2:vgpr_32 = COPY $vgpr2 + %1:vgpr_32 = COPY $vgpr1 + %0:vgpr_32 = COPY $vgpr0 %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %6:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %7:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 16, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %8:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 24, 0 %9:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 32, 0 %10:sreg_64_xexec = S_LOAD_DWORDX2_IMM %3, 4, 0 - %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead %scc + %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead $scc %12:sreg_32_xm0 = S_MUL_I32 %11, %10.sub1 - %13:vgpr_32 = V_MUL_LO_I32 0, %0, implicit %exec - %14:vgpr_32 = V_MUL_LO_I32 %1, %10.sub1, implicit %exec - %15:vgpr_32 = V_ADD_I32_e32 0, %13, implicit-def dead %vcc, implicit %exec - %16:vgpr_32 = V_ADD_I32_e32 0, %15, implicit-def dead %vcc, implicit %exec + %13:vgpr_32 = V_MUL_LO_I32 0, %0, implicit $exec + %14:vgpr_32 = V_MUL_LO_I32 %1, %10.sub1, implicit $exec + %15:vgpr_32 = V_ADD_I32_e32 0, %13, implicit-def dead $vcc, implicit $exec + %16:vgpr_32 = V_ADD_I32_e32 0, %15, implicit-def dead $vcc, implicit $exec %17:vgpr_32 = IMPLICIT_DEF %18:sreg_64 = S_MOV_B64 0 %19:sreg_32_xm0_xexec = IMPLICIT_DEF - %20:vgpr_32 = V_ADD_I32_e32 %19, %0, implicit-def dead %vcc, implicit %exec - %21:vreg_64, dead %22:sreg_64 = V_MAD_I64_I32 %20, 12, %7, 0, implicit %exec - %23:vgpr_32 = GLOBAL_LOAD_DWORD %21, 4, 0, 0, implicit %exec - %24:vreg_64, dead %25:sreg_64 = V_MAD_I64_I32 %20, 48, %8, 0, implicit %exec + %20:vgpr_32 = V_ADD_I32_e32 %19, %0, implicit-def dead $vcc, implicit $exec + %21:vreg_64, dead %22:sreg_64 = V_MAD_I64_I32 %20, 12, %7, 0, implicit $exec + %23:vgpr_32 = 
GLOBAL_LOAD_DWORD %21, 4, 0, 0, implicit $exec + %24:vreg_64, dead %25:sreg_64 = V_MAD_I64_I32 %20, 48, %8, 0, implicit $exec %26:vreg_128 = IMPLICIT_DEF undef %27.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 0, 0 %27.sub1:sreg_64_xexec = S_MOV_B32 0 - %28:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead %scc - undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def %scc - %29.sub1:sreg_64 = S_ADDC_U32 %5.sub1, %28.sub1, implicit-def dead %scc, implicit killed %scc + %28:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead $scc + undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def $scc + %29.sub1:sreg_64 = S_ADDC_U32 %5.sub1, %28.sub1, implicit-def dead $scc, implicit killed $scc undef %30.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 4, 0 %27.sub0:sreg_64_xexec = IMPLICIT_DEF - %31:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead %scc - %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def %scc - %33:sgpr_32 = S_ADDC_U32 %5.sub1, %31.sub1, implicit-def dead %scc, implicit killed %scc + %31:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead $scc + %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def $scc + %33:sgpr_32 = S_ADDC_U32 %5.sub1, %31.sub1, implicit-def dead $scc, implicit killed $scc %34:vgpr_32 = IMPLICIT_DEF - %35:vreg_64, dead %36:sreg_64 = V_MAD_I64_I32 %23, %34, 0, 0, implicit %exec - %37:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 32, 0, 0, implicit %exec - undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit %exec + %35:vreg_64, dead %36:sreg_64 = V_MAD_I64_I32 %23, %34, 0, 0, implicit $exec + %37:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 32, 0, 0, implicit $exec + undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit $exec %38.sub0:vreg_64 = COPY %37.sub0 - %39:vreg_64 = V_LSHLREV_B64 3, %38, implicit %exec - undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit %exec + %39:vreg_64 = V_LSHLREV_B64 3, %38, implicit $exec + undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit $exec %42:vgpr_32 = COPY %33 - %40.sub1:vreg_64, dead %43:sreg_64_xexec = V_ADDC_U32_e64 %42, %39.sub1, %41, implicit %exec - %44:vreg_64 = GLOBAL_LOAD_DWORDX2 %40, 0, 0, 0, implicit %exec :: (load 8 from %ir.tmp34) + %40.sub1:vreg_64, dead %43:sreg_64_xexec = V_ADDC_U32_e64 %42, %39.sub1, %41, implicit $exec + %44:vreg_64 = GLOBAL_LOAD_DWORDX2 %40, 0, 0, 0, implicit $exec :: (load 8 from %ir.tmp34) undef %45.sub1:vreg_64 = IMPLICIT_DEF %45.sub0:vreg_64 = COPY %37.sub1 - %46:vreg_64 = V_LSHLREV_B64 3, %45, implicit %exec - undef %47.sub0:vreg_64, %48:sreg_64_xexec = V_ADD_I32_e64 %32, %46.sub0, implicit %exec + %46:vreg_64 = V_LSHLREV_B64 3, %45, implicit $exec + undef %47.sub0:vreg_64, %48:sreg_64_xexec = V_ADD_I32_e64 %32, %46.sub0, implicit $exec %49:vgpr_32 = COPY %33 - %47.sub1:vreg_64, dead %50:sreg_64_xexec = V_ADDC_U32_e64 %49, %46.sub1, %48, implicit %exec + %47.sub1:vreg_64, dead %50:sreg_64_xexec = V_ADDC_U32_e64 %49, %46.sub1, %48, implicit $exec %51:vreg_64 = IMPLICIT_DEF - undef %52.sub0:vreg_64 = GLOBAL_LOAD_DWORD %35, 40, 0, 0, implicit %exec :: (load 4 from %ir.18 + 8) + undef %52.sub0:vreg_64 = GLOBAL_LOAD_DWORD %35, 40, 0, 0, implicit $exec :: (load 4 from %ir.18 + 8) %52.sub1:vreg_64 = IMPLICIT_DEF - %53:vreg_64 = V_LSHLREV_B64 3, %52, implicit %exec - undef %54.sub0:vreg_64, %55:sreg_64_xexec = V_ADD_I32_e64 0, %53.sub0, implicit %exec + %53:vreg_64 = V_LSHLREV_B64 3, %52, implicit $exec + undef %54.sub0:vreg_64, %55:sreg_64_xexec = V_ADD_I32_e64 0, %53.sub0, implicit $exec %56:vgpr_32 = COPY %33 - 
%54.sub1:vreg_64, dead %57:sreg_64_xexec = V_ADDC_U32_e64 0, %53.sub1, %55, implicit %exec + %54.sub1:vreg_64, dead %57:sreg_64_xexec = V_ADDC_U32_e64 0, %53.sub1, %55, implicit $exec %58:vreg_64 = IMPLICIT_DEF %30.sub1:sreg_64_xexec = IMPLICIT_DEF %59:sreg_64 = IMPLICIT_DEF - %60:sreg_32_xm0 = S_ADD_U32 %5.sub0, %59.sub0, implicit-def %scc - %61:sgpr_32 = S_ADDC_U32 %5.sub1, %59.sub1, implicit-def dead %scc, implicit killed %scc - %62:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 0, 0, 0, implicit %exec :: (load 8 from %ir.20, align 4) - undef %63.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %62.sub0, implicit %exec + %60:sreg_32_xm0 = S_ADD_U32 %5.sub0, %59.sub0, implicit-def $scc + %61:sgpr_32 = S_ADDC_U32 %5.sub1, %59.sub1, implicit-def dead $scc, implicit killed $scc + %62:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 0, 0, 0, implicit $exec :: (load 8 from %ir.20, align 4) + undef %63.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %62.sub0, implicit $exec %63.sub0:vreg_64 = COPY %62.sub0 %64:vreg_64 = IMPLICIT_DEF - undef %65.sub0:vreg_64, %66:sreg_64_xexec = V_ADD_I32_e64 %60, %64.sub0, implicit %exec + undef %65.sub0:vreg_64, %66:sreg_64_xexec = V_ADD_I32_e64 %60, %64.sub0, implicit $exec %67:vgpr_32 = COPY %61 - %65.sub1:vreg_64, dead %68:sreg_64_xexec = V_ADDC_U32_e64 %67, %64.sub1, %66, implicit %exec - %69:vreg_128 = GLOBAL_LOAD_DWORDX4 %65, 0, 0, 0, implicit %exec :: (load 16 from %ir.tmp58) + %65.sub1:vreg_64, dead %68:sreg_64_xexec = V_ADDC_U32_e64 %67, %64.sub1, %66, implicit $exec + %69:vreg_128 = GLOBAL_LOAD_DWORDX4 %65, 0, 0, 0, implicit $exec :: (load 16 from %ir.tmp58) undef %70.sub1:vreg_64 = IMPLICIT_DEF %70.sub0:vreg_64 = IMPLICIT_DEF %71:vreg_64 = IMPLICIT_DEF - undef %72.sub0:vreg_64, %73:sreg_64_xexec = V_ADD_I32_e64 %60, %71.sub0, implicit %exec + undef %72.sub0:vreg_64, %73:sreg_64_xexec = V_ADD_I32_e64 %60, %71.sub0, implicit $exec %74:vgpr_32 = COPY %61 - %72.sub1:vreg_64, dead %75:sreg_64_xexec = V_ADDC_U32_e64 0, %71.sub1, %73, implicit %exec - %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, implicit %exec + %72.sub1:vreg_64, dead %75:sreg_64_xexec = V_ADDC_U32_e64 0, %71.sub1, %73, implicit $exec + %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, implicit $exec %77:vgpr_32 = IMPLICIT_DEF %78:vgpr_32 = IMPLICIT_DEF - %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit %exec + %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit $exec %80:vgpr_32 = IMPLICIT_DEF %81:vgpr_32 = IMPLICIT_DEF %84:vgpr_32 = IMPLICIT_DEF - BUFFER_STORE_DWORD_OFFEN %84, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 108, 0, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFEN %81, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 104, 0, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFEN %80, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 100, 0, 0, 0, implicit %exec - BUFFER_STORE_DWORD_OFFEN %78, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 96, 0, 0, 0, implicit %exec + BUFFER_STORE_DWORD_OFFEN %84, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 108, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFEN %81, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 104, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFEN %80, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 100, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFEN %78, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 96, 0, 0, 0, implicit $exec %85:vgpr_32 = IMPLICIT_DEF %86:vgpr_32 = IMPLICIT_DEF %87:vgpr_32 = IMPLICIT_DEF %88:vgpr_32 = IMPLICIT_DEF %90:vgpr_32 = IMPLICIT_DEF - %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit %exec - 
%95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit %exec - %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit %exec + %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit $exec + %95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit $exec + %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit $exec %98:vgpr_32 = IMPLICIT_DEF %99:vgpr_32 = IMPLICIT_DEF %100:vgpr_32 = IMPLICIT_DEF @@ -298,18 +298,18 @@ %103:vgpr_32 = IMPLICIT_DEF %104:vgpr_32 = IMPLICIT_DEF %105:vgpr_32 = IMPLICIT_DEF - %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit %exec - %108:vgpr_32 = V_RCP_F32_e32 0, implicit %exec + %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit $exec + %108:vgpr_32 = V_RCP_F32_e32 0, implicit $exec %109:vgpr_32 = IMPLICIT_DEF - %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit %exec - %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit %exec - %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit %exec + %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec + %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit $exec + %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit $exec %114:vgpr_32 = IMPLICIT_DEF %115:vgpr_32 = IMPLICIT_DEF %116:vgpr_32 = IMPLICIT_DEF - %vcc = IMPLICIT_DEF - %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed %vcc, implicit %exec - %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit %exec + $vcc = IMPLICIT_DEF + %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed $vcc, implicit $exec + %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit $exec %119:vgpr_32 = IMPLICIT_DEF %120:vgpr_32 = IMPLICIT_DEF %121:vgpr_32 = IMPLICIT_DEF @@ -319,15 +319,15 @@ %125:vgpr_32 = IMPLICIT_DEF %126:vgpr_32 = IMPLICIT_DEF DBG_VALUE debug-use %103, debug-use _, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8 - ADJCALLSTACKUP 0, 0, implicit-def %sgpr32, implicit %sgpr32 - %127:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead %scc - %sgpr4 = COPY %sgpr101 - %vgpr0 = COPY %124 - %vgpr1_vgpr2 = IMPLICIT_DEF - %vgpr3 = COPY %126 - dead %sgpr30_sgpr31 = SI_CALL %127, @func, csr_amdgpu_highregs, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit %vgpr0, implicit %vgpr1_vgpr2, implicit killed %vgpr3 - ADJCALLSTACKDOWN 0, 0, implicit-def %sgpr32, implicit %sgpr32 - %128:vreg_64, dead %129:sreg_64 = V_MAD_I64_I32 %20, %34, 0, 0, implicit %exec + ADJCALLSTACKUP 0, 0, implicit-def $sgpr32, implicit $sgpr32 + %127:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc + $sgpr4 = COPY $sgpr101 + $vgpr0 = COPY %124 + $vgpr1_vgpr2 = IMPLICIT_DEF + $vgpr3 = COPY %126 + dead $sgpr30_sgpr31 = SI_CALL %127, @func, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $vgpr0, implicit $vgpr1_vgpr2, implicit killed $vgpr3 + ADJCALLSTACKDOWN 0, 0, implicit-def $sgpr32, implicit $sgpr32 + %128:vreg_64, dead %129:sreg_64 = V_MAD_I64_I32 %20, %34, 0, 0, implicit $exec S_ENDPGM ... 
Index: test/CodeGen/AMDGPU/schedule-regpressure.mir
===================================================================
--- test/CodeGen/AMDGPU/schedule-regpressure.mir
+++ test/CodeGen/AMDGPU/schedule-regpressure.mir
@@ -4,7 +4,7 @@
 # Check there is no SReg_32 pressure created by DS_* instructions because of M0 use

 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit %m0, implicit %exec
+# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit $m0, implicit $exec
 # CHECK: Pressure Diff : {{$}}
 # CHECK: SU({{.*}} DS_WRITE_B32
@@ -27,7 +27,7 @@
   - { id: 7, class: vgpr_32 }
   - { id: 8, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -44,14 +44,14 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0:
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5

-    %1 = COPY %sgpr4_sgpr5
+    %1 = COPY $sgpr4_sgpr5
     %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %m0 = S_MOV_B32 -1
+    $m0 = S_MOV_B32 -1
     %7 = COPY %5
-    %6 = DS_READ_B32 %7, 0, 0, implicit %m0, implicit %exec
-    DS_WRITE_B32 %7, %6, 4, 0, implicit killed %m0, implicit %exec
+    %6 = DS_READ_B32 %7, 0, 0, implicit $m0, implicit $exec
+    DS_WRITE_B32 %7, %6, 4, 0, implicit killed $m0, implicit $exec
     S_ENDPGM

...
Index: test/CodeGen/AMDGPU/sdwa-gfx9.mir
===================================================================
--- test/CodeGen/AMDGPU/sdwa-gfx9.mir
+++ test/CodeGen/AMDGPU/sdwa-gfx9.mir
@@ -5,13 +5,13 @@
 # GCN-LABEL: {{^}}name: add_shr_i32
 # GCN: [[SMOV:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 123

-# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 [[SMOV]], killed [[SHIFT]], implicit-def %vcc, implicit %exec
+# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 [[SMOV]], killed [[SHIFT]], implicit-def $vcc, implicit $exec

-# VI: [[VMOV:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[SMOV]], implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[VMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def %vcc, implicit %exec
+# VI: [[VMOV:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[SMOV]], implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[VMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def $vcc, implicit $exec

-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[SMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[SMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def $vcc, implicit $exec

 ---
 name: add_shr_i32
@@ -32,30 +32,30 @@
   - { id: 12, class: sreg_32_xm0 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %12 = S_MOV_B32 123
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_ADD_I32_e32 %12, killed %10, implicit-def %vcc, implicit %exec
-    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_ADD_I32_e32 %12, killed %10, implicit-def $vcc, implicit $exec
+    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

...

 # GCN-LABEL: {{^}}name: trunc_shr_f32

-# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def %vcc, implicit %exec
+# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec

-# VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def %vcc, implicit %exec
+# VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec

-#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit %exec
+#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit $exec

 ---
 name: trunc_shr_f32
@@ -75,14 +75,14 @@
   - { id: 11, class: vgpr_32 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def %vcc, implicit %exec
-    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def $vcc, implicit $exec
+    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
Index: test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
===================================================================
--- test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
+++ test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -3,29 +3,29 @@

 # GFX89-LABEL: {{^}}name: vop1_instructions

-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec

-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec

-# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit $exec

-# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $exec

 ---
@@ -84,105 +84,105 @@
   - { id: 100, class: vgpr_32 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_MOV_B32_e32 %10, implicit %exec
-    %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
-    %14 = V_FRACT_F32_e32 123, implicit %exec
-    %15 = V_LSHLREV_B32_e64 16, %14, implicit %exec
-    %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
-    %17 = V_SIN_F32_e32 %16, implicit %exec
-    %18 = V_LSHLREV_B32_e64 16, %17, implicit %exec
-    %19 = V_LSHRREV_B32_e64 16, %18, implicit %exec
-    %20 = V_CVT_U32_F32_e32 %19, implicit %exec
-    %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
-    %23 = V_CVT_F32_I32_e32 123, implicit %exec
-    %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
-
-    %25 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %26 = V_MOV_B32_e64 %25, implicit %exec
-    %26 = V_LSHLREV_B32_e64 16, %26, implicit %exec
-    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit %exec
-    %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
-    %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
-    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit %exec
-    %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
-    %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
-    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit %exec
-    %34 = V_LSHLREV_B32_e64 16, %33, implicit %exec
-    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit %exec
-    %36 = V_LSHLREV_B32_e64 16, %35, implicit %exec
-
-
-    %37 = V_LSHRREV_B32_e64 16, %36, implicit %exec
-    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit %exec
-    %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
-    %40 = V_LSHRREV_B32_e64 16, %39, implicit %exec
-    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit %exec
-    %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
-    %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
-    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit %exec
-    %45 = V_LSHLREV_B32_e64 16, %44, implicit %exec
-    %46 = V_LSHRREV_B32_e64 16, %45, implicit %exec
-    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit %exec
-    %48 = V_LSHLREV_B32_e64 16, %47, implicit %exec
-
-
-    %100 = V_MOV_B32_e32 %48, implicit %exec
-
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_MOV_B32_e32 %10, implicit $exec
+    %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
+    %14 = V_FRACT_F32_e32 123, implicit $exec
+    %15 = V_LSHLREV_B32_e64 16, %14, implicit $exec
+    %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
+    %17 = V_SIN_F32_e32 %16, implicit $exec
+    %18 = V_LSHLREV_B32_e64 16, %17, implicit $exec
+    %19 = V_LSHRREV_B32_e64 16, %18, implicit $exec
+    %20 = V_CVT_U32_F32_e32 %19, implicit $exec
+    %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
+    %23 = V_CVT_F32_I32_e32 123, implicit $exec
+    %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
+
+    %25 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %26 = V_MOV_B32_e64 %25, implicit $exec
+    %26 = V_LSHLREV_B32_e64 16, %26, implicit $exec
+    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $exec
+    %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
+    %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
+    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $exec
+    %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
+    %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
+    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $exec
+    %34 = V_LSHLREV_B32_e64 16, %33, implicit $exec
+    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $exec
+    %36 = V_LSHLREV_B32_e64 16, %35, implicit $exec
+
+
+    %37 = V_LSHRREV_B32_e64 16, %36, implicit $exec
+    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $exec
+    %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
+    %40 = V_LSHRREV_B32_e64 16, %39, implicit $exec
+    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $exec
+    %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
+    %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
+    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $exec
+    %45 = V_LSHLREV_B32_e64 16, %44, implicit $exec
+    %46 = V_LSHRREV_B32_e64 16, %45, implicit $exec
+    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $exec
+    %48 = V_LSHLREV_B32_e64 16, %47, implicit $exec
+
+
+    %100 = V_MOV_B32_e32 %48, implicit $exec
+
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

...
 ---
 # GCN-LABEL: {{^}}name: vop2_instructions

-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec

-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec

-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec

-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec

-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec

-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec

 name: vop2_instructions
 tracksRegLiveness: true
@@ -251,114 +251,114 @@
   - { id: 100, class: vgpr_32 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
-    %11 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %12 = V_AND_B32_e32 %6, %11, implicit %exec
-    %13 = V_LSHLREV_B32_e64 16, %12, implicit %exec
-    %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
-    %15 = V_BFE_U32 %13, 8, 8, implicit %exec
-    %16 = V_ADD_F32_e32 %14, %15, implicit %exec
-    %17 = V_LSHLREV_B32_e64 16, %16, implicit %exec
-    %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
-    %19 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %20 = V_SUB_F16_e32 %18, %19, implicit %exec
-    %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
-    %22 = V_BFE_U32 %20, 8, 8, implicit %exec
-    %23 = V_MAC_F32_e32 %21, %22, %22, implicit %exec
-    %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
-    %25 = V_LSHRREV_B32_e64 16, %24, implicit %exec
-    %26 = V_BFE_U32 %24, 8, 8, implicit %exec
-    %27 = V_MAC_F16_e32 %25, %26, %26, implicit %exec
-    %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
-
-    %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
-    %30 = V_AND_B32_e64 23, %29, implicit %exec
-    %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
-    %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
-    %33 = V_BFE_U32 %31, 8, 8, implicit %exec
-    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit %exec
-    %35 = V_LSHLREV_B32_e64 16, %34, implicit %exec
-    %37 = V_BFE_U32 %35, 8, 8, implicit %exec
-    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit %exec
-    %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
-    %40 = V_BFE_U32 %39, 8, 8, implicit %exec
-    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit %exec
-    %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
-    %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
-    %44 = V_BFE_U32 %42, 8, 8, implicit %exec
-    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit %exec
-    %46 = V_LSHLREV_B32_e64 16, %45, implicit %exec
-
-    %47 = V_LSHRREV_B32_e64 16, %46, implicit %exec
-    %48 = V_BFE_U32 %46, 8, 8, implicit %exec
-    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit %exec
-    %50 = V_LSHLREV_B32_e64 16, %49, implicit %exec
-    %51 = V_BFE_U32 %50, 8, 8, implicit %exec
-    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit %exec
-    %53 = V_LSHLREV_B32_e64 16, %52, implicit %exec
-    %54 = V_BFE_U32 %53, 8, 8, implicit %exec
-    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit %exec
-    %56 = V_LSHLREV_B32_e64 16, %55, implicit %exec
-    %57 = V_LSHRREV_B32_e64 16, %56, implicit %exec
-    %58 = V_BFE_U32 %56, 8, 8, implicit %exec
-    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit %exec
-    %60 = V_LSHLREV_B32_e64 16, %59, implicit %exec
-
-    %100 = V_MOV_B32_e32 %60, implicit %exec
-
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %11 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %12 = V_AND_B32_e32 %6, %11, implicit $exec
+    %13 = V_LSHLREV_B32_e64 16, %12, implicit $exec
+    %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
+    %15 = V_BFE_U32 %13, 8, 8, implicit $exec
+    %16 = V_ADD_F32_e32 %14, %15, implicit $exec
+    %17 = V_LSHLREV_B32_e64 16, %16, implicit $exec
+    %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
+    %19 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %20 = V_SUB_F16_e32 %18, %19, implicit $exec
+    %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
+    %22 = V_BFE_U32 %20, 8, 8, implicit $exec
+    %23 = V_MAC_F32_e32 %21, %22, %22, implicit $exec
+    %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
+    %25 = V_LSHRREV_B32_e64 16, %24, implicit $exec
+    %26 = V_BFE_U32 %24, 8, 8, implicit $exec
+    %27 = V_MAC_F16_e32 %25, %26, %26, implicit $exec
+    %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
+
+    %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
+    %30 = V_AND_B32_e64 23, %29, implicit $exec
+    %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
+    %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
+    %33 = V_BFE_U32 %31, 8, 8, implicit $exec
+    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $exec
+    %35 = V_LSHLREV_B32_e64 16, %34, implicit $exec
+    %37 = V_BFE_U32 %35, 8, 8, implicit $exec
+    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $exec
+    %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
+    %40 = V_BFE_U32 %39, 8, 8, implicit $exec
+    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $exec
+    %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
+    %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
+    %44 = V_BFE_U32 %42, 8, 8, implicit $exec
+    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $exec
+    %46 = V_LSHLREV_B32_e64 16, %45, implicit $exec
+
+    %47 = V_LSHRREV_B32_e64 16, %46, implicit $exec
+    %48 = V_BFE_U32 %46, 8, 8, implicit $exec
+    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $exec
+    %50 = V_LSHLREV_B32_e64 16, %49, implicit $exec
+    %51 = V_BFE_U32 %50, 8, 8, implicit $exec
+    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $exec
+    %53 = V_LSHLREV_B32_e64 16, %52, implicit $exec
+    %54 = V_BFE_U32 %53, 8, 8, implicit $exec
+    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $exec
+    %56 = V_LSHLREV_B32_e64 16, %55, implicit $exec
+    %57 = V_LSHRREV_B32_e64 16, %56, implicit $exec
+    %58 = V_BFE_U32 %56, 8, 8, implicit $exec
+    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $exec
+    %60 = V_LSHLREV_B32_e64 16, %59, implicit $exec
+
+    %100 = V_MOV_B32_e32 %60, implicit $exec
+
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

...
 ---
 # GCN-LABEL: {{^}}name: vopc_instructions

-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit %exec
-# GFX89: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX89: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX89: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX89: %vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+# GFX89: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX89: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX89: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX89: $vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec

-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def %exec, implicit %exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def $exec, implicit $exec

-# GFX9: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit %exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit %exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec

-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec

-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def %exec, implicit %exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def $exec, implicit $exec

@@ -396,52 +396,52 @@
   - { id: 100, class: vgpr_32 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
-    %10 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMP_EQ_F32_e32 123, killed %10, implicit-def %vcc, implicit %exec
-    %11 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMPX_GT_F32_e32 123, killed %11, implicit-def %vcc, implicit-def %exec, implicit %exec
-    %12 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMP_LT_I32_e32 123, killed %12, implicit-def %vcc, implicit %exec
-    %13 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMPX_EQ_I32_e32 123, killed %13, implicit-def %vcc, implicit-def %exec, implicit %exec
-
-    %14 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit %exec
-    %15 = V_AND_B32_e64 %5, %3, implicit %exec
-    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def %exec, implicit %exec
-    %16 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit %exec
-    %17 = V_AND_B32_e64 %5, %3, implicit %exec
-    %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def %exec, implicit %exec
-
-    %20 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit %exec
-    %21 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def %exec, implicit %exec
-    %23 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit %exec
-    %24 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def %exec, implicit %exec
-    %25 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def %exec, implicit %exec
-    %26 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def %exec, implicit %exec
-    %27 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def %exec, implicit %exec
-
-
-    %100 = V_MOV_B32_e32 %vcc_lo, implicit %exec
-
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMP_EQ_F32_e32 123, killed %10, implicit-def $vcc, implicit $exec
+    %11 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMPX_GT_F32_e32 123, killed %11, implicit-def $vcc, implicit-def $exec, implicit $exec
+    %12 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMP_LT_I32_e32 123, killed %12, implicit-def $vcc, implicit $exec
+    %13 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMPX_EQ_I32_e32 123, killed %13, implicit-def $vcc, implicit-def $exec, implicit $exec
+
+    %14 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit $exec
+    %15 = V_AND_B32_e64 %5, %3, implicit $exec
+    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def $exec, implicit $exec
+    %16 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit $exec
+    %17 = V_AND_B32_e64 %5, %3, implicit $exec
+    %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def $exec, implicit $exec
+
+    %20 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit $exec
+    %21 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def $exec, implicit $exec
+    %23 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit $exec
+    %24 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def $exec, implicit $exec
+    %25 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def $exec, implicit $exec
+    %26 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def $exec, implicit $exec
+    %27 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def $exec, implicit $exec
+
+
+    %100 = V_MOV_B32_e32 $vcc_lo, implicit $exec
+
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
Index: test/CodeGen/AMDGPU/sdwa-preserve.mir
===================================================================
--- test/CodeGen/AMDGPU/sdwa-preserve.mir
+++ test/CodeGen/AMDGPU/sdwa-preserve.mir
@@ -31,26 +31,26 @@
   - { id: 13, class: vgpr_32 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %4 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-
-    %5 = V_AND_B32_e32 65535, %3, implicit %exec
-    %6 = V_LSHRREV_B32_e64 16, %4, implicit %exec
-    %7 = V_BFE_U32 %3, 8, 8, implicit %exec
-    %8 = V_LSHRREV_B32_e32 24, %4, implicit %exec
-
-    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit %exec
-    %10 = V_LSHLREV_B16_e64 8, %9, implicit %exec
-    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit %exec
-    %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
-
-    %13 = V_OR_B32_e64 %10, %12, implicit %exec
-
-    FLAT_STORE_DWORD %0, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %4 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+
+    %5 = V_AND_B32_e32 65535, %3, implicit $exec
+    %6 = V_LSHRREV_B32_e64 16, %4, implicit $exec
+    %7 = V_BFE_U32 %3, 8, 8, implicit $exec
+    %8 = V_LSHRREV_B32_e32 24, %4, implicit $exec
+
+    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit $exec
+    %10 = V_LSHLREV_B16_e64 8, %9, implicit $exec
+    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit $exec
+    %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
+
+    %13 = V_OR_B32_e64 %10, %12, implicit $exec
+
+    FLAT_STORE_DWORD %0, %13, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
Index: test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
===================================================================
--- test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
+++ test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
@@ -183,7 +183,7 @@
   - { id: 82, class: vgpr_32 }
   - { id: 83, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -200,13 +200,13 @@
 body: |
   bb.0.bb:
     successors: %bb.2.bb2(0x80000000)
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5

-    %4 = COPY %sgpr4_sgpr5
+    %4 = COPY $sgpr4_sgpr5
     %9 = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8 = S_MOV_B64 0
     %7 = COPY %9
-    %30 = V_MOV_B32_e32 1, implicit %exec
+    %30 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.2.bb2

   bb.1.bb1:
@@ -217,36 +217,36 @@
     %0 = PHI %8, %bb.0.bb, %1, %bb.2.bb2
     %13 = COPY %7.sub1
-    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def %scc
-    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead %scc, implicit %scc
+    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def $scc
+    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead $scc, implicit $scc
     %16 = REG_SEQUENCE %14, 1, %15, 2
     %18 = COPY %16
-    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.uglygep45)
-    %60 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %61 = V_LSHLREV_B32_e32 2, killed %60, implicit %exec
-    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def %vcc, implicit %exec
+    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.uglygep45)
+    %60 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %61 = V_LSHLREV_B32_e32 2, killed %60, implicit $exec
+    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def $vcc, implicit $exec
     %66 = COPY %13
-    %65 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %65 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %67 = REG_SEQUENCE %70, 1, killed %65, 2
-    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp9)
-    %37 = S_ADD_U32 %14, 4, implicit-def %scc
-    %38 = S_ADDC_U32 %15, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp9)
+    %37 = S_ADD_U32 %14, 4, implicit-def $scc
+    %38 = S_ADDC_U32 %15, 0, implicit-def dead $scc, implicit $scc
     %71 = COPY killed %37
     %72 = COPY killed %38
     %41 = REG_SEQUENCE killed %71, 1, killed %72, 2
-    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.scevgep)
-    %73 = V_BFE_U32 %40, 8, 8, implicit %exec
-    %74 = V_LSHLREV_B32_e32 2, killed %73, implicit %exec
-    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def %vcc, implicit %exec
-    %78 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.scevgep)
+    %73 = V_BFE_U32 %40, 8, 8, implicit $exec
+    %74 = V_LSHLREV_B32_e32 2, killed %73, implicit $exec
+    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def $vcc, implicit $exec
+    %78 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %80 = REG_SEQUENCE %83, 1, killed %78, 2
-    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp17)
-    %55 = S_ADD_U32 %0.sub0, 8, implicit-def %scc
-    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp17)
+    %55 = S_ADD_U32 %0.sub0, 8, implicit-def $scc
+    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead $scc, implicit $scc
     %57 = REG_SEQUENCE %55, 1, killed %56, 2
     %1 = COPY %57
-    S_CMPK_EQ_I32 %55, 4096, implicit-def %scc
-    S_CBRANCH_SCC1 %bb.1.bb1, implicit %scc
+    S_CMPK_EQ_I32 %55, 4096, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.1.bb1, implicit $scc
     S_BRANCH %bb.2.bb2

...
@@ -345,7 +345,7 @@
   - { id: 83, class: vgpr_32 }
   - { id: 84, class: sreg_32_xm0 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -362,13 +362,13 @@
 body: |
   bb.0.bb:
     successors: %bb.2.bb2(0x80000000)
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5

-    %4 = COPY %sgpr4_sgpr5
+    %4 = COPY $sgpr4_sgpr5
     %9 = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8 = S_MOV_B64 0
     %7 = COPY %9
-    %30 = V_MOV_B32_e32 1, implicit %exec
+    %30 = V_MOV_B32_e32 1, implicit $exec
     %84 = S_MOV_B32 2
     S_BRANCH %bb.2.bb2

@@ -380,36 +380,36 @@
     %0 = PHI %8, %bb.0.bb, %1, %bb.2.bb2
     %13 = COPY %7.sub1
-    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def %scc
-    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead %scc, implicit %scc
+    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def $scc
+    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead $scc, implicit $scc
     %16 = REG_SEQUENCE %14, 1, %15, 2
     %18 = COPY %16
-    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.uglygep45)
-    %60 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %61 = V_LSHLREV_B32_e32 %84, killed %60, implicit %exec
-    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def %vcc, implicit %exec
+    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.uglygep45)
+    %60 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %61 = V_LSHLREV_B32_e32 %84, killed %60, implicit $exec
+    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def $vcc, implicit $exec
     %66 = COPY %13
-    %65 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %65 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %67 = REG_SEQUENCE %70, 1, killed %65, 2
-    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp9)
-    %37 = S_ADD_U32 %14, 4, implicit-def %scc
-    %38 = S_ADDC_U32 %15, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp9)
+    %37 = S_ADD_U32 %14, 4, implicit-def $scc
+    %38 = S_ADDC_U32 %15, 0, implicit-def dead $scc, implicit $scc
     %71 = COPY killed %37
     %72 = COPY killed %38
     %41 = REG_SEQUENCE killed %71, 1, killed %72, 2
-    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.scevgep)
-    %73 = V_BFE_U32 %40, 8, 8, implicit %exec
-    %74 = V_LSHLREV_B32_e32 %84, killed %73, implicit %exec
-    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def %vcc, implicit %exec
-    %78 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.scevgep)
+    %73 = V_BFE_U32 %40, 8, 8, implicit $exec
+    %74 = V_LSHLREV_B32_e32 %84, killed %73, implicit $exec
+    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def $vcc, implicit $exec
+    %78 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %80 = REG_SEQUENCE %83, 1, killed %78, 2
-    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp17)
-    %55 = S_ADD_U32 %0.sub0, 8, implicit-def %scc
-    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp17)
+    %55 = S_ADD_U32 %0.sub0, 8, implicit-def $scc
+    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead $scc, implicit $scc
     %57 = REG_SEQUENCE %55, 1, killed %56, 2
     %1 = COPY %57
-    S_CMPK_EQ_I32 %55, 4096, implicit-def %scc
-    S_CBRANCH_SCC1 %bb.1.bb1, implicit %scc
+    S_CMPK_EQ_I32 %55, 4096, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.1.bb1, implicit $scc
     S_BRANCH %bb.2.bb2

...
Index: test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
===================================================================
--- test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
+++ test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
@@ -6,10 +6,10 @@

 # GCN-LABEL: {{^}}name: vop2_64bit

-# GCN: %{{[0-9]+}}:vgpr_32 = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:vgpr_32 = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:sgpr_32 = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:sgpr_32 = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def $vcc, implicit $exec

 ---
 name: vop2_64bit
@@ -36,26 +36,26 @@
   - { id: 20, class: vgpr_32 }
 body: |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31

-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)

-    %12 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %13 = V_BCNT_U32_B32_e64 %3, killed %12, implicit-def %vcc, implicit %exec
+    %12 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %13 = V_BCNT_U32_B32_e64 %3, killed %12, implicit-def $vcc, implicit $exec

-    %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
-    %15 = V_BFM_B32_e64 %13, killed %14, implicit-def %vcc, implicit %exec
+    %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
+    %15 = V_BFM_B32_e64 %13, killed %14, implicit-def $vcc, implicit $exec

-    %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
-    %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def %vcc, implicit %exec
+    %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
+    %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def $vcc, implicit $exec

-    %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
-    %19 = V_READLANE_B32 killed %18, 0, implicit-def %vcc, implicit %exec
-    %20 = V_MOV_B32_e64 %19, implicit %exec
+    %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
+    %19 = V_READLANE_B32 killed %18, 0, implicit-def $vcc, implicit $exec
+    %20 = V_MOV_B32_e64 %19, implicit $exec

-    FLAT_STORE_DWORD %0, %20, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %20, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
Index: test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
===================================================================
--- test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
+++ test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
@@ -7,14 +7,14 @@
 name: m0_sendmsg
 body: |
   ; GCN-LABEL: name: m0_sendmsg
-  ; GCN: %m0 = S_MOV_B32 -1
+  ; GCN: $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_SENDMSG 3, implicit %exec, implicit %m0
+  ; GCN-NEXT: S_SENDMSG 3, implicit $exec, implicit $m0

   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_SENDMSG 3, implicit %exec, implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_SENDMSG 3, implicit $exec, implicit $m0
     S_ENDPGM
 ...
 ---
@@ -22,14 +22,14 @@
 name: m0_sendmsghalt
 body: |
   ; GCN-LABEL: name: m0_sendmsghalt
-  ; GCN: %m0 = S_MOV_B32 -1
+  ; GCN: $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_SENDMSGHALT 3, implicit %exec, implicit %m0
+  ; GCN-NEXT: S_SENDMSGHALT 3, implicit $exec, implicit $m0

   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_SENDMSGHALT 3, implicit %exec, implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_SENDMSGHALT 3, implicit $exec, implicit $m0
     S_ENDPGM
 ...
 ---
@@ -37,13 +37,13 @@
 name: m0_ttracedata
 body: |
   ; GCN-LABEL: name: m0_ttracedata
-  ; GCN: %m0 = S_MOV_B32 -1
+  ; GCN: $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_TTRACEDATA implicit %m0
+  ; GCN-NEXT: S_TTRACEDATA implicit $m0

   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_TTRACEDATA implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_TTRACEDATA implicit $m0
     S_ENDPGM
 ...
Index: test/CodeGen/AMDGPU/shrink-carry.mir
===================================================================
--- test/CodeGen/AMDGPU/shrink-carry.mir
+++ test/CodeGen/AMDGPU/shrink-carry.mir
@@ -91,7 +91,7 @@
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_ADDC_U32_e64 %0, 0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_ADDC_U32_e64 %0, 0, %3, implicit $exec

...
Index: test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
===================================================================
--- test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
+++ test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
@@ -8,8 +8,8 @@
 ...

 # GCN-LABEL: name: shrink_add_vop3{{$}}
-# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %19, %17, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
+# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %19, %17, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
 name: shrink_add_vop3
 alignment: 0
 exposesReturnsTwice: false
@@ -49,8 +49,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -67,32 +67,32 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %9 = V_ADD_I32_e64 %19, %17, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, %9 = V_ADD_I32_e64 %19, %17, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM

...
 ---
 # GCN-LABEL: name: shrink_sub_vop3{{$}}
-# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUB_I32_e64 %19, %17, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
+# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUB_I32_e64 %19, %17, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec

 name: shrink_sub_vop3
 alignment: 0
@@ -133,8 +133,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -151,32 +151,32 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %9 = V_SUB_I32_e64 %19, %17, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, %9 = V_SUB_I32_e64 %19, %17, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM

...
 ---
 # GCN-LABEL: name: shrink_subrev_vop3{{$}}
-# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUBREV_I32_e64 %19, %17, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
+# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUBREV_I32_e64 %19, %17, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec

 name: shrink_subrev_vop3
 alignment: 0
@@ -217,8 +217,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -235,32 +235,32 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %9 = V_SUBREV_I32_e64 %19, %17, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, %9 = V_SUBREV_I32_e64 %19, %17, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM

...
 ---
 # GCN-LABEL: name: check_addc_src2_vop3{{$}}
-# GCN: %29:vgpr_32, %vcc = V_ADDC_U32_e64 %19, %17, %9, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
+# GCN: %29:vgpr_32, $vcc = V_ADDC_U32_e64 %19, %17, %9, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
 name: check_addc_src2_vop3
 alignment: 0
 exposesReturnsTwice: false
@@ -300,8 +300,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -318,33 +318,33 @@
   hasMustTailInVarArgFunc: false
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0

-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
     %9 = S_MOV_B64 0
-    %29, %vcc = V_ADDC_U32_e64 %19, %17, %9, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %29, $vcc = V_ADDC_U32_e64 %19, %17, %9, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM

...
--- # GCN-LABEL: name: shrink_addc_vop3{{$}} -# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def %vcc, implicit %vcc, implicit %exec -# GCN %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec +# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def $vcc, implicit $vcc, implicit $exec +# GCN: %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec name: shrink_addc_vop3 alignment: 0 @@ -385,8 +385,8 @@ - { id: 28, class: vreg_64 } - { id: 29, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -403,34 +403,34 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 - %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec %27 = REG_SEQUENCE %3, 1, %26, 2 %10 = S_MOV_B32 61440 %11 = S_MOV_B32 0 %12 = REG_SEQUENCE killed %11, 1, killed %10, 2 %13 = REG_SEQUENCE killed %5, 17, %12, 18 - %28 = V_LSHL_B64 killed %27, 2, implicit %exec + %28 = V_LSHL_B64 killed %27, 2, implicit $exec %16 = REG_SEQUENCE killed %4, 17, %12, 18 - %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec - %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec - %vcc = S_MOV_B64 0 - %29, %vcc = V_ADDC_U32_e64 %19, %17, %vcc, implicit %exec - %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec - BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec + %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec + %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec + $vcc = S_MOV_B64 0 + %29, $vcc = V_ADDC_U32_e64 %19, %17, $vcc, implicit $exec + %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec + BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ...
--- # GCN-LABEL: name: shrink_addc_undef_vcc{{$}} -# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def %vcc, implicit undef %vcc, implicit %exec -# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec +# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def $vcc, implicit undef $vcc, implicit $exec +# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec name: shrink_addc_undef_vcc alignment: 0 exposesReturnsTwice: false @@ -470,8 +470,8 @@ - { id: 28, class: vreg_64 } - { id: 29, class: vgpr_32 } liveins: - - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' } + - { reg: '$vgpr0', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -488,25 +488,25 @@ hasMustTailInVarArgFunc: false body: | bb.0: - liveins: %sgpr0_sgpr1, %vgpr0 + liveins: $sgpr0_sgpr1, $vgpr0 - %3 = COPY %vgpr0 - %0 = COPY %sgpr0_sgpr1 + %3 = COPY $vgpr0 + %0 = COPY $sgpr0_sgpr1 %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 - %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec + %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec %27 = REG_SEQUENCE %3, 1, %26, 2 %10 = S_MOV_B32 61440 %11 = S_MOV_B32 0 %12 = REG_SEQUENCE killed %11, 1, killed %10, 2 %13 = REG_SEQUENCE killed %5, 17, %12, 18 - %28 = V_LSHL_B64 killed %27, 2, implicit %exec + %28 = V_LSHL_B64 killed %27, 2, implicit $exec %16 = REG_SEQUENCE killed %4, 17, %12, 18 - %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec - %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec - %29, %vcc = V_ADDC_U32_e64 %19, %17, undef %vcc, implicit %exec - %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec - BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec + %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec + %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec + %29, $vcc = V_ADDC_U32_e64 %19, %17, undef $vcc, implicit $exec + %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec + BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... Index: test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir =================================================================== --- test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir +++ test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir @@ -19,21 +19,21 @@ ; GCN-LABEL: name: phi_visit_order ; GCN: V_ADD_I32 bb.0: - liveins: %vgpr0 - %7 = COPY %vgpr0 + liveins: $vgpr0 + %7 = COPY $vgpr0 %8 = S_MOV_B32 0 bb.1: %0 = PHI %8, %bb.0, %0, %bb.1, %2, %bb.2 - %9 = V_MOV_B32_e32 9, implicit %exec - %10 = V_CMP_EQ_U32_e64 %7, %9, implicit %exec - %1 = SI_IF %10, %bb.2, implicit-def %exec, implicit-def %scc, implicit %exec + %9 = V_MOV_B32_e32 9, implicit $exec + %10 = V_CMP_EQ_U32_e64 %7, %9, implicit $exec + %1 = SI_IF %10, %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec S_BRANCH %bb.1 bb.2: - SI_END_CF %1, implicit-def %exec, implicit-def %scc, implicit %exec + SI_END_CF %1, implicit-def $exec, implicit-def $scc, implicit $exec %11 = S_MOV_B32 1 - %2 = S_ADD_I32 %0, %11, implicit-def %scc + %2 = S_ADD_I32 %0, %11, implicit-def $scc S_BRANCH %bb.1 ... 
Index: test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll =================================================================== --- test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll +++ test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll @@ -3,7 +3,7 @@ ; register operands in the correct order when modifying the opcode of an ; instruction to V_ADD_I32_e32. -; CHECK: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 %{{[0-9]+}}, %{{[0-9]+}}, implicit-def %vcc, implicit %exec +; CHECK: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 %{{[0-9]+}}, %{{[0-9]+}}, implicit-def $vcc, implicit $exec define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) { entry: Index: test/CodeGen/AMDGPU/spill-empty-live-interval.mir =================================================================== --- test/CodeGen/AMDGPU/spill-empty-live-interval.mir +++ test/CodeGen/AMDGPU/spill-empty-live-interval.mir @@ -7,13 +7,13 @@ # CHECK-LABEL: name: expecting_non_empty_interval -# CHECK: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit %exec -# CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (store 8 into %stack.0, align 4) -# CHECK-NEXT: undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit %exec -# CHECK-NEXT: dead %2:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit %exec +# CHECK: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit $exec +# CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (store 8 into %stack.0, align 4) +# CHECK-NEXT: undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec +# CHECK-NEXT: dead %2:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit $exec # CHECK: S_NOP 0, implicit %6.sub1 -# CHECK-NEXT: %8:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (load 8 from %stack.0, align 4) +# CHECK-NEXT: %8:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (load 8 from %stack.0, align 4) # CHECK-NEXT: S_NOP 0, implicit %8.sub1 # CHECK-NEXT: S_NOP 0, implicit undef %9.sub0 @@ -27,9 +27,9 @@ body: | bb.0: successors: %bb.1 - undef %0.sub1 = V_MAC_F32_e32 0, undef %1, undef %0.sub1, implicit %exec - undef %3.sub1 = V_MOV_B32_e32 1786773504, implicit %exec - dead %2 = V_MUL_F32_e32 0, %3.sub1, implicit %exec + undef %0.sub1 = V_MAC_F32_e32 0, undef %1, undef %0.sub1, implicit $exec + undef %3.sub1 = V_MOV_B32_e32 1786773504, implicit $exec + dead %2 = V_MUL_F32_e32 0, %3.sub1, implicit $exec bb.1: S_NOP 0, implicit %3.sub1 @@ -44,12 +44,12 @@ # CHECK-LABEL: name: rematerialize_empty_interval_has_reference # CHECK-NOT: MOV -# CHECK: undef %3.sub2:vreg_128 = V_MOV_B32_e32 1786773504, implicit %exec +# CHECK: undef %3.sub2:vreg_128 = V_MOV_B32_e32 1786773504, implicit $exec # CHECK: bb.1: # CHECK-NEXT: S_NOP 0, implicit %3.sub2 # CHECK-NEXT: S_NOP 0, implicit undef %6.sub0 -# CHECK-NEXT: undef %4.sub2:vreg_128 = V_MOV_B32_e32 0, implicit %exec +# CHECK-NEXT: undef %4.sub2:vreg_128 = V_MOV_B32_e32 0, implicit $exec # CHECK-NEXT: S_NOP 0, implicit %4.sub2 name: rematerialize_empty_interval_has_reference tracksRegLiveness: true @@ -62,8 +62,8 @@ bb.0: successors: %bb.1 - undef %0.sub2 = V_MOV_B32_e32 0, implicit %exec - undef %3.sub2 = V_MOV_B32_e32 1786773504, implicit %exec + undef %0.sub2 = V_MOV_B32_e32 0, implicit $exec + undef %3.sub2 = V_MOV_B32_e32 1786773504, implicit $exec bb.1: S_NOP 0, implicit %3.sub2 Index: 
test/CodeGen/AMDGPU/splitkit.mir =================================================================== --- test/CodeGen/AMDGPU/splitkit.mir +++ test/CodeGen/AMDGPU/splitkit.mir @@ -22,7 +22,7 @@ S_NOP 0, implicit-def %0.sub3 : sreg_128 ; Clobber registers - S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11 + S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1, implicit-def dead $sgpr2, implicit-def dead $sgpr3, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11 S_NOP 0, implicit %0.sub0 S_NOP 0, implicit %0.sub3 @@ -34,31 +34,31 @@ # allocated to sgpr0_sgpr1 and the first to something else so we see two copies # in between for the two subregisters that are alive. # CHECK-LABEL: name: func1 -# CHECK: [[REG0:%sgpr[0-9]+]] = COPY %sgpr0 -# CHECK: [[REG1:%sgpr[0-9]+]] = COPY %sgpr2 +# CHECK: [[REG0:\$sgpr[0-9]+]] = COPY $sgpr0 +# CHECK: [[REG1:\$sgpr[0-9]+]] = COPY $sgpr2 # CHECK: S_NOP 0 # CHECK: S_NOP 0, implicit renamable [[REG0]] # CHECK: S_NOP 0, implicit renamable [[REG1]] -# CHECK: %sgpr0 = COPY renamable [[REG0]] -# CHECK: %sgpr2 = COPY renamable [[REG1]] +# CHECK: $sgpr0 = COPY renamable [[REG0]] +# CHECK: $sgpr2 = COPY renamable [[REG1]] # CHECK: S_NOP -# CHECK: S_NOP 0, implicit renamable %sgpr0 -# CHECK: S_NOP 0, implicit renamable %sgpr2 +# CHECK: S_NOP 0, implicit renamable $sgpr0 +# CHECK: S_NOP 0, implicit renamable $sgpr2 name: func1 tracksRegLiveness: true body: | bb.0: - liveins: %sgpr0, %sgpr1, %sgpr2 - undef %0.sub0 : sreg_128 = COPY %sgpr0 - %0.sub2 = COPY %sgpr2 + liveins: $sgpr0, $sgpr1, $sgpr2 + undef %0.sub0 : sreg_128 = COPY $sgpr0 + %0.sub2 = COPY $sgpr2 - S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1 + S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1 S_NOP 0, implicit %0.sub0 S_NOP 0, implicit %0.sub2 ; Clobber everything but sgpr0-sgpr3 - S_NOP 0, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11, implicit-def dead %sgpr12, implicit-def dead %sgpr13, implicit-def dead %sgpr14, implicit-def dead %sgpr15, implicit-def dead %vcc_lo, implicit-def dead %vcc_hi + S_NOP 0, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11, implicit-def dead $sgpr12, implicit-def dead $sgpr13, implicit-def dead $sgpr14, implicit-def dead $sgpr15, implicit-def dead $vcc_lo, implicit-def dead $vcc_hi S_NOP 0, implicit %0.sub0 S_NOP 0, implicit %0.sub2 @@ -67,8 +67,8 @@ # Check that copy hoisting out of loops works. This mainly should not crash the # compiler when it hoists a subreg copy sequence. 
# CHECK-LABEL: name: splitHoist -# CHECK: S_NOP 0, implicit-def renamable %sgpr0 -# CHECK: S_NOP 0, implicit-def renamable %sgpr3 +# CHECK: S_NOP 0, implicit-def renamable $sgpr0 +# CHECK: S_NOP 0, implicit-def renamable $sgpr3 # CHECK-NEXT: SI_SPILL_S128_SAVE name: splitHoist tracksRegLiveness: true @@ -78,7 +78,7 @@ S_NOP 0, implicit-def undef %0.sub0 : sreg_128 S_NOP 0, implicit-def %0.sub3 : sreg_128 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.1: @@ -86,15 +86,15 @@ S_NOP 0, implicit %0.sub0 ; Clobber registers - S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11 + S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1, implicit-def dead $sgpr2, implicit-def dead $sgpr3, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11 - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.3 bb.2: successors: %bb.3 ; Clobber registers - S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11 + S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1, implicit-def dead $sgpr2, implicit-def dead $sgpr3, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11 S_BRANCH %bb.3 bb.3: Index: test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir =================================================================== --- test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir +++ test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir @@ -25,10 +25,10 @@ body: | bb.0: - %0 = FLAT_LOAD_DWORD undef %vgpr0_vgpr1, 0, 0, 0, implicit %flat_scr, implicit %exec - %2 = FLAT_LOAD_DWORD undef %vgpr0_vgpr1, 0, 0, 0, implicit %flat_scr, implicit %exec + %0 = FLAT_LOAD_DWORD undef $vgpr0_vgpr1, 0, 0, 0, implicit $flat_scr, implicit $exec + %2 = FLAT_LOAD_DWORD undef $vgpr0_vgpr1, 0, 0, 0, implicit $flat_scr, implicit $exec S_NOP 0, implicit %0 - %1 = S_LOAD_DWORD_IMM undef %sgpr0_sgpr1, 0, 0 - %3 = S_LOAD_DWORD_IMM undef %sgpr0_sgpr1, 0, 0 + %1 = S_LOAD_DWORD_IMM undef $sgpr0_sgpr1, 0, 0 + %3 = S_LOAD_DWORD_IMM undef $sgpr0_sgpr1, 0, 0 S_NOP 0, implicit %1 ... 
Index: test/CodeGen/AMDGPU/subreg-intervals.mir =================================================================== --- test/CodeGen/AMDGPU/subreg-intervals.mir +++ test/CodeGen/AMDGPU/subreg-intervals.mir @@ -31,7 +31,7 @@ - { id: 0, class: sreg_64 } body: | bb.0: - S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc S_BRANCH %bb.2 bb.1: Index: test/CodeGen/AMDGPU/subreg_interference.mir =================================================================== --- test/CodeGen/AMDGPU/subreg_interference.mir +++ test/CodeGen/AMDGPU/subreg_interference.mir @@ -12,12 +12,12 @@ # sgpr0-sgpr3. # # CHECK-LABEL: func0 -# CHECK: S_NOP 0, implicit-def renamable %sgpr0 -# CHECK: S_NOP 0, implicit-def renamable %sgpr3 -# CHECK: S_NOP 0, implicit-def renamable %sgpr1 -# CHECK: S_NOP 0, implicit-def renamable %sgpr2 -# CHECK: S_NOP 0, implicit renamable %sgpr0, implicit renamable %sgpr3 -# CHECK: S_NOP 0, implicit renamable %sgpr1, implicit renamable %sgpr2 +# CHECK: S_NOP 0, implicit-def renamable $sgpr0 +# CHECK: S_NOP 0, implicit-def renamable $sgpr3 +# CHECK: S_NOP 0, implicit-def renamable $sgpr1 +# CHECK: S_NOP 0, implicit-def renamable $sgpr2 +# CHECK: S_NOP 0, implicit renamable $sgpr0, implicit renamable $sgpr3 +# CHECK: S_NOP 0, implicit renamable $sgpr1, implicit renamable $sgpr2 name: func0 body: | bb.0: Index: test/CodeGen/AMDGPU/syncscopes.ll =================================================================== --- test/CodeGen/AMDGPU/syncscopes.ll +++ test/CodeGen/AMDGPU/syncscopes.ll @@ -1,9 +1,9 @@ ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -stop-before=si-debugger-insert-nops < %s | FileCheck --check-prefix=GCN %s ; GCN-LABEL: name: syncscopes -; GCN: FLAT_STORE_DWORD killed renamable %vgpr1_vgpr2, killed renamable %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4) -; GCN: FLAT_STORE_DWORD killed renamable %vgpr4_vgpr5, killed renamable %vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4) -; GCN: FLAT_STORE_DWORD killed renamable %vgpr7_vgpr8, killed renamable %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4) +; GCN: FLAT_STORE_DWORD killed renamable $vgpr1_vgpr2, killed renamable $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4) +; GCN: FLAT_STORE_DWORD killed renamable $vgpr4_vgpr5, killed renamable $vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4) +; GCN: FLAT_STORE_DWORD killed renamable $vgpr7_vgpr8, killed renamable $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4) define void @syncscopes( i32 %agent, i32 addrspace(4)* %agent_out, Index: test/CodeGen/AMDGPU/twoaddr-mad.mir =================================================================== --- test/CodeGen/AMDGPU/twoaddr-mad.mir +++ test/CodeGen/AMDGPU/twoaddr-mad.mir @@ -1,7 +1,7 @@ # RUN: llc -march=amdgcn %s -run-pass twoaddressinstruction -verify-machineinstrs -o - | FileCheck -check-prefix=GCN %s # GCN-LABEL: name: test_madmk_reg_imm_f32 -# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit %exec +# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, 
implicit $exec --- name: test_madmk_reg_imm_f32 registers: @@ -14,13 +14,13 @@ %0 = IMPLICIT_DEF %1 = COPY %0.sub1 - %2 = V_MOV_B32_e32 1078523331, implicit %exec - %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit %exec + %2 = V_MOV_B32_e32 1078523331, implicit $exec + %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $exec ... # GCN-LABEL: name: test_madmk_imm_reg_f32 -# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit %exec +# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec --- name: test_madmk_imm_reg_f32 registers: @@ -33,13 +33,13 @@ %0 = IMPLICIT_DEF %1 = COPY %0.sub1 - %2 = V_MOV_B32_e32 1078523331, implicit %exec - %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit %exec + %2 = V_MOV_B32_e32 1078523331, implicit $exec + %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $exec ... # GCN-LABEL: name: test_madak_f32 -# GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit %exec +# GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $exec --- name: test_madak_f32 registers: @@ -50,13 +50,13 @@ bb.0: %0 = IMPLICIT_DEF - %1 = V_MOV_B32_e32 1078523331, implicit %exec - %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit %exec + %1 = V_MOV_B32_e32 1078523331, implicit $exec + %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $exec ... # GCN-LABEL: name: test_madmk_reg_imm_f16 -# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit %exec +# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec --- name: test_madmk_reg_imm_f16 registers: @@ -69,13 +69,13 @@ %0 = IMPLICIT_DEF %1 = COPY %0.sub1 - %2 = V_MOV_B32_e32 1078523331, implicit %exec - %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit %exec + %2 = V_MOV_B32_e32 1078523331, implicit $exec + %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $exec ... # GCN-LABEL: name: test_madmk_imm_reg_f16 -# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit %exec +# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec --- name: test_madmk_imm_reg_f16 registers: @@ -88,13 +88,13 @@ %0 = IMPLICIT_DEF %1 = COPY %0.sub1 - %2 = V_MOV_B32_e32 1078523331, implicit %exec - %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit %exec + %2 = V_MOV_B32_e32 1078523331, implicit $exec + %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $exec ... # GCN-LABEL: name: test_madak_f16 -# GCN: V_MADAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit %exec +# GCN: V_MADAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit $exec --- name: test_madak_f16 registers: @@ -105,15 +105,15 @@ bb.0: %0 = IMPLICIT_DEF - %1 = V_MOV_B32_e32 1078523331, implicit %exec - %2 = V_MAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit %exec + %1 = V_MOV_B32_e32 1078523331, implicit $exec + %2 = V_MAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit $exec ... # Make sure constant bus restriction isn't violated if src0 is an SGPR. 
# GCN-LABEL: name: test_madak_sgpr_src0_f32 -# GCN: %1:vgpr_32 = V_MOV_B32_e32 1078523331, implicit %exec -# GCN: %2:vgpr_32 = V_MAD_F32 0, killed %0, 0, %1, 0, %3:vgpr_32, 0, 0, implicit %exec +# GCN: %1:vgpr_32 = V_MOV_B32_e32 1078523331, implicit $exec +# GCN: %2:vgpr_32 = V_MAD_F32 0, killed %0, 0, %1, 0, %3:vgpr_32, 0, 0, implicit $exec --- name: test_madak_sgpr_src0_f32 @@ -126,15 +126,15 @@ bb.0: %0 = IMPLICIT_DEF - %1 = V_MOV_B32_e32 1078523331, implicit %exec - %2 = V_MAC_F32_e32 killed %0, %1, %3, implicit %exec + %1 = V_MOV_B32_e32 1078523331, implicit $exec + %2 = V_MAC_F32_e32 killed %0, %1, %3, implicit $exec ... # This can still fold if this is an inline immediate. # GCN-LABEL: name: test_madak_inlineimm_src0_f32 -# GCN: %1:vgpr_32 = V_MADMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit %exec +# GCN: %1:vgpr_32 = V_MADMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit $exec --- name: test_madak_inlineimm_src0_f32 @@ -145,14 +145,14 @@ body: | bb.0: - %0 = V_MOV_B32_e32 1078523331, implicit %exec - %1 = V_MAC_F32_e32 1073741824, %0, %2, implicit %exec + %0 = V_MOV_B32_e32 1078523331, implicit $exec + %1 = V_MAC_F32_e32 1073741824, %0, %2, implicit $exec ... # Non-inline immediate uses constant bus already. # GCN-LABEL: name: test_madak_otherimm_src0_f32 -# GCN: %1:vgpr_32 = V_MAC_F32_e32 1120403456, %0, %1, implicit %exec +# GCN: %1:vgpr_32 = V_MAC_F32_e32 1120403456, %0, %1, implicit $exec --- name: test_madak_otherimm_src0_f32 @@ -163,14 +163,14 @@ body: | bb.0: - %0 = V_MOV_B32_e32 1078523331, implicit %exec - %1 = V_MAC_F32_e32 1120403456, %0, %2, implicit %exec + %0 = V_MOV_B32_e32 1078523331, implicit $exec + %1 = V_MAC_F32_e32 1120403456, %0, %2, implicit $exec ... # Non-inline immediate uses constant bus already. # GCN-LABEL: name: test_madak_other_constantlike_src0_f32 -# GCN: %1:vgpr_32 = V_MAC_F32_e32 %stack.0, %0, %1, implicit %exec +# GCN: %1:vgpr_32 = V_MAC_F32_e32 %stack.0, %0, %1, implicit $exec --- name: test_madak_other_constantlike_src0_f32 registers: @@ -184,7 +184,7 @@ body: | bb.0: - %0 = V_MOV_B32_e32 1078523331, implicit %exec - %1 = V_MAC_F32_e32 %stack.0, %0, %2, implicit %exec + %0 = V_MOV_B32_e32 1078523331, implicit $exec + %1 = V_MAC_F32_e32 %stack.0, %0, %2, implicit $exec ... Index: test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir =================================================================== --- test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir +++ test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir @@ -17,10 +17,10 @@ # leaving a spill of the undefined register. 
# CHECK-LABEL: name: undefined_physreg_sgpr_spill -# CHECK: %sgpr0_sgpr1 = COPY %exec, implicit-def %exec -# CHECK-NEXT: SI_SPILL_S64_SAVE %sgpr0_sgpr1, -# CHECK-NEXT: %sgpr2_sgpr3 = S_AND_B64 killed %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc -# CHECK: %exec = COPY killed %sgpr2_sgpr3 +# CHECK: $sgpr0_sgpr1 = COPY $exec, implicit-def $exec +# CHECK-NEXT: SI_SPILL_S64_SAVE $sgpr0_sgpr1, +# CHECK-NEXT: $sgpr2_sgpr3 = S_AND_B64 killed $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc +# CHECK: $exec = COPY killed $sgpr2_sgpr3 name: undefined_physreg_sgpr_spill alignment: 0 exposesReturnsTwice: false @@ -30,8 +30,8 @@ tracksRegLiveness: true registers: liveins: - - { reg: '%vgpr0', virtual-reg: '' } - - { reg: '%sgpr4_sgpr5', virtual-reg: '' } + - { reg: '$vgpr0', virtual-reg: '' } + - { reg: '$sgpr4_sgpr5', virtual-reg: '' } stack: - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4, stack-id: 1, callee-saved-register: '', callee-saved-restored: true, @@ -40,39 +40,39 @@ body: | bb.0: successors: %bb.1, %bb.2 - liveins: %vgpr0, %sgpr4_sgpr5 - - %vgpr1_vgpr2 = COPY killed %sgpr4_sgpr5, implicit %exec - %vgpr1 = GLOBAL_LOAD_UBYTE killed %vgpr1_vgpr2, 0, 0, 0, implicit %exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`) - %vcc = V_CMP_NE_U32_e64 0, %vgpr0, implicit %exec - %sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed %vgpr1, implicit %exec - %vgpr1 = V_CNDMASK_B32_e64 0, -1, killed %sgpr0_sgpr1, implicit %exec - %sgpr0_sgpr1 = COPY %exec, implicit-def %exec - SI_SPILL_S64_SAVE %sgpr0_sgpr1, %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (store 8 into %stack.0, align 4) - %sgpr2_sgpr3 = S_AND_B64 killed %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0, $sgpr4_sgpr5 + + $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec + $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`) + $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec + $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec + $vgpr1 = V_CNDMASK_B32_e64 0, -1, killed $sgpr0_sgpr1, implicit $exec + $sgpr0_sgpr1 = COPY $exec, implicit-def $exec + SI_SPILL_S64_SAVE $sgpr0_sgpr1, %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (store 8 into %stack.0, align 4) + $sgpr2_sgpr3 = S_AND_B64 killed $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1: successors: %bb.3(0x80000000) - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 - %sgpr2_sgpr3 = S_MOV_B64 0 - %vgpr2 = V_MOV_B32_e32 0, implicit %exec - %sgpr4_sgpr5 = IMPLICIT_DEF + $sgpr2_sgpr3 = S_MOV_B64 0 + $vgpr2 = V_MOV_B32_e32 0, implicit $exec + $sgpr4_sgpr5 = IMPLICIT_DEF S_BRANCH %bb.3 bb.2: successors: - %sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (load 8 from %stack.0, align 4) - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc + $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (load 8 from %stack.0, align 4) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc bb.3: - liveins: %vgpr0, 
%vgpr1, %vgpr2, %sgpr2_sgpr3, %sgpr4_sgpr5 + liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr2_sgpr3, $sgpr4_sgpr5 - %vcc = COPY %vgpr1 + $vcc = COPY $vgpr1 S_ENDPGM ... @@ -80,10 +80,10 @@ # Move spill to after future save instruction # CHECK-LABEL: {{^}}name: undefined_physreg_sgpr_spill_reorder -# CHECK: %sgpr0_sgpr1 = COPY %exec, implicit-def %exec -# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc -# CHECK: SI_SPILL_S64_SAVE killed %sgpr0_sgpr1, %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (store 8 into %stack.0, align 4) -# CHECK: %exec = COPY killed %sgpr2_sgpr3 +# CHECK: $sgpr0_sgpr1 = COPY $exec, implicit-def $exec +# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc +# CHECK: SI_SPILL_S64_SAVE killed $sgpr0_sgpr1, %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (store 8 into %stack.0, align 4) +# CHECK: $exec = COPY killed $sgpr2_sgpr3 name: undefined_physreg_sgpr_spill_reorder alignment: 0 exposesReturnsTwice: false @@ -93,8 +93,8 @@ tracksRegLiveness: true registers: liveins: - - { reg: '%vgpr0', virtual-reg: '' } - - { reg: '%sgpr4_sgpr5', virtual-reg: '' } + - { reg: '$vgpr0', virtual-reg: '' } + - { reg: '$sgpr4_sgpr5', virtual-reg: '' } stack: - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4, stack-id: 1, callee-saved-register: '', callee-saved-restored: true, @@ -103,39 +103,39 @@ body: | bb.0: successors: %bb.1, %bb.2 - liveins: %vgpr0, %sgpr4_sgpr5 - - %vgpr1_vgpr2 = COPY killed %sgpr4_sgpr5, implicit %exec - %vgpr1 = GLOBAL_LOAD_UBYTE killed %vgpr1_vgpr2, 0, 0, 0, implicit %exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`) - %vcc = V_CMP_NE_U32_e64 0, %vgpr0, implicit %exec - %sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed %vgpr1, implicit %exec - %vgpr1 = V_CNDMASK_B32_e64 0, -1, killed %sgpr0_sgpr1, implicit %exec - %sgpr0_sgpr1 = COPY %exec, implicit-def %exec - %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc - SI_SPILL_S64_SAVE killed %sgpr0_sgpr1, %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (store 8 into %stack.0, align 4) - %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2, implicit %exec + liveins: $vgpr0, $sgpr4_sgpr5 + + $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec + $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`) + $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec + $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec + $vgpr1 = V_CNDMASK_B32_e64 0, -1, killed $sgpr0_sgpr1, implicit $exec + $sgpr0_sgpr1 = COPY $exec, implicit-def $exec + $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc + SI_SPILL_S64_SAVE killed $sgpr0_sgpr1, %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (store 8 into %stack.0, align 4) + $exec = S_MOV_B64_term killed $sgpr2_sgpr3 + SI_MASK_BRANCH %bb.2, implicit $exec S_BRANCH %bb.1 bb.1: successors: %bb.3(0x80000000) - liveins: %vgpr0, %vgpr1 + liveins: $vgpr0, $vgpr1 - %sgpr2_sgpr3 = S_MOV_B64 0 - %vgpr2 = V_MOV_B32_e32 0, implicit %exec - %sgpr4_sgpr5 = IMPLICIT_DEF + $sgpr2_sgpr3 = S_MOV_B64 0 + $vgpr2 = V_MOV_B32_e32 0, implicit $exec + $sgpr4_sgpr5 = IMPLICIT_DEF S_BRANCH %bb.3 bb.2: successors: - 
%sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (load 8 from %stack.0, align 4) - %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc + $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (load 8 from %stack.0, align 4) + $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc bb.3: - liveins: %vgpr0, %vgpr1, %vgpr2, %sgpr2_sgpr3, %sgpr4_sgpr5 + liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr2_sgpr3, $sgpr4_sgpr5 - %vcc = COPY %vgpr1 + $vcc = COPY $vgpr1 S_ENDPGM ... Index: test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir =================================================================== --- test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir +++ test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir @@ -46,9 +46,9 @@ ... --- # CHECK-LABEL: name: vccz_corrupt_workaround -# CHECK: %vcc = V_CMP_EQ_F32 -# CHECK-NEXT: %vcc = S_MOV_B64 %vcc -# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed %vcc +# CHECK: $vcc = V_CMP_EQ_F32 +# CHECK-NEXT: $vcc = S_MOV_B64 $vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed $vcc name: vccz_corrupt_workaround alignment: 0 @@ -58,7 +58,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -75,43 +75,43 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`) - %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec - S_CBRANCH_VCCZ %bb.1, implicit killed %vcc + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vcc = V_CMP_EQ_F32_e64 0, 0, 0, $sgpr2, 0, implicit $exec + S_CBRANCH_VCCZ %bb.1, implicit killed $vcc bb.2.if: - liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %vgpr0 = V_MOV_B32_e32 9, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) - %vgpr0 = V_MOV_B32_e32 0, implicit %exec + $vgpr0 = V_MOV_B32_e32 9, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + $vgpr0 = V_MOV_B32_e32 0, implicit $exec S_BRANCH %bb.3 bb.1.else: - liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %vgpr0 = V_MOV_B32_e32 100, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) - %vgpr0 = V_MOV_B32_e32 1, implicit %exec + $vgpr0 = V_MOV_B32_e32 100, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, 
killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + $vgpr0 = V_MOV_B32_e32 1, implicit $exec bb.3.done: - liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out) S_ENDPGM ... --- # CHECK-LABEL: name: vccz_corrupt_undef_vcc # CHECK: S_WAITCNT -# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef %vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc name: vccz_corrupt_undef_vcc alignment: 0 @@ -121,7 +121,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -138,34 +138,34 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - S_CBRANCH_VCCZ %bb.1, implicit undef %vcc + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + S_CBRANCH_VCCZ %bb.1, implicit undef $vcc bb.2.if: - liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %vgpr0 = V_MOV_B32_e32 9, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) - %vgpr0 = V_MOV_B32_e32 0, implicit %exec + $vgpr0 = V_MOV_B32_e32 9, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + $vgpr0 = V_MOV_B32_e32 0, implicit $exec S_BRANCH %bb.3 bb.1.else: - liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %vgpr0 = V_MOV_B32_e32 100, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) - %vgpr0 = V_MOV_B32_e32 1, implicit %exec + $vgpr0 = V_MOV_B32_e32 100, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + $vgpr0 = V_MOV_B32_e32 1, implicit $exec bb.3.done: - liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 - %sgpr3 = S_MOV_B32 61440 - %sgpr2 = S_MOV_B32 -1 - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + $sgpr3 = S_MOV_B32 61440 + $sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out) S_ENDPGM ... 
Index: test/CodeGen/AMDGPU/vop-shrink-frame-index.mir =================================================================== --- test/CodeGen/AMDGPU/vop-shrink-frame-index.mir +++ test/CodeGen/AMDGPU/vop-shrink-frame-index.mir @@ -35,7 +35,7 @@ # GCN-LABEL: name: fold_fi_vgpr{{$}} # GCN: %1:vgpr_32 = IMPLICIT_DEF -# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def $vcc, implicit $exec name: fold_fi_vgpr tracksRegLiveness: true registers: @@ -48,15 +48,15 @@ di-location: '' } body: | bb.0: - %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec + %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec %1 = IMPLICIT_DEF - %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec + %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec S_ENDPGM ... # GCN-LABEL: name: fold_vgpr_fi{{$}} # GCN: %1:vgpr_32 = IMPLICIT_DEF -# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def $vcc, implicit $exec name: fold_vgpr_fi tracksRegLiveness: true registers: @@ -69,16 +69,16 @@ di-location: '' } body: | bb.0: - %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec + %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec %1 = IMPLICIT_DEF - %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec + %2, $vcc = V_ADD_I32_e64 %1, %0, implicit $exec S_ENDPGM ... # GCN-LABEL: name: fold_sgpr_fi{{$}} -# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec +# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec # GCN: %1:sgpr_32 = IMPLICIT_DEF -# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def %vcc, implicit %exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def $vcc, implicit $exec name: fold_sgpr_fi tracksRegLiveness: true registers: @@ -91,16 +91,16 @@ di-location: '' } body: | bb.0: - %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec + %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec %1 = IMPLICIT_DEF - %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec + %2, $vcc = V_ADD_I32_e64 %1, %0, implicit $exec S_ENDPGM ... # GCN-LABEL: name: fold_fi_sgpr{{$}} -# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec +# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec # GCN: %1:sgpr_32 = IMPLICIT_DEF -# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def %vcc, implicit %exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def $vcc, implicit $exec name: fold_fi_sgpr tracksRegLiveness: true registers: @@ -113,15 +113,15 @@ di-location: '' } body: | bb.0: - %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec + %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec %1 = IMPLICIT_DEF - %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec + %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec S_ENDPGM ... 
# TODO: Should probably prefer folding immediate first # GCN-LABEL: name: fold_fi_imm{{$}} -# GCN: %1:vgpr_32 = V_MOV_B32_e32 999, implicit %exec -# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec +# GCN: %1:vgpr_32 = V_MOV_B32_e32 999, implicit $exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def $vcc, implicit $exec name: fold_fi_imm tracksRegLiveness: true registers: @@ -134,15 +134,15 @@ di-location: '' } body: | bb.0: - %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec - %1 = V_MOV_B32_e32 999, implicit %exec - %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec + %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec + %1 = V_MOV_B32_e32 999, implicit $exec + %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec S_ENDPGM ... # GCN-LABEL: name: fold_imm_fi{{$}} -# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec -# GCN: %2:vgpr_32 = V_ADD_I32_e32 999, %0, implicit-def %vcc, implicit %exec +# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 999, %0, implicit-def $vcc, implicit $exec name: fold_imm_fi tracksRegLiveness: true registers: @@ -155,7 +155,7 @@ di-location: '' } body: | bb.0: - %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec - %1 = V_MOV_B32_e32 999, implicit %exec - %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec + %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec + %1 = V_MOV_B32_e32 999, implicit $exec + %2, $vcc = V_ADD_I32_e64 %1, %0, implicit $exec S_ENDPGM Index: test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir =================================================================== --- test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir +++ test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir @@ -1,8 +1,8 @@ # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-shrink-instructions -o - %s | FileCheck -check-prefix=GCN %s ... # GCN-LABEL: name: fold_imm_non_ssa{{$}} -# GCN: %0:vgpr_32 = V_MOV_B32_e32 123, implicit %exec -# GCN: %2:vgpr_32 = V_ADD_I32_e32 456, %0, implicit-def %vcc, implicit %exec +# GCN: %0:vgpr_32 = V_MOV_B32_e32 123, implicit $exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 456, %0, implicit-def $vcc, implicit $exec name: fold_imm_non_ssa tracksRegLiveness: true @@ -14,15 +14,15 @@ body: | bb.0: %0 = COPY undef %0 - %0 = V_MOV_B32_e32 123, implicit %exec - %1 = V_MOV_B32_e32 456, implicit %exec - %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec + %0 = V_MOV_B32_e32 123, implicit $exec + %1 = V_MOV_B32_e32 456, implicit $exec + %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec S_ENDPGM ... # GCN-LABEL: name: fold_partially_defined_superreg{{$}} -# GCN: %1:vgpr_32 = V_MOV_B32_e32 456, implicit %exec -# GCN: %2:vgpr_32 = V_ADD_I32_e32 123, %1, implicit-def %vcc, implicit %exec +# GCN: %1:vgpr_32 = V_MOV_B32_e32 456, implicit $exec +# GCN: %2:vgpr_32 = V_ADD_I32_e32 123, %1, implicit-def $vcc, implicit $exec name: fold_partially_defined_superreg tracksRegLiveness: true registers: @@ -32,9 +32,9 @@ - { id: 3, class: vreg_64 } body: | bb.0: - undef %3.sub0 = V_MOV_B32_e32 123, implicit %exec, implicit-def %3 - %1 = V_MOV_B32_e32 456, implicit %exec - %2, %vcc = V_ADD_I32_e64 %3.sub0, %1, implicit %exec + undef %3.sub0 = V_MOV_B32_e32 123, implicit $exec, implicit-def %3 + %1 = V_MOV_B32_e32 456, implicit $exec + %2, $vcc = V_ADD_I32_e64 %3.sub0, %1, implicit $exec S_ENDPGM ... 
Index: test/CodeGen/AMDGPU/waitcnt-permute.mir =================================================================== --- test/CodeGen/AMDGPU/waitcnt-permute.mir +++ test/CodeGen/AMDGPU/waitcnt-permute.mir @@ -7,15 +7,15 @@ name: waitcnt-permute liveins: - - { reg: '%vgpr0' } - - { reg: '%vgpr1' } - - { reg: '%sgpr30_sgpr31' } + - { reg: '$vgpr0' } + - { reg: '$vgpr1' } + - { reg: '$sgpr30_sgpr31' } body: | bb.0: - liveins: %vgpr0, %vgpr1, %sgpr30_sgpr31 + liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31 - %vgpr0 = DS_BPERMUTE_B32 killed %vgpr0, killed %vgpr1, 0, implicit %exec - %vgpr0 = V_ADD_F32_e32 1065353216, killed %vgpr0, implicit %exec - S_SETPC_B64_return killed %sgpr30_sgpr31, implicit killed %vgpr0 + $vgpr0 = DS_BPERMUTE_B32 killed $vgpr0, killed $vgpr1, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 1065353216, killed $vgpr0, implicit $exec + S_SETPC_B64_return killed $sgpr30_sgpr31, implicit killed $vgpr0 ... Index: test/CodeGen/AMDGPU/waitcnt.mir =================================================================== --- test/CodeGen/AMDGPU/waitcnt.mir +++ test/CodeGen/AMDGPU/waitcnt.mir @@ -51,22 +51,22 @@ body: | bb.0: successors: %bb.1 - %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4) - %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16) - %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec + $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.global4) + $vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 16 from %ir.global16) + $vgpr0 = V_MOV_B32_e32 $vgpr1, implicit $exec S_BRANCH %bb.1 bb.1: successors: %bb.2 - %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr - %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16) - %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec + $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 16 from %ir.global16) + $vgpr0 = V_MOV_B32_e32 $vgpr1, implicit $exec S_BRANCH %bb.2 bb.2: - %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4) - %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16) - %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec + $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.flat4) + $vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 16 from %ir.flat16) + $vgpr0 = V_MOV_B32_e32 $vgpr1, implicit $exec S_ENDPGM ... --- @@ -74,7 +74,7 @@ # need to wait immediately. 
# CHECK-LABEL: name: single_fallthrough_successor_no_end_block_wait -# CHECK: %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2 +# CHECK: $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2 # CHECK-NOT: S_WAITCNT # CHECK: bb.1: @@ -86,11 +86,11 @@ body: | bb.0: successors: %bb.1 - %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr + $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr bb.1: - %vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec - FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr + $vgpr3_vgpr4 = V_LSHLREV_B64 4, $vgpr7_vgpr8, implicit $exec + FLAT_STORE_DWORD $vgpr3_vgpr4, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM ... --- @@ -99,7 +99,7 @@ # CHECK-LABEL: name: single_branch_successor_not_next_block -# CHECK: %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2 +# CHECK: $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2 # CHECK-NEXT: S_WAITCNT 112 # CHECK: bb.1 @@ -114,15 +114,15 @@ body: | bb.0: successors: %bb.2 - %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr + $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr S_BRANCH %bb.2 bb.1: - FLAT_STORE_DWORD %vgpr8_vgpr9, %vgpr10, 0, 0, 0, implicit %exec, implicit %flat_scr + FLAT_STORE_DWORD $vgpr8_vgpr9, $vgpr10, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM bb.2: - %vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec - FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr + $vgpr3_vgpr4 = V_LSHLREV_B64 4, $vgpr7_vgpr8, implicit $exec + FLAT_STORE_DWORD $vgpr3_vgpr4, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr S_ENDPGM ... Index: test/CodeGen/AMDGPU/wqm.mir =================================================================== --- test/CodeGen/AMDGPU/wqm.mir +++ test/CodeGen/AMDGPU/wqm.mir @@ -28,23 +28,23 @@ - { id: 11, class: vgpr_32, preferred-register: '' } - { id: 12, class: vgpr_32, preferred-register: '' } liveins: - - { reg: '%sgpr0', virtual-reg: '%0' } - - { reg: '%sgpr1', virtual-reg: '%1' } - - { reg: '%sgpr2', virtual-reg: '%2' } - - { reg: '%vgpr0', virtual-reg: '%3' } + - { reg: '$sgpr0', virtual-reg: '%0' } + - { reg: '$sgpr1', virtual-reg: '%1' } + - { reg: '$sgpr2', virtual-reg: '%2' } + - { reg: '$vgpr0', virtual-reg: '%3' } body: | bb.0: - liveins: %sgpr0, %sgpr1, %sgpr2, %vgpr0 + liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 - %3 = COPY %vgpr0 - %2 = COPY %sgpr2 - %1 = COPY %sgpr1 - %0 = COPY %sgpr0 - S_CMP_LT_I32 0, %0, implicit-def %scc - %12 = V_ADD_I32_e32 %3, %3, implicit-def %vcc, implicit %exec - %5 = S_CSELECT_B32 %2, %1, implicit %scc - %11 = V_ADD_I32_e32 %5, %12, implicit-def %vcc, implicit %exec - %vgpr0 = WWM %11, implicit %exec - SI_RETURN_TO_EPILOG %vgpr0 + %3 = COPY $vgpr0 + %2 = COPY $sgpr2 + %1 = COPY $sgpr1 + %0 = COPY $sgpr0 + S_CMP_LT_I32 0, %0, implicit-def $scc + %12 = V_ADD_I32_e32 %3, %3, implicit-def $vcc, implicit $exec + %5 = S_CSELECT_B32 %2, %1, implicit $scc + %11 = V_ADD_I32_e32 %5, %12, implicit-def $vcc, implicit $exec + $vgpr0 = WWM %11, implicit $exec + SI_RETURN_TO_EPILOG $vgpr0 ... 
Index: test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll =================================================================== --- test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll +++ test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll @@ -4,7 +4,7 @@ define void @vst(i8* %m, [4 x i64] %v) { entry: ; CHECK: vst: -; CHECK: VST1d64Q killed %r{{[0-9]+}}, 8, %d{{[0-9]+}}, 14, %noreg, implicit killed %q{{[0-9]+}}_q{{[0-9]+}} +; CHECK: VST1d64Q killed $r{{[0-9]+}}, 8, $d{{[0-9]+}}, 14, $noreg, implicit killed $q{{[0-9]+}}_q{{[0-9]+}} %v0 = extractvalue [4 x i64] %v, 0 %v1 = extractvalue [4 x i64] %v, 1 @@ -37,7 +37,7 @@ %struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind { ; CHECK: vtbx4: -; CHECK: VTBX4 {{.*}}, 14, %noreg, implicit %q{{[0-9]+}}_q{{[0-9]+}} +; CHECK: VTBX4 {{.*}}, 14, $noreg, implicit $q{{[0-9]+}}_q{{[0-9]+}} %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0 Index: test/CodeGen/ARM/ARMLoadStoreDBG.mir =================================================================== --- test/CodeGen/ARM/ARMLoadStoreDBG.mir +++ test/CodeGen/ARM/ARMLoadStoreDBG.mir @@ -81,24 +81,24 @@ exposesReturnsTwice: false tracksRegLiveness: true liveins: - - { reg: '%r0' } - - { reg: '%r1' } - - { reg: '%r2' } - - { reg: '%r3' } -calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13', - '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4', - '%r5', '%r6', '%r7', '%r8', '%r9', '%r10', '%r11', - '%s16', '%s17', '%s18', '%s19', '%s20', '%s21', - '%s22', '%s23', '%s24', '%s25', '%s26', '%s27', - '%s28', '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', - '%d10_d12', '%d11_d13', '%d12_d14', '%d13_d15', - '%q4_q5', '%q5_q6', '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', - '%r6_r7', '%r8_r9', '%r10_r11', '%d8_d9_d10', '%d9_d10_d11', - '%d10_d11_d12', '%d11_d12_d13', '%d12_d13_d14', - '%d13_d14_d15', '%d8_d10_d12', '%d9_d11_d13', '%d10_d12_d14', - '%d11_d13_d15', '%d8_d10_d12_d14', '%d9_d11_d13_d15', - '%d9_d10', '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', - '%d11_d12_d13_d14' ] + - { reg: '$r0' } + - { reg: '$r1' } + - { reg: '$r2' } + - { reg: '$r3' } +calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13', + '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4', + '$r5', '$r6', '$r7', '$r8', '$r9', '$r10', '$r11', + '$s16', '$s17', '$s18', '$s19', '$s20', '$s21', + '$s22', '$s23', '$s24', '$s25', '$s26', '$s27', + '$s28', '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11', + '$d10_d12', '$d11_d13', '$d12_d14', '$d13_d15', + '$q4_q5', '$q5_q6', '$q6_q7', '$q4_q5_q6_q7', '$r4_r5', + '$r6_r7', '$r8_r9', '$r10_r11', '$d8_d9_d10', '$d9_d10_d11', + '$d10_d11_d12', '$d11_d12_d13', '$d12_d13_d14', + '$d13_d14_d15', '$d8_d10_d12', '$d9_d11_d13', '$d10_d12_d14', + '$d11_d13_d15', '$d8_d10_d12_d14', '$d9_d11_d13_d15', + '$d9_d10', '$d11_d12', '$d13_d14', '$d9_d10_d11_d12', + '$d11_d12_d13_d14' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -114,46 +114,46 @@ hasVAStart: false hasMustTailInVarArgFunc: false stack: - - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr', callee-saved-restored: false } - - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7', callee-saved-restored: true } + - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, 
callee-saved-register: '$lr', callee-saved-restored: false } + - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '$r7', callee-saved-restored: true } body: | bb.0.entry: - liveins: %r0, %r1, %r2, %r3, %lr, %r7 + liveins: $r0, $r1, $r2, $r3, $lr, $r7 - DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28 - DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28 - t2CMPri %r3, 4, 14, %noreg, implicit-def %cpsr, debug-location !31 - t2Bcc %bb.2.if.end, 2, killed %cpsr + DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28 + DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28 + t2CMPri $r3, 4, 14, $noreg, implicit-def $cpsr, debug-location !31 + t2Bcc %bb.2.if.end, 2, killed $cpsr bb.1: - liveins: %lr, %r7 + liveins: $lr, $r7 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - %r0 = t2MOVi -1, 14, %noreg, %noreg - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - tBX_RET 14, %noreg, implicit %r0, debug-location !34 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + $r0 = t2MOVi -1, 14, $noreg, $noreg + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + tBX_RET 14, $noreg, implicit $r0, debug-location !34 bb.2.if.end: - liveins: %r0, %r2, %r3, %r7, %lr + liveins: $r0, $r2, $r3, $r7, $lr - %sp = frame-setup t2STMDB_UPD %sp, 14, %noreg, killed %r7, killed %lr + $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r7, killed $lr frame-setup CFI_INSTRUCTION def_cfa_offset 8 - frame-setup CFI_INSTRUCTION offset %lr, -4 - frame-setup CFI_INSTRUCTION offset %r7, -8 - DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28 - DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28 - %r1 = COPY killed %r2, debug-location !32 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - %r2 = COPY killed %r3, debug-location !32 - tBL 14, %noreg, @g, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit-def %sp, debug-location !32 - %r0 = t2MOVi 0, 14, %noreg, %noreg - %sp = t2LDMIA_UPD %sp, 14, %noreg, def %r7, def %lr - tBX_RET 14, %noreg, implicit %r0, debug-location !34 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28 + DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28 + $r1 = COPY killed $r2, debug-location !32 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + $r2 = COPY killed $r3, debug-location !32 + tBL 14, $noreg, @g, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit-def $sp, debug-location !32 + $r0 = t2MOVi 0, 14, $noreg, $noreg + $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr + tBX_RET 14, $noreg, implicit 
$r0, debug-location !34 # Verify that the DBG_VALUE is ignored. -# CHECK: %sp = t2LDMIA_RET %sp, 14, %noreg, def %r7, def %pc, implicit %r0 +# CHECK: $sp = t2LDMIA_RET $sp, 14, $noreg, def $r7, def $pc, implicit $r0 ... Index: test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll +++ test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll @@ -4,14 +4,14 @@ define arm_aapcscc void @test_indirect_call(void() *%fptr) { ; CHECK-LABEL: name: test_indirect_call -; V5T: %[[FPTR:[0-9]+]]:gpr(p0) = COPY %r0 -; V4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY %r0 -; NOV4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY %r0 -; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp -; V5T: BLX %[[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp -; V4T: BX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp -; NOV4T: BMOVPCRX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp -; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; V5T: %[[FPTR:[0-9]+]]:gpr(p0) = COPY $r0 +; V4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY $r0 +; NOV4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY $r0 +; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp +; V5T: BLX %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp +; V4T: BX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp +; NOV4T: BMOVPCRX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp +; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp entry: notail call arm_aapcscc void %fptr() ret void @@ -21,9 +21,9 @@ define arm_aapcscc void @test_direct_call() { ; CHECK-LABEL: name: test_direct_call -; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp -; CHECK: BL @call_target, csr_aapcs, implicit-def %lr, implicit %sp -; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp +; CHECK: BL @call_target, csr_aapcs, implicit-def $lr, implicit $sp +; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp entry: notail call arm_aapcscc void @call_target() ret void Index: test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir +++ test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir @@ -64,23 +64,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_eq_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = 
COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(eq), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_icmp_ne_s32 @@ -94,23 +94,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_ne_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(ne), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_icmp_ugt_s32 @@ -124,23 +124,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_ugt_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(ugt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
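# Reading aid (illustrative note, not test content): in the MOVCCi CHECK
# lines throughout this file, the immediate printed just before $cpsr is
# the standard ARMCC condition-code encoding:
#   0 = eq   1 = ne   2 = hs (uge)   3 = lo (ult)
#   4 = mi   5 = pl   6 = vs         7 = vc
#   8 = hi (ugt)     9 = ls (ule)
#  10 = ge  11 = lt  12 = gt        13 = le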
--- name: test_icmp_uge_s32 @@ -154,23 +154,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_uge_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 2, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 2, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(uge), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_icmp_ult_s32 @@ -184,23 +184,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_ult_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 3, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 3, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(ult), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_icmp_ule_s32 @@ -214,23 +214,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_ule_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(ule), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_icmp_sgt_s32 @@ -244,23 +244,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_sgt_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(sgt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_icmp_sge_s32 @@ -274,23 +274,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_sge_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(sge), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_icmp_slt_s32 @@ -304,23 +304,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_slt_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(slt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_icmp_sle_s32 @@ -334,23 +334,23 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; CHECK-LABEL: name: test_icmp_sle_s32 - ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, $noreg, implicit-def $cpsr + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(sle), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_true_s32 @@ -364,19 +364,19 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_true_s32 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, %noreg, %noreg - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, $noreg, $noreg + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(true), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_false_s32 @@ -390,19 +390,19 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_false_s32 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(false), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
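# Note (illustrative, not test content): floatpred(true)/(false) fold to
# constants at selection time, which is why the CHECK lines above show no
# VCMPS and no :spr copies -- just a bare MOVi of the constant:
#   %2 = G_FCMP floatpred(true), ...   ==>   MOVi 1   (operand copies
#   become dead and are dropped from the checked output)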
--- name: test_fcmp_oeq_s32 @@ -416,24 +416,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_oeq_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(oeq), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ogt_s32 @@ -447,24 +447,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ogt_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ogt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_oge_s32 @@ -478,24 +478,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_oge_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(oge), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_olt_s32 @@ -509,24 +509,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_olt_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 4, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 4, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(olt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ole_s32 @@ -540,24 +540,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ole_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ole), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ord_s32 @@ -571,24 +571,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ord_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 7, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 7, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ord), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ugt_s32 @@ -602,24 +602,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ugt_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ugt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_uge_s32 @@ -633,24 +633,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_uge_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 5, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 5, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(uge), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ult_s32 @@ -664,24 +664,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ult_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ult), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ule_s32 @@ -695,24 +695,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ule_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ule), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_une_s32 @@ -726,24 +726,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_une_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(une), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_uno_s32 @@ -757,24 +757,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_uno_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 6, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 6, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(uno), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_one_s32 @@ -788,27 +788,27 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_one_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 4, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, $cpsr + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 4, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(one), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
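# Note (illustrative sketch, not test content): floatpred(one), "ordered
# and unequal", has no single ARM condition, so selection emits two
# VCMPS/FMSTAT rounds and accumulates the result:
#   res = 0
#   if gt: res = 1    ; first  MOVCCi ..., 1, 12, $cpsr
#   if mi: res = 1    ; second MOVCCi ..., 1, 4,  $cpsr
# The ueq tests that follow are the dual, built from eq OR vs
# (conditions 0 and 6 in their MOVCCi operands).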
--- name: test_fcmp_ueq_s32 @@ -822,27 +822,27 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 ; CHECK-LABEL: name: test_fcmp_ueq_s32 - ; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0 - ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr - ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 6, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, $cpsr + ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 6, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(ueq), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_true_s64 @@ -856,19 +856,19 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_true_s64 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, %noreg, %noreg - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, $noreg, $noreg + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(true), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_false_s64 @@ -882,19 +882,19 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_false_s64 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(false), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_oeq_s64 @@ -908,24 +908,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_oeq_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(oeq), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ogt_s64 @@ -939,24 +939,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ogt_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ogt), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_oge_s64 @@ -970,24 +970,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_oge_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(oge), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_olt_s64 @@ -1001,24 +1001,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_olt_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 4, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 4, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(olt), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ole_s64 @@ -1032,24 +1032,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ole_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ole), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ord_s64 @@ -1063,24 +1063,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ord_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 7, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 7, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ord), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ugt_s64 @@ -1094,24 +1094,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ugt_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ugt), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_uge_s64 @@ -1125,24 +1125,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_uge_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 5, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 5, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(uge), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ult_s64 @@ -1156,24 +1156,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ult_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ult), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ule_s64 @@ -1187,24 +1187,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ule_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ule), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_une_s64 @@ -1218,24 +1218,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_une_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(une), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_uno_s64 @@ -1249,24 +1249,24 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_uno_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 6, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 6, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(uno), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_one_s64 @@ -1280,27 +1280,27 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_one_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 4, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, $cpsr + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 4, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(one), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ueq_s64 @@ -1314,25 +1314,25 @@ - { id: 3, class: gprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 ; CHECK-LABEL: name: test_fcmp_ueq_s64 - ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0 - ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1 - ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr - ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv - ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv - ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 6, %cpsr - ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg - ; CHECK: %r0 = COPY [[ANDri]] - ; CHECK: BX_RET 14, %noreg, implicit %r0 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1 + ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, $cpsr + ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, $noreg, implicit-def $fpscr_nzcv + ; CHECK: FMSTAT 14, $noreg, implicit-def $cpsr, implicit $fpscr_nzcv + ; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 6, $cpsr + ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, $noreg, $noreg + ; CHECK: $r0 = COPY [[ANDri]] + ; CHECK: BX_RET 14, $noreg, implicit $r0 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ueq), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... Index: test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir +++ test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir @@ -63,24 +63,24 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 - ; CHECK: [[VREGZ:%[0-9]+]]:gprnopc = COPY %r2 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 + ; CHECK: [[VREGZ:%[0-9]+]]:gprnopc = COPY $r2 %3(s32) = G_MUL %0, %1 %4(s32) = G_ADD %3, %2 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_mla_commutative @@ -97,24 +97,24 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 - ; CHECK: [[VREGZ:%[0-9]+]]:gprnopc = COPY %r2 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 + ; CHECK: [[VREGZ:%[0-9]+]]:gprnopc = COPY $r2 %3(s32) = G_MUL %0, %1 %4(s32) = G_ADD %2, %3 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_mla_v5 @@ -131,24 +131,24 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 - ; CHECK: [[VREGZ:%[0-9]+]]:gprnopc = COPY %r2 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 + ; CHECK: [[VREGZ:%[0-9]+]]:gprnopc = COPY $r2 %3(s32) = G_MUL %0, %1 %4(s32) = G_ADD %3, %2 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLAv5 [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLAv5 [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_mls @@ -165,24 +165,24 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 - ; CHECK: [[VREGZ:%[0-9]+]]:gpr = COPY %r2 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 + ; CHECK: [[VREGZ:%[0-9]+]]:gpr = COPY $r2 %3(s32) = G_MUL %0, %1 %4(s32) = G_SUB %2, %3 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = MLS [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = MLS [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
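The MLA/MLS tests above pin the multiply-accumulate folds; MLA computes x*y + z and MLS computes z - x*y. A sketch of the generic-MIR shapes being matched, with hypothetical virtual registers (the test after this one covers the fallback when MLS is unavailable):

  %m:gprb(s32) = G_MUL %x, %y
  %a:gprb(s32) = G_ADD %m, %z   ; either operand order, selects to MLA
  %s:gprb(s32) = G_SUB %z, %m   ; selects to MLS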
--- name: test_no_mls @@ -199,25 +199,25 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 - ; CHECK: [[VREGZ:%[0-9]+]]:gpr = COPY %r2 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 + ; CHECK: [[VREGZ:%[0-9]+]]:gpr = COPY $r2 %3(s32) = G_MUL %0, %1 %4(s32) = G_SUB %2, %3 - ; CHECK: [[VREGM:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, %noreg, %noreg - ; CHECK: [[VREGR:%[0-9]+]]:gpr = SUBrr [[VREGZ]], [[VREGM]], 14, %noreg, %noreg + ; CHECK: [[VREGM:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, $noreg, $noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = SUBrr [[VREGZ]], [[VREGM]], 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_shifts_to_revsh @@ -239,10 +239,10 @@ - { id: 9, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 24 %2(s32) = G_SHL %0(s32), %1(s32) @@ -259,11 +259,11 @@ %9(s32) = G_OR %4(s32), %8(s32) ; CHECK: [[VREGR:%[0-9]+]]:gpr = REVSH [[VREGX]] - %r0 = COPY %9(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %9(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_shifts_to_revsh_commutative @@ -285,10 +285,10 @@ - { id: 9, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 24 %2(s32) = G_SHL %0(s32), %1(s32) @@ -305,11 +305,11 @@ %9(s32) = G_OR %8(s32), %4(s32) ; CHECK: [[VREGR:%[0-9]+]]:gpr = REVSH [[VREGX]] - %r0 = COPY %9(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %9(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_shifts_no_revsh_features @@ -331,9 +331,9 @@ - { id: 9, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s32) = G_CONSTANT i32 24 %2(s32) = G_SHL %0(s32), %1(s32) @@ -351,9 +351,9 @@ ; We don't really care how this is folded as long as it's not into a REVSH. ; CHECK-NOT: REVSH - %r0 = COPY %9(s32) + $r0 = COPY %9(s32) - BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 ... --- name: test_shifts_no_revsh_constants @@ -375,9 +375,9 @@ - { id: 9, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s32) = G_CONSTANT i32 16 ; REVSH needs 24 here %2(s32) = G_SHL %0(s32), %1(s32) @@ -395,9 +395,9 @@ ; We don't really care how this is folded as long as it's not into a REVSH. ; CHECK-NOT: REVSH - %r0 = COPY %9(s32) + $r0 = COPY %9(s32) - BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 ... 
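The REVSH tests above build the byte swap out of shifts, masks, and ors; assuming the usual ARM semantics, REVSH byte-reverses the low halfword and sign-extends it:

  ; REVSH(x) = sign_extend_16( ((x & 0xff) << 8) | ((x >> 8) & 0xff) )
  ; The two negative tests perturb one shift amount (16 where 24 is needed)
  ; or drop the ISA feature, so the selector must not form a REVSH.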
--- name: test_bicrr @@ -414,23 +414,23 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_CONSTANT i32 -1 %3(s32) = G_XOR %1, %2 %4(s32) = G_AND %0, %3 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_bicrr_commutative @@ -447,23 +447,23 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_CONSTANT i32 -1 %3(s32) = G_XOR %1, %2 %4(s32) = G_AND %3, %0 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_bicri @@ -480,10 +480,10 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 ; This test and the following ones are a bit contrived, since they use a ; G_XOR that can be constant-folded. They exist mostly to validate the @@ -495,13 +495,13 @@ %2(s32) = G_CONSTANT i32 -1 %3(s32) = G_XOR %1, %2 %4(s32) = G_AND %0, %3 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_bicri_commutative_xor @@ -518,23 +518,23 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 192 %2(s32) = G_CONSTANT i32 -1 %3(s32) = G_XOR %2, %1 %4(s32) = G_AND %0, %3 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
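The BIC tests synthesize the complement as an xor with -1, since BICrr computes x & ~y. The shape being matched, with hypothetical virtual registers:

  %ones:gprb(s32) = G_CONSTANT i32 -1
  %noty:gprb(s32) = G_XOR %y, %ones   ; ~y
  %r:gprb(s32)    = G_AND %x, %noty   ; selects to BICrr; a foldable mask
                                      ; constant gives BICri instead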
--- name: test_bicri_commutative_and @@ -551,23 +551,23 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 192 %2(s32) = G_CONSTANT i32 -1 %3(s32) = G_XOR %1, %2 %4(s32) = G_AND %3, %0 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_bicri_commutative_both @@ -584,23 +584,23 @@ - { id: 4, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 192 %2(s32) = G_CONSTANT i32 -1 %3(s32) = G_XOR %2, %1 %4(s32) = G_AND %3, %0 - ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, $noreg, $noreg - %r0 = COPY %4(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %4(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_pkhbt @@ -621,12 +621,12 @@ - { id: 8, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_CONSTANT i32 65535 ; 0xFFFF %3(s32) = G_AND %0, %2 @@ -637,13 +637,13 @@ %7(s32) = G_AND %5, %6 %8(s32) = G_OR %3, %7 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 7, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 7, 14, $noreg - %r0 = COPY %8(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %8(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_pkhbt_commutative @@ -664,12 +664,12 @@ - { id: 8, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_CONSTANT i32 65535 ; 0xFFFF %3(s32) = G_AND %0, %2 @@ -680,13 +680,13 @@ %7(s32) = G_AND %5, %6 %8(s32) = G_OR %7, %3 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 7, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 7, 14, $noreg - %r0 = COPY %8(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %8(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_pkhbt_imm16_31 @@ -705,12 +705,12 @@ - { id: 6, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_CONSTANT i32 65535 ; 0xFFFF %3(s32) = G_AND %0, %2 @@ -719,13 +719,13 @@ %5(s32) = G_SHL %1, %4 %6(s32) = G_OR %3, %5 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 17, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 17, 14, $noreg - %r0 = COPY %6(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %6(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_pkhbt_unshifted @@ -744,12 +744,12 @@ - { id: 6, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_CONSTANT i32 65535 ; 0xFFFF %3(s32) = G_AND %0, %2 @@ -758,13 +758,13 @@ %5(s32) = G_AND %1, %4 %6(s32) = G_OR %3, %5 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 0, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHBT [[VREGX]], [[VREGY]], 0, 14, $noreg - %r0 = COPY %6(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %6(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_pkhtb_imm16 @@ -783,12 +783,12 @@ - { id: 6, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_CONSTANT i32 4294901760 ; 0xFFFF0000 %3(s32) = G_AND %0, %2 @@ -797,13 +797,13 @@ %5(s32) = G_LSHR %1, %4 %6(s32) = G_OR %3, %5 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHTB [[VREGX]], [[VREGY]], 16, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHTB [[VREGX]], [[VREGY]], 16, 14, $noreg - %r0 = COPY %6(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %6(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
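A key for the pack-halfword checks, assuming the usual ARM semantics:

  ; PKHBT d, n, m, imm : d[15:0]  = n[15:0],  d[31:16] = (m LSL imm)[31:16]
  ; PKHTB d, n, m, imm : d[31:16] = n[31:16], d[15:0]  = (m ASR imm)[15:0]
  ; hence (x & 0xffff) | (y << 17) selects to PKHBT x, y, 17 above: a shift
  ; of 16 or more already clears the low halfword, so no mask is needed.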
--- name: test_pkhtb_imm1_15 @@ -824,12 +824,12 @@ - { id: 8, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_CONSTANT i32 4294901760 ; 0xFFFF0000 %3(s32) = G_AND %0, %2 @@ -840,13 +840,13 @@ %7(s32) = G_AND %5, %6 %8(s32) = G_OR %3, %7 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHTB [[VREGX]], [[VREGY]], 7, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = PKHTB [[VREGX]], [[VREGY]], 7, 14, $noreg - %r0 = COPY %8(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %8(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_movti16_0xffff @@ -861,21 +861,21 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 4294901760 ; 0xFFFF0000 %2(s32) = G_OR %0, %1 - ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MOVTi16 [[VREGX]], 65535, 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MOVTi16 [[VREGX]], 65535, 14, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGR]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_vnmuls @@ -891,22 +891,22 @@ - { id: 3, class: fprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY %s0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY $s0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY $s1 %2(s32) = G_FMUL %0, %1 %3(s32) = G_FNEG %2 - ; CHECK: [[VREGR:%[0-9]+]]:spr = VNMULS [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VNMULS [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %3(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %3(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_vnmuls_reassociate @@ -922,22 +922,22 @@ - { id: 3, class: fprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY %s0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY $s0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY $s1 %2(s32) = G_FNEG %0 %3(s32) = G_FMUL %1, %2 - ; CHECK: [[VREGR:%[0-9]+]]:spr = VNMULS [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VNMULS [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %3(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %3(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... 
--- name: test_vnmuld @@ -953,22 +953,22 @@ - { id: 3, class: fprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:dpr = COPY %d0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:dpr = COPY %d1 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:dpr = COPY $d0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:dpr = COPY $d1 %2(s64) = G_FMUL %0, %1 %3(s64) = G_FNEG %2 - ; CHECK: [[VREGR:%[0-9]+]]:dpr = VNMULD [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:dpr = VNMULD [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %3(s64) - ; CHECK: %d0 = COPY [[VREGR]] + $d0 = COPY %3(s64) + ; CHECK: $d0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_vfnmas @@ -985,24 +985,24 @@ - { id: 4, class: fprb } body: | bb.0: - liveins: %s0, %s1, %s2 + liveins: $s0, $s1, $s2 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 - %2(s32) = COPY %s2 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY %s0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY %s1 - ; CHECK-DAG: [[VREGZ:%[0-9]+]]:spr = COPY %s2 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 + %2(s32) = COPY $s2 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY $s0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY $s1 + ; CHECK-DAG: [[VREGZ:%[0-9]+]]:spr = COPY $s2 %3(s32) = G_FMA %0, %1, %2 %4(s32) = G_FNEG %3 - ; CHECK: [[VREGR:%[0-9]+]]:spr = VFNMAS [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VFNMAS [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %4(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %4(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_vfnmad @@ -1020,25 +1020,25 @@ - { id: 5, class: fprb } body: | bb.0: - liveins: %d0, %d1, %d2 + liveins: $d0, $d1, $d2 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 - %2(s64) = COPY %d2 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:dpr = COPY %d0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:dpr = COPY %d1 - ; CHECK-DAG: [[VREGZ:%[0-9]+]]:dpr = COPY %d2 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 + %2(s64) = COPY $d2 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:dpr = COPY $d0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:dpr = COPY $d1 + ; CHECK-DAG: [[VREGZ:%[0-9]+]]:dpr = COPY $d2 %3(s64) = G_FNEG %0 %4(s64) = G_FNEG %2 %5(s64) = G_FMA %3, %1, %4 - ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFNMAD [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFNMAD [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %5(s64) - ; CHECK: %d0 = COPY [[VREGR]] + $d0 = COPY %5(s64) + ; CHECK: $d0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... 
--- name: test_vfmss @@ -1055,24 +1055,24 @@ - { id: 4, class: fprb } body: | bb.0: - liveins: %s0, %s1, %s2 + liveins: $s0, $s1, $s2 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 - %2(s32) = COPY %s2 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY %s0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY %s1 - ; CHECK-DAG: [[VREGZ:%[0-9]+]]:spr = COPY %s2 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 + %2(s32) = COPY $s2 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY $s0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY $s1 + ; CHECK-DAG: [[VREGZ:%[0-9]+]]:spr = COPY $s2 %3(s32) = G_FNEG %0 %4(s32) = G_FMA %3, %1, %2 - ; CHECK: [[VREGR:%[0-9]+]]:spr = VFMSS [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VFMSS [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %4(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %4(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_vfmsd @@ -1089,24 +1089,24 @@ - { id: 4, class: fprb } body: | bb.0: - liveins: %d0, %d1, %d2 + liveins: $d0, $d1, $d2 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 - %2(s64) = COPY %d2 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:dpr = COPY %d0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:dpr = COPY %d1 - ; CHECK-DAG: [[VREGZ:%[0-9]+]]:dpr = COPY %d2 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 + %2(s64) = COPY $d2 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:dpr = COPY $d0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:dpr = COPY $d1 + ; CHECK-DAG: [[VREGZ:%[0-9]+]]:dpr = COPY $d2 %3(s64) = G_FNEG %1 %4(s64) = G_FMA %0, %3, %2 - ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFMSD [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFMSD [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %4(s64) - ; CHECK: %d0 = COPY [[VREGR]] + $d0 = COPY %4(s64) + ; CHECK: $d0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_vfnmss @@ -1123,22 +1123,22 @@ - { id: 4, class: fprb } body: | bb.0: - liveins: %s0, %s1, %s2 + liveins: $s0, $s1, $s2 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 - %2(s32) = COPY %s2 - ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY %s0 - ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY %s1 - ; CHECK-DAG: [[VREGZ:%[0-9]+]]:spr = COPY %s2 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 + %2(s32) = COPY $s2 + ; CHECK-DAG: [[VREGX:%[0-9]+]]:spr = COPY $s0 + ; CHECK-DAG: [[VREGY:%[0-9]+]]:spr = COPY $s1 + ; CHECK-DAG: [[VREGZ:%[0-9]+]]:spr = COPY $s2 %3(s32) = G_FNEG %2 %4(s32) = G_FMA %0, %1, %3 - ; CHECK: [[VREGR:%[0-9]+]]:spr = VFNMSS [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VFNMSS [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %4(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %4(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... 
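Summarizing the VFP negation folds pinned by the tests above, read off their inputs and CHECK lines:

  ; G_FNEG(G_FMUL(x, y))                selects to VNMULS/VNMULD   ; -(x*y)
  ; G_FNEG(G_FMA(x, y, z))              selects to VFNMAS          ; -(x*y + z)
  ; G_FMA(G_FNEG(x), y, G_FNEG(z))      selects to VFNMAD          ; -(x*y) - z
  ; G_FMA(G_FNEG(x), y, z)              selects to VFMSS/VFMSD     ; z - x*y
  ; G_FMA(x, y, G_FNEG(z))              selects to VFNMSS          ; x*y - z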
Index: test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir +++ test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir @@ -101,21 +101,21 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0 %1(s1) = G_TRUNC %0(s32) %2(s32) = G_ZEXT %1(s1) - ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = ANDri [[VREG]], 1, 14, %noreg, %noreg + ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = ANDri [[VREG]], 1, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGEXT]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGEXT]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_trunc_and_sext_s1 @@ -130,22 +130,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0 %1(s1) = G_TRUNC %0(s32) %2(s32) = G_SEXT %1(s1) - ; CHECK: [[VREGAND:%[0-9]+]]:gpr = ANDri [[VREG]], 1, 14, %noreg, %noreg - ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = RSBri [[VREGAND]], 0, 14, %noreg, %noreg + ; CHECK: [[VREGAND:%[0-9]+]]:gpr = ANDri [[VREG]], 1, 14, $noreg, $noreg + ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = RSBri [[VREGAND]], 0, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGEXT]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGEXT]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_trunc_and_sext_s8 @@ -160,22 +160,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0 %1(s8) = G_TRUNC %0(s32) ; CHECK: [[VREGTRUNC:%[0-9]+]]:gprnopc = COPY [[VREG]] %2(s32) = G_SEXT %1(s8) - ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = SXTB [[VREGTRUNC]], 0, 14, %noreg + ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = SXTB [[VREGTRUNC]], 0, 14, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGEXT]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGEXT]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_trunc_and_zext_s16 @@ -190,22 +190,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0 %1(s16) = G_TRUNC %0(s32) ; CHECK: [[VREGTRUNC:%[0-9]+]]:gprnopc = COPY [[VREG]] %2(s32) = G_ZEXT %1(s16) - ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = UXTH [[VREGTRUNC]], 0, 14, %noreg + ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = UXTH [[VREGTRUNC]], 0, 14, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGEXT]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGEXT]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
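How the small-type extensions above select, read off the CHECK lines (the tests that follow show G_ANYEXT degenerating to a plain copy):

  ; zext s1  : ANDri x, 1                   ; mask to one bit
  ; sext s1  : ANDri x, 1 then RSBri y, 0   ; 0 - (x & 1) yields 0 or -1
  ; sext s8  : SXTB
  ; zext s16 : UXTH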
--- name: test_trunc_and_anyext_s8 @@ -220,20 +220,20 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0 %1(s8) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s8) - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREG]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREG]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_trunc_and_anyext_s16 @@ -248,20 +248,20 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0 %1(s16) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s16) - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREG]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREG]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_trunc_s64 @@ -276,22 +276,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %d0 + liveins: $r0, $d0 - %0(s64) = COPY %d0 - ; CHECK: [[VREG:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREG:%[0-9]+]]:dpr = COPY $d0 - %2(p0) = COPY %r0 - ; CHECK: [[PTR:%[0-9]+]]:gpr = COPY %r0 + %2(p0) = COPY $r0 + ; CHECK: [[PTR:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_TRUNC %0(s64) ; CHECK: [[VREGTRUNC:%[0-9]+]]:gpr, [[UNINTERESTING:%[0-9]+]]:gpr = VMOVRRD [[VREG]] G_STORE %1(s32), %2 :: (store 4) - ; CHECK: STRi12 [[VREGTRUNC]], [[PTR]], 0, 14, %noreg + ; CHECK: STRi12 [[VREGTRUNC]], [[PTR]], 0, 14, $noreg - BX_RET 14, %noreg - ; CHECK: BX_RET 14, %noreg + BX_RET 14, $noreg + ; CHECK: BX_RET 14, $noreg ... --- name: test_add_s32 @@ -306,22 +306,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_ADD %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGSUM]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_add_fold_imm_s32 @@ -336,20 +336,20 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 255 %2(s32) = G_ADD %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDri [[VREGX]], 255, 14, %noreg, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDri [[VREGX]], 255, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGSUM]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_add_no_fold_imm_s32 @@ -364,22 +364,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 65535 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = MOVi16 65535, 14, %noreg + ; CHECK: [[VREGY:%[0-9]+]]:gpr = MOVi16 65535, 14, $noreg %2(s32) = G_ADD %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGSUM]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_fadd_s32 @@ -394,22 +394,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 - %1(s32) = COPY %s1 - ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %1(s32) = COPY $s1 + ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1 %2(s32) = G_FADD %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VADDS [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VADDS [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %2(s32) - ; CHECK: %s0 = COPY [[VREGSUM]] + $s0 = COPY %2(s32) + ; CHECK: $s0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_fadd_s64 @@ -424,22 +424,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 - %1(s64) = COPY %d1 - ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1 + %1(s64) = COPY $d1 + ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1 %2(s64) = G_FADD %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VADDD [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VADDD [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %2(s64) - ; CHECK: %d0 = COPY [[VREGSUM]] + $d0 = COPY %2(s64) + ; CHECK: $d0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fsub_s32 @@ -454,22 +454,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 - %1(s32) = COPY %s1 - ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %1(s32) = COPY $s1 + ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1 %2(s32) = G_FSUB %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VSUBS [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VSUBS [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %2(s32) - ; CHECK: %s0 = COPY [[VREGSUM]] + $s0 = COPY %2(s32) + ; CHECK: $s0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... 
--- name: test_fsub_s64 @@ -484,22 +484,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 - %1(s64) = COPY %d1 - ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1 + %1(s64) = COPY $d1 + ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1 %2(s64) = G_FSUB %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VSUBD [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VSUBD [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %2(s64) - ; CHECK: %d0 = COPY [[VREGSUM]] + $d0 = COPY %2(s64) + ; CHECK: $d0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fmul_s32 @@ -514,22 +514,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 - %1(s32) = COPY %s1 - ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %1(s32) = COPY $s1 + ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1 %2(s32) = G_FMUL %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VMULS [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VMULS [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %2(s32) - ; CHECK: %s0 = COPY [[VREGSUM]] + $s0 = COPY %2(s32) + ; CHECK: $s0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_fmul_s64 @@ -544,22 +544,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 - %1(s64) = COPY %d1 - ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1 + %1(s64) = COPY $d1 + ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1 %2(s64) = G_FMUL %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VMULD [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VMULD [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %2(s64) - ; CHECK: %d0 = COPY [[VREGSUM]] + $d0 = COPY %2(s64) + ; CHECK: $d0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fdiv_s32 @@ -574,22 +574,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 - %1(s32) = COPY %s1 - ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %1(s32) = COPY $s1 + ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1 %2(s32) = G_FDIV %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VDIVS [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VDIVS [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %2(s32) - ; CHECK: %s0 = COPY [[VREGSUM]] + $s0 = COPY %2(s32) + ; CHECK: $s0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... 
--- name: test_fdiv_s64 @@ -604,22 +604,22 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 - %1(s64) = COPY %d1 - ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1 + %1(s64) = COPY $d1 + ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1 %2(s64) = G_FDIV %0, %1 - ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VDIVD [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VDIVD [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %2(s64) - ; CHECK: %d0 = COPY [[VREGSUM]] + $d0 = COPY %2(s64) + ; CHECK: $d0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fneg_s32 @@ -633,19 +633,19 @@ - { id: 1, class: fprb } body: | bb.0: - liveins: %s0 + liveins: $s0 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 %1(s32) = G_FNEG %0 - ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VNEGS [[VREGX]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VNEGS [[VREGX]], 14, $noreg - %s0 = COPY %1(s32) - ; CHECK: %s0 = COPY [[VREGSUM]] + $s0 = COPY %1(s32) + ; CHECK: $s0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_fneg_s64 @@ -660,19 +660,19 @@ - { id: 2, class: fprb } body: | bb.0: - liveins: %d0 + liveins: $d0 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 %1(s64) = G_FNEG %0 - ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VNEGD [[VREGX]], 14, %noreg + ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VNEGD [[VREGX]], 14, $noreg - %d0 = COPY %1(s64) - ; CHECK: %d0 = COPY [[VREGSUM]] + $d0 = COPY %1(s64) + ; CHECK: $d0 = COPY [[VREGSUM]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fma_s32 @@ -688,25 +688,25 @@ - { id: 3, class: fprb } body: | bb.0: - liveins: %s0, %s1, %s2 + liveins: $s0, $s1, $s2 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 - %1(s32) = COPY %s1 - ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1 + %1(s32) = COPY $s1 + ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1 - %2(s32) = COPY %s2 - ; CHECK: [[VREGZ:%[0-9]+]]:spr = COPY %s2 + %2(s32) = COPY $s2 + ; CHECK: [[VREGZ:%[0-9]+]]:spr = COPY $s2 %3(s32) = G_FMA %0, %1, %2 - ; CHECK: [[VREGR:%[0-9]+]]:spr = VFMAS [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VFMAS [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %s0 = COPY %3(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %3(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... 
--- name: test_fma_s64 @@ -722,25 +722,25 @@ - { id: 3, class: fprb } body: | bb.0: - liveins: %d0, %d1, %d2 + liveins: $d0, $d1, $d2 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 - %1(s64) = COPY %d1 - ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1 + %1(s64) = COPY $d1 + ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1 - %2(s64) = COPY %d2 - ; CHECK: [[VREGZ:%[0-9]+]]:dpr = COPY %d2 + %2(s64) = COPY $d2 + ; CHECK: [[VREGZ:%[0-9]+]]:dpr = COPY $d2 %3(s64) = G_FMA %0, %1, %2 - ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFMAD [[VREGZ]], [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFMAD [[VREGZ]], [[VREGX]], [[VREGY]], 14, $noreg - %d0 = COPY %3(s64) - ; CHECK: %d0 = COPY [[VREGR]] + $d0 = COPY %3(s64) + ; CHECK: $d0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fpext_s32_to_s64 @@ -754,19 +754,19 @@ - { id: 1, class: fprb } body: | bb.0: - liveins: %s0 + liveins: $s0 - %0(s32) = COPY %s0 - ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY %s0 + %0(s32) = COPY $s0 + ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0 %1(s64) = G_FPEXT %0(s32) - ; CHECK: [[VREGR:%[0-9]+]]:dpr = VCVTDS [[VREGX]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:dpr = VCVTDS [[VREGX]], 14, $noreg - %d0 = COPY %1(s64) - ; CHECK: %d0 = COPY [[VREGR]] + $d0 = COPY %1(s64) + ; CHECK: $d0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... --- name: test_fptrunc_s64_to_s32 @@ -780,19 +780,19 @@ - { id: 1, class: fprb } body: | bb.0: - liveins: %d0 + liveins: $d0 - %0(s64) = COPY %d0 - ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY %d0 + %0(s64) = COPY $d0 + ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0 %1(s32) = G_FPTRUNC %0(s64) - ; CHECK: [[VREGR:%[0-9]+]]:spr = VCVTSD [[VREGX]], 14, %noreg + ; CHECK: [[VREGR:%[0-9]+]]:spr = VCVTSD [[VREGX]], 14, $noreg - %s0 = COPY %1(s32) - ; CHECK: %s0 = COPY [[VREGR]] + $s0 = COPY %1(s32) + ; CHECK: $s0 = COPY [[VREGR]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_fptosi_s32 @@ -1023,22 +1023,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_SUB %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_sub_imm_s32 @@ -1053,20 +1053,20 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 17 %2(s32) = G_SUB %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBri [[VREGX]], 17, 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBri [[VREGX]], 17, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_sub_rev_imm_s32 @@ -1081,20 +1081,20 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 %1(s32) = G_CONSTANT i32 17 %2(s32) = G_SUB %1, %0 - ; CHECK: [[VREGRES:%[0-9]+]]:gpr = RSBri [[VREGX]], 17, 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gpr = RSBri [[VREGX]], 17, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_mul_s32 @@ -1109,22 +1109,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_MUL %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MUL [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MUL [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_mulv5_s32 @@ -1139,22 +1139,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY $r1 %2(s32) = G_MUL %0, %1 - ; CHECK: early-clobber [[VREGRES:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, %noreg, %noreg + ; CHECK: early-clobber [[VREGRES:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
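The two subtract-immediate tests above encode which side the constant sits on; a sketch with hypothetical virtual registers:

  %d:gprb(s32) = G_SUB %x, %c   ; constant on the right: SUBri x, 17
  %r:gprb(s32) = G_SUB %c, %x   ; constant on the left:  RSBri x, 17 (17 - x)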
--- name: test_sdiv_s32 @@ -1169,22 +1169,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_SDIV %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SDIV [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SDIV [[VREGX]], [[VREGY]], 14, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_udiv_s32 @@ -1199,22 +1199,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_UDIV %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gpr = UDIV [[VREGX]], [[VREGY]], 14, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gpr = UDIV [[VREGX]], [[VREGY]], 14, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_lshr_s32 @@ -1229,22 +1229,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_LSHR %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 3, 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 3, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_ashr_s32 @@ -1259,22 +1259,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_ASHR %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 1, 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 1, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
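A key for the MOVsr shift-kind immediate, read off the CHECK lines here and in the G_SHL test that follows:

  ; MOVsr x, y, k, ... : k = 1 for asr, 2 for lsl, 3 for lsr
  ; so G_LSHR uses 3, G_ASHR uses 1, and G_SHL uses 2.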
--- name: test_shl_s32 @@ -1289,22 +1289,22 @@ - { id: 2, class: gprb } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0 + %0(s32) = COPY $r0 + ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0 - %1(s32) = COPY %r1 - ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1 + %1(s32) = COPY $r1 + ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1 %2(s32) = G_SHL %0, %1 - ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 2, 14, %noreg, %noreg + ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 2, 14, $noreg, $noreg - %r0 = COPY %2(s32) - ; CHECK: %r0 = COPY [[VREGRES]] + $r0 = COPY %2(s32) + ; CHECK: $r0 = COPY [[VREGRES]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_load_from_stack @@ -1327,31 +1327,31 @@ # CHECK-DAG: id: [[FI32:[0-9]+]], type: default, offset: 8 body: | bb.0: - liveins: %r0, %r1, %r2, %r3 + liveins: $r0, $r1, $r2, $r3 %0(p0) = G_FRAME_INDEX %fixed-stack.2 - ; CHECK: [[FI32VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI32]], 0, 14, %noreg, %noreg + ; CHECK: [[FI32VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI32]], 0, 14, $noreg, $noreg %1(s32) = G_LOAD %0(p0) :: (load 4) - ; CHECK: [[LD32VREG:%[0-9]+]]:gpr = LDRi12 [[FI32VREG]], 0, 14, %noreg + ; CHECK: [[LD32VREG:%[0-9]+]]:gpr = LDRi12 [[FI32VREG]], 0, 14, $noreg - %r0 = COPY %1 - ; CHECK: %r0 = COPY [[LD32VREG]] + $r0 = COPY %1 + ; CHECK: $r0 = COPY [[LD32VREG]] %2(p0) = G_FRAME_INDEX %fixed-stack.0 - ; CHECK: [[FI1VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI1]], 0, 14, %noreg, %noreg + ; CHECK: [[FI1VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI1]], 0, 14, $noreg, $noreg %3(s1) = G_LOAD %2(p0) :: (load 1) - ; CHECK: [[LD1VREG:%[0-9]+]]:gprnopc = LDRBi12 [[FI1VREG]], 0, 14, %noreg + ; CHECK: [[LD1VREG:%[0-9]+]]:gprnopc = LDRBi12 [[FI1VREG]], 0, 14, $noreg %4(s32) = G_ANYEXT %3(s1) ; CHECK: [[RES:%[0-9]+]]:gpr = COPY [[LD1VREG]] - %r0 = COPY %4 - ; CHECK: %r0 = COPY [[RES]] + $r0 = COPY %4 + ; CHECK: $r0 = COPY [[RES]] - BX_RET 14, %noreg - ; CHECK: BX_RET 14, %noreg + BX_RET 14, $noreg + ; CHECK: BX_RET 14, $noreg ... --- name: test_load_f32 @@ -1365,19 +1365,19 @@ - { id: 1, class: fprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(p0) = COPY %r0 - ; CHECK: %[[P:[0-9]+]]:gpr = COPY %r0 + %0(p0) = COPY $r0 + ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0 %1(s32) = G_LOAD %0(p0) :: (load 4) - ; CHECK: %[[V:[0-9]+]]:spr = VLDRS %[[P]], 0, 14, %noreg + ; CHECK: %[[V:[0-9]+]]:spr = VLDRS %[[P]], 0, 14, $noreg - %s0 = COPY %1 - ; CHECK: %s0 = COPY %[[V]] + $s0 = COPY %1 + ; CHECK: $s0 = COPY %[[V]] - BX_RET 14, %noreg, implicit %s0 - ; CHECK: BX_RET 14, %noreg, implicit %s0 + BX_RET 14, $noreg, implicit $s0 + ; CHECK: BX_RET 14, $noreg, implicit $s0 ... --- name: test_load_f64 @@ -1391,19 +1391,19 @@ - { id: 1, class: fprb } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(p0) = COPY %r0 - ; CHECK: %[[P:[0-9]+]]:gpr = COPY %r0 + %0(p0) = COPY $r0 + ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0 %1(s64) = G_LOAD %0(p0) :: (load 8) - ; CHECK: %[[V:[0-9]+]]:dpr = VLDRD %[[P]], 0, 14, %noreg + ; CHECK: %[[V:[0-9]+]]:dpr = VLDRD %[[P]], 0, 14, $noreg - %d0 = COPY %1 - ; CHECK: %d0 = COPY %[[V]] + $d0 = COPY %1 + ; CHECK: $d0 = COPY %[[V]] - BX_RET 14, %noreg, implicit %d0 - ; CHECK: BX_RET 14, %noreg, implicit %d0 + BX_RET 14, $noreg, implicit $d0 + ; CHECK: BX_RET 14, $noreg, implicit $d0 ... 
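Load selection above keys on access size and register bank, and a G_FRAME_INDEX becomes an add of the fixed-stack slot:

  ; gprb, 4 bytes : LDRi12      gprb, 1 byte  : LDRBi12
  ; fprb, 4 bytes : VLDRS       fprb, 8 bytes : VLDRD
  ; G_FRAME_INDEX %fixed-stack.N : ADDri %fixed-stack.N, 0, 14, $noreg, $noreg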
 ---
 name: test_stores
@@ -1427,31 +1427,31 @@
 # CHECK: id: [[F64:[0-9]+]], class: dpr
 body: |
   bb.0:
-    liveins: %r0, %r1, %s0, %d0
+    liveins: $r0, $r1, $s0, $d0
-    %0(p0) = COPY %r0
-    %3(s32) = COPY %r1
-    %4(s32) = COPY %s0
-    %5(s64) = COPY %d2
+    %0(p0) = COPY $r0
+    %3(s32) = COPY $r1
+    %4(s32) = COPY $s0
+    %5(s64) = COPY $d2
     %1(s8) = G_TRUNC %3(s32)
     %2(s16) = G_TRUNC %3(s32)
     G_STORE %1(s8), %0(p0) :: (store 1)
-    ; CHECK: STRBi12 %[[I8]], %[[P]], 0, 14, %noreg
+    ; CHECK: STRBi12 %[[I8]], %[[P]], 0, 14, $noreg
     G_STORE %2(s16), %0(p0) :: (store 2)
-    ; CHECK: STRH %[[I32]], %[[P]], %noreg, 0, 14, %noreg
+    ; CHECK: STRH %[[I32]], %[[P]], $noreg, 0, 14, $noreg
     G_STORE %3(s32), %0(p0) :: (store 4)
-    ; CHECK: STRi12 %[[I32]], %[[P]], 0, 14, %noreg
+    ; CHECK: STRi12 %[[I32]], %[[P]], 0, 14, $noreg
     G_STORE %4(s32), %0(p0) :: (store 4)
-    ; CHECK: VSTRS %[[F32]], %[[P]], 0, 14, %noreg
+    ; CHECK: VSTRS %[[F32]], %[[P]], 0, 14, $noreg
     G_STORE %5(s64), %0(p0) :: (store 8)
-    ; CHECK: VSTRD %[[F64]], %[[P]], 0, 14, %noreg
+    ; CHECK: VSTRD %[[F64]], %[[P]], 0, 14, $noreg
-    BX_RET 14, %noreg
+    BX_RET 14, $noreg
 ...
 ---
 name: test_gep
@@ -1466,19 +1466,19 @@
   - { id: 2, class: gprb }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    %0(p0) = COPY %r0
-    ; CHECK: %[[PTR:[0-9]+]]:gpr = COPY %r0
+    %0(p0) = COPY $r0
+    ; CHECK: %[[PTR:[0-9]+]]:gpr = COPY $r0
-    %1(s32) = COPY %r1
-    ; CHECK: %[[OFF:[0-9]+]]:gpr = COPY %r1
+    %1(s32) = COPY $r1
+    ; CHECK: %[[OFF:[0-9]+]]:gpr = COPY $r1
     %2(p0) = G_GEP %0, %1(s32)
-    ; CHECK: %[[GEP:[0-9]+]]:gpr = ADDrr %[[PTR]], %[[OFF]], 14, %noreg, %noreg
+    ; CHECK: %[[GEP:[0-9]+]]:gpr = ADDrr %[[PTR]], %[[OFF]], 14, $noreg, $noreg
-    %r0 = COPY %2(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_constant_imm
@@ -1492,10 +1492,10 @@
 body: |
   bb.0:
     %0(s32) = G_CONSTANT 42
-    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, %noreg, %noreg
+    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, $noreg, $noreg
-    %r0 = COPY %0(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %0(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_constant_cimm
@@ -1511,10 +1511,10 @@
     ; Adding a type on G_CONSTANT changes its operand from an Imm into a CImm.
    ; We still want to see the same thing in the output though.
     %0(s32) = G_CONSTANT i32 42
-    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, %noreg, %noreg
+    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, $noreg, $noreg
-    %r0 = COPY %0(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %0(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_pointer_constant_unconstrained
@@ -1528,11 +1528,11 @@
 body: |
   bb.0:
     %0(p0) = G_CONSTANT i32 0
-    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg
     ; This leaves %0 unconstrained before the G_CONSTANT is selected.
-    %r0 = COPY %0(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %0(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_pointer_constant_constrained
@@ -1546,7 +1546,7 @@
 body: |
   bb.0:
     %0(p0) = G_CONSTANT i32 0
-    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+    ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 0, 14, $noreg, $noreg
     ; This constrains %0 before the G_CONSTANT is selected.
     G_STORE %0(p0), %0(p0) :: (store 4)
@@ -1563,16 +1563,16 @@
   - { id: 1, class: gprb }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(p0) = G_INTTOPTR %0(s32)
-    ; CHECK: [[INT:%[0-9]+]]:gpr = COPY %r0
+    ; CHECK: [[INT:%[0-9]+]]:gpr = COPY $r0
-    %r0 = COPY %1(p0)
-    ; CHECK: %r0 = COPY [[INT]]
+    $r0 = COPY %1(p0)
+    ; CHECK: $r0 = COPY [[INT]]
-    BX_RET 14, %noreg, implicit %r0
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_ptrtoint_s32
@@ -1586,16 +1586,16 @@
   - { id: 1, class: gprb }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0
-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s32) = G_PTRTOINT %0(p0)
-    ; CHECK: [[PTR:%[0-9]+]]:gpr = COPY %r0
+    ; CHECK: [[PTR:%[0-9]+]]:gpr = COPY $r0
-    %r0 = COPY %1(s32)
-    ; CHECK: %r0 = COPY [[PTR]]
+    $r0 = COPY %1(s32)
+    ; CHECK: $r0 = COPY [[PTR]]
-    BX_RET 14, %noreg, implicit %r0
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_select_s32
@@ -1611,25 +1611,25 @@
   - { id: 3, class: gprb }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    %0(s32) = COPY %r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0
+    %0(s32) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
-    %1(s32) = COPY %r1
-    ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
+    %1(s32) = COPY $r1
+    ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1
     %2(s1) = G_TRUNC %1(s32)
     %3(s32) = G_SELECT %2(s1), %0, %1
-    ; CHECK: CMPri [[VREGY]], 0, 14, %noreg, implicit-def %cpsr
-    ; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
+    ; CHECK: CMPri [[VREGY]], 0, 14, $noreg, implicit-def $cpsr
+    ; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, $cpsr
-    %r0 = COPY %3(s32)
-    ; CHECK: %r0 = COPY [[RES]]
+    $r0 = COPY %3(s32)
+    ; CHECK: $r0 = COPY [[RES]]
-    BX_RET 14, %noreg, implicit %r0
-    ; CHECK: BX_RET 14, %noreg, implicit %r0
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_select_ptr
@@ -1646,28 +1646,28 @@
   - { id: 4, class: gprb }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2
-    %0(p0) = COPY %r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0
+    %0(p0) = COPY $r0
+    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
-    %1(p0) = COPY %r1
-    ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
+    %1(p0) = COPY $r1
+    ; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY $r1
-    %2(s32) = COPY %r2
-    ; CHECK: [[VREGC:%[0-9]+]]:gpr = COPY %r2
+    %2(s32) = COPY $r2
+    ; CHECK: [[VREGC:%[0-9]+]]:gpr = COPY $r2
     %3(s1) = G_TRUNC %2(s32)
     %4(p0) = G_SELECT %3(s1), %0, %1
-    ; CHECK: CMPri [[VREGC]], 0, 14, %noreg, implicit-def %cpsr
-    ; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
+    ; CHECK: CMPri [[VREGC]], 0, 14, $noreg, implicit-def $cpsr
+    ; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, $cpsr
-    %r0 = COPY %4(p0)
-    ; CHECK: %r0 = COPY [[RES]]
+    $r0 = COPY %4(p0)
+    ; CHECK: $r0 = COPY [[RES]]
-    BX_RET 14, %noreg, implicit %r0
-    ; CHECK: BX_RET 14, %noreg, implicit %r0
+    BX_RET 14, $noreg, implicit $r0
+    ; CHECK: BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_br
@@ -1683,15 +1683,15 @@
   bb.0:
   ; CHECK: bb.0
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %r0
+    liveins: $r0
-    %0(s32) = COPY %r0
-    ; CHECK: [[COND32:%[0-9]+]]:gpr = COPY %r0
+    %0(s32) = COPY $r0
+    ; CHECK: [[COND32:%[0-9]+]]:gpr = COPY $r0
     %1(s1) = G_TRUNC %0(s32)
     G_BRCOND %1(s1), %bb.1
-    ; CHECK: TSTri [[COND32]], 1, 14, %noreg, implicit-def %cpsr
-    ; CHECK: Bcc %bb.1, 1, %cpsr
+    ; CHECK: TSTri [[COND32]], 1, 14, $noreg, implicit-def $cpsr
+    ; CHECK: Bcc %bb.1, 1, $cpsr
     G_BR %bb.2
     ; CHECK: B %bb.2
@@ -1705,8 +1705,8 @@
   bb.2:
   ; CHECK: bb.2
-    BX_RET 14, %noreg
-    ; CHECK: BX_RET 14, %noreg
+    BX_RET 14, $noreg
+    ; CHECK: BX_RET 14, $noreg
 ...
 ---
 name: test_phi_s32
@@ -1726,15 +1726,15 @@
   bb.0:
   ; CHECK: [[BB1:bb.0]]:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s1) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
-    %3(s32) = COPY %r2
-    ; CHECK: [[V1:%[0-9]+]]:gpr = COPY %r1
-    ; CHECK: [[V2:%[0-9]+]]:gpr = COPY %r2
+    %2(s32) = COPY $r1
+    %3(s32) = COPY $r2
+    ; CHECK: [[V1:%[0-9]+]]:gpr = COPY $r1
+    ; CHECK: [[V2:%[0-9]+]]:gpr = COPY $r2
     G_BRCOND %1(s1), %bb.1
     G_BR %bb.2
@@ -1751,8 +1751,8 @@
     %4(s32) = G_PHI %2(s32), %bb.0, %3(s32), %bb.1
     ; CHECK: {{%[0-9]+}}:gpr = PHI [[V1]], %[[BB1]], [[V2]], %[[BB2]]
-    %r0 = COPY %4(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %4(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_phi_s64
@@ -1772,15 +1772,15 @@
   bb.0:
   ; CHECK: [[BB1:bb.0]]:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %r0, %d0, %d1
+    liveins: $r0, $d0, $d1
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s1) = G_TRUNC %0(s32)
-    %2(s64) = COPY %d0
-    %3(s64) = COPY %d1
-    ; CHECK: [[V1:%[0-9]+]]:dpr = COPY %d0
-    ; CHECK: [[V2:%[0-9]+]]:dpr = COPY %d1
+    %2(s64) = COPY $d0
+    %3(s64) = COPY $d1
+    ; CHECK: [[V1:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK: [[V2:%[0-9]+]]:dpr = COPY $d1
     G_BRCOND %1(s1), %bb.1
     G_BR %bb.2
@@ -1797,8 +1797,8 @@
     %4(s64) = G_PHI %2(s64), %bb.0, %3(s64), %bb.1
     ; CHECK: {{%[0-9]+}}:dpr = PHI [[V1]], %[[BB1]], [[V2]], %[[BB2]]
-    %d0 = COPY %4(s64)
-    BX_RET 14, %noreg, implicit %d0
+    $d0 = COPY %4(s64)
+    BX_RET 14, $noreg, implicit $d0
 ...
 ---
 name: test_soft_fp_double
@@ -1815,13 +1815,13 @@
   - { id: 4, class: gprb }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
+    liveins: $r0, $r1, $r2, $r3
-    %0(s32) = COPY %r2
-    ; CHECK: [[IN1:%[0-9]+]]:gpr = COPY %r2
+    %0(s32) = COPY $r2
+    ; CHECK: [[IN1:%[0-9]+]]:gpr = COPY $r2
-    %1(s32) = COPY %r3
-    ; CHECK: [[IN2:%[0-9]+]]:gpr = COPY %r3
+    %1(s32) = COPY $r3
+    ; CHECK: [[IN2:%[0-9]+]]:gpr = COPY $r3
     %2(s64) = G_MERGE_VALUES %0(s32), %1(s32)
     ; CHECK: %[[DREG:[0-9]+]]:dpr = VMOVDRR [[IN1]], [[IN2]]
@@ -1829,12 +1829,12 @@
     %3(s32), %4(s32) = G_UNMERGE_VALUES %2(s64)
     ; CHECK: [[OUT1:%[0-9]+]]:gpr, [[OUT2:%[0-9]+]]:gpr = VMOVRRD %[[DREG]]
-    %r0 = COPY %3
-    ; CHECK: %r0 = COPY [[OUT1]]
+    $r0 = COPY %3
+    ; CHECK: $r0 = COPY [[OUT1]]
-    %r1 = COPY %4
-    ; CHECK: %r1 = COPY [[OUT2]]
+    $r1 = COPY %4
+    ; CHECK: $r1 = COPY [[OUT2]]
-    BX_RET 14, %noreg, implicit %r0, implicit %r1
-    ; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
+    BX_RET 14, $noreg, implicit $r0, implicit $r1
+    ; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
 ...
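Editor's note: every hunk in this file follows the same sigil split: named physical registers (`%r0`, `%noreg`, `%cpsr`, `%s0`, `%d0`) move from `%` to `$`, while virtual registers (`%0`, and the FileCheck captures `%[0-9]+`), basic blocks (`%bb.N`), and stack objects (`%stack.N`, `%fixed-stack.N`) keep `%`. A hedged sketch of that classification (a hypothetical helper for illustration, not part of the LLVM tree):

```python
import re

# Mirror the sigil convention these tests are being migrated to:
# '$' marks physical registers; '%' stays for virtual registers and
# the other %-prefixed MIR entities (blocks, stack objects).
def classify(token: str) -> str:
    if re.fullmatch(r"\$[a-z][a-z0-9_]*", token):
        return "physical register"       # $r0, $noreg, $cpsr, $d0 ...
    if re.fullmatch(r"%[0-9]+", token):
        return "virtual register"        # %0, %1 ...
    if token.startswith(("%bb.", "%stack.", "%fixed-stack.")):
        return "block or stack object"
    return "other"

for t in ["$r0", "$noreg", "%2", "%bb.1", "%fixed-stack.0"]:
    print(t, "->", classify(t))
```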
Index: test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
===================================================================
--- test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -4,22 +4,22 @@
 define void @test_void_return() {
 ; CHECK-LABEL: name: test_void_return
-; CHECK: BX_RET 14, %noreg
+; CHECK: BX_RET 14, $noreg
 entry:
   ret void
 }
 define signext i1 @test_add_i1(i1 %x, i1 %y) {
 ; CHECK-LABEL: name: test_add_i1
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s1) = G_TRUNC [[VREGR0]]
-; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s1) = G_TRUNC [[VREGR1]]
 ; CHECK: [[SUM:%[0-9]+]]:_(s1) = G_ADD [[VREGX]], [[VREGY]]
 ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_SEXT [[SUM]]
-; CHECK: %r0 = COPY [[EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i1 %x, %y
   ret i1 %sum
@@ -27,15 +27,15 @@
 define i8 @test_add_i8(i8 %x, i8 %y) {
 ; CHECK-LABEL: name: test_add_i8
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR0]]
-; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR1]]
 ; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[VREGX]], [[VREGY]]
 ; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
-; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i8 %x, %y
   ret i8 %sum
@@ -43,15 +43,15 @@
 define i8 @test_sub_i8(i8 %x, i8 %y) {
 ; CHECK-LABEL: name: test_sub_i8
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR0]]
-; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR1]]
 ; CHECK: [[RES:%[0-9]+]]:_(s8) = G_SUB [[VREGX]], [[VREGY]]
 ; CHECK: [[RES_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
-; CHECK: %r0 = COPY [[RES_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[RES_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %res = sub i8 %x, %y
   ret i8 %res
@@ -59,27 +59,27 @@
 define signext i8 @test_return_sext_i8(i8 %x) {
 ; CHECK-LABEL: name: test_return_sext_i8
-; CHECK: liveins: %r0
-; CHECK: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0
+; CHECK: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK: [[VREG:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR0]]
 ; CHECK: [[VREGEXT:%[0-9]+]]:_(s32) = G_SEXT [[VREG]]
-; CHECK: %r0 = COPY [[VREGEXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[VREGEXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   ret i8 %x
 }
 define i16 @test_add_i16(i16 %x, i16 %y) {
 ; CHECK-LABEL: name: test_add_i16
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR0]]
-; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR1]]
 ; CHECK: [[SUM:%[0-9]+]]:_(s16) = G_ADD [[VREGX]], [[VREGY]]
 ; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
-; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i16 %x, %y
   ret i16 %sum
@@ -87,15 +87,15 @@
 define i16 @test_sub_i16(i16 %x, i16 %y) {
 ; CHECK-LABEL: name: test_sub_i16
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR0]]
-; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR1]]
 ; CHECK: [[RES:%[0-9]+]]:_(s16) = G_SUB [[VREGX]], [[VREGY]]
 ; CHECK: [[RES_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
-; CHECK: %r0 = COPY [[RES_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[RES_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %res = sub i16 %x, %y
   ret i16 %res
@@ -103,24 +103,24 @@
 define zeroext i16 @test_return_zext_i16(i16 %x) {
 ; CHECK-LABEL: name: test_return_zext_i16
-; CHECK: liveins: %r0
-; CHECK: [[VREGR0:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: liveins: $r0
+; CHECK: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK: [[VREG:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR0]]
 ; CHECK: [[VREGEXT:%[0-9]+]]:_(s32) = G_ZEXT [[VREG]]
-; CHECK: %r0 = COPY [[VREGEXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[VREGEXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   ret i16 %x
 }
 define i32 @test_add_i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: name: test_add_i32
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK: [[SUM:%[0-9]+]]:_(s32) = G_ADD [[VREGX]], [[VREGY]]
-; CHECK: %r0 = COPY [[SUM]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i32 %x, %y
   ret i32 %sum
@@ -128,12 +128,12 @@
 define i32 @test_sub_i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: name: test_sub_i32
-; CHECK: liveins: %r0, %r1
-; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: liveins: $r0, $r1
+; CHECK-DAG: [[VREGX:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SUB [[VREGX]], [[VREGY]]
-; CHECK: %r0 = COPY [[RES]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[RES]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %res = sub i32 %x, %y
   ret i32 %res
@@ -144,13 +144,13 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 4
 ; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 4
-; CHECK: liveins: %r0, %r1, %r2, %r3
-; CHECK: [[VREGP2:%[0-9]+]]:_(s32) = COPY %r2
+; CHECK: liveins: $r0, $r1, $r2, $r3
+; CHECK: [[VREGP2:%[0-9]+]]:_(s32) = COPY $r2
 ; CHECK: [[FIP5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
 ; CHECK: [[VREGP5:%[0-9]+]]:_(s32) = G_LOAD [[FIP5]]{{.*}}load 4
 ; CHECK: [[SUM:%[0-9]+]]:_(s32) = G_ADD [[VREGP2]], [[VREGP5]]
-; CHECK: %r0 = COPY [[SUM]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i32 %p2, %p5
   ret i32 %sum
@@ -162,16 +162,16 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 1
 ; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 2
-; CHECK: liveins: %r0, %r1, %r2, %r3
-; CHECK: [[VREGR1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: liveins: $r0, $r1, $r2, $r3
+; CHECK: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK: [[VREGP1:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR1]]
 ; CHECK: [[FIP5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
 ; CHECK: [[VREGP5EXT:%[0-9]+]]:_(s32) = G_LOAD [[FIP5]](p0){{.*}}load 4
 ; CHECK: [[VREGP5:%[0-9]+]]:_(s16) = G_TRUNC [[VREGP5EXT]]
 ; CHECK: [[SUM:%[0-9]+]]:_(s16) = G_ADD [[VREGP1]], [[VREGP5]]
 ; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
-; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i16 %p1, %p5
   ret i16 %sum
@@ -183,16 +183,16 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 1
 ; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 2
-; CHECK: liveins: %r0, %r1, %r2, %r3
-; CHECK: [[VREGR2:%[0-9]+]]:_(s32) = COPY %r2
+; CHECK: liveins: $r0, $r1, $r2, $r3
+; CHECK: [[VREGR2:%[0-9]+]]:_(s32) = COPY $r2
 ; CHECK: [[VREGP2:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR2]]
 ; CHECK: [[FIP4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P4]]
 ; CHECK: [[VREGP4EXT:%[0-9]+]]:_(s32) = G_LOAD [[FIP4]](p0){{.*}}load 4
 ; CHECK: [[VREGP4:%[0-9]+]]:_(s8) = G_TRUNC [[VREGP4EXT]]
 ; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[VREGP2]], [[VREGP4]]
 ; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
-; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i8 %p2, %p4
   ret i8 %sum
@@ -204,15 +204,15 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 1
 ; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 2
-; CHECK: liveins: %r0, %r1, %r2, %r3
-; CHECK: [[VREGR2:%[0-9]+]]:_(s32) = COPY %r2
+; CHECK: liveins: $r0, $r1, $r2, $r3
+; CHECK: [[VREGR2:%[0-9]+]]:_(s32) = COPY $r2
 ; CHECK: [[VREGP2:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR2]]
 ; CHECK: [[FIP4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P4]]
 ; CHECK: [[VREGP4:%[0-9]+]]:_(s8) = G_LOAD [[FIP4]](p0){{.*}}load 1
 ; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[VREGP2]], [[VREGP4]]
 ; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
-; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[SUM_EXT]](s32)
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %sum = add i8 %p2, %p4
   ret i8 %sum
@@ -224,21 +224,21 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 1
 ; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 2
-; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK: liveins: $r0, $r1, $r2, $r3
 ; CHECK: [[FIP5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
 ; CHECK: [[VREGP5SEXT:%[0-9]+]]:_(s32) = G_LOAD [[FIP5]](p0){{.*}}load 4
 ; CHECK: [[VREGP5:%[0-9]+]]:_(s16) = G_TRUNC [[VREGP5SEXT]]
 ; CHECK: [[VREGP5ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[VREGP5]]
-; CHECK: %r0 = COPY [[VREGP5ZEXT]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[VREGP5ZEXT]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   ret i16 %p5
 }
 define i16 @test_ptr_arg(i16* %p) {
 ; CHECK-LABEL: name: test_ptr_arg
-; CHECK: liveins: %r0
-; CHECK: [[VREGP:%[0-9]+]]:_(p0) = COPY %r0
+; CHECK: liveins: $r0
+; CHECK: [[VREGP:%[0-9]+]]:_(p0) = COPY $r0
 ; CHECK: [[VREGV:%[0-9]+]]:_(s16) = G_LOAD [[VREGP]](p0){{.*}}load 2
 entry:
   %v = load i16, i16* %p
@@ -248,11 +248,11 @@
 define i32* @test_ptr_ret(i32** %p) {
 ; Test pointer returns and pointer-to-pointer arguments
 ; CHECK-LABEL: name: test_ptr_ret
-; CHECK: liveins: %r0
-; CHECK: [[VREGP:%[0-9]+]]:_(p0) = COPY %r0
+; CHECK: liveins: $r0
+; CHECK: [[VREGP:%[0-9]+]]:_(p0) = COPY $r0
 ; CHECK: [[VREGV:%[0-9]+]]:_(p0) = G_LOAD [[VREGP]](p0){{.*}}load 4
-; CHECK: %r0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %v = load i32*, i32** %p
   ret i32* %v
@@ -262,12 +262,12 @@
 ; CHECK-LABEL: name: test_ptr_arg_on_stack
 ; CHECK: fixedStack:
 ; CHECK: id: [[P:[0-9]+]]{{.*}}offset: 0{{.*}}size: 4
-; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK: liveins: $r0, $r1, $r2, $r3
 ; CHECK: [[FIP:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P]]
 ; CHECK: [[VREGP:%[0-9]+]]:_(p0) = G_LOAD [[FIP]](p0){{.*}}load 4
 ; CHECK: [[VREGV:%[0-9]+]]:_(s32) = G_LOAD [[VREGP]](p0){{.*}}load 4
-; CHECK: %r0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %v = load i32, i32* %p
   ret i32 %v
@@ -279,13 +279,13 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P4:[0-9]+]]{{.*}}offset: 0{{.*}}size: 4
 ; CHECK-DAG: id: [[P5:[0-9]+]]{{.*}}offset: 4{{.*}}size: 4
-; CHECK: liveins: %r0, %r1, %r2, %r3
-; CHECK: [[VREGP1:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: liveins: $r0, $r1, $r2, $r3
+; CHECK: [[VREGP1:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK: [[FIP5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
 ; CHECK: [[VREGP5:%[0-9]+]]:_(s32) = G_LOAD [[FIP5]](p0){{.*}}load 4
 ; CHECK: [[VREGV:%[0-9]+]]:_(s32) = G_FADD [[VREGP1]], [[VREGP5]]
-; CHECK: %r0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %v = fadd float %p1, %p5
   ret float %v
@@ -308,13 +308,13 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[Q0:[0-9]+]]{{.*}}offset: 0{{.*}}size: 4
 ; CHECK-DAG: id: [[Q1:[0-9]+]]{{.*}}offset: 4{{.*}}size: 4
-; CHECK: liveins: %s0, %s1, %s2, %s3, %s4, %s5, %s6, %s7, %s8, %s9, %s10, %s11, %s12, %s13, %s14, %s15
-; CHECK: [[VREGP1:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: liveins: $s0, $s1, $s2, $s3, $s4, $s5, $s6, $s7, $s8, $s9, $s10, $s11, $s12, $s13, $s14, $s15
+; CHECK: [[VREGP1:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK: [[FIQ1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[Q1]]
 ; CHECK: [[VREGQ1:%[0-9]+]]:_(s32) = G_LOAD [[FIQ1]](p0){{.*}}load 4
 ; CHECK: [[VREGV:%[0-9]+]]:_(s32) = G_FADD [[VREGP1]], [[VREGQ1]]
-; CHECK: %s0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, %noreg, implicit %s0
+; CHECK: $s0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, $noreg, implicit $s0
 entry:
   %v = fadd float %p1, %q1
   ret float %v
@@ -329,13 +329,13 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[Q0:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
 ; CHECK-DAG: id: [[Q1:[0-9]+]]{{.*}}offset: 8{{.*}}size: 8
-; CHECK: liveins: %d0, %d1, %d2, %d3, %d4, %d5, %d6, %d7
-; CHECK: [[VREGP1:%[0-9]+]]:_(s64) = COPY %d1
+; CHECK: liveins: $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7
+; CHECK: [[VREGP1:%[0-9]+]]:_(s64) = COPY $d1
 ; CHECK: [[FIQ1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[Q1]]
 ; CHECK: [[VREGQ1:%[0-9]+]]:_(s64) = G_LOAD [[FIQ1]](p0){{.*}}load 8
 ; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP1]], [[VREGQ1]]
-; CHECK: %d0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, %noreg, implicit %d0
+; CHECK: $d0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, $noreg, implicit $d0
 entry:
   %v = fadd double %p1, %q1
   ret double %v
@@ -349,9 +349,9 @@
 ; CHECK-DAG: id: [[P3:[0-9]+]]{{.*}}offset: 8{{.*}}size: 8
 ; CHECK-DAG: id: [[P4:[0-9]+]]{{.*}}offset: 16{{.*}}size: 8
 ; CHECK-DAG: id: [[P5:[0-9]+]]{{.*}}offset: 24{{.*}}size: 8
-; CHECK: liveins: %r0, %r1, %r2, %r3
-; CHECK-DAG: [[VREGP1LO:%[0-9]+]]:_(s32) = COPY %r2
-; CHECK-DAG: [[VREGP1HI:%[0-9]+]]:_(s32) = COPY %r3
+; CHECK: liveins: $r0, $r1, $r2, $r3
+; CHECK-DAG: [[VREGP1LO:%[0-9]+]]:_(s32) = COPY $r2
+; CHECK-DAG: [[VREGP1HI:%[0-9]+]]:_(s32) = COPY $r3
 ; LITTLE: [[VREGP1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREGP1LO]](s32), [[VREGP1HI]](s32)
 ; BIG: [[VREGP1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREGP1HI]](s32), [[VREGP1LO]](s32)
 ; CHECK: [[FIP5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
@@ -359,9 +359,9 @@
 ; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP1]], [[VREGP5]]
 ; LITTLE: [[VREGVLO:%[0-9]+]]:_(s32), [[VREGVHI:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
 ; BIG: [[VREGVHI:%[0-9]+]]:_(s32), [[VREGVLO:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
-; CHECK-DAG: %r0 = COPY [[VREGVLO]]
-; CHECK-DAG: %r1 = COPY [[VREGVHI]]
-; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
+; CHECK-DAG: $r0 = COPY [[VREGVLO]]
+; CHECK-DAG: $r1 = COPY [[VREGVHI]]
+; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
 entry:
   %v = fadd double %p1, %p5
   ret double %v
@@ -377,13 +377,13 @@
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[Q0:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
 ; CHECK-DAG: id: [[Q1:[0-9]+]]{{.*}}offset: 8{{.*}}size: 8
-; CHECK: liveins: %d0, %d2, %d3, %d4, %d5, %d6, %d7, %s2
-; CHECK: [[VREGP1:%[0-9]+]]:_(s64) = COPY %d2
+; CHECK: liveins: $d0, $d2, $d3, $d4, $d5, $d6, $d7, $s2
+; CHECK: [[VREGP1:%[0-9]+]]:_(s64) = COPY $d2
 ; CHECK: [[FIQ1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[Q1]]
 ; CHECK: [[VREGQ1:%[0-9]+]]:_(s64) = G_LOAD [[FIQ1]](p0){{.*}}load 8
 ; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP1]], [[VREGQ1]]
-; CHECK: %d0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, %noreg, implicit %d0
+; CHECK: $d0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, $noreg, implicit $d0
 entry:
   %v = fadd double %p1, %q1
   ret double %v
@@ -394,9 +394,9 @@
 ; CHECK-LABEL: name: test_double_gap_aapcscc
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P1:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
-; CHECK: liveins: %r0, %r2, %r3
-; CHECK-DAG: [[VREGP0LO:%[0-9]+]]:_(s32) = COPY %r2
-; CHECK-DAG: [[VREGP0HI:%[0-9]+]]:_(s32) = COPY %r3
+; CHECK: liveins: $r0, $r2, $r3
+; CHECK-DAG: [[VREGP0LO:%[0-9]+]]:_(s32) = COPY $r2
+; CHECK-DAG: [[VREGP0HI:%[0-9]+]]:_(s32) = COPY $r3
 ; LITTLE: [[VREGP0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREGP0LO]](s32), [[VREGP0HI]](s32)
 ; BIG: [[VREGP0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREGP0HI]](s32), [[VREGP0LO]](s32)
 ; CHECK: [[FIP1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P1]]
@@ -404,9 +404,9 @@
 ; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP0]], [[VREGP1]]
 ; LITTLE: [[VREGVLO:%[0-9]+]]:_(s32), [[VREGVHI:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
 ; BIG: [[VREGVHI:%[0-9]+]]:_(s32), [[VREGVLO:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
-; CHECK-DAG: %r0 = COPY [[VREGVLO]]
-; CHECK-DAG: %r1 = COPY [[VREGVHI]]
-; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
+; CHECK-DAG: $r0 = COPY [[VREGVLO]]
+; CHECK-DAG: $r1 = COPY [[VREGVHI]]
+; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
 entry:
   %v = fadd double %p0, %p1
   ret double %v
@@ -417,9 +417,9 @@
 ; CHECK-LABEL: name: test_double_gap2_aapcscc
 ; CHECK: fixedStack:
 ; CHECK-DAG: id: [[P1:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
-; CHECK: liveins: %r0, %r1, %r2
-; CHECK-DAG: [[VREGP0LO:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK-DAG: [[VREGP0HI:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: liveins: $r0, $r1, $r2
+; CHECK-DAG: [[VREGP0LO:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK-DAG: [[VREGP0HI:%[0-9]+]]:_(s32) = COPY $r1
 ; LITTLE: [[VREGP0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREGP0LO]](s32), [[VREGP0HI]](s32)
 ; BIG: [[VREGP0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREGP0HI]](s32), [[VREGP0LO]](s32)
 ; CHECK: [[FIP1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[P1]]
@@ -427,9 +427,9 @@
 ; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP0]], [[VREGP1]]
 ; LITTLE: [[VREGVLO:%[0-9]+]]:_(s32), [[VREGVHI:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
 ; BIG: [[VREGVHI:%[0-9]+]]:_(s32), [[VREGVLO:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
-; CHECK-DAG: %r0 = COPY [[VREGVLO]]
-; CHECK-DAG: %r1 = COPY [[VREGVHI]]
-; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
+; CHECK-DAG: $r0 = COPY [[VREGVLO]]
+; CHECK-DAG: $r1 = COPY [[VREGVHI]]
+; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
 entry:
   %v = fadd double %p0, %p1
   ret double %v
@@ -437,7 +437,7 @@
 define i32 @test_shufflevector_s32_v2s32(i32 %arg) {
 ; CHECK-LABEL: name: test_shufflevector_s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
@@ -451,8 +451,8 @@
 define i32 @test_shufflevector_v2s32_v3s32(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK: [[ARG2:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK: [[ARG2:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -471,8 +471,8 @@
 define i32 @test_shufflevector_v2s32_v4s32(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK: [[ARG2:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK: [[ARG2:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -490,10 +490,10 @@
 define i32 @test_shufflevector_v4s32_v2s32(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
 ; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK: [[ARG2:%[0-9]+]]:_(s32) = COPY %r1
-; CHECK: [[ARG3:%[0-9]+]]:_(s32) = COPY %r2
-; CHECK: [[ARG4:%[0-9]+]]:_(s32) = COPY %r3
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK: [[ARG2:%[0-9]+]]:_(s32) = COPY $r1
+; CHECK: [[ARG3:%[0-9]+]]:_(s32) = COPY $r2
+; CHECK: [[ARG4:%[0-9]+]]:_(s32) = COPY $r3
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
Index: test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
===================================================================
--- test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
+++ test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
@@ -35,27 +35,27 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     ; HWDIV: [[R:%[0-9]+]]:_(s32) = G_SDIV [[X]], [[Y]]
     ; SOFT-NOT: G_SDIV
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X]]
-    ; SOFT-DAG: %r1 = COPY [[Y]]
-    ; SOFT-AEABI: BL &__aeabi_idiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-DEFAULT: BL &__divsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X]]
+    ; SOFT-DAG: $r1 = COPY [[Y]]
+    ; SOFT-AEABI: BL &__aeabi_idiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-DEFAULT: BL &__divsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_SDIV
     %2(s32) = G_SDIV %0, %1
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_udiv_i32
@@ -71,27 +71,27 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     ; HWDIV: [[R:%[0-9]+]]:_(s32) = G_UDIV [[X]], [[Y]]
     ; SOFT-NOT: G_UDIV
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X]]
-    ; SOFT-DAG: %r1 = COPY [[Y]]
-    ; SOFT-AEABI: BL &__aeabi_uidiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-DEFAULT: BL &__udivsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X]]
+    ; SOFT-DAG: $r1 = COPY [[Y]]
+    ; SOFT-AEABI: BL &__aeabi_uidiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-DEFAULT: BL &__udivsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_UDIV
     %2(s32) = G_UDIV %0, %1
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_sdiv_i16
@@ -110,10 +110,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -124,28 +124,28 @@
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[SHIFTEDY:%[0-9]+]]:_(s32) = G_SHL [[Y]], [[BITS]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDY]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s16) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s16) = G_TRUNC %2(s32)
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_SDIV [[X32]], [[Y32]]
     ; SOFT-NOT: G_SDIV
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_idiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-DEFAULT: BL &__divsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_idiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-DEFAULT: BL &__divsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_SDIV
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR
     ; SOFT-NOT: G_SDIV
     %4(s16) = G_SDIV %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_udiv_i16
@@ -164,10 +164,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
@@ -176,28 +176,28 @@
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_AND [[Y]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s16) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s16) = G_TRUNC %2(s32)
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_UDIV [[X32]], [[Y32]]
     ; SOFT-NOT: G_UDIV
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_uidiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-DEFAULT: BL &__udivsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_uidiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-DEFAULT: BL &__udivsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
    ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_UDIV
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_AND
     ; SOFT-NOT: G_UDIV
     %4(s16) = G_UDIV %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_ZEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
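Editor's note: the i16 tests above show how the legalizer widens narrow division operands to 32 bits: sign extension as a shift-left/arithmetic-shift-right pair (`G_SHL`/`G_ASHR` by 16, or 24 for i8) and zero extension as a `G_AND` mask (65535, or 255 for i8). A small sketch of the same arithmetic, assuming 32-bit two's complement values:

```python
# Widen a narrow value to 32 bits the way the checked sequences do:
# sext = (x << k) >> k with an arithmetic shift; zext = x & mask.
def sext_via_shifts(x: int, bits: int) -> int:
    k = 32 - bits                       # 16 for s16, 24 for s8
    shifted = (x << k) & 0xFFFFFFFF     # G_SHL on a 32-bit register
    if shifted & 0x80000000:            # reinterpret as signed 32-bit
        shifted -= 1 << 32
    return shifted >> k                 # G_ASHR (Python >> is arithmetic)

def zext_via_mask(x: int, bits: int) -> int:
    return x & ((1 << bits) - 1)        # G_AND with 65535 / 255

assert sext_via_shifts(0xFFFF, 16) == -1    # s16 -1 sign-extends
assert zext_via_mask(0xFFFF, 16) == 65535   # s16 -1 zero-extends
assert sext_via_shifts(0x80, 8) == -128     # s8 minimum value
```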
 ---
 name: test_sdiv_i8
@@ -216,10 +216,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
@@ -230,28 +230,28 @@
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[SHIFTEDY:%[0-9]+]]:_(s32) = G_SHL [[Y]], [[BITS]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDY]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s8) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s8) = G_TRUNC %2(s32)
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_SDIV [[X32]], [[Y32]]
     ; SOFT-NOT: G_SDIV
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_idiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-DEFAULT: BL &__divsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_idiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-DEFAULT: BL &__divsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_SDIV
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR
     ; SOFT-NOT: G_SDIV
     %4(s8) = G_SDIV %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_udiv_i8
@@ -270,10 +270,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
@@ -282,28 +282,28 @@
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_AND [[Y]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s8) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s8) = G_TRUNC %2(s32)
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_UDIV [[X32]], [[Y32]]
     ; SOFT-NOT: G_UDIV
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_uidiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-DEFAULT: BL &__udivsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_uidiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-DEFAULT: BL &__udivsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_UDIV
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_AND
     ; SOFT-NOT: G_UDIV
     %4(s8) = G_UDIV %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_ZEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_srem_i32
@@ -319,29 +319,29 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     ; HWDIV: [[Q:%[0-9]+]]:_(s32) = G_SDIV [[X]], [[Y]]
     ; HWDIV: [[P:%[0-9]+]]:_(s32) = G_MUL [[Q]], [[Y]]
     ; HWDIV: [[R:%[0-9]+]]:_(s32) = G_SUB [[X]], [[P]]
     ; SOFT-NOT: G_SREM
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X]]
-    ; SOFT-DAG: %r1 = COPY [[Y]]
-    ; SOFT-AEABI: BL &__aeabi_idivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0, implicit-def %r1
-    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY %r1
-    ; SOFT-DEFAULT: BL &__modsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X]]
+    ; SOFT-DAG: $r1 = COPY [[Y]]
+    ; SOFT-AEABI: BL &__aeabi_idivmod, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
+    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY $r1
+    ; SOFT-DEFAULT: BL &__modsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_SREM
     %2(s32) = G_SREM %0, %1
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
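Editor's note: when hardware division is available, the remainder is not lowered to a libcall; the HWDIV check lines above expand `G_SREM` as divide, multiply, subtract. A sketch of that identity with C-style truncating division (plain Python, illustrative only):

```python
# r = x - (x / y) * y, where '/' truncates toward zero as G_SDIV does.
def trunc_div(x: int, y: int) -> int:
    q = abs(x) // abs(y)
    return q if (x < 0) == (y < 0) else -q

def srem(x: int, y: int) -> int:
    return x - trunc_div(x, y) * y      # G_SDIV, G_MUL, G_SUB

assert srem(7, 3) == 1
assert srem(-7, 3) == -1   # sign follows the dividend, as in C
assert -7 % 3 == 2         # note: Python's floored modulo differs
```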
 ---
 name: test_urem_i32
@@ -357,29 +357,29 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     ; HWDIV: [[Q:%[0-9]+]]:_(s32) = G_UDIV [[X]], [[Y]]
     ; HWDIV: [[P:%[0-9]+]]:_(s32) = G_MUL [[Q]], [[Y]]
     ; HWDIV: [[R:%[0-9]+]]:_(s32) = G_SUB [[X]], [[P]]
     ; SOFT-NOT: G_UREM
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X]]
-    ; SOFT-DAG: %r1 = COPY [[Y]]
-    ; SOFT-AEABI: BL &__aeabi_uidivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0, implicit-def %r1
-    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY %r1
-    ; SOFT-DEFAULT: BL &__umodsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X]]
+    ; SOFT-DAG: $r1 = COPY [[Y]]
+    ; SOFT-AEABI: BL &__aeabi_uidivmod, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
+    ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY $r1
+    ; SOFT-DEFAULT: BL &__umodsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_UREM
     %2(s32) = G_UREM %0, %1
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_srem_i16
@@ -398,10 +398,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -412,30 +412,30 @@
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[SHIFTEDY:%[0-9]+]]:_(s32) = G_SHL [[Y]], [[BITS]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDY]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s16) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s16) = G_TRUNC %2(s32)
     ; HWDIV: [[Q32:%[0-9]+]]:_(s32) = G_SDIV [[X32]], [[Y32]]
     ; HWDIV: [[P32:%[0-9]+]]:_(s32) = G_MUL [[Q32]], [[Y32]]
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_SUB [[X32]], [[P32]]
     ; SOFT-NOT: G_SREM
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_idivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r1
-    ; SOFT-DEFAULT: BL &__modsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_idivmod, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r1
+    ; SOFT-DEFAULT: BL &__modsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_SREM
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR
     ; SOFT-NOT: G_SREM
     %4(s16) = G_SREM %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_urem_i16
@@ -454,10 +454,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
@@ -466,30 +466,30 @@
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_AND [[Y]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s16) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s16) = G_TRUNC %2(s32)
     ; HWDIV: [[Q32:%[0-9]+]]:_(s32) = G_UDIV [[X32]], [[Y32]]
     ; HWDIV: [[P32:%[0-9]+]]:_(s32) = G_MUL [[Q32]], [[Y32]]
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_SUB [[X32]], [[P32]]
     ; SOFT-NOT: G_UREM
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_uidivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r1
-    ; SOFT-DEFAULT: BL &__umodsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_uidivmod, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r1
+    ; SOFT-DEFAULT: BL &__umodsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_UREM
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_AND
     ; SOFT-NOT: G_UREM
     %4(s16) = G_UREM %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_ZEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_srem_i8
@@ -508,10 +508,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
@@ -522,30 +522,30 @@
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[SHIFTEDY:%[0-9]+]]:_(s32) = G_SHL [[Y]], [[BITS]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDY]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s8) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s8) = G_TRUNC %2(s32)
     ; HWDIV: [[Q32:%[0-9]+]]:_(s32) = G_SDIV [[X32]], [[Y32]]
     ; HWDIV: [[P32:%[0-9]+]]:_(s32) = G_MUL [[Q32]], [[Y32]]
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_SUB [[X32]], [[P32]]
     ; SOFT-NOT: G_SREM
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_idivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r1
-    ; SOFT-DEFAULT: BL &__modsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_idivmod, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r1
+    ; SOFT-DEFAULT: BL &__modsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_SREM
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR
     ; SOFT-NOT: G_SREM
     %4(s8) = G_SREM %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_urem_i8
@@ -564,10 +564,10 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
     ; The G_TRUNC will combine with the extensions introduced by the legalizer,
     ; leading to the following complicated sequences.
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
@@ -576,28 +576,28 @@
     ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[Y:%[0-9]+]]:_(s32) = COPY [[R1]]
     ; CHECK: [[Y32:%[0-9]+]]:_(s32) = G_AND [[Y]], [[BITS]]
-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s8) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s8) = G_TRUNC %2(s32)
     ; HWDIV: [[Q32:%[0-9]+]]:_(s32) = G_UDIV [[X32]], [[Y32]]
     ; HWDIV: [[P32:%[0-9]+]]:_(s32) = G_MUL [[Q32]], [[Y32]]
     ; HWDIV: [[R32:%[0-9]+]]:_(s32) = G_SUB [[X32]], [[P32]]
     ; SOFT-NOT: G_UREM
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X32]]
-    ; SOFT-DAG: %r1 = COPY [[Y32]]
-    ; SOFT-AEABI: BL &__aeabi_uidivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY %r1
-    ; SOFT-DEFAULT: BL &__umodsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X32]]
+    ; SOFT-DAG: $r1 = COPY [[Y32]]
+    ; SOFT-AEABI: BL &__aeabi_uidivmod, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-AEABI: [[R32:%[0-9]+]]:_(s32) = COPY $r1
+    ; SOFT-DEFAULT: BL &__umodsi3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; SOFT-DEFAULT: [[R32:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_UREM
     ; CHECK: [[R:%[0-9]+]]:_(s32) = G_AND
     ; SOFT-NOT: G_UREM
     %4(s8) = G_UREM %1, %3
-    ; CHECK: %r0 = COPY [[R]]
+    ; CHECK: $r0 = COPY [[R]]
     %5(s32) = G_ZEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
Index: test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
===================================================================
--- test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
+++ test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
@@ -91,28 +91,28 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     ; CHECK-NOT: G_FREM
     ; CHECK: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X]]
-    ; SOFT-DAG: %r1 = COPY [[Y]]
-    ; HARD-DAG: %s0 = COPY [[X]]
-    ; HARD-DAG: %s1 = COPY [[Y]]
-    ; SOFT: BL &fmodf, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; HARD: BL &fmodf, {{.*}}, implicit %s0, implicit %s1, implicit-def %s0
-    ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0
-    ; HARD: [[R:%[0-9]+]]:_(s32) = COPY %s0
+    ; SOFT-DAG: $r0 = COPY [[X]]
+    ; SOFT-DAG: $r1 = COPY [[Y]]
+    ; HARD-DAG: $s0 = COPY [[X]]
+    ; HARD-DAG: $s1 = COPY [[Y]]
+    ; SOFT: BL &fmodf, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; HARD: BL &fmodf, {{.*}}, implicit $s0, implicit $s1, implicit-def $s0
+    ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0
+    ; HARD: [[R:%[0-9]+]]:_(s32) = COPY $s0
     ; CHECK: ADJCALLSTACKUP
     ; CHECK-NOT: G_FREM
     %2(s32) = G_FREM %0, %1
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_frem_double
@@ -134,7 +134,7 @@
   - { id: 8, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
+    liveins: $r0, $r1, $r2, $r3
     ; The inputs may be in the wrong order (depending on the target's
     ; endianness), but that's orthogonal to what we're trying to test here.
@@ -142,35 +142,35 @@
     ; through R0-R1, ends up in R0-R1 or R1-R0, and the second value, received
     ; through R2-R3, ends up in R2-R3 or R3-R2, when passed to fmod.
     ; For hard float, the values need to end up in D0 and D1.
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]]
     ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]]
     %4(s64) = G_MERGE_VALUES %0(s32), %1(s32)
     %5(s64) = G_MERGE_VALUES %2(s32), %3(s32)
     ; CHECK-NOT: G_FREM
     ; CHECK: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
-    ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
-    ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]]
-    ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]]
-    ; HARD-DAG: %d0 = COPY [[X]]
-    ; HARD-DAG: %d1 = COPY [[Y]]
-    ; SOFT: BL &fmod, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
-    ; HARD: BL &fmod, {{.*}}, implicit %d0, implicit %d1, implicit-def %d0
+    ; SOFT-DAG: $r{{[0-1]}} = COPY [[X0]]
+    ; SOFT-DAG: $r{{[0-1]}} = COPY [[X1]]
+    ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y0]]
+    ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y1]]
+    ; HARD-DAG: $d0 = COPY [[X]]
+    ; HARD-DAG: $d1 = COPY [[Y]]
+    ; SOFT: BL &fmod, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+    ; HARD: BL &fmod, {{.*}}, implicit $d0, implicit $d1, implicit-def $d0
     ; CHECK: ADJCALLSTACKUP
     ; CHECK-NOT: G_FREM
     %6(s64) = G_FREM %4, %5
     %7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
-    %r0 = COPY %7(s32)
-    %r1 = COPY %8(s32)
-    BX_RET 14, %noreg, implicit %r0, implicit %r1
+    $r0 = COPY %7(s32)
+    $r1 = COPY %8(s32)
+    BX_RET 14, $noreg, implicit $r0, implicit $r1
 ...
 ---
 name: test_fpow_float
@@ -186,28 +186,28 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1
-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     ; CHECK-NOT: G_FPOW
     ; CHECK: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X]]
-    ; SOFT-DAG: %r1 = COPY [[Y]]
-    ; HARD-DAG: %s0 = COPY [[X]]
-    ; HARD-DAG: %s1 = COPY [[Y]]
-    ; SOFT: BL &powf, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
-    ; HARD: BL &powf, {{.*}}, implicit %s0, implicit %s1, implicit-def %s0
-    ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0
-    ; HARD: [[R:%[0-9]+]]:_(s32) = COPY %s0
+    ; SOFT-DAG: $r0 = COPY [[X]]
+    ; SOFT-DAG: $r1 = COPY [[Y]]
+    ; HARD-DAG: $s0 = COPY [[X]]
+    ; HARD-DAG: $s1 = COPY [[Y]]
+    ; SOFT: BL &powf, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
+    ; HARD: BL &powf, {{.*}}, implicit $s0, implicit $s1, implicit-def $s0
+    ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0
+    ; HARD: [[R:%[0-9]+]]:_(s32) = COPY $s0
     ; CHECK: ADJCALLSTACKUP
     ; CHECK-NOT: G_FPOW
     %2(s32) = G_FPOW %0, %1
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
--- name: test_fpow_double @@ -229,7 +229,7 @@ - { id: 8, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 + liveins: $r0, $r1, $r2, $r3 ; The inputs may be in the wrong order (depending on the target's ; endianness), but that's orthogonal to what we're trying to test here. @@ -237,35 +237,35 @@ ; through R0-R1, ends up in R0-R1 or R1-R0, and the second value, received ; through R2-R3, ends up in R2-R3 or R3-R2, when passed to pow. ; For hard float, the values need to end up in D0 and D1. - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]] %4(s64) = G_MERGE_VALUES %0(s32), %1(s32) %5(s64) = G_MERGE_VALUES %2(s32), %3(s32) ; CHECK-NOT: G_FPOW ; CHECK: ADJCALLSTACKDOWN - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]] - ; HARD-DAG: %d0 = COPY [[X]] - ; HARD-DAG: %d1 = COPY [[Y]] - ; SOFT: BL &pow, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 - ; HARD: BL &pow, {{.*}}, implicit %d0, implicit %d1, implicit-def %d0 + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X0]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X1]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y0]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y1]] + ; HARD-DAG: $d0 = COPY [[X]] + ; HARD-DAG: $d1 = COPY [[Y]] + ; SOFT: BL &pow, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 + ; HARD: BL &pow, {{.*}}, implicit $d0, implicit $d1, implicit-def $d0 ; CHECK: ADJCALLSTACKUP ; CHECK-NOT: G_FPOW %6(s64) = G_FPOW %4, %5 %7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64) - %r0 = COPY %7(s32) - %r1 = COPY %8(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %7(s32) + $r1 = COPY %8(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... 
--- name: test_fadd_float @@ -281,26 +281,26 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 ; HARD: [[R:%[0-9]+]]:_(s32) = G_FADD [[X]], [[Y]] ; SOFT-NOT: G_FADD ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fadd, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__addsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fadd, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__addsf3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FADD %2(s32) = G_FADD %0, %1 - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fadd_double @@ -322,16 +322,16 @@ - { id: 8, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]] %4(s64) = G_MERGE_VALUES %0(s32), %1(s32) @@ -339,20 +339,20 @@ ; HARD: [[R:%[0-9]+]]:_(s64) = G_FADD [[X]], [[Y]] ; SOFT-NOT: G_FADD ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]] - ; SOFT-AEABI: BL &__aeabi_dadd, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 - ; SOFT-DEFAULT: BL &__adddf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X0]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X1]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y0]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y1]] + ; SOFT-AEABI: BL &__aeabi_dadd, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 + ; SOFT-DEFAULT: BL &__adddf3, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FADD %6(s64) = G_FADD %4, %5 ; HARD-DAG: G_UNMERGE_VALUES [[R]](s64) %7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64) - %r0 = COPY %7(s32) - %r1 = COPY %8(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %7(s32) + $r1 = COPY %8(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... 
--- name: test_fsub_float @@ -368,26 +368,26 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 ; HARD: [[R:%[0-9]+]]:_(s32) = G_FSUB [[X]], [[Y]] ; SOFT-NOT: G_FSUB ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fsub, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__subsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fsub, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__subsf3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FSUB %2(s32) = G_FSUB %0, %1 - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fsub_double @@ -409,16 +409,16 @@ - { id: 8, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]] %4(s64) = G_MERGE_VALUES %0(s32), %1(s32) @@ -426,20 +426,20 @@ ; HARD: [[R:%[0-9]+]]:_(s64) = G_FSUB [[X]], [[Y]] ; SOFT-NOT: G_FSUB ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]] - ; SOFT-AEABI: BL &__aeabi_dsub, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 - ; SOFT-DEFAULT: BL &__subdf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X0]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X1]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y0]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y1]] + ; SOFT-AEABI: BL &__aeabi_dsub, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 + ; SOFT-DEFAULT: BL &__subdf3, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FSUB %6(s64) = G_FSUB %4, %5 ; HARD-DAG: G_UNMERGE_VALUES [[R]](s64) %7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64) - %r0 = COPY %7(s32) - %r1 = COPY %8(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %7(s32) + $r1 = COPY %8(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... 
--- name: test_fmul_float @@ -455,26 +455,26 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 ; HARD: [[R:%[0-9]+]]:_(s32) = G_FMUL [[X]], [[Y]] ; SOFT-NOT: G_FMUL ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fmul, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__mulsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fmul, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__mulsf3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FMUL %2(s32) = G_FMUL %0, %1 - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fmul_double @@ -496,16 +496,16 @@ - { id: 8, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]] %4(s64) = G_MERGE_VALUES %0(s32), %1(s32) @@ -513,20 +513,20 @@ ; HARD: [[R:%[0-9]+]]:_(s64) = G_FMUL [[X]], [[Y]] ; SOFT-NOT: G_FMUL ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]] - ; SOFT-AEABI: BL &__aeabi_dmul, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 - ; SOFT-DEFAULT: BL &__muldf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X0]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X1]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y0]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y1]] + ; SOFT-AEABI: BL &__aeabi_dmul, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 + ; SOFT-DEFAULT: BL &__muldf3, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FMUL %6(s64) = G_FMUL %4, %5 ; HARD-DAG: G_UNMERGE_VALUES [[R]](s64) %7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64) - %r0 = COPY %7(s32) - %r1 = COPY %8(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %7(s32) + $r1 = COPY %8(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... 
--- name: test_fdiv_float @@ -542,26 +542,26 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 ; HARD: [[R:%[0-9]+]]:_(s32) = G_FDIV [[X]], [[Y]] ; SOFT-NOT: G_FDIV ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fdiv, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__divsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fdiv, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__divsf3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FDIV %2(s32) = G_FDIV %0, %1 - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fdiv_double @@ -583,16 +583,16 @@ - { id: 8, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]] %4(s64) = G_MERGE_VALUES %0(s32), %1(s32) @@ -600,20 +600,20 @@ ; HARD: [[R:%[0-9]+]]:_(s64) = G_FDIV [[X]], [[Y]] ; SOFT-NOT: G_FDIV ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]] - ; SOFT-AEABI: BL &__aeabi_ddiv, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 - ; SOFT-DEFAULT: BL &__divdf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X0]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[X1]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y0]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[Y1]] + ; SOFT-AEABI: BL &__aeabi_ddiv, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 + ; SOFT-DEFAULT: BL &__divdf3, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FDIV %6(s64) = G_FDIV %4, %5 ; HARD-DAG: G_UNMERGE_VALUES [[R]](s64) %7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64) - %r0 = COPY %7(s32) - %r1 = COPY %8(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %7(s32) + $r1 = COPY %8(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... 
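test_fadd_*, test_fsub_*, test_fmul_* and test_fdiv_* all exercise the same lowering shape: ADJCALLSTACKDOWN, marshal the operands into r0-r1 (r0-r3 for doubles), BL the AEABI or libgcc helper, copy the result back out of r0 (r0-r1 for doubles), ADJCALLSTACKUP. As a rough illustration of what one such helper computes, here is a hypothetical __aeabi_fadd-style stand-in that takes and returns raw 32-bit register values (not the real runtime routine):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical stand-in for an __aeabi_fadd-style helper: the float
// operands arrive as the raw 32-bit values that would sit in r0 and
// r1, and the raw result goes back in r0.
static uint32_t soft_fadd(uint32_t r0, uint32_t r1) {
  float x, y;
  std::memcpy(&x, &r0, sizeof(x));
  std::memcpy(&y, &r1, sizeof(y));
  float sum = x + y;
  uint32_t out;
  std::memcpy(&out, &sum, sizeof(out));
  return out;
}

int main() {
  float fa = 1.5f, fb = 2.25f;
  uint32_t a, b;
  std::memcpy(&a, &fa, sizeof(a));
  std::memcpy(&b, &fb, sizeof(b));
  uint32_t raw = soft_fadd(a, b);
  float sum;
  std::memcpy(&sum, &raw, sizeof(sum));
  std::printf("%f\n", sum); // 3.750000
}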
--- name: test_fconstant_float @@ -634,9 +634,9 @@ ; SOFT: [[R:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1080033280 ; SOFT-NOT: G_FCONSTANT %0(s32) = G_FCONSTANT float -1.25 - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %0(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %0(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fconstant_double @@ -661,12 +661,12 @@ ; SOFT-NOT: G_FCONSTANT %0(s64) = G_FCONSTANT double -2.4 ; HARD-DAG: G_UNMERGE_VALUES [[R]](s64) - ; SOFT-DAG: %r0 = COPY [[HI]] - ; SOFT-DAG: %r1 = COPY [[LO]] + ; SOFT-DAG: $r0 = COPY [[HI]] + ; SOFT-DAG: $r1 = COPY [[LO]] %1(s32),%2(s32) = G_UNMERGE_VALUES %0(s64) - %r0 = COPY %2(s32) - %r1 = COPY %1(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %2(s32) + $r1 = COPY %1(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... --- name: test_fneg_float @@ -681,25 +681,25 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %r0 + liveins: $r0 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - %0(s32) = COPY %r0 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + %0(s32) = COPY $r0 ; HARD: [[R:%[0-9]+]]:_(s32) = G_FNEG [[X]] ; SOFT-NOT: G_FNEG ; SOFT-DAG: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648 ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[ZERO]] - ; SOFT-DAG: %r1 = COPY [[X]] - ; SOFT-AEABI: BL &__aeabi_fsub, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__subsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[ZERO]] + ; SOFT-DAG: $r1 = COPY [[X]] + ; SOFT-AEABI: BL &__aeabi_fsub, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__subsf3, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FNEG %1(s32) = G_FNEG %0 - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %1(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %1(s32) + BX_RET 14, $noreg, implicit $r0 ... 
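Two constants in the tests above can be checked by hand. -1.25f encodes as sign 1, biased exponent 0x7F, fraction .01, i.e. 0xBFA00000, which read as a signed i32 is -1080033280, the G_CONSTANT the SOFT path materializes for test_fconstant_float. Similarly, the [[ZERO]] operand in the test_fneg_float expansion is -2147483648 = 0x80000000, the bit pattern of -0.0f, so the emitted libcall computes -0.0f - x. A quick host-side check:

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  float c = -1.25f;
  int32_t bits;
  std::memcpy(&bits, &c, sizeof(bits));
  std::printf("%d\n", bits); // -1080033280, i.e. 0xBFA00000

  float negzero = -0.0f;
  std::memcpy(&bits, &negzero, sizeof(bits));
  std::printf("%d\n", bits); // -2147483648, i.e. 0x80000000
}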
--- name: test_fneg_double @@ -718,12 +718,12 @@ - { id: 5, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] %2(s64) = G_MERGE_VALUES %0(s32), %1(s32) ; HARD: [[R:%[0-9]+]]:_(s64) = G_FNEG [[X]] @@ -731,20 +731,20 @@ ; SOFT-DAG: [[NEGATIVE_ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648 ; SOFT-DAG: [[POSITIVE_ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r{{[0-1]}} = COPY [[NEGATIVE_ZERO]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[POSITIVE_ZERO]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[X0]] - ; SOFT-DAG: %r{{[2-3]}} = COPY [[X1]] - ; SOFT-AEABI: BL &__aeabi_dsub, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 - ; SOFT-DEFAULT: BL &__subdf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 + ; SOFT-DAG: $r{{[0-1]}} = COPY [[NEGATIVE_ZERO]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[POSITIVE_ZERO]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[X0]] + ; SOFT-DAG: $r{{[2-3]}} = COPY [[X1]] + ; SOFT-AEABI: BL &__aeabi_dsub, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 + ; SOFT-DEFAULT: BL &__subdf3, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FNEG %3(s64) = G_FNEG %2 ; HARD-DAG: G_UNMERGE_VALUES [[R]](s64) %4(s32),%5(s32) = G_UNMERGE_VALUES %3(s64) - %r0 = COPY %4(s32) - %r1 = COPY %5(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %4(s32) + $r1 = COPY %5(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... --- name: test_fpext_float_to_double @@ -761,28 +761,28 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0 + liveins: $r0 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - %0(s32) = COPY %r0 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + %0(s32) = COPY $r0 ; HARD: [[R:%[0-9]+]]:_(s64) = G_FPEXT [[X]] ; SOFT-NOT: G_FPEXT ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-AEABI: BL &__aeabi_f2d, {{.*}}, implicit %r0, implicit-def %r0, implicit-def %r1 - ; SOFT-DEFAULT: BL &__extendsfdf2, {{.*}}, implicit %r0, implicit-def %r0, implicit-def %r1 - ; SOFT: [[R0:%[0-9]+]]:_(s32) = COPY %r0 - ; SOFT: [[R1:%[0-9]+]]:_(s32) = COPY %r1 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-AEABI: BL &__aeabi_f2d, {{.*}}, implicit $r0, implicit-def $r0, implicit-def $r1 + ; SOFT-DEFAULT: BL &__extendsfdf2, {{.*}}, implicit $r0, implicit-def $r0, implicit-def $r1 + ; SOFT: [[R0:%[0-9]+]]:_(s32) = COPY $r0 + ; SOFT: [[R1:%[0-9]+]]:_(s32) = COPY $r1 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FPEXT %1(s64) = G_FPEXT %0(s32) ; HARD: G_UNMERGE_VALUES [[R]](s64) - ; SOFT-DAG: %r{{[0-1]}} = COPY [[R0]] - ; SOFT-DAG: %r{{[0-1]}} = COPY [[R1]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[R0]] + ; SOFT-DAG: $r{{[0-1]}} = COPY [[R1]] %2(s32), %3(s32) = G_UNMERGE_VALUES %1(s64) - %r0 = COPY %2(s32) - %r1 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %2(s32) + $r1 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... 
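test_fneg_double uses the constant pair [[NEGATIVE_ZERO]]/[[POSITIVE_ZERO]] (0x80000000 in the high word, 0 in the low word), i.e. the double -0.0. Subtracting from -0.0 rather than +0.0 matters for signed zeros: 0.0 - 0.0 rounds to +0.0 while fneg(+0.0) must yield -0.0, and -0.0 - x gets that case right (NaN sign bits are the one place a subtraction-based fneg can still diverge, which these tests do not exercise). A small demonstration:

#include <cmath>
#include <cstdio>

int main() {
  double x = 0.0;
  std::printf("%d\n", std::signbit(0.0 - x));  // 0: wrong sign for fneg
  std::printf("%d\n", std::signbit(-0.0 - x)); // 1: matches -x
  std::printf("%d\n", std::signbit(-x));       // 1
}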
--- name: test_fptrunc_double_to_float @@ -799,28 +799,28 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 ; HARD: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]] - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s64) = G_MERGE_VALUES %0(s32), %1(s32) ; HARD: [[R:%[0-9]+]]:_(s32) = G_FPTRUNC [[X]] ; SOFT-NOT: G_FPTRUNC ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X0]] - ; SOFT-DAG: %r1 = COPY [[X1]] - ; SOFT-AEABI: BL &__aeabi_d2f, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__truncdfsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X0]] + ; SOFT-DAG: $r1 = COPY [[X1]] + ; SOFT-AEABI: BL &__aeabi_d2f, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__truncdfsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-NOT: G_FPTRUNC %3(s32) = G_FPTRUNC %2(s64) - ; CHECK: %r0 = COPY [[R]] - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + ; CHECK: $r0 = COPY [[R]] + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fptosi_float @@ -1113,16 +1113,16 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(true), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 - ; HARD-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; HARD-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 + ; HARD-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; HARD-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(true), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP @@ -1133,7 +1133,7 @@ ; SOFT: [[RCOPY:%[0-9]+]]:_(s32) = COPY [[R]](s32) ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]] ; SOFT-NOT: G_FCMP - ; CHECK: %r0 = COPY [[REXT]] + ; CHECK: $r0 = COPY [[REXT]] ... --- name: test_fcmp_false_s32 @@ -1150,16 +1150,16 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(false), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 - ; HARD-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; HARD-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 + ; HARD-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; HARD-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(false), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP @@ -1170,7 +1170,7 @@ ; SOFT: [[RCOPY:%[0-9]+]]:_(s32) = COPY [[R]](s32) ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]] ; SOFT-NOT: G_FCMP - ; CHECK: %r0 = COPY [[REXT]] + ; CHECK: $r0 = COPY [[REXT]] ...
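In test_fptrunc_double_to_float above, __aeabi_d2f and __truncdfsf2 both compute the IEEE double-to-float conversion that G_FPTRUNC requests, with the s64 operand marshalled in r0-r1 exactly as in the other double tests; the conversion can round away low-order bits. For instance:

#include <cstdio>

int main() {
  double d = 2.4;                  // not exactly representable in either width
  float f = static_cast<float>(d); // what __aeabi_d2f/__truncdfsf2 compute
  std::printf("%.17g\n", d);       // 2.3999999999999999
  std::printf("%.9g\n", f);        // ~2.40000010 after rounding to a 24-bit mantissa
}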
--- name: test_fcmp_oeq_s32 @@ -1187,22 +1187,22 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(oeq), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(oeq), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__eqsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpeq, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__eqsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1214,9 +1214,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ogt_s32 @@ -1233,22 +1233,22 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ogt), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpgt, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__gtsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1260,9 +1260,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
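The two SOFT flavours consume the comparison result differently. The AEABI helpers (__aeabi_fcmpeq, __aeabi_fcmpgt, ...) already return a 0/1 answer, so only the truncate is needed (combined into the G_AND masking sequence the comments mention), whereas the libgcc helpers (__eqsf2, __gtsf2, ...) return an ordering value that must be compared against zero with the matching predicate: ICMP eq for oeq, sgt for ogt, and so on. Host-side stand-ins for the two conventions (hypothetical, not the real runtime):

#include <cstdio>

// AEABI style: the helper itself returns the 0/1 answer.
static int aeabi_fcmpeq_like(float a, float b) { return a == b; }

// libgcc style: the helper returns an ordering value and the caller
// applies the predicate against zero (here: eq for oeq).
static int eqsf2_like(float a, float b) {
  return (a == b) ? 0 : 1; // nonzero for unequal or unordered
}

int main() {
  float x = 1.0f, y = 1.0f;
  int oeq_aeabi   = aeabi_fcmpeq_like(x, y) != 0; // just truncate/mask
  int oeq_default = eqsf2_like(x, y) == 0;        // ICMP eq against zero
  std::printf("%d %d\n", oeq_aeabi, oeq_default); // 1 1
}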
--- name: test_fcmp_oge_s32 @@ -1279,22 +1279,22 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(oge), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpge, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__gesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpge, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__gesf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1306,9 +1306,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_olt_s32 @@ -1325,22 +1325,22 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(olt), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(olt), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmplt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmplt, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__ltsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1352,9 +1352,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ole_s32 @@ -1371,22 +1371,22 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ole), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ole), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmple, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__lesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmple, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__lesf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1398,9 +1398,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ord_s32 @@ -1417,30 +1417,30 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ord), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[X]](s32), [[Y]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpun, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpun, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__unordsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_ugt_s32 @@ -1457,21 +1457,21 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ugt), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ugt), [[X]](s32), [[Y]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmple, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__lesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmple, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__lesf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]] @@ -1479,9 +1479,9 @@ ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_uge_s32 @@ -1498,21 +1498,21 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(uge), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(uge), [[X]](s32), [[Y]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmplt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmplt, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__ltsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]] @@ -1520,9 +1520,9 @@ ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
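No helper exists for the unordered predicates, so they are legalized through the complementary ordered call: ugt(x, y) == !ole(x, y), hence the __aeabi_fcmple/__lesf2 call followed by an ICMP eq against zero, and uge likewise goes through fcmplt. This works because an ordered comparison is false whenever either operand is NaN:

#include <cmath>
#include <cstdio>

// ugt == "greater than or unordered" == !(x <= y) under IEEE rules.
static bool ugt(float x, float y) { return !(x <= y); }

int main() {
  float nan = std::nanf("");
  std::printf("%d\n", ugt(2.0f, 1.0f)); // 1
  std::printf("%d\n", ugt(1.0f, 2.0f)); // 0
  std::printf("%d\n", ugt(nan, 1.0f));  // 1: unordered counts as true
}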
--- name: test_fcmp_ult_s32 @@ -1539,21 +1539,21 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ult), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[X]](s32), [[Y]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpge, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__gesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpge, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__gesf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]] @@ -1561,9 +1561,9 @@ ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_ule_s32 @@ -1580,21 +1580,21 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ule), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ule), [[X]](s32), [[Y]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpgt, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__gtsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]] @@ -1602,9 +1602,9 @@ ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_une_s32 @@ -1621,21 +1621,21 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(une), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(une), [[X]](s32), [[Y]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__nesf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpeq, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__nesf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]] @@ -1643,9 +1643,9 @@ ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_uno_s32 @@ -1662,22 +1662,22 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(uno), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(uno), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpun, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpun, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__unordsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1689,9 +1689,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_one_s32 @@ -1708,32 +1708,32 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(one), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(one), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpgt, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__gtsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[RET1]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmplt, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmplt, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__ltsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]] @@ -1749,9 +1749,9 @@ ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]] ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
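floatpred(one) has no single helper either; the expansion above makes two calls, one for ogt and one for olt, and ORs the two i1 results (the [[R1]]/[[R2]] pattern). Semantically:

#include <cmath>
#include <cstdio>

// "one": ordered and not equal, i.e. ogt(x, y) || olt(x, y).
static bool one(float x, float y) { return x > y || x < y; }

int main() {
  float nan = std::nanf("");
  std::printf("%d\n", one(1.0f, 2.0f)); // 1
  std::printf("%d\n", one(1.0f, 1.0f)); // 0
  std::printf("%d\n", one(nan, 1.0f));  // 0: unordered is false here
}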
--- name: test_fcmp_ueq_s32 @@ -1768,32 +1768,32 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1 %2(s1) = G_FCMP floatpred(ueq), %0(s32), %1 ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ueq), [[X]](s32), [[Y]] ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__eqsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpeq, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__eqsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET1]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X]] - ; SOFT-DAG: %r1 = COPY [[Y]] - ; SOFT-AEABI: BL &__aeabi_fcmpun, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT-DEFAULT: BL &__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 - ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X]] + ; SOFT-DAG: $r1 = COPY [[Y]] + ; SOFT-AEABI: BL &__aeabi_fcmpun, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT-DEFAULT: BL &__unordsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0 + ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]] @@ -1809,9 +1809,9 @@ ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]] ; SOFT-NOT: G_FCMP %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
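floatpred(ueq) is decomposed the same way, as oeq || uno: an __aeabi_fcmpeq/__eqsf2 call checked with ICMP eq and an __aeabi_fcmpun/__unordsf2 call checked with ICMP ne, OR'd together:

#include <cmath>
#include <cstdio>

// "ueq": equal or unordered, i.e. oeq(x, y) || uno(x, y).
static bool ueq(float x, float y) {
  return x == y || std::isnan(x) || std::isnan(y);
}

int main() {
  float nan = std::nanf("");
  std::printf("%d\n", ueq(1.0f, 1.0f)); // 1
  std::printf("%d\n", ueq(1.0f, 2.0f)); // 0
  std::printf("%d\n", ueq(nan, 1.0f));  // 1
}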
--- name: test_fcmp_true_s64 @@ -1832,16 +1832,16 @@ - { id: 7, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 %4(s64) = G_MERGE_VALUES %0(s32), %1 %5(s64) = G_MERGE_VALUES %2(s32), %3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32) @@ -1858,9 +1858,9 @@ ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]] ; SOFT-NOT: G_FCMP %7(s32) = G_ZEXT %6(s1) - %r0 = COPY %7(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %7(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... --- name: test_fcmp_false_s64 @@ -1881,16 +1881,16 @@ - { id: 7, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 %4(s64) = G_MERGE_VALUES %0(s32), %1 %5(s64) = G_MERGE_VALUES %2(s32), %3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32) @@ -1908,9 +1908,9 @@ ; SOFT-NOT: G_FCMP ; SOFT-NOT: G_FCMP %7(s32) = G_ZEXT %6(s1) - %r0 = COPY %7(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %7(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_fcmp_oeq_s64 @@ -1931,16 +1931,16 @@ - { id: 7, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2, %r3 - - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 - %3(s32) = COPY %r3 - ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 - ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 - ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2 - ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3 + liveins: $r0, $r1, $r2, $r3 + + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 + %3(s32) = COPY $r3 + ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 + ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 + ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2 + ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3 %4(s64) = G_MERGE_VALUES %0(s32), %1 %5(s64) = G_MERGE_VALUES %2(s32), %3 ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32) @@ -1950,13 +1950,13 @@ ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP ; SOFT: ADJCALLSTACKDOWN - ; SOFT-DAG: %r0 = COPY [[X0]] - ; SOFT-DAG: %r1 = COPY [[X1]] - ; SOFT-DAG: %r2 = COPY [[Y0]] - ; SOFT-DAG: %r3 = COPY [[Y1]] - ; SOFT-AEABI: BL &__aeabi_dcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0 - ; SOFT-DEFAULT: BL &__eqdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0 - ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0 + ; SOFT-DAG: $r0 = COPY [[X0]] + ; SOFT-DAG: $r1 = COPY [[X1]] + ; SOFT-DAG: $r2 = COPY [[Y0]] + ; SOFT-DAG: $r3 = COPY [[Y1]] + ; SOFT-AEABI: BL &__aeabi_dcmpeq, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0 + ; SOFT-DEFAULT: BL &__eqdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0 + ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0 ; SOFT: ADJCALLSTACKUP ; For aeabi, we just need to truncate the result. The combiner changes the ; truncation into the following masking sequence. @@ -1968,9 +1968,9 @@ ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1) ; SOFT-NOT: G_FCMP %7(s32) = G_ZEXT %6(s1) - %r0 = COPY %7(s32) - ; CHECK: %r0 = COPY [[REXT]] - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %7(s32) + ; CHECK: $r0 = COPY [[REXT]] + BX_RET 14, $noreg, implicit $r0 ... 
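The s64 comparison tests follow the same predicate table; only the marshalling changes, with X split across r0-r1 and Y across r2-r3 before the __aeabi_dcmp*/libgcc call. A stand-in showing the four-register signature (again assuming little-endian AAPCS):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical stand-in for __aeabi_dcmpeq: each double arrives as two
// raw 32-bit register values (r0/r1 and r2/r3, low word first on
// little-endian AAPCS).
static int dcmpeq_like(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3) {
  uint64_t xb = (static_cast<uint64_t>(r1) << 32) | r0;
  uint64_t yb = (static_cast<uint64_t>(r3) << 32) | r2;
  double x, y;
  std::memcpy(&x, &xb, sizeof(x));
  std::memcpy(&y, &yb, sizeof(y));
  return x == y;
}

int main() {
  // 2.0 == 0x4000000000000000: low word 0, high word 0x40000000.
  std::printf("%d\n", dcmpeq_like(0, 0x40000000, 0, 0x40000000)); // 1
}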
 ---
 name: test_fcmp_ogt_s64
@@ -1991,16 +1991,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2010,13 +2010,13 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpgt, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__gtdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; For aeabi, we just need to truncate the result. The combiner changes the
     ; truncation into the following masking sequence.
@@ -2028,9 +2028,9 @@
     ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_oge_s64
@@ -2051,16 +2051,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2070,13 +2070,13 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpge, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__gedf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpge, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__gedf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; For aeabi, we just need to truncate the result. The combiner changes the
     ; truncation into the following masking sequence.
@@ -2088,9 +2088,9 @@
     ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_olt_s64
@@ -2111,16 +2111,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2130,13 +2130,13 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmplt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmplt, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__ltdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; For aeabi, we just need to truncate the result. The combiner changes the
     ; truncation into the following masking sequence.
@@ -2148,9 +2148,9 @@
     ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_ole_s64
@@ -2171,16 +2171,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2190,13 +2190,13 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmple, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__ledf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmple, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__ledf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; For aeabi, we just need to truncate the result. The combiner changes the
     ; truncation into the following masking sequence.
@@ -2208,9 +2208,9 @@
     ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_ord_s64
@@ -2231,16 +2231,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2249,22 +2249,22 @@
     ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[X]](s64), [[Y]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpun, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpun, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__unorddf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
     ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_ugt_s64
@@ -2285,16 +2285,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2303,13 +2303,13 @@
     ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ugt), [[X]](s64), [[Y]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmple, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__ledf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmple, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__ledf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
@@ -2317,9 +2317,9 @@
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
     ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_uge_s64
@@ -2340,16 +2340,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2358,13 +2358,13 @@
     ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(uge), [[X]](s64), [[Y]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmplt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmplt, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__ltdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
@@ -2372,9 +2372,9 @@
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
     ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_ult_s64
@@ -2395,16 +2395,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2413,13 +2413,13 @@
     ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[X]](s64), [[Y]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpge, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__gedf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpge, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__gedf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
@@ -2427,9 +2427,9 @@
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
     ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_ule_s64
@@ -2450,16 +2450,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2468,13 +2468,13 @@
     ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(ule), [[X]](s64), [[Y]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpgt, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__gtdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
@@ -2482,9 +2482,9 @@
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
     ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_une_s64
@@ -2505,16 +2505,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2523,13 +2523,13 @@
     ; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(une), [[X]](s64), [[Y]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__nedf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpeq, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__nedf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-AEABI: [[R:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET]](s32), [[ZERO]]
@@ -2537,9 +2537,9 @@
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
     ; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_uno_s64
@@ -2560,16 +2560,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2579,13 +2579,13 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpun, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpun, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__unorddf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; For aeabi, we just need to truncate the result. The combiner changes the
     ; truncation into the following masking sequence.
@@ -2597,9 +2597,9 @@
     ; SOFT-DEFAULT: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_one_s64
@@ -2620,16 +2620,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2639,25 +2639,25 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpgt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpgt, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__gtdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-DEFAULT: [[R1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[RET1]](s32), [[ZERO]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmplt, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmplt, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__ltdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]]
@@ -2673,9 +2673,9 @@
     ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]]
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fcmp_ueq_s64
@@ -2696,16 +2696,16 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
-
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
+    liveins: $r0, $r1, $r2, $r3
+
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
     %4(s64) = G_MERGE_VALUES %0(s32), %1
     %5(s64) = G_MERGE_VALUES %2(s32), %3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
@@ -2715,25 +2715,25 @@
     ; HARD: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpeq, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__eqdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpeq, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__eqdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET1:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-DEFAULT: [[R1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[RET1]](s32), [[ZERO]]
     ; SOFT-NOT: G_FCMP
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-DAG: %r0 = COPY [[X0]]
-    ; SOFT-DAG: %r1 = COPY [[X1]]
-    ; SOFT-DAG: %r2 = COPY [[Y0]]
-    ; SOFT-DAG: %r3 = COPY [[Y1]]
-    ; SOFT-AEABI: BL &__aeabi_dcmpun, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT-DEFAULT: BL &__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-    ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY %r0
+    ; SOFT-DAG: $r0 = COPY [[X0]]
+    ; SOFT-DAG: $r1 = COPY [[X1]]
+    ; SOFT-DAG: $r2 = COPY [[Y0]]
+    ; SOFT-DAG: $r3 = COPY [[Y1]]
+    ; SOFT-AEABI: BL &__aeabi_dcmpun, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT-DEFAULT: BL &__unorddf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+    ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-DEFAULT: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]]
@@ -2749,7 +2749,7 @@
     ; SOFT: [[REXT:%[0-9]+]]:_(s32) = G_AND [[RCOPY]], [[MASK]]
     ; SOFT-NOT: G_FCMP
     %7(s32) = G_ZEXT %6(s1)
-    %r0 = COPY %7(s32)
-    ; CHECK: %r0 = COPY [[REXT]]
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %7(s32)
+    ; CHECK: $r0 = COPY [[REXT]]
+    BX_RET 14, $noreg, implicit $r0
 ...
Index: test/CodeGen/ARM/GlobalISel/arm-legalize-vfp4.mir
===================================================================
--- test/CodeGen/ARM/GlobalISel/arm-legalize-vfp4.mir
+++ test/CodeGen/ARM/GlobalISel/arm-legalize-vfp4.mir
@@ -21,33 +21,33 @@
   - { id: 3, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2

-    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Z:%[0-9]+]]:_(s32) = COPY %r2
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
+    ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Z:%[0-9]+]]:_(s32) = COPY $r2
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
     ; HARD: [[R:%[0-9]+]]:_(s32) = G_FMA [[X]], [[Y]], [[Z]]
     ; SOFT-NOT: G_FMA
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-ABI-DAG: %r0 = COPY [[X]]
-    ; SOFT-ABI-DAG: %r1 = COPY [[Y]]
-    ; SOFT-ABI-DAG: %r2 = COPY [[Z]]
-    ; SOFT-ABI: BL &fmaf, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit-def %r0
-    ; SOFT-ABI: [[R:%[0-9]+]]:_(s32) = COPY %r0
-    ; HARD-ABI-DAG: %s0 = COPY [[X]]
-    ; HARD-ABI-DAG: %s1 = COPY [[Y]]
-    ; HARD-ABI-DAG: %s2 = COPY [[Z]]
-    ; HARD-ABI: BL &fmaf, {{.*}}, implicit %s0, implicit %s1, implicit %s2, implicit-def %s0
-    ; HARD-ABI: [[R:%[0-9]+]]:_(s32) = COPY %s0
+    ; SOFT-ABI-DAG: $r0 = COPY [[X]]
+    ; SOFT-ABI-DAG: $r1 = COPY [[Y]]
+    ; SOFT-ABI-DAG: $r2 = COPY [[Z]]
+    ; SOFT-ABI: BL &fmaf, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit-def $r0
+    ; SOFT-ABI: [[R:%[0-9]+]]:_(s32) = COPY $r0
+    ; HARD-ABI-DAG: $s0 = COPY [[X]]
+    ; HARD-ABI-DAG: $s1 = COPY [[Y]]
+    ; HARD-ABI-DAG: $s2 = COPY [[Z]]
+    ; HARD-ABI: BL &fmaf, {{.*}}, implicit $s0, implicit $s1, implicit $s2, implicit-def $s0
+    ; HARD-ABI: [[R:%[0-9]+]]:_(s32) = COPY $s0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_FMA
     %3(s32) = G_FMA %0, %1, %2
-    ; CHECK: %r0 = COPY [[R]]
-    %r0 = COPY %3(s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]]
+    $r0 = COPY %3(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_fma_double
@@ -69,16 +69,16 @@
   - { id: 8, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
+    liveins: $r0, $r1, $r2, $r3

-    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
-    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
-    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY %r2
-    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY %r3
-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
-    %2(s32) = COPY %r2
-    %3(s32) = COPY %r3
+    ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
+    ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
+    ; CHECK-DAG: [[Y0:%[0-9]+]]:_(s32) = COPY $r2
+    ; CHECK-DAG: [[Y1:%[0-9]+]]:_(s32) = COPY $r3
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
+    %2(s32) = COPY $r2
+    %3(s32) = COPY $r3
     ; HARD-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]]
     ; HARD-DAG: [[Y:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[Y0]]
     ; HARD-ABI-DAG: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]]
@@ -88,34 +88,34 @@
     ; HARD: [[R:%[0-9]+]]:_(s64) = G_FMA [[X]], [[X]], [[Y]]
     ; SOFT-NOT: G_FMA
     ; SOFT: ADJCALLSTACKDOWN
-    ; SOFT-ABI-DAG: %r{{[0-1]}} = COPY [[X0]]
-    ; SOFT-ABI-DAG: %r{{[0-1]}} = COPY [[X1]]
-    ; SOFT-ABI-DAG: %r{{[2-3]}} = COPY [[X0]]
-    ; SOFT-ABI-DAG: %r{{[2-3]}} = COPY [[X1]]
-    ; SOFT-ABI: [[SP1:%[0-9]+]]:_(p0) = COPY %sp
+    ; SOFT-ABI-DAG: $r{{[0-1]}} = COPY [[X0]]
+    ; SOFT-ABI-DAG: $r{{[0-1]}} = COPY [[X1]]
+    ; SOFT-ABI-DAG: $r{{[2-3]}} = COPY [[X0]]
+    ; SOFT-ABI-DAG: $r{{[2-3]}} = COPY [[X1]]
+    ; SOFT-ABI: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
     ; SOFT-ABI: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; SOFT-ABI: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
     ; SOFT-ABI: G_STORE [[Y0]](s32), [[FI1]](p0){{.*}}store 8 into stack
     ; SOFT-ABI: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; SOFT-ABI: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[FI1]], [[OFF2]](s32)
     ; SOFT-ABI: G_STORE [[Y1]](s32), [[FI2]](p0){{.*}}store 8 into stack
-    ; SOFT-ABI: BL &fma, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
-    ; SOFT-ABI-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0
-    ; SOFT-ABI-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1
-    ; HARD-ABI-DAG: %d0 = COPY [[X]]
-    ; HARD-ABI-DAG: %d1 = COPY [[X]]
-    ; HARD-ABI-DAG: %d2 = COPY [[Y]]
-    ; HARD-ABI: BL &fma, {{.*}}, implicit %d0, implicit %d1, implicit %d2, implicit-def %d0
-    ; HARD-ABI: [[R:%[0-9]+]]:_(s64) = COPY %d0
+    ; SOFT-ABI: BL &fma, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+    ; SOFT-ABI-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0
+    ; SOFT-ABI-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1
+    ; HARD-ABI-DAG: $d0 = COPY [[X]]
+    ; HARD-ABI-DAG: $d1 = COPY [[X]]
+    ; HARD-ABI-DAG: $d2 = COPY [[Y]]
+    ; HARD-ABI: BL &fma, {{.*}}, implicit $d0, implicit $d1, implicit $d2, implicit-def $d0
+    ; HARD-ABI: [[R:%[0-9]+]]:_(s64) = COPY $d0
     ; SOFT: ADJCALLSTACKUP
     ; SOFT-NOT: G_FMA
     %6(s64) = G_FMA %4, %4, %5
     ; HARD: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R]](s64)
     ; HARD-ABI: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R]](s64)
     %7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64)
-    ; CHECK-DAG: %r0 = COPY [[R0]]
-    ; CHECK-DAG: %r1 = COPY [[R1]]
-    %r0 = COPY %7(s32)
-    %r1 = COPY %8(s32)
-    BX_RET 14, %noreg, implicit %r0, implicit %r1
+    ; CHECK-DAG: $r0 = COPY [[R0]]
+    ; CHECK-DAG: $r1 = COPY [[R1]]
+    $r0 = COPY %7(s32)
+    $r1 = COPY %8(s32)
+    BX_RET 14, $noreg, implicit $r0, implicit $r1
 ...
Index: test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
===================================================================
--- test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -74,15 +74,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0(p0) :: (load 1)
     %2(s32) = G_SEXT %1
     ; G_SEXT with s8 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_SEXT {{%[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_zext_s16
@@ -98,15 +98,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
     %2(s32) = G_ZEXT %1
     ; G_ZEXT with s16 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_ZEXT {{%[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_inttoptr_s32
@@ -121,14 +121,14 @@
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(p0) = G_INTTOPTR %0(s32)
     ; G_INTTOPTR with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(p0) = G_INTTOPTR {{%[0-9]+}}
-    %r0 = COPY %1(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %1(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_ptrtoint_s32
@@ -143,14 +143,14 @@
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s32) = G_PTRTOINT %0(p0)
     ; G_PTRTOINT with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_PTRTOINT {{%[0-9]+}}
-    %r0 = COPY %1(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %1(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_add_s8
@@ -169,11 +169,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s8) = G_ADD %1, %3
     ; G_ADD with s8 should widen
@@ -181,8 +181,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_add_s16
@@ -201,11 +201,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s16) = G_ADD %1, %3
     ; G_ADD with s16 should widen
@@ -213,8 +213,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_add_s32
@@ -230,15 +230,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_ADD %0, %1
     ; G_ADD with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -258,11 +258,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s8) = G_SUB %1, %3
     ; G_SUB with s8 should widen
@@ -270,8 +270,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_sub_s16
@@ -290,11 +290,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s16) = G_SUB %1, %3
     ; G_SUB with s16 should widen
@@ -302,8 +302,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_sub_s32
@@ -319,15 +319,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_SUB %0, %1
     ; G_SUB with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -347,11 +347,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s8) = G_MUL %1, %3
     ; G_MUL with s8 should widen
@@ -359,8 +359,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_mul_s16
@@ -379,11 +379,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s16) = G_MUL %1, %3
     ; G_MUL with s16 should widen
@@ -391,8 +391,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_mul_s32
@@ -408,15 +408,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_MUL %0, %1
     ; G_MUL with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -436,11 +436,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s8) = G_AND %1, %3
     ; G_AND with s8 should widen
@@ -448,8 +448,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_AND {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_and_s16
@@ -468,11 +468,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s16) = G_AND %1, %3
     ; G_AND with s16 should widen
@@ -480,8 +480,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_AND {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_and_s32
@@ -497,15 +497,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_AND %0, %1
     ; G_AND with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -525,11 +525,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s8) = G_OR %1, %3
     ; G_OR with s8 should widen
@@ -537,8 +537,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_OR {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_or_s16
@@ -557,11 +557,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s16) = G_OR %1, %3
     ; G_OR with s16 should widen
@@ -569,8 +569,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_OR {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_or_s32
@@ -586,15 +586,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_OR %0, %1
     ; G_OR with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -614,11 +614,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s8) = G_XOR %1, %3
     ; G_XOR with s8 should widen
@@ -626,8 +626,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s8)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_xor_s16
@@ -646,11 +646,11 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r0
+    %2(p0) = COPY $r0
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s16) = G_XOR %1, %3
     ; G_XOR with s16 should widen
@@ -658,8 +658,8 @@
     ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
     %5(s32) = G_SEXT %4(s16)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_xor_s32
@@ -675,15 +675,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_XOR %0, %1
     ; G_XOR with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -700,15 +700,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_LSHR %0, %1
     ; G_LSHR with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_LSHR {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -725,15 +725,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_ASHR %0, %1
     ; G_ASHR with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_ASHR {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -750,15 +750,15 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s32) = G_SHL %0, %1
     ; G_SHL with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_SHL {{%[0-9]+, %[0-9]+}}
-    %r0 = COPY %2(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -781,15 +781,15 @@
 # CHECK: id: [[FRAME_INDEX:[0-9]+]], type: default, offset: 8
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2, %r3
+    liveins: $r0, $r1, $r2, $r3

     ; This is legal, so we should find it unchanged in the output
     ; CHECK: [[FIVREG:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[FRAME_INDEX]]
     ; CHECK: {{%[0-9]+}}:_(s32) = G_LOAD [[FIVREG]](p0) :: (load 4)
     %0(p0) = G_FRAME_INDEX %fixed-stack.2
     %1(s32) = G_LOAD %0(p0) :: (load 4)
-    %r0 = COPY %1(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %1(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_legal_loads_stores
@@ -809,7 +809,7 @@
   - { id: 6, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

     ; These are all legal, so we should find them unchanged in the output
     ; CHECK-DAG: G_STORE {{%[0-9]+}}(s64), %0(p0)
@@ -824,7 +824,7 @@
     ; CHECK-DAG: {{%[0-9]+}}:_(s8) = G_LOAD %0(p0)
     ; CHECK-DAG: {{%[0-9]+}}:_(s1) = G_LOAD %0(p0)
    ; CHECK-DAG: {{%[0-9]+}}:_(p0) = G_LOAD %0(p0)
-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s64) = G_LOAD %0(p0) :: (load 8)
     G_STORE %1(s64), %0(p0) :: (store 8)
     %2(s32) = G_LOAD %0(p0) :: (load 4)
@@ -837,7 +837,7 @@
     G_STORE %5(s1), %0(p0) :: (store 1)
     %6(p0) = G_LOAD %0(p0) :: (load 4)
     G_STORE %6(p0), %0(p0) :: (store 4)
-    BX_RET 14, %noreg
+    BX_RET 14, $noreg
 ...
 ---
 name: test_gep
@@ -853,16 +853,16 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(p0) = COPY $r0
+    %1(s32) = COPY $r1

     ; CHECK: {{%[0-9]+}}:_(p0) = G_GEP {{%[0-9]+}}, {{%[0-9]+}}(s32)
     %2(p0) = G_GEP %0, %1(s32)

-    %r0 = COPY %2(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %2(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_constants
@@ -884,9 +884,9 @@
   - { id: 8, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

-    %4(p0) = COPY %r0
+    %4(p0) = COPY $r0

     %0(s32) = G_CONSTANT 42
     ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT 42
@@ -924,8 +924,8 @@
     ; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 16
     ; CHECK-NOT: G_CONSTANT i64

-    %r0 = COPY %0(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %0(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_icmp_s8
@@ -944,19 +944,19 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s8) = G_LOAD %0 :: (load 1)
-    %2(p0) = COPY %r1
+    %2(p0) = COPY $r1
     %3(s8) = G_LOAD %2 :: (load 1)
     %4(s1) = G_ICMP intpred(ne), %1(s8), %3
     ; G_ICMP with s8 should widen
     ; CHECK: {{%[0-9]+}}:_(s1) = G_ICMP intpred(ne), {{%[0-9]+}}(s32), {{%[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s1) = G_ICMP intpred(ne), {{%[0-9]+}}(s8), {{%[0-9]+}}
     %5(s32) = G_ZEXT %4(s1)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_icmp_s16
@@ -975,19 +975,19 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(p0) = COPY %r0
+    %0(p0) = COPY $r0
     %1(s16) = G_LOAD %0 :: (load 2)
-    %2(p0) = COPY %r1
+    %2(p0) = COPY $r1
     %3(s16) = G_LOAD %2 :: (load 2)
     %4(s1) = G_ICMP intpred(slt), %1(s16), %3
     ; G_ICMP with s16 should widen
     ; CHECK: {{%[0-9]+}}:_(s1) = G_ICMP intpred(slt), {{%[0-9]+}}(s32), {{%[0-9]+}}
     ; CHECK-NOT: {{%[0-9]+}}:_(s1) = G_ICMP intpred(slt), {{%[0-9]+}}(s16), {{%[0-9]+}}
     %5(s32) = G_ZEXT %4(s1)
-    %r0 = COPY %5(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %5(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_icmp_s32
@@ -1004,16 +1004,16 @@
   - { id: 3, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s1) = G_ICMP intpred(eq), %0(s32), %1
     ; G_ICMP with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s1) = G_ICMP intpred(eq), {{%[0-9]+}}(s32), {{%[0-9]+}}
     %3(s32) = G_ZEXT %2(s1)
-    %r0 = COPY %3(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %3(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_select_s32
@@ -1030,16 +1030,16 @@
   - { id: 3, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s1) = G_CONSTANT i1 1
     %3(s32) = G_SELECT %2(s1), %0, %1
     ; G_SELECT with s32 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(s32) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
-    %r0 = COPY %3(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %3(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_select_ptr
@@ -1056,16 +1056,16 @@
   - { id: 3, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2

-    %0(p0) = COPY %r0
-    %1(p0) = COPY %r1
+    %0(p0) = COPY $r0
+    %1(p0) = COPY $r1
     %2(s1) = G_CONSTANT i1 0
     %3(p0) = G_SELECT %2(s1), %0, %1
     ; G_SELECT with p0 is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(p0) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
-    %r0 = COPY %3(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %3(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_brcond
@@ -1082,10 +1082,10 @@
 body: |
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %r0, %r1
+    liveins: $r0, $r1

-    %0(s32) = COPY %r0
-    %1(s32) = COPY %r1
+    %0(s32) = COPY $r0
+    %1(s32) = COPY $r1
     %2(s1) = G_ICMP intpred(sgt), %0(s32), %1
     G_BRCOND %2(s1), %bb.1
     ; G_BRCOND with s1 is legal, so we should find it unchanged in the output
@@ -1093,12 +1093,12 @@
     G_BR %bb.2

   bb.1:
-    %r0 = COPY %1(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %1(s32)
+    BX_RET 14, $noreg, implicit $r0

   bb.2:
-    %r0 = COPY %0(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %0(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
@@ -1117,13 +1117,13 @@
   - { id: 4, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2

-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s1) = G_TRUNC %0(s32)
-    %2(s32) = COPY %r1
-    %3(s32) = COPY %r2
+    %2(s32) = COPY $r1
+    %3(s32) = COPY $r2

     G_BRCOND %1(s1), %bb.1
     G_BR %bb.2
@@ -1135,8 +1135,8 @@
     %4(s32) = G_PHI %2(s32), %bb.0, %3(s32), %bb.1
     ; G_PHI with s32 is legal, so we should find it unchanged in the output
     ; CHECK: G_PHI {{%[0-9]+}}(s32), %bb.0, {{%[0-9]+}}(s32), %bb.1
-    %r0 = COPY %4(s32)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %4(s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_phi_p0
@@ -1154,13 +1154,13 @@
   - { id: 4, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2

-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s1) = G_TRUNC %0(s32)
-    %2(p0) = COPY %r1
-    %3(p0) = COPY %r2
+    %2(p0) = COPY $r1
+    %3(p0) = COPY $r2

     G_BRCOND %1(s1), %bb.1
     G_BR %bb.2
@@ -1172,8 +1172,8 @@
     %4(p0) = G_PHI %2(p0), %bb.0, %3(p0), %bb.1
     ; G_PHI with p0 is legal, so we should find it unchanged in the output
     ; CHECK: G_PHI {{%[0-9]+}}(p0), %bb.0, {{%[0-9]+}}(p0), %bb.1
-    %r0 = COPY %4(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %4(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_phi_s64
@@ -1191,13 +1191,13 @@
   - { id: 4, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %d0, %d1
+    liveins: $r0, $d0, $d1

-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s1) = G_TRUNC %0(s32)
-    %2(s64) = COPY %d0
-    %3(s64) = COPY %d1
+    %2(s64) = COPY $d0
+    %3(s64) = COPY $d1

     G_BRCOND %1(s1), %bb.1
     G_BR %bb.2
@@ -1210,8 +1210,8 @@
     ; G_PHI with s64 is legal when we have floating point support, so we should
     ; find it unchanged in the output
     ; CHECK: G_PHI {{%[0-9]+}}(s64), %bb.0, {{%[0-9]+}}(s64), %bb.1
-    %d0 = COPY %4(s64)
-    BX_RET 14, %noreg, implicit %d0
+    $d0 = COPY %4(s64)
+    BX_RET 14, $noreg, implicit $d0
 ...
 ---
 name: test_phi_s8
@@ -1232,18 +1232,18 @@
   - { id: 7, class: _ }
 body: |
   bb.0:
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2

-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0
     %1(s1) = G_TRUNC %0(s32)

-    %2(s32) = COPY %r1
+    %2(s32) = COPY $r1
     %3(s8) = G_TRUNC %2(s32)
-    ; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1
+    ; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1

-    %4(s32) = COPY %r2
+    %4(s32) = COPY $r2
     %5(s8) = G_TRUNC %4(s32)
-    ; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY %r2
+    ; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY $r2

     ; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY [[R1]]

@@ -1261,10 +1261,10 @@
     ; CHECK: [[V:%[0-9]+]]:_(s32) = G_PHI [[V1]](s32), %bb.0, [[V2]](s32), %bb.1

     %7(s32) = G_ANYEXT %6(s8)
-    %r0 = COPY %7(s32)
+    $r0 = COPY %7(s32)
     ; CHECK: [[R:%[0-9]+]]:_(s32) = COPY [[V]]
-    ; CHECK: %r0 = COPY [[R]](s32)
-    BX_RET 14, %noreg, implicit %r0
+    ; CHECK: $r0 = COPY [[R]](s32)
+    BX_RET 14, $noreg, implicit $r0
 ...
 ---
 name: test_global_variable
@@ -1279,13 +1279,13 @@
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %r0
+    liveins: $r0

-    %0(s32) = COPY %r0
+    %0(s32) = COPY $r0

     %1(p0) = G_GLOBAL_VALUE @a_global
     ; G_GLOBAL_VALUE is legal, so we should find it unchanged in the output
     ; CHECK: {{%[0-9]+}}:_(p0) = G_GLOBAL_VALUE @a_global
-    %r0 = COPY %1(p0)
-    BX_RET 14, %noreg, implicit %r0
+    $r0 = COPY %1(p0)
+    BX_RET 14, $noreg, implicit $r0
 ...
Index: test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
===================================================================
--- test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
+++ test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
@@ -6,16 +6,16 @@

 define arm_aapcscc i32* @test_call_simple_reg_params(i32 *%a, i32 %b) {
 ; CHECK-LABEL: name: test_call_simple_reg_params
-; CHECK-DAG: [[AVREG:%[0-9]+]]:_(p0) = COPY %r0
-; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %r1
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK-DAG: %r0 = COPY [[BVREG]]
-; CHECK-DAG: %r1 = COPY [[AVREG]]
-; CHECK: BL @simple_reg_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit-def %r0
-; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY %r0
-; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK: %r0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK-DAG: [[AVREG:%[0-9]+]]:_(p0) = COPY $r0
+; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY $r1
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK-DAG: $r0 = COPY [[BVREG]]
+; CHECK-DAG: $r1 = COPY [[AVREG]]
+; CHECK: BL @simple_reg_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0
+; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY $r0
+; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK: $r0 = COPY [[RVREG]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %r = notail call arm_aapcscc i32 *@simple_reg_params_target(i32 %b, i32 *%a)
   ret i32 *%r
@@ -25,26 +25,26 @@

 define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
 ; CHECK-LABEL: name: test_call_simple_stack_params
-; CHECK-DAG: [[AVREG:%[0-9]+]]:_(p0) = COPY %r0
-; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %r1
-; CHECK: ADJCALLSTACKDOWN 8, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK-DAG: %r0 = COPY [[BVREG]]
-; CHECK-DAG: %r1 = COPY [[AVREG]]
-; CHECK-DAG: %r2 = COPY [[BVREG]]
-; CHECK-DAG: %r3 = COPY [[AVREG]]
-; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK-DAG: [[AVREG:%[0-9]+]]:_(p0) = COPY $r0
+; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY $r1
+; CHECK: ADJCALLSTACKDOWN 8, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK-DAG: $r0 = COPY [[BVREG]]
+; CHECK-DAG: $r1 = COPY [[AVREG]]
+; CHECK-DAG: $r2 = COPY [[BVREG]]
+; CHECK-DAG: $r3 = COPY [[AVREG]]
+; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
 ; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
-; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
 ; CHECK: G_STORE [[AVREG]](p0), [[FI2]](p0){{.*}}store 4
-; CHECK: BL @simple_stack_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY %r0
-; CHECK: ADJCALLSTACKUP 8, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK: %r0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: BL @simple_stack_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY $r0
+; CHECK: ADJCALLSTACKUP 8, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK: $r0 = COPY [[RVREG]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %r = notail call arm_aapcscc i32 *@simple_stack_params_target(i32 %b, i32 *%a, i32 %b, i32 *%a, i32 %b, i32 *%a)
   ret i32 *%r
@@ -54,53 +54,53 @@

 define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
 ; CHECK-LABEL: name: test_call_ext_params
-; CHECK-DAG: [[R0VREG:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK-DAG: [[R0VREG:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK-DAG: [[AVREG:%[0-9]+]]:_(s8) = G_TRUNC [[R0VREG]]
-; CHECK-DAG: [[R1VREG:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[R1VREG:%[0-9]+]]:_(s32) = COPY $r1
 ; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s16) = G_TRUNC [[R1VREG]]
-; CHECK-DAG: [[R2VREG:%[0-9]+]]:_(s32) = COPY %r2
+; CHECK-DAG: [[R2VREG:%[0-9]+]]:_(s32) = COPY $r2
 ; CHECK-DAG: [[CVREG:%[0-9]+]]:_(s1) = G_TRUNC [[R2VREG]]
-; CHECK: ADJCALLSTACKDOWN 20, 0, 14, %noreg, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 20, 0, 14, $noreg, implicit-def $sp, implicit $sp
 ; CHECK: [[SEXTA:%[0-9]+]]:_(s32) = G_SEXT [[AVREG]](s8)
-; CHECK: %r0 = COPY [[SEXTA]]
+; CHECK: $r0 = COPY [[SEXTA]]
 ; CHECK: [[ZEXTA:%[0-9]+]]:_(s32) = G_ZEXT [[AVREG]](s8)
-; CHECK: %r1 = COPY [[ZEXTA]]
+; CHECK: $r1 = COPY [[ZEXTA]]
 ; CHECK: [[SEXTB:%[0-9]+]]:_(s32) = G_SEXT [[BVREG]](s16)
-; CHECK: %r2 = COPY [[SEXTB]]
+; CHECK: $r2 = COPY [[SEXTB]]
 ; CHECK: [[ZEXTB:%[0-9]+]]:_(s32) = G_ZEXT [[BVREG]](s16)
-; CHECK: %r3 = COPY [[ZEXTB]]
-; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: $r3 = COPY [[ZEXTB]]
+; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
 ; CHECK: [[SEXTA2:%[0-9]+]]:_(s32) = G_SEXT [[AVREG]]
 ; CHECK: G_STORE [[SEXTA2]](s32), [[FI1]](p0){{.*}}store 4
-; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
 ; CHECK: [[ZEXTA2:%[0-9]+]]:_(s32) = G_ZEXT [[AVREG]]
 ; CHECK: G_STORE [[ZEXTA2]](s32), [[FI2]](p0){{.*}}store 4
-; CHECK: [[SP3:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP3:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
 ; CHECK: [[FI3:%[0-9]+]]:_(p0) = G_GEP [[SP3]], [[OFF3]](s32)
 ; CHECK: [[SEXTB2:%[0-9]+]]:_(s32) = G_SEXT [[BVREG]]
 ; CHECK: G_STORE [[SEXTB2]](s32), [[FI3]](p0){{.*}}store 4
-; CHECK: [[SP4:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP4:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
 ; CHECK: [[FI4:%[0-9]+]]:_(p0) = G_GEP [[SP4]], [[OFF4]](s32)
 ; CHECK: [[ZEXTB2:%[0-9]+]]:_(s32) = G_ZEXT [[BVREG]]
 ; CHECK: G_STORE [[ZEXTB2]](s32), [[FI4]](p0){{.*}}store 4
-; CHECK: [[SP5:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP5:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
 ; CHECK: [[FI5:%[0-9]+]]:_(p0) = G_GEP [[SP5]], [[OFF5]](s32)
 ; CHECK: [[ZEXTC:%[0-9]+]]:_(s32) = G_ZEXT [[CVREG]]
 ; CHECK: G_STORE [[ZEXTC]](s32), [[FI5]](p0){{.*}}store 4
-; CHECK: BL @ext_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
-; CHECK: [[R0VREG:%[0-9]+]]:_(s32) = COPY %r0
+; CHECK: BL @ext_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+; CHECK: [[R0VREG:%[0-9]+]]:_(s32) = COPY $r0
 ; CHECK: [[RVREG:%[0-9]+]]:_(s16) = G_TRUNC [[R0VREG]]
-; CHECK: ADJCALLSTACKUP 20, 0, 14, %noreg, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 20, 0, 14, $noreg, implicit-def $sp, implicit $sp
 ; CHECK: [[RExtVREG:%[0-9]+]]:_(s32) = G_SEXT [[RVREG]]
-; CHECK: %r0 = COPY [[RExtVREG]]
-; CHECK: BX_RET 14, %noreg, implicit %r0
+; CHECK: $r0 = COPY [[RExtVREG]]
+; CHECK: BX_RET 14, $noreg, implicit $r0
 entry:
   %r = notail call arm_aapcscc signext i16 @ext_target(i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i1 zeroext %c)
   ret i16 %r
@@ -110,16 +110,16 @@

 define arm_aapcs_vfpcc double @test_call_vfpcc_fp_params(double %a, float %b) {
 ; CHECK-LABEL: name: test_call_vfpcc_fp_params
-; CHECK-DAG: [[AVREG:%[0-9]+]]:_(s64) = COPY %d0
-; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %s2
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK-DAG: %s0 = COPY [[BVREG]]
-; CHECK-DAG: %d1 = COPY [[AVREG]]
-; CHECK: BL @vfpcc_fp_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %d1, implicit-def %d0
-; CHECK: [[RVREG:%[0-9]+]]:_(s64) = COPY %d0
-; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK: %d0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, %noreg, implicit %d0
+; CHECK-DAG: [[AVREG:%[0-9]+]]:_(s64) = COPY $d0
+; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY $s2
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK-DAG: $s0 = COPY [[BVREG]]
+; CHECK-DAG: $d1 = COPY [[AVREG]]
+; CHECK: BL @vfpcc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $d1, implicit-def $d0
+; CHECK: [[RVREG:%[0-9]+]]:_(s64) = COPY $d0
+; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK: $d0 = COPY [[RVREG]]
+; CHECK: BX_RET 14, $noreg, implicit $d0
 entry:
   %r = notail call arm_aapcs_vfpcc double @vfpcc_fp_target(float %b, double %a)
   ret double %r
@@ -129,38 +129,38 @@

 define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
 ; CHECK-LABEL: name: test_call_aapcs_fp_params
-; CHECK-DAG: [[A1:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK-DAG: [[A2:%[0-9]+]]:_(s32) = COPY %r1
+; CHECK-DAG: [[A1:%[0-9]+]]:_(s32) = COPY $r0
+; CHECK-DAG: [[A2:%[0-9]+]]:_(s32) = COPY $r1
 ; LITTLE-DAG: [[AVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[A1]](s32), [[A2]](s32)
 ; BIG-DAG: [[AVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[A2]](s32), [[A1]](s32)
-; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %r2
-; CHECK: ADJCALLSTACKDOWN 16, 0, 14, %noreg, implicit-def %sp, implicit %sp
-; CHECK-DAG: %r0 = COPY [[BVREG]]
+; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY $r2
+; CHECK: ADJCALLSTACKDOWN 16, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; CHECK-DAG: $r0 = COPY [[BVREG]]
 ; CHECK-DAG: [[A1:%[0-9]+]]:_(s32), [[A2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AVREG]](s64)
-; LITTLE-DAG: %r2 = COPY [[A1]]
-; LITTLE-DAG: %r3 = COPY [[A2]]
-; BIG-DAG: %r2 = COPY [[A2]]
-; BIG-DAG: %r3 = COPY [[A1]]
-; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY %sp
+; LITTLE-DAG: $r2 = COPY [[A1]]
+; LITTLE-DAG: $r3 = COPY [[A2]]
+; BIG-DAG: $r2 = COPY [[A2]]
+; BIG-DAG: $r3 = COPY [[A1]]
+; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
 ; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
-; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
 ; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
 ; CHECK: G_STORE [[AVREG]](s64), [[FI2]](p0){{.*}}store 8
-; CHECK: BL @aapcscc_fp_target, csr_aapcs, implicit-def %lr, implicit %sp,
implicit %r0, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 -; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK: BL @aapcscc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 +; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY $r1 ; LITTLE: [[RVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R1]](s32), [[R2]](s32) ; BIG: [[RVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R2]](s32), [[R1]](s32) -; CHECK: ADJCALLSTACKUP 16, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKUP 16, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[RVREG]](s64) -; LITTLE-DAG: %r0 = COPY [[R1]] -; LITTLE-DAG: %r1 = COPY [[R2]] -; BIG-DAG: %r0 = COPY [[R2]] -; BIG-DAG: %r1 = COPY [[R1]] -; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1 +; LITTLE-DAG: $r0 = COPY [[R1]] +; LITTLE-DAG: $r1 = COPY [[R2]] +; BIG-DAG: $r0 = COPY [[R2]] +; BIG-DAG: $r1 = COPY [[R1]] +; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1 entry: %r = notail call arm_aapcscc double @aapcscc_fp_target(float %b, double %a, float %b, double %a) ret double %r @@ -170,14 +170,14 @@ define arm_aapcs_vfpcc float @test_call_different_call_conv(float %x) { ; CHECK-LABEL: name: test_call_different_call_conv -; CHECK: [[X:%[0-9]+]]:_(s32) = COPY %s0 -; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp -; CHECK: %r0 = COPY [[X]] -; CHECK: BL @different_call_conv_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit-def %r0 -; CHECK: [[R:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp -; CHECK: %s0 = COPY [[R]] -; CHECK: BX_RET 14, %noreg, implicit %s0 +; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $s0 +; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp +; CHECK: $r0 = COPY [[X]] +; CHECK: BL @different_call_conv_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit-def $r0 +; CHECK: [[R:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp +; CHECK: $s0 = COPY [[R]] +; CHECK: BX_RET 14, $noreg, implicit $s0 entry: %r = notail call arm_aapcscc float @different_call_conv_target(float %x) ret float %r @@ -187,28 +187,28 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) { ; CHECK-LABEL: name: test_tiny_int_arrays -; CHECK: liveins: %r0, %r1 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK: liveins: $r0, $r1 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1 ; CHECK: [[ARG_ARR:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32) -; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR]](s64) -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: BL @tiny_int_arrays_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit-def %r0, implicit-def %r1 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1 -; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY %r2 +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: BL @tiny_int_arrays_target, csr_aapcs, implicit-def $lr, 
implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1 +; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY $r2 ; CHECK: [[RES_ARR:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32) -; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[RES_ARR]](s96) ; FIXME: This doesn't seem correct with regard to the AAPCS docs (which say ; that composite types larger than 4 bytes should be passed through memory), ; but it's what DAGISel does. We should fix it in the common code for both. -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: %r2 = COPY [[R2]] -; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1, implicit %r2 +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: $r2 = COPY [[R2]] +; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1, implicit $r2 entry: %r = notail call arm_aapcscc [3 x i32] @tiny_int_arrays_target([2 x i32] %arr) ret [3 x i32] %r @@ -218,23 +218,23 @@ define arm_aapcscc void @test_multiple_int_arrays([2 x i32] %arr0, [2 x i32] %arr1) { ; CHECK-LABEL: name: test_multiple_int_arrays -; CHECK: liveins: %r0, %r1 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1 -; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY %r2 -; CHECK: [[R3:%[0-9]+]]:_(s32) = COPY %r3 +; CHECK: liveins: $r0, $r1 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1 +; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY $r2 +; CHECK: [[R3:%[0-9]+]]:_(s32) = COPY $r3 ; CHECK: [[ARG_ARR0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32) ; CHECK: [[ARG_ARR1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R2]](s32), [[R3]](s32) -; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR0]](s64) ; CHECK: [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR1]](s64) -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: %r2 = COPY [[R2]] -; CHECK: %r3 = COPY [[R3]] -; CHECK: BL @multiple_int_arrays_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3 -; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp -; CHECK: BX_RET 14, %noreg +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: $r2 = COPY [[R2]] +; CHECK: $r3 = COPY [[R3]] +; CHECK: BL @multiple_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3 +; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp +; CHECK: BX_RET 14, $noreg entry: notail call arm_aapcscc void @multiple_int_arrays_target([2 x i32] %arr0, [2 x i32] %arr1) ret void @@ -249,35 +249,35 @@ ; doesn't fit in the registers. 
; CHECK-DAG: id: [[FIRST_STACK_ID:[0-9]+]], type: default, offset: 0, size: 4, ; CHECK-DAG: id: [[LAST_STACK_ID:[-0]+]], type: default, offset: 60, size: 4 -; CHECK: liveins: %r0, %r1, %r2, %r3 -; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1 -; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY %r2 -; CHECK-DAG: [[R3:%[0-9]+]]:_(s32) = COPY %r3 +; CHECK: liveins: $r0, $r1, $r2, $r3 +; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1 +; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY $r2 +; CHECK-DAG: [[R3:%[0-9]+]]:_(s32) = COPY $r3 ; CHECK: [[FIRST_STACK_ELEMENT_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[FIRST_STACK_ID]] ; CHECK: [[FIRST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_LOAD [[FIRST_STACK_ELEMENT_FI]]{{.*}}load 4 from %fixed-stack.[[FIRST_STACK_ID]] ; CHECK: [[LAST_STACK_ELEMENT_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[LAST_STACK_ID]] ; CHECK: [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_LOAD [[LAST_STACK_ELEMENT_FI]]{{.*}}load 4 from %fixed-stack.[[LAST_STACK_ID]] ; CHECK: [[ARG_ARR:%[0-9]+]]:_(s640) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32), [[R3]](s32), [[FIRST_STACK_ELEMENT]](s32), {{.*}}, [[LAST_STACK_ELEMENT]](s32) -; CHECK: ADJCALLSTACKDOWN 64, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 64, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32), [[FIRST_STACK_ELEMENT:%[0-9]+]]:_(s32), {{.*}}, [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR]](s640) -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: %r2 = COPY [[R2]] -; CHECK: %r3 = COPY [[R3]] -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: $r2 = COPY [[R2]] +; CHECK: $r3 = COPY [[R3]] +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[OFF_FIRST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[FIRST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_FIRST_ELEMENT]](s32) ; CHECK: G_STORE [[FIRST_STACK_ELEMENT]](s32), [[FIRST_STACK_ARG_ADDR]]{{.*}}store 4 ; Match the second-to-last offset, so we can get the correct SP for the last element ; CHECK: G_CONSTANT i32 56 -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[OFF_LAST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 60 ; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32) ; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4 -; CHECK: BL @large_int_arrays_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3 -; CHECK: ADJCALLSTACKUP 64, 0, 14, %noreg, implicit-def %sp, implicit %sp -; CHECK: BX_RET 14, %noreg +; CHECK: BL @large_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3 +; CHECK: ADJCALLSTACKUP 64, 0, 14, $noreg, implicit-def $sp, implicit $sp +; CHECK: BX_RET 14, $noreg entry: notail call arm_aapcscc void @large_int_arrays_target([20 x i32] %arr) ret void @@ -289,43 +289,43 @@ ; CHECK-LABEL: name: test_fp_arrays_aapcs ; CHECK: fixedStack: ; CHECK: id: [[ARR2_ID:[0-9]+]], type: default, offset: 0, size: 8, -; CHECK: liveins: %r0, %r1, %r2, %r3 -; CHECK: [[ARR0_0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[ARR0_1:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK: liveins: $r0, $r1, $r2, $r3 +; CHECK: [[ARR0_0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: 
[[ARR0_1:%[0-9]+]]:_(s32) = COPY $r1 ; LITTLE: [[ARR0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ARR0_0]](s32), [[ARR0_1]](s32) ; BIG: [[ARR0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ARR0_1]](s32), [[ARR0_0]](s32) -; CHECK: [[ARR1_0:%[0-9]+]]:_(s32) = COPY %r2 -; CHECK: [[ARR1_1:%[0-9]+]]:_(s32) = COPY %r3 +; CHECK: [[ARR1_0:%[0-9]+]]:_(s32) = COPY $r2 +; CHECK: [[ARR1_1:%[0-9]+]]:_(s32) = COPY $r3 ; LITTLE: [[ARR1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ARR1_0]](s32), [[ARR1_1]](s32) ; BIG: [[ARR1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ARR1_1]](s32), [[ARR1_0]](s32) ; CHECK: [[ARR2_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[ARR2_ID]] ; CHECK: [[ARR2:%[0-9]+]]:_(s64) = G_LOAD [[ARR2_FI]]{{.*}}load 8 from %fixed-stack.[[ARR2_ID]] ; CHECK: [[ARR_MERGED:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[ARR0]](s64), [[ARR1]](s64), [[ARR2]](s64) -; CHECK: ADJCALLSTACKDOWN 8, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 8, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[ARR0:%[0-9]+]]:_(s64), [[ARR1:%[0-9]+]]:_(s64), [[ARR2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[ARR_MERGED]](s192) ; CHECK: [[ARR0_0:%[0-9]+]]:_(s32), [[ARR0_1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARR0]](s64) -; LITTLE: %r0 = COPY [[ARR0_0]](s32) -; LITTLE: %r1 = COPY [[ARR0_1]](s32) -; BIG: %r0 = COPY [[ARR0_1]](s32) -; BIG: %r1 = COPY [[ARR0_0]](s32) +; LITTLE: $r0 = COPY [[ARR0_0]](s32) +; LITTLE: $r1 = COPY [[ARR0_1]](s32) +; BIG: $r0 = COPY [[ARR0_1]](s32) +; BIG: $r1 = COPY [[ARR0_0]](s32) ; CHECK: [[ARR1_0:%[0-9]+]]:_(s32), [[ARR1_1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARR1]](s64) -; LITTLE: %r2 = COPY [[ARR1_0]](s32) -; LITTLE: %r3 = COPY [[ARR1_1]](s32) -; BIG: %r2 = COPY [[ARR1_1]](s32) -; BIG: %r3 = COPY [[ARR1_0]](s32) -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; LITTLE: $r2 = COPY [[ARR1_0]](s32) +; LITTLE: $r3 = COPY [[ARR1_1]](s32) +; BIG: $r2 = COPY [[ARR1_1]](s32) +; BIG: $r3 = COPY [[ARR1_0]](s32) +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[ARR2_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[ARR2_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[ARR2_OFFSET]](s32) ; CHECK: G_STORE [[ARR2]](s64), [[ARR2_ADDR]](p0){{.*}}store 8 -; CHECK: BL @fp_arrays_aapcs_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK: BL @fp_arrays_aapcs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1 ; CHECK: [[R_MERGED:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32) -; CHECK: ADJCALLSTACKUP 8, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKUP 8, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R_MERGED]](s64) -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1 +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1 entry: %r = notail call arm_aapcscc [2 x float] @fp_arrays_aapcs_target([3 x double] %arr) ret [2 x float] %r @@ -340,13 +340,13 @@ ; CHECK-DAG: id: [[Z1_ID:[0-9]+]], type: default, offset: 8, size: 8, ; CHECK-DAG: id: [[Z2_ID:[0-9]+]], type: default, offset: 16, size: 8, ; CHECK-DAG: id: [[Z3_ID:[0-9]+]], type: default, offset: 24, 
size: 8, -; CHECK: liveins: %d0, %d1, %d2, %s6, %s7, %s8 -; CHECK: [[X0:%[0-9]+]]:_(s64) = COPY %d0 -; CHECK: [[X1:%[0-9]+]]:_(s64) = COPY %d1 -; CHECK: [[X2:%[0-9]+]]:_(s64) = COPY %d2 -; CHECK: [[Y0:%[0-9]+]]:_(s32) = COPY %s6 -; CHECK: [[Y1:%[0-9]+]]:_(s32) = COPY %s7 -; CHECK: [[Y2:%[0-9]+]]:_(s32) = COPY %s8 +; CHECK: liveins: $d0, $d1, $d2, $s6, $s7, $s8 +; CHECK: [[X0:%[0-9]+]]:_(s64) = COPY $d0 +; CHECK: [[X1:%[0-9]+]]:_(s64) = COPY $d1 +; CHECK: [[X2:%[0-9]+]]:_(s64) = COPY $d2 +; CHECK: [[Y0:%[0-9]+]]:_(s32) = COPY $s6 +; CHECK: [[Y1:%[0-9]+]]:_(s32) = COPY $s7 +; CHECK: [[Y2:%[0-9]+]]:_(s32) = COPY $s8 ; CHECK: [[Z0_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[Z0_ID]] ; CHECK: [[Z0:%[0-9]+]]:_(s64) = G_LOAD [[Z0_FI]]{{.*}}load 8 ; CHECK: [[Z1_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[Z1_ID]] @@ -358,45 +358,45 @@ ; CHECK: [[X_ARR:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[X0]](s64), [[X1]](s64), [[X2]](s64) ; CHECK: [[Y_ARR:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32), [[Y2]](s32) ; CHECK: [[Z_ARR:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[Z0]](s64), [[Z1]](s64), [[Z2]](s64), [[Z3]](s64) -; CHECK: ADJCALLSTACKDOWN 32, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 32, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[X0:%[0-9]+]]:_(s64), [[X1:%[0-9]+]]:_(s64), [[X2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[X_ARR]](s192) ; CHECK: [[Y0:%[0-9]+]]:_(s32), [[Y1:%[0-9]+]]:_(s32), [[Y2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[Y_ARR]](s96) ; CHECK: [[Z0:%[0-9]+]]:_(s64), [[Z1:%[0-9]+]]:_(s64), [[Z2:%[0-9]+]]:_(s64), [[Z3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[Z_ARR]](s256) -; CHECK: %d0 = COPY [[X0]](s64) -; CHECK: %d1 = COPY [[X1]](s64) -; CHECK: %d2 = COPY [[X2]](s64) -; CHECK: %s6 = COPY [[Y0]](s32) -; CHECK: %s7 = COPY [[Y1]](s32) -; CHECK: %s8 = COPY [[Y2]](s32) -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: $d0 = COPY [[X0]](s64) +; CHECK: $d1 = COPY [[X1]](s64) +; CHECK: $d2 = COPY [[X2]](s64) +; CHECK: $s6 = COPY [[Y0]](s32) +; CHECK: $s7 = COPY [[Y1]](s32) +; CHECK: $s8 = COPY [[Y2]](s32) +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[Z0_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[Z0_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z0_OFFSET]](s32) ; CHECK: G_STORE [[Z0]](s64), [[Z0_ADDR]](p0){{.*}}store 8 -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[Z1_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CHECK: [[Z1_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z1_OFFSET]](s32) ; CHECK: G_STORE [[Z1]](s64), [[Z1_ADDR]](p0){{.*}}store 8 -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[Z2_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CHECK: [[Z2_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z2_OFFSET]](s32) ; CHECK: G_STORE [[Z2]](s64), [[Z2_ADDR]](p0){{.*}}store 8 -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[Z3_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[Z3_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z3_OFFSET]](s32) ; CHECK: G_STORE [[Z3]](s64), [[Z3_ADDR]](p0){{.*}}store 8 -; CHECK: BL @fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit %d2, implicit %s6, implicit %s7, implicit %s8, implicit-def %s0, implicit-def %s1, implicit-def %s2, implicit-def %s3 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %s0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %s1 -; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY %s2 -; CHECK: [[R3:%[0-9]+]]:_(s32) = COPY %s3 +; CHECK: BL 
@fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit $d2, implicit $s6, implicit $s7, implicit $s8, implicit-def $s0, implicit-def $s1, implicit-def $s2, implicit-def $s3 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $s0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $s1 +; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY $s2 +; CHECK: [[R3:%[0-9]+]]:_(s32) = COPY $s3 ; CHECK: [[R_MERGED:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32), [[R3]](s32) -; CHECK: ADJCALLSTACKUP 32, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKUP 32, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R_MERGED]](s128) -; CHECK: %s0 = COPY [[R0]] -; CHECK: %s1 = COPY [[R1]] -; CHECK: %s2 = COPY [[R2]] -; CHECK: %s3 = COPY [[R3]] -; CHECK: BX_RET 14, %noreg, implicit %s0, implicit %s1, implicit %s2, implicit %s3 +; CHECK: $s0 = COPY [[R0]] +; CHECK: $s1 = COPY [[R1]] +; CHECK: $s2 = COPY [[R2]] +; CHECK: $s3 = COPY [[R3]] +; CHECK: BX_RET 14, $noreg, implicit $s0, implicit $s1, implicit $s2, implicit $s3 entry: %r = notail call arm_aapcs_vfpcc [4 x float] @fp_arrays_aapcs_vfp_target([3 x double] %x, [3 x float] %y, [4 x double] %z) ret [4 x float] %r @@ -411,41 +411,41 @@ ; doesn't fit in the registers. ; CHECK-DAG: id: [[FIRST_STACK_ID:[0-9]+]], type: default, offset: 0, size: 4, ; CHECK-DAG: id: [[LAST_STACK_ID:[-0]+]], type: default, offset: 76, size: 4 -; CHECK: liveins: %r0, %r1, %r2, %r3 -; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY %r1 -; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY %r2 -; CHECK-DAG: [[R3:%[0-9]+]]:_(s32) = COPY %r3 +; CHECK: liveins: $r0, $r1, $r2, $r3 +; CHECK-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r1 +; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY $r2 +; CHECK-DAG: [[R3:%[0-9]+]]:_(s32) = COPY $r3 ; CHECK: [[FIRST_STACK_ELEMENT_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[FIRST_STACK_ID]] ; CHECK: [[FIRST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_LOAD [[FIRST_STACK_ELEMENT_FI]]{{.*}}load 4 from %fixed-stack.[[FIRST_STACK_ID]] ; CHECK: [[LAST_STACK_ELEMENT_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[LAST_STACK_ID]] ; CHECK: [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_LOAD [[LAST_STACK_ELEMENT_FI]]{{.*}}load 4 from %fixed-stack.[[LAST_STACK_ID]] ; CHECK: [[ARG_ARR:%[0-9]+]]:_(s768) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32), [[R3]](s32), [[FIRST_STACK_ELEMENT]](s32), {{.*}}, [[LAST_STACK_ELEMENT]](s32) -; CHECK: ADJCALLSTACKDOWN 80, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 80, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32), [[FIRST_STACK_ELEMENT:%[0-9]+]]:_(s32), {{.*}}, [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR]](s768) -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: %r2 = COPY [[R2]] -; CHECK: %r3 = COPY [[R3]] -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: $r2 = COPY [[R2]] +; CHECK: $r3 = COPY [[R3]] +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[OFF_FIRST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[FIRST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_FIRST_ELEMENT]](s32) ; CHECK: G_STORE [[FIRST_STACK_ELEMENT]](s32), [[FIRST_STACK_ARG_ADDR]]{{.*}}store 4 ; 
Match the second-to-last offset, so we can get the correct SP for the last element ; CHECK: G_CONSTANT i32 72 -; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp +; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp ; CHECK: [[OFF_LAST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 76 ; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32) ; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4 -; CHECK: BL @tough_arrays_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK: BL @tough_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1 ; CHECK: [[RES_ARR:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32) -; CHECK: ADJCALLSTACKUP 80, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKUP 80, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[RES_ARR]](s64) -; CHECK: %r0 = COPY [[R0]] -; CHECK: %r1 = COPY [[R1]] -; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1 +; CHECK: $r0 = COPY [[R0]] +; CHECK: $r1 = COPY [[R1]] +; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1 entry: %r = notail call arm_aapcscc [2 x i32*] @tough_arrays_target([6 x [4 x i32]] %arr) ret [2 x i32*] %r @@ -455,23 +455,23 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) { ; CHECK-LABEL: test_structs -; CHECK: liveins: %r0, %r1 -; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK: liveins: $r0, $r1 +; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1 ; CHECK: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32) -; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[X0:%[0-9]+]]:_(s32), [[X1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[X]](s64) -; CHECK-DAG: %r0 = COPY [[X0]](s32) -; CHECK-DAG: %r1 = COPY [[X1]](s32) -; CHECK: BL @structs_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit-def %r0, implicit-def %r1 -; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0 -; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1 +; CHECK-DAG: $r0 = COPY [[X0]](s32) +; CHECK-DAG: $r1 = COPY [[X1]](s32) +; CHECK: BL @structs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1 +; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0 +; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1 ; CHECK: [[R:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32) -; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp +; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp ; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R]](s64) -; CHECK: %r0 = COPY [[R0]](s32) -; CHECK: %r1 = COPY [[R1]](s32) -; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1 +; CHECK: $r0 = COPY [[R0]](s32) +; CHECK: $r1 = COPY [[R1]](s32) +; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1 %r = notail call arm_aapcscc {i32, i32} @structs_target({i32, i32} %x) ret {i32, i32} %r } Index: test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir 
=================================================================== --- test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir +++ test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir @@ -101,13 +101,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_ADD %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -127,13 +127,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_SUB %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -153,13 +153,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_MUL %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -179,13 +179,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_SDIV %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -205,13 +205,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_UDIV %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -231,13 +231,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_AND %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -257,13 +257,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_OR %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -283,13 +283,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_XOR %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -309,13 +309,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_LSHR %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -335,13 +335,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_ASHR %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- @@ -361,13 +361,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s32) = G_SHL %0, %1 - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -395,15 +395,15 @@ - { id: 6, class: _ } body: | bb.0: - liveins: %r0 - %0(p0) = COPY %r0 + liveins: $r0 + %0(p0) = COPY $r0 %6(s64) = G_LOAD %0 :: (load 8) %1(s32) = G_LOAD %0 :: (load 4) %2(s16) = G_LOAD %0 :: (load 2) %3(s8) = G_LOAD %0 :: (load 1) %4(s1) = G_LOAD %0 :: (load 1) %5(p0) = G_LOAD %0 :: (load 4) - BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 ... --- @@ -431,9 +431,9 @@ - { id: 6, class: _ } body: | bb.0: - liveins: %r0, %r1, %r5, %d6 - %0(p0) = COPY %r0 - %1(s32) = COPY %r1 + liveins: $r0, $r1, $r5, $d6 + %0(p0) = COPY $r0 + %1(s32) = COPY $r1 G_STORE %1(s32), %0 :: (store 4) %2(s16) = G_TRUNC %1(s32) G_STORE %2(s16), %0 :: (store 2) @@ -441,11 +441,11 @@ G_STORE %3(s8), %0 :: (store 1) %4(s1) = G_TRUNC %1(s32) G_STORE %4(s1), %0 :: (store 1) - %5(p0) = COPY %r5 + %5(p0) = COPY $r5 G_STORE %5(p0), %0 :: (store 4) - %6(s64) = COPY %d6 + %6(s64) = COPY $d6 G_STORE %6(s64), %0 :: (store 8) - BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 ... --- @@ -473,12 +473,12 @@ %0(p0) = G_FRAME_INDEX %fixed-stack.0 %1(s32) = G_LOAD %0(p0) :: (load 4 from %fixed-stack.0, align 0) - %2(p0) = COPY %sp + %2(p0) = COPY $sp %3(s32) = G_CONSTANT i32 8 %4(p0) = G_GEP %2, %3(s32) G_STORE %1(s32), %4(p0) :: (store 4) - BX_RET 14, %noreg + BX_RET 14, $noreg ... --- @@ -498,13 +498,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(p0) = COPY %r0 - %1(s32) = COPY %r1 + %0(p0) = COPY $r0 + %1(s32) = COPY $r1 %2(p0) = G_GEP %0, %1(s32) - %r0 = COPY %2(p0) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(p0) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_constants @@ -519,8 +519,8 @@ body: | bb.0: %0(s32) = G_CONSTANT 42 - %r0 = COPY %0(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %0(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_inttoptr_s32 @@ -536,10 +536,10 @@ - { id: 1, class: _ } body: | bb.0: - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(p0) = G_INTTOPTR %0(s32) - %r0 = COPY %1(p0) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %1(p0) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_ptrtoint_s32 @@ -555,10 +555,10 @@ - { id: 1, class: _ } body: | bb.0: - %0(p0) = COPY %r0 + %0(p0) = COPY $r0 %1(s32) = G_PTRTOINT %0(p0) - %r0 = COPY %1(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %1(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_globals @@ -573,8 +573,8 @@ body: | bb.0: %0(p0) = G_GLOBAL_VALUE @a_global - %r0 = COPY %0(p0) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %0(p0) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_anyext_s8_32 @@ -592,13 +592,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s8) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s8) - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_anyext_s16_32 @@ -616,13 +616,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s16) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s16) - %r0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_trunc_s32_16 @@ -640,13 +640,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %2(p0) = COPY %r1 + %0(s32) = COPY $r0 + %2(p0) = COPY $r1 %1(s16) = G_TRUNC %0(s32) G_STORE %1(s16), %2 :: (store 2) - BX_RET 14, %noreg + BX_RET 14, $noreg ... --- name: test_trunc_s64_32 @@ -664,13 +664,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %r0, %d0 + liveins: $r0, $d0 - %0(s64) = COPY %d0 - %2(p0) = COPY %r0 + %0(s64) = COPY $d0 + %2(p0) = COPY $r0 %1(s32) = G_TRUNC %0(s64) G_STORE %1(s32), %2 :: (store 4) - BX_RET 14, %noreg + BX_RET 14, $noreg ... --- name: test_icmp_eq_s32 @@ -691,14 +691,14 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s1) = G_ICMP intpred(eq), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -720,14 +720,14 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s1) = G_FCMP floatpred(one), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -749,14 +749,14 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s1) = G_FCMP floatpred(ugt), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %r0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -780,15 +780,15 @@ - { id: 4, class: _ } body: | bb.0: - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 - %2(s32) = COPY %r2 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 + %2(s32) = COPY $r2 %3(s1) = G_TRUNC %2(s32) %4(s32) = G_SELECT %3(s1), %0, %1 - %r0 = COPY %4(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %4(s32) + BX_RET 14, $noreg, implicit $r0 ... --- @@ -808,18 +808,18 @@ body: | bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %r0 + liveins: $r0 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s1) = G_TRUNC %0(s32) G_BRCOND %1(s1), %bb.1 G_BR %bb.2 bb.1: - BX_RET 14, %noreg + BX_RET 14, $noreg bb.2: - BX_RET 14, %noreg + BX_RET 14, $noreg ... --- @@ -844,13 +844,13 @@ body: | bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %r0, %r1, %r2 + liveins: $r0, $r1, $r2 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s1) = G_TRUNC %0(s32) - %2(s32) = COPY %r1 - %3(s32) = COPY %r2 + %2(s32) = COPY $r1 + %3(s32) = COPY $r2 G_BRCOND %1(s1), %bb.1 G_BR %bb.2 @@ -860,8 +860,8 @@ bb.2: %4(s32) = G_PHI %2(s32), %bb.0, %3(s32), %bb.1 - %r0 = COPY %4(s32) - BX_RET 14, %noreg, implicit %r0 + $r0 = COPY %4(s32) + BX_RET 14, $noreg, implicit $r0 ... --- name: test_phi_s64 @@ -885,13 +885,13 @@ body: | bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %r0, %d0, %d1 + liveins: $r0, $d0, $d1 - %0(s32) = COPY %r0 + %0(s32) = COPY $r0 %1(s1) = G_TRUNC %0(s32) - %2(s64) = COPY %d0 - %3(s64) = COPY %d1 + %2(s64) = COPY $d0 + %3(s64) = COPY $d1 G_BRCOND %1(s1), %bb.1 G_BR %bb.2 @@ -901,8 +901,8 @@ bb.2: %4(s64) = G_PHI %2(s64), %bb.0, %3(s64), %bb.1 - %d0 = COPY %4(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %4(s64) + BX_RET 14, $noreg, implicit $d0 ... 
--- name: test_fadd_s32 @@ -921,13 +921,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FADD %0, %1 - %s0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $s0 ... --- @@ -947,13 +947,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FADD %0, %1 - %d0 = COPY %2(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %2(s64) + BX_RET 14, $noreg, implicit $d0 ... --- @@ -973,13 +973,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FSUB %0, %1 - %s0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $s0 ... --- @@ -999,13 +999,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FSUB %0, %1 - %d0 = COPY %2(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %2(s64) + BX_RET 14, $noreg, implicit $d0 ... --- @@ -1025,13 +1025,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FMUL %0, %1 - %s0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $s0 ... --- @@ -1051,13 +1051,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FMUL %0, %1 - %d0 = COPY %2(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %2(s64) + BX_RET 14, $noreg, implicit $d0 ... --- @@ -1077,13 +1077,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %s0, %s1 + liveins: $s0, $s1 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 %2(s32) = G_FDIV %0, %1 - %s0 = COPY %2(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %2(s32) + BX_RET 14, $noreg, implicit $s0 ... --- @@ -1103,13 +1103,13 @@ - { id: 2, class: _ } body: | bb.0: - liveins: %d0, %d1 + liveins: $d0, $d1 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 %2(s64) = G_FDIV %0, %1 - %d0 = COPY %2(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %2(s64) + BX_RET 14, $noreg, implicit $d0 ... --- @@ -1126,12 +1126,12 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s32) = G_FNEG %0 - %s0 = COPY %1(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %1(s32) + BX_RET 14, $noreg, implicit $s0 ... --- @@ -1148,12 +1148,12 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %d0 + liveins: $d0 - %0(s64) = COPY %d0 + %0(s64) = COPY $d0 %1(s64) = G_FNEG %0 - %d0 = COPY %1(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %1(s64) + BX_RET 14, $noreg, implicit $d0 ... --- @@ -1174,14 +1174,14 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %s0, %s1, %s2 + liveins: $s0, $s1, $s2 - %0(s32) = COPY %s0 - %1(s32) = COPY %s1 - %2(s32) = COPY %s2 + %0(s32) = COPY $s0 + %1(s32) = COPY $s1 + %2(s32) = COPY $s2 %3(s32) = G_FMA %0, %1, %2 - %s0 = COPY %3(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %3(s32) + BX_RET 14, $noreg, implicit $s0 ... 
--- name: test_fma_s64 @@ -1201,14 +1201,14 @@ - { id: 3, class: _ } body: | bb.0: - liveins: %d0, %d1, %d2 + liveins: $d0, $d1, $d2 - %0(s64) = COPY %d0 - %1(s64) = COPY %d1 - %2(s64) = COPY %d2 + %0(s64) = COPY $d0 + %1(s64) = COPY $d1 + %2(s64) = COPY $d2 %3(s64) = G_FMA %0, %1, %2 - %d0 = COPY %3(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %3(s64) + BX_RET 14, $noreg, implicit $d0 ... --- name: test_fpext_s32_to_s64 @@ -1224,12 +1224,12 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %s0 + liveins: $s0 - %0(s32) = COPY %s0 + %0(s32) = COPY $s0 %1(s64) = G_FPEXT %0 - %d0 = COPY %1(s64) - BX_RET 14, %noreg, implicit %d0 + $d0 = COPY %1(s64) + BX_RET 14, $noreg, implicit $d0 ... --- name: test_fptrunc_s64_to_s32 @@ -1245,12 +1245,12 @@ - { id: 1, class: _ } body: | bb.0: - liveins: %d0 + liveins: $d0 - %0(s64) = COPY %d0 + %0(s64) = COPY $d0 %1(s32) = G_FPTRUNC %0 - %s0 = COPY %1(s32) - BX_RET 14, %noreg, implicit %s0 + $s0 = COPY %1(s32) + BX_RET 14, $noreg, implicit $s0 ... --- name: test_fptosi_s32 @@ -1445,14 +1445,14 @@ - { id: 4, class: _ } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %0(s32) = COPY %r0 - %1(s32) = COPY %r1 + %0(s32) = COPY $r0 + %1(s32) = COPY $r1 %2(s64) = G_MERGE_VALUES %0(s32), %1(s32) %3(s32), %4(s32) = G_UNMERGE_VALUES %2(s64) - %r0 = COPY %3(s32) - %r1 = COPY %4(s32) - BX_RET 14, %noreg, implicit %r0, implicit %r1 + $r0 = COPY %3(s32) + $r1 = COPY %4(s32) + BX_RET 14, $noreg, implicit $r0, implicit $r1 ... Index: test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir +++ test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir @@ -33,13 +33,13 @@ ; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel {{.*}}@internal_global %1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_global) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_global) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @internal_global) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_external_global @@ -59,13 +59,13 @@ ; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel_ldr target-flags(arm-got) @external_global :: (load 4 from got) %1(s32) = G_LOAD %0(p0) :: (load 4 from @external_global) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_global) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @external_global) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_internal_constant @@ -85,13 +85,13 @@ ; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel {{.*}}@internal_constant %1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_constant) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_constant) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @internal_constant) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_external_constant @@ -111,11 +111,11 @@ ; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel_ldr target-flags(arm-got) @external_constant :: (load 4 from got) %1(s32) = G_LOAD %0(p0) :: (load 4 from @external_constant) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_constant) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @external_constant) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... Index: test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir +++ test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir @@ -37,19 +37,19 @@ bb.0: %0(p0) = G_GLOBAL_VALUE @internal_global ; RW-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_global - ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) + ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) ; RWPI-MOVT: [[OFF:%[0-9]+]]:gpr = MOVi32imm {{.*}} @internal_global - ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) - ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr %r9, [[OFF]], 14, %noreg, %noreg + ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) + ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr $r9, [[OFF]], 14, $noreg, $noreg %1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_global) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_global) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @internal_global) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
--- name: test_external_global @@ -71,19 +71,19 @@ bb.0: %0(p0) = G_GLOBAL_VALUE @external_global ; RW-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_global - ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) + ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) ; RWPI-MOVT: [[OFF:%[0-9]+]]:gpr = MOVi32imm {{.*}} @external_global - ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) - ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr %r9, [[OFF]], 14, %noreg, %noreg + ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) + ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr $r9, [[OFF]], 14, $noreg, $noreg %1(s32) = G_LOAD %0(p0) :: (load 4 from @external_global) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_global) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @external_global) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_internal_constant @@ -104,16 +104,16 @@ ; ROPI-MOVT: [[G:%[0-9]+]]:gpr = MOV_ga_pcrel @internal_constant ; ROPI-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel @internal_constant ; RO-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_constant - ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) + ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) %1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_constant) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_constant) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @internal_constant) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_external_constant @@ -134,14 +134,14 @@ ; ROPI-MOVT: [[G:%[0-9]+]]:gpr = MOV_ga_pcrel @external_constant ; ROPI-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel @external_constant ; RO-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_constant - ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) + ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) %1(s32) = G_LOAD %0(p0) :: (load 4 from @external_constant) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_constant) + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg :: (load 4 from @external_constant) - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... 
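After the rewrite, the sigil alone tells the reader what kind of operand a token is, which is what makes hunks like the ones above easy to audit: '$' always means a physical register, and everything still spelled with '%' is a virtual register or a block/frame/constant-pool reference. A throwaway classifier makes this concrete (the function name and category strings are mine, purely for illustration):

import re

def classify(token: str) -> str:
    # Ad-hoc operand classification matching the post-patch MIR syntax.
    if token.startswith('$'):
        return 'physical register'          # $r0, $sp, $noreg, $cpsr
    if token.startswith('%bb.'):
        return 'basic block'                # %bb.1
    if token.startswith(('%stack.', '%fixed-stack.')):
        return 'frame index'                # %stack.0.retval, %fixed-stack.0
    if token.startswith('%const.'):
        return 'constant pool entry'        # %const.0
    if re.match(r'%\d', token):
        return 'virtual register'           # %0, %2(s64), %5:fprb(s64)
    return 'unknown'

assert classify('$noreg') == 'physical register'
assert classify('%5:fprb(s64)') == 'virtual register'
assert classify('%fixed-stack.0') == 'frame index'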
Index: test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir +++ test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir @@ -26,18 +26,18 @@ bb.0: %0(p0) = G_GLOBAL_VALUE @internal_global ; ELF-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_global - ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) + ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) ; DARWIN-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_global ; DARWIN-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_abs @internal_global %1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_global) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... --- name: test_external_global @@ -56,16 +56,16 @@ bb.0: %0(p0) = G_GLOBAL_VALUE @external_global ; ELF-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_global - ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool) + ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, $noreg :: (load 4 from constant-pool) ; DARWIN-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_global ; DARWIN-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_abs @external_global %1(s32) = G_LOAD %0(p0) :: (load 4 from @external_global) - ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg + ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, $noreg - %r0 = COPY %1(s32) - ; CHECK: %r0 = COPY [[V]] + $r0 = COPY %1(s32) + ; CHECK: $r0 = COPY [[V]] - BX_RET 14, %noreg, implicit %r0 - ; CHECK: BX_RET 14, %noreg, implicit %r0 + BX_RET 14, $noreg, implicit $r0 + ; CHECK: BX_RET 14, $noreg, implicit $r0 ... Index: test/CodeGen/ARM/GlobalISel/select-pr35926.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/select-pr35926.mir +++ test/CodeGen/ARM/GlobalISel/select-pr35926.mir @@ -24,17 +24,17 @@ selected: false body: | bb.1 (%ir-block.0): - liveins: %d0, %d1, %d2 + liveins: $d0, $d1, $d2 - %0:fprb(s64) = COPY %d0 - %1:fprb(s64) = COPY %d1 - %2:fprb(s64) = COPY %d2 + %0:fprb(s64) = COPY $d0 + %1:fprb(s64) = COPY $d1 + %2:fprb(s64) = COPY $d2 %3:fprb(s64) = G_FNEG %1 %4:fprb(s64) = G_FMA %0, %3, %2 %5:fprb(s64) = G_FNEG %4 - %d0 = COPY %5(s64) - MOVPCLR 14, %noreg, implicit %d0 + $d0 = COPY %5(s64) + MOVPCLR 14, $noreg, implicit $d0 -# CHECK: %{{[0-9]+}}:dpr = VFNMSD %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, 14, %noreg +# CHECK: %{{[0-9]+}}:dpr = VFNMSD %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, 14, $noreg ... 
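Note that the rewrite is not limited to instruction operands: the per-function YAML headers also name physical registers, as quoted strings in the liveins list (visible below in the cmp-peephole and constant-islands tests). The sketch filter from earlier handles these unchanged, since the quoting does not affect the match:

>>> rewrite("- { reg: '%r0', virtual-reg: '%0' }")
"- { reg: '$r0', virtual-reg: '%0' }"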
Index: test/CodeGen/ARM/PR32721_ifcvt_triangle_unanalyzable.mir
===================================================================
--- test/CodeGen/ARM/PR32721_ifcvt_triangle_unanalyzable.mir
+++ test/CodeGen/ARM/PR32721_ifcvt_triangle_unanalyzable.mir
@@ -9,7 +9,7 @@
BX_RET 14, 0
bb.2:
- Bcc %bb.1, 1, %cpsr
+ Bcc %bb.1, 1, $cpsr
bb.3:
B %bb.1
Index: test/CodeGen/ARM/Windows/vla-cpsr.ll
===================================================================
--- test/CodeGen/ARM/Windows/vla-cpsr.ll
+++ test/CodeGen/ARM/Windows/vla-cpsr.ll
@@ -9,5 +9,5 @@
ret void
}
-; CHECK: tBL 14, %noreg, &__chkstk, implicit-def %lr, implicit %sp, implicit killed %r4, implicit-def %r4, implicit-def dead %r12, implicit-def dead %cpsr
+; CHECK: tBL 14, $noreg, &__chkstk, implicit-def $lr, implicit $sp, implicit killed $r4, implicit-def $r4, implicit-def dead $r12, implicit-def dead $cpsr
Index: test/CodeGen/ARM/cmp1-peephole-thumb.mir
===================================================================
--- test/CodeGen/ARM/cmp1-peephole-thumb.mir
+++ test/CodeGen/ARM/cmp1-peephole-thumb.mir
@@ -32,8 +32,8 @@
  - { id: 4, class: tgpr }
  - { id: 5, class: tgpr }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -49,27 +49,27 @@
hasVAStart: false
hasMustTailInVarArgFunc: false
-# CHECK: tMOVi8 1, 14, %noreg
-# CHECK: tMOVi8 0, 14, %noreg
-# CHECK: tMUL %1, %0, 14, %noreg
+# CHECK: tMOVi8 1, 14, $noreg
+# CHECK: tMOVi8 0, 14, $noreg
+# CHECK: tMUL %1, %0, 14, $noreg
# CHECK-NOT: tCMPi8
body: |
bb.0.entry:
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %1 = COPY %r1
- %0 = COPY %r0
- %2, %cpsr = tMUL %1, %0, 14, %noreg
- %3, %cpsr = tMOVi8 1, 14, %noreg
- %4, %cpsr = tMOVi8 0, 14, %noreg
- tCMPi8 killed %2, 0, 14, %noreg, implicit-def %cpsr
- tBcc %bb.2.entry, 0, %cpsr
+ %1 = COPY $r1
+ %0 = COPY $r0
+ %2, $cpsr = tMUL %1, %0, 14, $noreg
+ %3, $cpsr = tMOVi8 1, 14, $noreg
+ %4, $cpsr = tMOVi8 0, 14, $noreg
+ tCMPi8 killed %2, 0, 14, $noreg, implicit-def $cpsr
+ tBcc %bb.2.entry, 0, $cpsr
bb.1.entry:
bb.2.entry:
%5 = PHI %4, %bb.1.entry, %3, %bb.0.entry
- %r0 = COPY %5
- tBX_RET 14, %noreg, implicit %r0
+ $r0 = COPY %5
+ tBX_RET 14, $noreg, implicit $r0
...
Index: test/CodeGen/ARM/cmp2-peephole-thumb.mir
===================================================================
--- test/CodeGen/ARM/cmp2-peephole-thumb.mir
+++ test/CodeGen/ARM/cmp2-peephole-thumb.mir
@@ -51,8 +51,8 @@
  - { id: 4, class: tgpr }
  - { id: 5, class: tgpr }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -76,28 +76,28 @@
# CHECK-NEXT: tCMPi8
body: |
bb.0.entry:
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %1 = COPY %r1
- %0 = COPY %r0
- %2, %cpsr = tMUL %0, %1, 14, %noreg
- tSTRspi %2, %stack.1.mul, 0, 14, %noreg :: (store 4 into %ir.mul)
- tCMPi8 %2, 0, 14, %noreg, implicit-def %cpsr
- tBcc %bb.2.if.end, 12, %cpsr
- tB %bb.1.if.then, 14, %noreg
+ %1 = COPY $r1
+ %0 = COPY $r0
+ %2, $cpsr = tMUL %0, %1, 14, $noreg
+ tSTRspi %2, %stack.1.mul, 0, 14, $noreg :: (store 4 into %ir.mul)
+ tCMPi8 %2, 0, 14, $noreg, implicit-def $cpsr
+ tBcc %bb.2.if.end, 12, $cpsr
+ tB %bb.1.if.then, 14, $noreg
bb.1.if.then:
- %4, %cpsr = tMOVi8 42, 14, %noreg
- tSTRspi killed %4, %stack.0.retval, 0, 14, %noreg :: (store 4 into %ir.retval)
- tB %bb.3.return, 14, %noreg
+ %4, $cpsr = tMOVi8 42, 14, $noreg
+ tSTRspi killed %4, %stack.0.retval, 0, 14, $noreg :: (store 4 into %ir.retval)
+ tB %bb.3.return, 14, $noreg
bb.2.if.end:
- %3, %cpsr = tMOVi8 1, 14, %noreg
- tSTRspi killed %3, %stack.0.retval, 0, 14, %noreg :: (store 4 into %ir.retval)
+ %3, $cpsr = tMOVi8 1, 14, $noreg
+ tSTRspi killed %3, %stack.0.retval, 0, 14, $noreg :: (store 4 into %ir.retval)
bb.3.return:
- %5 = tLDRspi %stack.0.retval, 0, 14, %noreg :: (dereferenceable load 4 from %ir.retval)
- %r0 = COPY %5
- tBX_RET 14, %noreg, implicit %r0
+ %5 = tLDRspi %stack.0.retval, 0, 14, $noreg :: (dereferenceable load 4 from %ir.retval)
+ $r0 = COPY %5
+ tBX_RET 14, $noreg, implicit $r0
...
Index: test/CodeGen/ARM/constant-islands-cfg.mir
===================================================================
--- test/CodeGen/ARM/constant-islands-cfg.mir
+++ test/CodeGen/ARM/constant-islands-cfg.mir
@@ -15,7 +15,7 @@
tracksRegLiveness: true
registers:
liveins:
- - { reg: '%r0', virtual-reg: '' }
+ - { reg: '$r0', virtual-reg: '' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -37,7 +37,7 @@
# CHECK-LABEL: name: test_split_cfg
# CHECK: bb.0:
# CHECK: successors: %[[LONG_BR_BB:bb.[0-9]+]](0x{{[0-9a-f]+}}), %[[DEST1:bb.[0-9]+]](0x{{[0-9a-f]+}}){{$}}
-# CHECK: tBcc %[[LONG_BR_BB]], 0, %cpsr
+# CHECK: tBcc %[[LONG_BR_BB]], 0, $cpsr
# CHECK: tB %[[DEST1]]
# CHECK: [[LONG_BR_BB]]:
# CHECK: successors: %[[DEST2:bb.[0-9]+]](0x{{[0-9a-f]+}}){{$}}
@@ -47,18 +47,18 @@
body: |
bb.0:
- liveins: %r0
- tCMPi8 killed %r0, 0, 14, %noreg, implicit-def %cpsr
- tBcc %bb.2, 1, killed %cpsr
- tB %bb.3, 14, %noreg
+ liveins: $r0
+ tCMPi8 killed $r0, 0, 14, $noreg, implicit-def $cpsr
+ tBcc %bb.2, 1, killed $cpsr
+ tB %bb.3, 14, $noreg
bb.1:
- dead %r0 = SPACE 256, undef %r0
+ dead $r0 = SPACE 256, undef $r0
bb.2:
- tPOP_RET 14, %noreg, def %pc
+ tPOP_RET 14, $noreg, def $pc
bb.3:
- tPOP_RET 14, %noreg, def %pc
+ tPOP_RET 14, $noreg, def $pc
...
Index: test/CodeGen/ARM/dbg-range-extension.mir
===================================================================
--- test/CodeGen/ARM/dbg-range-extension.mir
+++ test/CodeGen/ARM/dbg-range-extension.mir
@@ -23,37 +23,37 @@
# CHECK: [[VAR_I:![0-9]+]] = !DILocalVariable(name: "i",
# CHECK: bb.0.entry
-# CHECK: DBG_VALUE debug-use %r0, debug-use %noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_A:%r[0-9]+]], debug-use %noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_B:%r[0-9]+]], debug-use %noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use $r0, debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_A:\$r[0-9]+]], debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_B:\$r[0-9]+]], debug-use $noreg, [[VAR_B]]
# CHECK: bb.1.if.then
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_C:%r[0-9]+]], debug-use %noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_C:\$r[0-9]+]], debug-use $noreg, [[VAR_C]]
# CHECK: DBG_VALUE 1, 0, [[VAR_I]]
# CHECK: bb.2.for.body
-# CHECK: DBG_VALUE debug-use [[REG_I:%r[0-9]+]], debug-use %noreg, [[VAR_I]]
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use %noreg, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use %noreg, [[VAR_I]]
+# CHECK: DBG_VALUE debug-use [[REG_I:\$r[0-9]+]], debug-use $noreg, [[VAR_I]]
+# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use $noreg, [[VAR_I]]
# CHECK: bb.3.for.cond
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use %noreg, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use %noreg, [[VAR_I]]
+# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use $noreg, [[VAR_I]]
# CHECK: bb.4.for.cond.cleanup
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use %noreg, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
# CHECK: bb.5.if.end
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
--- |
  ; ModuleID = '/data/kwalker/work/OpenSource-llvm/llvm/test/CodeGen/ARM/dbg-range-extension.ll'
  source_filename = "/data/kwalker/work/OpenSource-llvm/llvm/test/CodeGen/ARM/dbg-range-extension.ll"
@@ -171,21 +171,21 @@
selected: false
tracksRegLiveness: false
liveins:
- - { reg: '%r0' }
-calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
- '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
- '%r5', '%r6', '%r7', '%r8', '%r9', '%r10', '%r11',
- '%s16', '%s17', '%s18', '%s19', '%s20', '%s21',
- '%s22', '%s23', '%s24', '%s25', '%s26', '%s27',
- '%s28', '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11',
- '%d10_d12', '%d11_d13', '%d12_d14', '%d13_d15',
- '%q4_q5', '%q5_q6', '%q6_q7', '%q4_q5_q6_q7', '%r4_r5',
- '%r6_r7', '%r8_r9', '%r10_r11', '%d8_d9_d10', '%d9_d10_d11',
- '%d10_d11_d12', '%d11_d12_d13', '%d12_d13_d14',
- '%d13_d14_d15', '%d8_d10_d12', '%d9_d11_d13', '%d10_d12_d14',
- '%d11_d13_d15', '%d8_d10_d12_d14', '%d9_d11_d13_d15',
- '%d9_d10', '%d11_d12', '%d13_d14', '%d9_d10_d11_d12',
- '%d11_d12_d13_d14' ]
+ - { reg: '$r0' }
+calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13',
+ '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4',
+ '$r5', '$r6', '$r7', '$r8', '$r9', '$r10', '$r11',
+ '$s16', '$s17', '$s18', '$s19', '$s20', '$s21',
+ '$s22', '$s23', '$s24', '$s25', '$s26', '$s27',
+ '$s28', '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11',
+ '$d10_d12', '$d11_d13', '$d12_d14', '$d13_d15',
+ '$q4_q5', '$q5_q6', '$q6_q7', '$q4_q5_q6_q7', '$r4_r5',
+ '$r6_r7', '$r8_r9', '$r10_r11', '$d8_d9_d10', '$d9_d10_d11',
+ '$d10_d11_d12', '$d11_d12_d13', '$d12_d13_d14',
+ '$d13_d14_d15', '$d8_d10_d12', '$d9_d11_d13', '$d10_d12_d14',
+ '$d11_d13_d15', '$d8_d10_d12_d14', '$d9_d11_d13_d15',
+ '$d9_d10', '$d11_d12', '$d13_d14', '$d9_d10_d11_d12',
+ '$d11_d12_d13_d14' ]
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -201,76 +201,76 @@
hasVAStart: false
hasMustTailInVarArgFunc: false
stack:
- - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr' }
- - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r11' }
- - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, callee-saved-register: '%r7' }
- - { id: 3, type: spill-slot, offset: -16, size: 4, alignment: 4, callee-saved-register: '%r6' }
- - { id: 4, type: spill-slot, offset: -20, size: 4, alignment: 4, callee-saved-register: '%r5' }
- - { id: 5, type: spill-slot, offset: -24, size: 4, alignment: 4, callee-saved-register: '%r4' }
+ - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr' }
+ - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '$r11' }
+ - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, callee-saved-register: '$r7' }
+ - { id: 3, type: spill-slot, offset: -16, size: 4, alignment: 4, callee-saved-register: '$r6' }
+ - { id: 4, type: spill-slot, offset: -20, size: 4, alignment: 4, callee-saved-register: '$r5' }
+ - { id: 5, type: spill-slot, offset: -24, size: 4, alignment: 4, callee-saved-register: '$r4' }
body: |
bb.0.entry:
- liveins: %r0, %r4, %r5, %r6, %r7, %r11, %lr
+ liveins: $r0, $r4, $r5, $r6, $r7, $r11, $lr
- %sp = frame-setup STMDB_UPD %sp, 14, %noreg, killed %r4, killed %r5, killed %r6, killed %r7, killed %r11, killed %lr
+ $sp = frame-setup STMDB_UPD $sp, 14, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r11, killed $lr
frame-setup CFI_INSTRUCTION def_cfa_offset 24
- frame-setup CFI_INSTRUCTION offset %lr, -4
- frame-setup CFI_INSTRUCTION offset %r11, -8
- frame-setup CFI_INSTRUCTION offset %r7, -12
- frame-setup CFI_INSTRUCTION offset %r6, -16
- frame-setup CFI_INSTRUCTION offset %r5, -20
- frame-setup CFI_INSTRUCTION offset %r4, -24
- DBG_VALUE debug-use %r0, debug-use %noreg, !13, !20, debug-location !21
- %r4 = MOVr killed %r0, 14, %noreg, %noreg
- DBG_VALUE debug-use %r4, debug-use %noreg, !13, !20, debug-location !21
- %r0 = MOVi 10, 14, %noreg, _, debug-location !22
- %r1 = MOVi 11, 14, %noreg, _, debug-location !22
- BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def %r0, debug-location !22
- %r5 = MOVr killed %r0, 14, %noreg, _, debug-location !22
- DBG_VALUE debug-use %r5, debug-use %noreg, !14, !20, debug-location !23
- CMPri %r4, 0, 14, %noreg, implicit-def %cpsr, debug-location !25
- Bcc %bb.5.if.end, 0, killed %cpsr
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r11, -8
+ frame-setup CFI_INSTRUCTION offset $r7, -12
+ frame-setup CFI_INSTRUCTION offset $r6, -16
+ frame-setup CFI_INSTRUCTION offset $r5, -20
+ frame-setup CFI_INSTRUCTION offset $r4, -24
+ DBG_VALUE debug-use $r0, debug-use $noreg, !13, !20, debug-location !21
+ $r4 = MOVr killed $r0, 14, $noreg, $noreg
+ DBG_VALUE debug-use $r4, debug-use $noreg, !13, !20, debug-location !21
+ $r0 = MOVi 10, 14, $noreg, _, debug-location !22
+ $r1 = MOVi 11, 14, $noreg, _, debug-location !22
+ BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def $r0, debug-location !22
+ $r5 = MOVr killed $r0, 14, $noreg, _, debug-location !22
+ DBG_VALUE debug-use $r5, debug-use $noreg, !14, !20, debug-location !23
+ CMPri $r4, 0, 14, $noreg, implicit-def $cpsr, debug-location !25
+ Bcc %bb.5.if.end, 0, killed $cpsr
bb.1.if.then:
- liveins: %r4, %r5
+ liveins: $r4, $r5
- %r0 = MOVi 12, 14, %noreg, _, debug-location !26
- %r1 = MOVi 13, 14, %noreg, _, debug-location !26
- BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def %r0, debug-location !26
- %r6 = MOVr killed %r0, 14, %noreg, _, debug-location !26
- DBG_VALUE debug-use %r6, debug-use %noreg, !15, !20, debug-location !27
- %r7 = MOVi 1, 14, %noreg, %noreg
+ $r0 = MOVi 12, 14, $noreg, _, debug-location !26
+ $r1 = MOVi 13, 14, $noreg, _, debug-location !26
+ BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def $r0, debug-location !26
+ $r6 = MOVr killed $r0, 14, $noreg, _, debug-location !26
+ DBG_VALUE debug-use $r6, debug-use $noreg, !15, !20, debug-location !27
+ $r7 = MOVi 1, 14, $noreg, $noreg
DBG_VALUE 1, 0, !18, !20, debug-location !28
B %bb.3.for.cond
bb.2.for.body:
- liveins: %r4, %r5, %r6, %r7
+ liveins: $r4, $r5, $r6, $r7
- %r1 = ADDrr %r5, %r7, 14, %noreg, _, debug-location !36
- %r0 = MOVr %r7, 14, %noreg, _, debug-location !36
- BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def dead %r0, debug-location !36
- %r7 = ADDri killed %r7, 1, 14, %noreg, _, debug-location !38
- DBG_VALUE debug-use %r7, debug-use %noreg, !18, !20, debug-location !28
+ $r1 = ADDrr $r5, $r7, 14, $noreg, _, debug-location !36
+ $r0 = MOVr $r7, 14, $noreg, _, debug-location !36
+ BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def dead $r0, debug-location !36
+ $r7 = ADDri killed $r7, 1, 14, $noreg, _, debug-location !38
+ DBG_VALUE debug-use $r7, debug-use $noreg, !18, !20, debug-location !28
bb.3.for.cond:
- liveins: %r4, %r5, %r6, %r7
+ liveins: $r4, $r5, $r6, $r7
- DBG_VALUE debug-use %r7, debug-use %noreg, !18, !20, debug-location !28
- CMPrr %r7, %r4, 14, %noreg, implicit-def %cpsr, debug-location !33
- Bcc %bb.2.for.body, 11, killed %cpsr, debug-location !33
+ DBG_VALUE debug-use $r7, debug-use $noreg, !18, !20, debug-location !28
+ CMPrr $r7, $r4, 14, $noreg, implicit-def $cpsr, debug-location !33
+ Bcc %bb.2.for.body, 11, killed $cpsr, debug-location !33
bb.4.for.cond.cleanup:
- liveins: %r4, %r5, %r6
+ liveins: $r4, $r5, $r6
- %r0 = MOVr %r5, 14, %noreg, _, debug-location !34
- %r1 = MOVr killed %r6, 14, %noreg, _, debug-location !34
- BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def dead %r0, debug-location !34
+ $r0 = MOVr $r5, 14, $noreg, _, debug-location !34
+ $r1 = MOVr killed $r6, 14, $noreg, _, debug-location !34
+ BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def dead $r0, debug-location !34
bb.5.if.end:
- liveins: %r4, %r5
+ liveins: $r4, $r5
- %r0 = MOVr killed %r5, 14, %noreg, _, debug-location !43
- %r1 = MOVr killed %r4, 14, %noreg, _, debug-location !43
- %sp = LDMIA_UPD %sp, 14, %noreg, def %r4, def %r5, def %r6, def %r7, def %r11, def %lr, debug-location !43
- TAILJMPd @func2, implicit %sp, implicit %sp, implicit killed %r0, implicit killed %r1, debug-location !43
+ $r0 = MOVr killed $r5, 14, $noreg, _, debug-location !43
+ $r1 = MOVr killed $r4, 14, $noreg, _, debug-location !43
+ $sp = LDMIA_UPD $sp, 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $r11, def $lr, debug-location !43
+ TAILJMPd @func2, implicit $sp, implicit $sp, implicit killed $r0, implicit killed $r1, debug-location !43
...
Index: test/CodeGen/ARM/debug-info-arg.ll
===================================================================
--- test/CodeGen/ARM/debug-info-arg.ll
+++ test/CodeGen/ARM/debug-info-arg.ll
@@ -11,7 +11,7 @@
tail call void @llvm.dbg.value(metadata %struct.tag_s* %c, metadata !13, metadata !DIExpression()), !dbg !21
tail call void @llvm.dbg.value(metadata i64 %x, metadata !14, metadata !DIExpression()), !dbg !22
tail call void @llvm.dbg.value(metadata i64 %y, metadata !17, metadata !DIExpression()), !dbg !23
-;CHECK: @DEBUG_VALUE: foo:y <- [DW_OP_plus_uconst 8] [%r7+0]
+;CHECK: @DEBUG_VALUE: foo:y <- [DW_OP_plus_uconst 8] [$r7+0]
tail call void @llvm.dbg.value(metadata %struct.tag_s* %ptr1, metadata !18, metadata !DIExpression()), !dbg !24
tail call void @llvm.dbg.value(metadata %struct.tag_s* %ptr2, metadata !19, metadata !DIExpression()), !dbg !25
%1 = icmp eq %struct.tag_s* %c, null, !dbg !26
Index: test/CodeGen/ARM/debug-info-branch-folding.ll
===================================================================
--- test/CodeGen/ARM/debug-info-branch-folding.ll
+++ test/CodeGen/ARM/debug-info-branch-folding.ll
@@ -5,8 +5,8 @@
;CHECK: vadd.f32 q4, q8, q8
;CHECK-NEXT: LBB0_1
-;CHECK: @DEBUG_VALUE: x <- %q4{{$}}
-;CHECK-NEXT: @DEBUG_VALUE: y <- %q4{{$}}
+;CHECK: @DEBUG_VALUE: x <- $q4{{$}}
+;CHECK-NEXT: @DEBUG_VALUE: y <- $q4{{$}}
;CHECK: beq LBB0_1
Index: test/CodeGen/ARM/expand-pseudos.mir
===================================================================
--- test/CodeGen/ARM/expand-pseudos.mir
+++ test/CodeGen/ARM/expand-pseudos.mir
@@ -20,16 +20,16 @@
alignment: 2
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '' }
+ - { reg: '$r0', virtual-reg: '' }
body: |
bb.0.entry:
- liveins: %r0
+ liveins: $r0
- %r1 = MOVi 2, 14, %noreg, %noreg
- CMPri killed %r0, 0, 14, %noreg, implicit-def %cpsr
- %r1 = MOVCCi16 killed %r1, 500, 0, killed %cpsr
- %r0 = MOVr killed %r1, 14, %noreg, %noreg
- BX_RET 14, %noreg, implicit %r0
+ $r1 = MOVi 2, 14, $noreg, $noreg
+ CMPri killed $r0, 0, 14, $noreg, implicit-def $cpsr
+ $r1 = MOVCCi16 killed $r1, 500, 0, killed $cpsr
+ $r0 = MOVr killed $r1, 14, $noreg, $noreg
+ BX_RET 14, $noreg, implicit $r0
...
---
@@ -37,16 +37,16 @@
alignment: 2
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '' }
+ - { reg: '$r0', virtual-reg: '' }
body: |
bb.0.entry:
- liveins: %r0
+ liveins: $r0
- %r1 = MOVi 2, 14, %noreg, %noreg
- CMPri killed %r0, 0, 14, %noreg, implicit-def %cpsr
- %r1 = MOVCCi32imm killed %r1, 500500500, 0, killed %cpsr
- %r0 = MOVr killed %r1, 14, %noreg, %noreg
- BX_RET 14, %noreg, implicit %r0
+ $r1 = MOVi 2, 14, $noreg, $noreg
+ CMPri killed $r0, 0, 14, $noreg, implicit-def $cpsr
+ $r1 = MOVCCi32imm killed $r1, 500500500, 0, killed $cpsr
+ $r0 = MOVr killed $r1, 14, $noreg, $noreg
+ BX_RET 14, $noreg, implicit $r0
...
---
@@ -54,22 +54,22 @@
alignment: 2
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '' }
- - { reg: '%r1', virtual-reg: '' }
+ - { reg: '$r0', virtual-reg: '' }
+ - { reg: '$r1', virtual-reg: '' }
body: |
bb.0.entry:
- liveins: %r0, %r1
+ liveins: $r0, $r1
- CMPri %r1, 500, 14, %noreg, implicit-def %cpsr
- %r0 = MOVCCr killed %r0, killed %r1, 12, killed %cpsr
- BX_RET 14, %noreg, implicit %r0
+ CMPri $r1, 500, 14, $noreg, implicit-def $cpsr
+ $r0 = MOVCCr killed $r0, killed $r1, 12, killed $cpsr
+ BX_RET 14, $noreg, implicit $r0
...
# CHECK-LABEL: name: test1
-# CHECK: %r1 = MOVi16 500, 0, killed %cpsr, implicit killed %r1
+# CHECK: $r1 = MOVi16 500, 0, killed $cpsr, implicit killed $r1
# CHECK-LABEL: name: test2
-# CHECK: %r1 = MOVi16 2068, 0, %cpsr, implicit killed %r1
-# CHECK: %r1 = MOVTi16 %r1, 7637, 0, %cpsr
+# CHECK: $r1 = MOVi16 2068, 0, $cpsr, implicit killed $r1
+# CHECK: $r1 = MOVTi16 $r1, 7637, 0, $cpsr
# CHECK-LABEL: name: test3
-# CHECK: %r0 = MOVr killed %r1, 12, killed %cpsr, %noreg, implicit killed %r0
+# CHECK: $r0 = MOVr killed $r1, 12, killed $cpsr, $noreg, implicit killed $r0
Index: test/CodeGen/ARM/fpoffset_overflow.mir
===================================================================
--- test/CodeGen/ARM/fpoffset_overflow.mir
+++ test/CodeGen/ARM/fpoffset_overflow.mir
@@ -3,10 +3,10 @@
# This should trigger an emergency spill in the register scavenger because the
# frame offset into the large argument is too large.
# CHECK-LABEL: name: func0
-# CHECK: t2STRi12 killed [[SPILLED:%r[0-9]+]], %sp, 0, 14, %noreg :: (store 4 into %stack.0)
-# CHECK: [[SPILLED]] = t2ADDri killed %sp, 4096, 14, %noreg, %noreg
-# CHECK: %sp = t2LDRi12 killed [[SPILLED]], 40, 14, %noreg :: (load 4)
-# CHECK: [[SPILLED]] = t2LDRi12 %sp, 0, 14, %noreg :: (load 4 from %stack.0)
+# CHECK: t2STRi12 killed [[SPILLED:\$r[0-9]+]], $sp, 0, 14, $noreg :: (store 4 into %stack.0)
+# CHECK: [[SPILLED]] = t2ADDri killed $sp, 4096, 14, $noreg, $noreg
+# CHECK: $sp = t2LDRi12 killed [[SPILLED]], 40, 14, $noreg :: (load 4)
+# CHECK: [[SPILLED]] = t2LDRi12 $sp, 0, 14, $noreg :: (load 4 from %stack.0)
name: func0
tracksRegLiveness: true
fixedStack:
@@ -16,44 +16,44 @@
isAliased: false }
body: |
bb.0:
- %r0 = IMPLICIT_DEF
- %r1 = IMPLICIT_DEF
- %r2 = IMPLICIT_DEF
- %r3 = IMPLICIT_DEF
- %r4 = IMPLICIT_DEF
- %r5 = IMPLICIT_DEF
- %r6 = IMPLICIT_DEF
- %r7 = IMPLICIT_DEF
- %r8 = IMPLICIT_DEF
- %r9 = IMPLICIT_DEF
- %r10 = IMPLICIT_DEF
- %r11 = IMPLICIT_DEF
- %r12 = IMPLICIT_DEF
- %lr = IMPLICIT_DEF
+ $r0 = IMPLICIT_DEF
+ $r1 = IMPLICIT_DEF
+ $r2 = IMPLICIT_DEF
+ $r3 = IMPLICIT_DEF
+ $r4 = IMPLICIT_DEF
+ $r5 = IMPLICIT_DEF
+ $r6 = IMPLICIT_DEF
+ $r7 = IMPLICIT_DEF
+ $r8 = IMPLICIT_DEF
+ $r9 = IMPLICIT_DEF
+ $r10 = IMPLICIT_DEF
+ $r11 = IMPLICIT_DEF
+ $r12 = IMPLICIT_DEF
+ $lr = IMPLICIT_DEF
- %sp = t2LDRi12 %fixed-stack.0, 0, 14, %noreg :: (load 4)
+ $sp = t2LDRi12 %fixed-stack.0, 0, 14, $noreg :: (load 4)
- KILL %r0
- KILL %r1
- KILL %r2
- KILL %r3
- KILL %r4
- KILL %r5
- KILL %r6
- KILL %r7
- KILL %r8
- KILL %r9
- KILL %r10
- KILL %r11
- KILL %r12
- KILL %lr
+ KILL $r0
+ KILL $r1
+ KILL $r2
+ KILL $r3
+ KILL $r4
+ KILL $r5
+ KILL $r6
+ KILL $r7
+ KILL $r8
+ KILL $r9
+ KILL $r10
+ KILL $r11
+ KILL $r12
+ KILL $lr
...
---
# This should not trigger an emergency spill yet.
# CHECK-LABEL: name: func1
# CHECK-NOT: t2STRi12
# CHECK-NOT: t2ADDri
-# CHECK: %r11 = t2LDRi12 %sp, 4092, 14, %noreg :: (load 4)
+# CHECK: $r11 = t2LDRi12 $sp, 4092, 14, $noreg :: (load 4)
# CHECK-NOT: t2LDRi12
name: func1
tracksRegLiveness: true
@@ -64,33 +64,33 @@
isAliased: false }
body: |
bb.0:
- %r0 = IMPLICIT_DEF
- %r1 = IMPLICIT_DEF
- %r2 = IMPLICIT_DEF
- %r3 = IMPLICIT_DEF
- %r4 = IMPLICIT_DEF
- %r5 = IMPLICIT_DEF
- %r6 = IMPLICIT_DEF
- %r8 = IMPLICIT_DEF
- %r9 = IMPLICIT_DEF
- %r10 = IMPLICIT_DEF
- %r11 = IMPLICIT_DEF
- %r12 = IMPLICIT_DEF
- %lr = IMPLICIT_DEF
+ $r0 = IMPLICIT_DEF
+ $r1 = IMPLICIT_DEF
+ $r2 = IMPLICIT_DEF
+ $r3 = IMPLICIT_DEF
+ $r4 = IMPLICIT_DEF
+ $r5 = IMPLICIT_DEF
+ $r6 = IMPLICIT_DEF
+ $r8 = IMPLICIT_DEF
+ $r9 = IMPLICIT_DEF
+ $r10 = IMPLICIT_DEF
+ $r11 = IMPLICIT_DEF
+ $r12 = IMPLICIT_DEF
+ $lr = IMPLICIT_DEF
- %r11 = t2LDRi12 %fixed-stack.0, 0, 14, %noreg :: (load 4)
+ $r11 = t2LDRi12 %fixed-stack.0, 0, 14, $noreg :: (load 4)
- KILL %r0
- KILL %r1
- KILL %r2
- KILL %r3
- KILL %r4
- KILL %r5
- KILL %r6
- KILL %r8
- KILL %r9
- KILL %r10
- KILL %r11
- KILL %r12
- KILL %lr
+ KILL $r0
+ KILL $r1
+ KILL $r2
+ KILL $r3
+ KILL $r4
+ KILL $r5
+ KILL $r6
+ KILL $r8
+ KILL $r9
+ KILL $r10
+ KILL $r11
+ KILL $r12
+ KILL $lr
...
Index: test/CodeGen/ARM/ifcvt_canFallThroughTo.mir
===================================================================
--- test/CodeGen/ARM/ifcvt_canFallThroughTo.mir
+++ test/CodeGen/ARM/ifcvt_canFallThroughTo.mir
@@ -10,12 +10,12 @@
bb.1:
successors: %bb.2, %bb.4
- Bcc %bb.4, 1, %cpsr
+ Bcc %bb.4, 1, $cpsr
bb.2:
successors: %bb.3, %bb.5
- Bcc %bb.5, 1, %cpsr
+ Bcc %bb.5, 1, $cpsr
bb.3:
successors: %bb.5
@@ -28,7 +28,7 @@
bb.5:
successors: %bb.1, %bb.6
- Bcc %bb.1, 1, %cpsr
+ Bcc %bb.1, 1, $cpsr
bb.6:
BX_RET 14, _
Index: test/CodeGen/ARM/ifcvt_diamond_unanalyzable.mir
===================================================================
--- test/CodeGen/ARM/ifcvt_diamond_unanalyzable.mir
+++ test/CodeGen/ARM/ifcvt_diamond_unanalyzable.mir
@@ -3,19 +3,19 @@
name: foo
body: |
bb.0:
- Bcc %bb.2, 1, %cpsr
+ Bcc %bb.2, 1, $cpsr
bb.1:
- %sp = tADDspi %sp, 1, 14, _
+ $sp = tADDspi $sp, 1, 14, _
B %bb.3
bb.2:
- %sp = tADDspi %sp, 2, 14, _
+ $sp = tADDspi $sp, 2, 14, _
B %bb.3
bb.3:
successors:
- %sp = tADDspi %sp, 3, 14, _
+ $sp = tADDspi $sp, 3, 14, _
BX_RET 14, _
...
@@ -24,7 +24,7 @@
# CHECK: body: |
# CHECK: bb.0:
-# CHECK: %sp = tADDspi %sp, 2, 1, %cpsr
-# CHECK: %sp = tADDspi %sp, 1, 0, %cpsr, implicit %sp
-# CHECK: %sp = tADDspi %sp, 3, 14, %noreg
-# CHECK: BX_RET 14, %noreg
+# CHECK: $sp = tADDspi $sp, 2, 1, $cpsr
+# CHECK: $sp = tADDspi $sp, 1, 0, $cpsr, implicit $sp
+# CHECK: $sp = tADDspi $sp, 3, 14, $noreg
+# CHECK: BX_RET 14, $noreg
Index: test/CodeGen/ARM/ifcvt_forked_diamond_unanalyzable.mir
===================================================================
--- test/CodeGen/ARM/ifcvt_forked_diamond_unanalyzable.mir
+++ test/CodeGen/ARM/ifcvt_forked_diamond_unanalyzable.mir
@@ -3,28 +3,28 @@
name: foo
body: |
bb.0:
- Bcc %bb.2, 1, %cpsr
+ Bcc %bb.2, 1, $cpsr
bb.1:
successors: %bb.3(0x20000000), %bb.4(0x60000000)
- %sp = tADDspi %sp, 1, 14, _
- Bcc %bb.3, 1, %cpsr
+ $sp = tADDspi $sp, 1, 14, _
+ Bcc %bb.3, 1, $cpsr
B %bb.4
bb.2:
successors: %bb.3(0x20000000), %bb.4(0x60000000)
- %sp = tADDspi %sp, 2, 14, _
- Bcc %bb.3, 1, %cpsr
+ $sp = tADDspi $sp, 2, 14, _
+ Bcc %bb.3, 1, $cpsr
B %bb.4
bb.3:
successors:
- %sp = tADDspi %sp, 3, 14, _
+ $sp = tADDspi $sp, 3, 14, _
BX_RET 14, _
bb.4:
successors:
- %sp = tADDspi %sp, 4, 14, _
+ $sp = tADDspi $sp, 4, 14, _
BX_RET 14, _
...
@@ -35,14 +35,14 @@
# CHECK: bb.0:
# CHECK: successors: %bb.2(0x20000000), %bb.1(0x60000000)
-# CHECK: %sp = tADDspi %sp, 2, 1, %cpsr
-# CHECK: %sp = tADDspi %sp, 1, 0, %cpsr, implicit %sp
-# CHECK: Bcc %bb.2, 1, %cpsr
+# CHECK: $sp = tADDspi $sp, 2, 1, $cpsr
+# CHECK: $sp = tADDspi $sp, 1, 0, $cpsr, implicit $sp
+# CHECK: Bcc %bb.2, 1, $cpsr
# CHECK: bb.1:
-# CHECK: %sp = tADDspi %sp, 4, 14, %noreg
-# CHECK: BX_RET 14, %noreg
+# CHECK: $sp = tADDspi $sp, 4, 14, $noreg
+# CHECK: BX_RET 14, $noreg
# CHECK: bb.2:
-# CHECK: %sp = tADDspi %sp, 3, 14, %noreg
-# CHECK: BX_RET 14, %noreg
+# CHECK: $sp = tADDspi $sp, 3, 14, $noreg
+# CHECK: BX_RET 14, $noreg
Index: test/CodeGen/ARM/ifcvt_simple_bad_zero_prob_succ.mir
===================================================================
--- test/CodeGen/ARM/ifcvt_simple_bad_zero_prob_succ.mir
+++ test/CodeGen/ARM/ifcvt_simple_bad_zero_prob_succ.mir
@@ -5,16 +5,16 @@
bb.0:
bb.1:
- Bcc %bb.3, 0, %cpsr
+ Bcc %bb.3, 0, $cpsr
bb.2:
bb.3:
- Bcc %bb.1, 0, %cpsr
+ Bcc %bb.1, 0, $cpsr
bb.4:
successors: %bb.1
- tBRIND %r1, 14, _
+ tBRIND $r1, 14, _
...
# We should only get bb.1 as successor to bb.1. No zero percent probability
@@ -27,7 +27,7 @@
# CHECK: bb.1:
# CHECK: successors: %bb.1(0x80000000)
# CHECK-NOT: %bb.2(0x00000000)
-# CHECK: tBRIND %r1, 1, %cpsr
+# CHECK: tBRIND $r1, 1, $cpsr
# CHECK: B %bb.1
#CHECK-NOT: bb.2:
Index: test/CodeGen/ARM/ifcvt_simple_unanalyzable.mir
===================================================================
--- test/CodeGen/ARM/ifcvt_simple_unanalyzable.mir
+++ test/CodeGen/ARM/ifcvt_simple_unanalyzable.mir
@@ -3,7 +3,7 @@
name: foo
body: |
bb.0:
- Bcc %bb.2, 0, %cpsr
+ Bcc %bb.2, 0, $cpsr
bb.1:
successors:
@@ -11,7 +11,7 @@
bb.2:
successors:
- %sp = tADDspi %sp, 2, 14, _
+ $sp = tADDspi $sp, 2, 14, _
BX_RET 14, _
...
@@ -19,7 +19,7 @@
# CHECK: body: |
# CHECK: bb.0:
-# CHECK: %sp = tADDspi %sp, 2, 0, %cpsr
-# CHECK: BX_RET 0, %cpsr
-# CHECK: BX_RET 14, %noreg
+# CHECK: $sp = tADDspi $sp, 2, 0, $cpsr
+# CHECK: BX_RET 0, $cpsr
+# CHECK: BX_RET 14, $noreg
Index: test/CodeGen/ARM/ifcvt_triangleWoCvtToNextEdge.mir
===================================================================
--- test/CodeGen/ARM/ifcvt_triangleWoCvtToNextEdge.mir
+++ test/CodeGen/ARM/ifcvt_triangleWoCvtToNextEdge.mir
@@ -12,21 +12,21 @@
body: |
bb.0:
- Bcc %bb.1, 1, %cpsr
+ Bcc %bb.1, 1, $cpsr
B %bb.2
bb.1:
- Bcc %bb.3, 0, %cpsr
+ Bcc %bb.3, 0, $cpsr
bb.2:
successors:
- tBL 14, %cpsr, @__stack_chk_fail
+ tBL 14, $cpsr, @__stack_chk_fail
bb.3:
successors:
- %sp = tADDspi %sp, 2, 14, _
- %sp = tADDspi %sp, 2, 14, _
- tTAILJMPdND @bar, 14, %cpsr
+ $sp = tADDspi $sp, 2, 14, _
+ $sp = tADDspi $sp, 2, 14, _
+ tTAILJMPdND @bar, 14, $cpsr
...
# bb.2 has no successors, presumably because __stack_chk_fail doesn't return,
@@ -38,15 +38,15 @@
# CHECK: bb.0:
# CHECK: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-# CHECK: Bcc %bb.2, 1, %cpsr
+# CHECK: Bcc %bb.2, 1, $cpsr
# CHECK: bb.1:
# CHECK-NOT: successors: %bb
-# CHECK: tBL 14, %cpsr, @__stack_chk_fail
+# CHECK: tBL 14, $cpsr, @__stack_chk_fail
# CHECK: bb.2:
# CHECK-NOT: successors: %bb
-# CHECK: tBL 1, %cpsr, @__stack_chk_fail
-# CHECK: %sp = tADDspi %sp, 2, 14, %noreg
-# CHECK: %sp = tADDspi %sp, 2, 14, %noreg
-# CHECK: tTAILJMPdND @bar, 14, %cpsr
+# CHECK: tBL 1, $cpsr, @__stack_chk_fail
+# CHECK: $sp = tADDspi $sp, 2, 14, $noreg
+# CHECK: $sp = tADDspi $sp, 2, 14, $noreg
+# CHECK: tTAILJMPdND @bar, 14, $cpsr
Index: test/CodeGen/ARM/imm-peephole-arm.mir
===================================================================
--- test/CodeGen/ARM/imm-peephole-arm.mir
+++ test/CodeGen/ARM/imm-peephole-arm.mir
@@ -1,6 +1,6 @@
# RUN: llc -run-pass=peephole-opt %s -o - | FileCheck %s
-# CHECK: [[IN:%.*]]:gprnopc = COPY %r0
+# CHECK: [[IN:%.*]]:gprnopc = COPY $r0
# CHECK: [[SUM1TMP:%.*]]:rgpr = ADDri [[IN]], 133
# CHECK: [[SUM1:%.*]]:rgpr = ADDri killed [[SUM1TMP]], 25600
@@ -35,25 +35,25 @@
  - { id: 7, class: rgpr }
  - { id: 8, class: rgpr }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '$r0', virtual-reg: '%0' }
body: |
bb.0 (%ir-block.0):
- liveins: %r0
+ liveins: $r0
- %0 = COPY %r0
+ %0 = COPY $r0
%1 = MOVi32imm -25733
- %2 = SUBrr %0, killed %1, 14, %noreg, %noreg
+ %2 = SUBrr %0, killed %1, 14, $noreg, $noreg
%3 = MOVi32imm 25733
- %4 = SUBrr %0, killed %3, 14, %noreg, %noreg
+ %4 = SUBrr %0, killed %3, 14, $noreg, $noreg
%5 = MOVi32imm -25733
- %6 = ADDrr %0, killed %5, 14, %noreg, %noreg
+ %6 = ADDrr %0, killed %5, 14, $noreg, $noreg
%7 = MOVi32imm 25733
- %8 = ADDrr killed %0, killed %7, 14, %noreg, %noreg
+ %8 = ADDrr killed %0, killed %7, 14, $noreg, $noreg
- %r0 = COPY killed %8
- BX_RET 14, %noreg, implicit %r0
+ $r0 = COPY killed %8
+ BX_RET 14, $noreg, implicit $r0
...
Index: test/CodeGen/ARM/imm-peephole-thumb.mir
===================================================================
--- test/CodeGen/ARM/imm-peephole-thumb.mir
+++ test/CodeGen/ARM/imm-peephole-thumb.mir
@@ -1,6 +1,6 @@
# RUN: llc -run-pass=peephole-opt %s -o - | FileCheck %s
-# CHECK: [[IN:%.*]]:gprnopc = COPY %r0
+# CHECK: [[IN:%.*]]:gprnopc = COPY $r0
# CHECK: [[SUM1TMP:%.*]]:rgpr = t2ADDri [[IN]], 25600
# CHECK: [[SUM1:%.*]]:rgpr = t2ADDri killed [[SUM1TMP]], 133
@@ -35,24 +35,24 @@
  - { id: 7, class: rgpr }
  - { id: 8, class: rgpr }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '$r0', virtual-reg: '%0' }
body: |
bb.0 (%ir-block.0):
- liveins: %r0
- %0 = COPY %r0
+ liveins: $r0
+ %0 = COPY $r0
%1 = t2MOVi32imm -25733
- %2 = t2SUBrr %0, killed %1, 14, %noreg, %noreg
+ %2 = t2SUBrr %0, killed %1, 14, $noreg, $noreg
%3 = t2MOVi32imm 25733
- %4 = t2SUBrr %0, killed %3, 14, %noreg, %noreg
+ %4 = t2SUBrr %0, killed %3, 14, $noreg, $noreg
%5 = t2MOVi32imm -25733
- %6= t2ADDrr %0, killed %5, 14, %noreg, %noreg
+ %6 = t2ADDrr %0, killed %5, 14, $noreg, $noreg
%7 = t2MOVi32imm 25733
- %8 = t2ADDrr killed %0, killed %7, 14, %noreg, %noreg
+ %8 = t2ADDrr killed %0, killed %7, 14, $noreg, $noreg
- %r0 = COPY killed %8
- tBX_RET 14, %noreg, implicit %r0
+ $r0 = COPY killed %8
+ tBX_RET 14, $noreg, implicit $r0
...
Index: test/CodeGen/ARM/load_store_opt_kill.mir
===================================================================
--- test/CodeGen/ARM/load_store_opt_kill.mir
+++ test/CodeGen/ARM/load_store_opt_kill.mir
@@ -2,11 +2,11 @@
---
# CHECK-LABEL: name: f
name: f
-# Make sure the load into %r0 doesn't clobber the base register before the second load uses it.
-# CHECK: %r3 = LDRi12 %r0, 12, 14, %noreg
-# CHECK-NEXT: %r0 = LDRi12 %r0, 8, 14, %noreg
+# Make sure the load into $r0 doesn't clobber the base register before the second load uses it.
+# CHECK: $r3 = LDRi12 $r0, 12, 14, $noreg
+# CHECK-NEXT: $r0 = LDRi12 $r0, 8, 14, $noreg
body: |
bb.0:
- liveins: %r0, %r3
- %r0, %r3 = LDRD %r0, %noreg, 8, 14, %noreg
+ liveins: $r0, $r3
+ $r0, $r3 = LDRD $r0, $noreg, 8, 14, $noreg
...
Index: test/CodeGen/ARM/machine-copyprop.mir
===================================================================
--- test/CodeGen/ARM/machine-copyprop.mir
+++ test/CodeGen/ARM/machine-copyprop.mir
@@ -3,20 +3,20 @@
# Test that machine copy prop recognizes the implicit-def operands on a COPY
# as clobbering the register.
# CHECK-LABEL: name: func
-# CHECK: %d2 = VMOVv2i32 2, 14, %noreg
-# CHECK: %s5 = COPY %s0, implicit %q1, implicit-def %q1
-# CHECK: VST1q32 %r0, 0, %q1, 14, %noreg
+# CHECK: $d2 = VMOVv2i32 2, 14, $noreg
+# CHECK: $s5 = COPY $s0, implicit $q1, implicit-def $q1
+# CHECK: VST1q32 $r0, 0, $q1, 14, $noreg
# The following two COPYs must not be removed
-# CHECK: %s4 = COPY %s20, implicit-def %q1
-# CHECK: %s5 = COPY %s0, implicit killed %d0, implicit %q1, implicit-def %q1
-# CHECK: VST1q32 %r2, 0, %q1, 14, %noreg
+# CHECK: $s4 = COPY $s20, implicit-def $q1
+# CHECK: $s5 = COPY $s0, implicit killed $d0, implicit $q1, implicit-def $q1
+# CHECK: VST1q32 $r2, 0, $q1, 14, $noreg
name: func
body: |
bb.0:
- %d2 = VMOVv2i32 2, 14, %noreg
- %s5 = COPY %s0, implicit %q1, implicit-def %q1
- VST1q32 %r0, 0, %q1, 14, %noreg
- %s4 = COPY %s20, implicit-def %q1
- %s5 = COPY %s0, implicit killed %d0, implicit %q1, implicit-def %q1
- VST1q32 %r2, 0, %q1, 14, %noreg
+ $d2 = VMOVv2i32 2, 14, $noreg
+ $s5 = COPY $s0, implicit $q1, implicit-def $q1
+ VST1q32 $r0, 0, $q1, 14, $noreg
+ $s4 = COPY $s20, implicit-def $q1
+ $s5 = COPY $s0, implicit killed $d0, implicit $q1, implicit-def $q1
+ VST1q32 $r2, 0, $q1, 14, $noreg
...
Index: test/CodeGen/ARM/misched-int-basic-thumb2.mir
===================================================================
--- test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -42,57 +42,57 @@
# CHECK_SWIFT: Latency : 2
# CHECK_R52: Latency : 2
#
-# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, %noreg; mem:LD4[@g1](dereferenceable)
+# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, $noreg; mem:LD4[@g1](dereferenceable)
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 3
# CHECK_R52: Latency : 4
#
-# CHECK : SU(6): %6 = t2ADDrr %3:rgpr, %3:rgpr, 14, %noreg, %noreg
+# CHECK : SU(6): %6 = t2ADDrr %3:rgpr, %3:rgpr, 14, $noreg, $noreg
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
-# CHECK: SU(7): %7:rgpr = t2SDIV %6:rgpr, %5:rgpr, 14, %noreg
+# CHECK: SU(7): %7:rgpr = t2SDIV %6:rgpr, %5:rgpr, 14, $noreg
# CHECK_A9: Latency : 0
# CHECK_SWIFT: Latency : 14
# CHECK_R52: Latency : 8
-# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, %noreg; mem:ST4[@g1]
+# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, $noreg; mem:ST4[@g1]
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 0
# CHECK_R52: Latency : 4
#
-# CHECK: SU(9): %8:rgpr = t2SMULBB %1:rgpr, %1:rgpr, 14, %noreg
+# CHECK: SU(9): %8:rgpr = t2SMULBB %1:rgpr, %1:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(10): %9:rgpr = t2SMLABB %0:rgpr, %0:rgpr, %8:rgpr, 14, %noreg
+# CHECK: SU(10): %9:rgpr = t2SMLABB %0:rgpr, %0:rgpr, %8:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %10:rgpr = t2UXTH %9:rgpr, 0, 14, %noreg
+# CHECK: SU(11): %10:rgpr = t2UXTH %9:rgpr, 0, 14, $noreg
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(12): %11:rgpr = t2MUL %10:rgpr, %7:rgpr, 14, %noreg
+# CHECK: SU(12): %11:rgpr = t2MUL %10:rgpr, %7:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(13): %12:rgpr = t2MLA %11:rgpr, %11:rgpr, %11:rgpr, 14, %noreg
+# CHECK: SU(13): %12:rgpr = t2MLA %11:rgpr, %11:rgpr, %11:rgpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12:rgpr, %12:rgpr, 14, %noreg
+# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12:rgpr, %12:rgpr, 14, $noreg
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12:rgpr, %12:rgpr, %19:rgpr, %20:rgpr, 14, %noreg
+# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12:rgpr, %12:rgpr, %19:rgpr, %20:rgpr, 14, $noreg
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 7
# CHECK_R52: Latency : 4
@@ -129,8 +129,8 @@
  - { id: 19, class: rgpr }
  - { id: 20, class: rgpr }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -147,29 +147,29 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %1 = COPY %r1
- %0 = COPY %r0
+ %1 = COPY $r1
+ %0 = COPY $r0
%2 = t2MOVi32imm @g1
- %3 = t2LDRi12 %2, 0, 14, %noreg :: (dereferenceable load 4 from @g1)
+ %3 = t2LDRi12 %2, 0, 14, $noreg :: (dereferenceable load 4 from @g1)
%4 = t2MOVi32imm @g2
- %5 = t2LDRi12 %4, 0, 14, %noreg :: (dereferenceable load 4 from @g2)
- %6 = t2ADDrr %3, %3, 14, %noreg, %noreg
- %7 = t2SDIV %6, %5, 14, %noreg
- t2STRi12 %7, %2, 0, 14, %noreg :: (store 4 into @g1)
- %8 = t2SMULBB %1, %1, 14, %noreg
- %9 = t2SMLABB %0, %0, %8, 14, %noreg
- %10 = t2UXTH %9, 0, 14, %noreg
- %11 = t2MUL %10, %7, 14, %noreg
- %12 = t2MLA %11, %11, %11, 14, %noreg
- %13, %14 = t2UMULL %12, %12, 14, %noreg
- %19, %16 = t2UMULL %13, %13, 14, %noreg
- %17 = t2MLA %13, %14, %16, 14, %noreg
- %20 = t2MLA %13, %14, %17, 14, %noreg
- %19, %20 = t2UMLAL %12, %12, %19, %20, 14, %noreg
- %r0 = COPY %19
- %r1 = COPY %20
- tBX_RET 14, %noreg, implicit %r0, implicit %r1
+ %5 = t2LDRi12 %4, 0, 14, $noreg :: (dereferenceable load 4 from @g2)
+ %6 = t2ADDrr %3, %3, 14, $noreg, $noreg
+ %7 = t2SDIV %6, %5, 14, $noreg
+ t2STRi12 %7, %2, 0, 14, $noreg :: (store 4 into @g1)
+ %8 = t2SMULBB %1, %1, 14, $noreg
+ %9 = t2SMLABB %0, %0, %8, 14, $noreg
+ %10 = t2UXTH %9, 0, 14, $noreg
+ %11 = t2MUL %10, %7, 14, $noreg
+ %12 = t2MLA %11, %11, %11, 14, $noreg
+ %13, %14 = t2UMULL %12, %12, 14, $noreg
+ %19, %16 = t2UMULL %13, %13, 14, $noreg
+ %17 = t2MLA %13, %14, %16, 14, $noreg
+ %20 = t2MLA %13, %14, %17, 14, $noreg
+ %19, %20 = t2UMLAL %12, %12, %19, %20, 14, $noreg
+ $r0 = COPY %19
+ $r1 = COPY %20
+ tBX_RET 14, $noreg, implicit $r0, implicit $r1
...
Index: test/CodeGen/ARM/misched-int-basic.mir
===================================================================
--- test/CodeGen/ARM/misched-int-basic.mir
+++ test/CodeGen/ARM/misched-int-basic.mir
@@ -28,37 +28,37 @@
}
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %2:gpr = SMULBB %1:gpr, %1:gpr, 14, %noreg
+# CHECK: SU(2): %2:gpr = SMULBB %1:gpr, %1:gpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(3): %3:gprnopc = SMLABB %0:gprnopc, %0:gprnopc, %2:gpr, 14, %noreg
+# CHECK: SU(3): %3:gprnopc = SMLABB %0:gprnopc, %0:gprnopc, %2:gpr, 14, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(4): %4:gprnopc = UXTH %3:gprnopc, 0, 14, %noreg
+# CHECK: SU(4): %4:gprnopc = UXTH %3:gprnopc, 0, 14, $noreg
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(5): %5:gprnopc = MUL %4:gprnopc, %4:gprnopc, 14, %noreg, %noreg
+# CHECK: SU(5): %5:gprnopc = MUL %4:gprnopc, %4:gprnopc, 14, $noreg, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(6): %6:gprnopc = MLA %5:gprnopc, %5:gprnopc, %5:gprnopc, 14, %noreg, %noreg
+# CHECK: SU(6): %6:gprnopc = MLA %5:gprnopc, %5:gprnopc, %5:gprnopc, 14, $noreg, $noreg
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6:gprnopc, %6:gprnopc, 14, %noreg, %noreg
+# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6:gprnopc, %6:gprnopc, 14, $noreg, $noreg
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6:gprnopc, %6:gprnopc, %13:gpr, %14:gprnopc, 14, %noreg, %noreg
+# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6:gprnopc, %6:gprnopc, %13:gpr, %14:gprnopc, 14, $noreg, $noreg
# CHECK_SWIFT: Latency : 7
# CHECK_A9: Latency : 3
# CHECK_R52: Latency : 4
@@ -89,8 +89,8 @@
  - { id: 13, class: gpr }
  - { id: 14, class: gprnopc }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -107,22 +107,22 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %1 = COPY %r1
- %0 = COPY %r0
- %2 = SMULBB %1, %1, 14, %noreg
- %3 = SMLABB %0, %0, %2, 14, %noreg
- %4 = UXTH %3, 0, 14, %noreg
- %5 = MUL %4, %4, 14, %noreg, %noreg
- %6 = MLA %5, %5, %5, 14, %noreg, %noreg
- %7, %8 = UMULL %6, %6, 14, %noreg, %noreg
- %13, %10 = UMULL %7, %7, 14, %noreg, %noreg
- %11 = MLA %7, %8, %10, 14, %noreg, %noreg
- %14 = MLA %7, %8, %11, 14, %noreg, %noreg
- %13, %14 = UMLAL %6, %6, %13, %14, 14, %noreg, %noreg
- %r0 = COPY %13
- %r1 = COPY %14
- BX_RET 14, %noreg, implicit %r0, implicit %r1
+ %1 = COPY $r1
+ %0 = COPY $r0
+ %2 = SMULBB %1, %1, 14, $noreg
+ %3 = SMLABB %0, %0, %2, 14, $noreg
+ %4 = UXTH %3, 0, 14, $noreg
+ %5 = MUL %4, %4, 14, $noreg, $noreg
+ %6 = MLA %5, %5, %5, 14, $noreg, $noreg
+ %7, %8 = UMULL %6, %6, 14, $noreg, $noreg
+ %13, %10 = UMULL %7, %7, 14, $noreg, $noreg
+ %11 = MLA %7, %8, %10, 14, $noreg, $noreg
+ %14 = MLA %7, %8, %11, 14, $noreg, $noreg
+ %13, %14 = UMLAL %6, %6, %13, %14, 14, $noreg, $noreg
+ $r0 = COPY %13
+ $r1 = COPY %14
+ BX_RET 14, $noreg, implicit $r0, implicit $r1
...
Index: test/CodeGen/ARM/peephole-phi.mir
===================================================================
--- test/CodeGen/ARM/peephole-phi.mir
+++ test/CodeGen/ARM/peephole-phi.mir
@@ -7,39 +7,39 @@
# CHECK-LABEL: name: func
# CHECK: body: |
# CHECK: bb.0:
-# CHECK: Bcc %bb.2, 1, undef %cpsr
+# CHECK: Bcc %bb.2, 1, undef $cpsr
#
# CHECK: bb.1:
# CHECK: %0:dpr = IMPLICIT_DEF
-# CHECK: %1:gpr, %2:gpr = VMOVRRD %0, 14, %noreg
+# CHECK: %1:gpr, %2:gpr = VMOVRRD %0, 14, $noreg
# CHECK: B %bb.3
#
# CHECK: bb.2:
# CHECK: %3:spr = IMPLICIT_DEF
-# CHECK: %4:gpr = VMOVRS %3, 14, %noreg
+# CHECK: %4:gpr = VMOVRS %3, 14, $noreg
#
# CHECK: bb.3:
# CHECK: %5:gpr = PHI %1, %bb.1, %4, %bb.2
-# CHECK: %6:spr = VMOVSR %5, 14, %noreg
+# CHECK: %6:spr = VMOVSR %5, 14, $noreg
---
name: func0
tracksRegLiveness: true
body: |
bb.0:
- Bcc %bb.2, 1, undef %cpsr
+ Bcc %bb.2, 1, undef $cpsr
bb.1:
%0:dpr = IMPLICIT_DEF
- %1:gpr, %2:gpr = VMOVRRD %0:dpr, 14, %noreg
+ %1:gpr, %2:gpr = VMOVRRD %0:dpr, 14, $noreg
B %bb.3
bb.2:
%3:spr = IMPLICIT_DEF
- %4:gpr = VMOVRS %3:spr, 14, %noreg
+ %4:gpr = VMOVRS %3:spr, 14, $noreg
bb.3:
%5:gpr = PHI %1, %bb.1, %4, %bb.2
- %6:spr = VMOVSR %5, 14, %noreg
+ %6:spr = VMOVSR %5, 14, $noreg
...
# CHECK-LABEL: name: func1
@@ -50,20 +50,20 @@
tracksRegLiveness: true
body: |
bb.0:
- Bcc %bb.2, 1, undef %cpsr
+ Bcc %bb.2, 1, undef $cpsr
bb.1:
%1:spr = IMPLICIT_DEF
- %0:gpr = VMOVRS %1, 14, %noreg
+ %0:gpr = VMOVRS %1, 14, $noreg
B %bb.3
bb.2:
%3:spr = IMPLICIT_DEF
- %2:gpr = VMOVRS %3:spr, 14, %noreg
+ %2:gpr = VMOVRS %3:spr, 14, $noreg
bb.3:
%4:gpr = PHI %0, %bb.1, %2, %bb.2
- %5:spr = VMOVSR %4, 14, %noreg
+ %5:spr = VMOVSR %4, 14, $noreg
...
# The current implementation doesn't perform any transformations if undef
@@ -71,33 +71,33 @@
# CHECK-LABEL: name: func-undefops
# CHECK: body: |
# CHECK: bb.0:
-# CHECK: Bcc %bb.2, 1, undef %cpsr
+# CHECK: Bcc %bb.2, 1, undef $cpsr
#
# CHECK: bb.1:
-# CHECK: %0:gpr = VMOVRS undef %1:spr, 14, %noreg
+# CHECK: %0:gpr = VMOVRS undef %1:spr, 14, $noreg
# CHECK: B %bb.3
#
# CHECK: bb.2:
-# CHECK: %2:gpr = VMOVRS undef %3:spr, 14, %noreg
+# CHECK: %2:gpr = VMOVRS undef %3:spr, 14, $noreg
#
# CHECK: bb.3:
# CHECK: %4:gpr = PHI %0, %bb.1, %2, %bb.2
-# CHECK: %5:spr = VMOVSR %4, 14, %noreg
+# CHECK: %5:spr = VMOVSR %4, 14, $noreg
---
name: func-undefops
tracksRegLiveness: true
body: |
bb.0:
- Bcc %bb.2, 1, undef %cpsr
+ Bcc %bb.2, 1, undef $cpsr
bb.1:
- %0:gpr = VMOVRS undef %1:spr, 14, %noreg
+ %0:gpr = VMOVRS undef %1:spr, 14, $noreg
B %bb.3
bb.2:
- %2:gpr = VMOVRS undef %3:spr, 14, %noreg
+ %2:gpr = VMOVRS undef %3:spr, 14, $noreg
bb.3:
%4:gpr = PHI %0, %bb.1, %2, %bb.2
- %5:spr = VMOVSR %4, 14, %noreg
+ %5:spr = VMOVSR %4, 14, $noreg
...
Index: test/CodeGen/ARM/pei-swiftself.mir
===================================================================
--- test/CodeGen/ARM/pei-swiftself.mir
+++ test/CodeGen/ARM/pei-swiftself.mir
@@ -17,44 +17,44 @@
  - { id: 1, type: default, size: 4096, alignment: 8 }
body: |
bb.0:
- liveins: %r10 ; swiftself parameter comes in as %r10
+ liveins: $r10 ; swiftself parameter comes in as $r10
; Bring up register pressure to force emergency spilling, coax scavenging
- ; to use %r10 as that one is not spilled/restored.
- %r0 = IMPLICIT_DEF
- %r1 = IMPLICIT_DEF
- %r2 = IMPLICIT_DEF
- %r3 = IMPLICIT_DEF
- %r4 = IMPLICIT_DEF
- %r5 = IMPLICIT_DEF
- %r6 = IMPLICIT_DEF
- %r7 = IMPLICIT_DEF
- %r8 = IMPLICIT_DEF
- %r9 = IMPLICIT_DEF
- %r11 = IMPLICIT_DEF
- %r12 = IMPLICIT_DEF
- %lr = IMPLICIT_DEF
+ ; to use $r10 as that one is not spilled/restored.
+ $r0 = IMPLICIT_DEF
+ $r1 = IMPLICIT_DEF
+ $r2 = IMPLICIT_DEF
+ $r3 = IMPLICIT_DEF
+ $r4 = IMPLICIT_DEF
+ $r5 = IMPLICIT_DEF
+ $r6 = IMPLICIT_DEF
+ $r7 = IMPLICIT_DEF
+ $r8 = IMPLICIT_DEF
+ $r9 = IMPLICIT_DEF
+ $r11 = IMPLICIT_DEF
+ $r12 = IMPLICIT_DEF
+ $lr = IMPLICIT_DEF
; Computing the large stack offset requires an extra register. We should
- ; not just use %r10 for that.
- ; CHECK-NOT: STRi12 %1,{{.*}}%r10
+ ; not just use $r10 for that.
+ ; CHECK-NOT: STRi12 %1,{{.*}}$r10
- STRi12 %r1, %stack.0, 0, 14, %noreg :: (store 4)
+ STRi12 $r1, %stack.0, 0, 14, $noreg :: (store 4)
; use the swiftself parameter value.
- KILL %r10
+ KILL $r10
- KILL %r0
- KILL %r1
- KILL %r2
- KILL %r3
- KILL %r4
- KILL %r5
- KILL %r6
- KILL %r7
- KILL %r8
- KILL %r9
- KILL %r11
- KILL %r12
- KILL %lr
+ KILL $r0
+ KILL $r1
+ KILL $r2
+ KILL $r3
+ KILL $r4
+ KILL $r5
+ KILL $r6
+ KILL $r7
+ KILL $r8
+ KILL $r9
+ KILL $r11
+ KILL $r12
+ KILL $lr
...
Index: test/CodeGen/ARM/prera-ldst-aliasing.mir
===================================================================
--- test/CodeGen/ARM/prera-ldst-aliasing.mir
+++ test/CodeGen/ARM/prera-ldst-aliasing.mir
@@ -18,23 +18,23 @@
alignment: 1
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
body: |
bb.0.entry:
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %1 : gpr = COPY %r1
- %0 : gpr = COPY %r0
- %2 : gpr = t2LDRi12 %1, 0, 14, %noreg :: (load 4 from %ir.y)
- t2STRi12 killed %2, %0, 0, 14, %noreg :: (store 4 into %ir.x)
- %3 : gpr = t2LDRi12 %1, 4, 14, %noreg :: (load 4 from %ir.arrayidx2)
- t2STRi12 killed %3, %0, 4, 14, %noreg :: (store 4 into %ir.arrayidx3)
+ %1 : gpr = COPY $r1
+ %0 : gpr = COPY $r0
+ %2 : gpr = t2LDRi12 %1, 0, 14, $noreg :: (load 4 from %ir.y)
+ t2STRi12 killed %2, %0, 0, 14, $noreg :: (store 4 into %ir.x)
+ %3 : gpr = t2LDRi12 %1, 4, 14, $noreg :: (load 4 from %ir.arrayidx2)
+ t2STRi12 killed %3, %0, 4, 14, $noreg :: (store 4 into %ir.arrayidx3)
; CHECK: t2LDRi12
; CHECK-NEXT: t2LDRi12
; CHECK-NEXT: t2STRi12
; CHECK-NEXT: t2STRi12
- tBX_RET 14, %noreg
+ tBX_RET 14, $noreg
...
Index: test/CodeGen/ARM/prera-ldst-insertpt.mir
===================================================================
--- test/CodeGen/ARM/prera-ldst-insertpt.mir
+++ test/CodeGen/ARM/prera-ldst-insertpt.mir
@@ -18,24 +18,24 @@
alignment: 1
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
- - { reg: '%r2', virtual-reg: '%2' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
+ - { reg: '$r2', virtual-reg: '%2' }
body: |
bb.0.entry:
- liveins: %r0, %r1, %r2
+ liveins: $r0, $r1, $r2
- %2 : rgpr = COPY %r2
- %1 : rgpr = COPY %r1
- %0 : gpr = COPY %r0
- %3 : rgpr = t2MUL %2, %2, 14, %noreg
- %4 : rgpr = t2MUL %1, %1, 14, %noreg
+ %2 : rgpr = COPY $r2
+ %1 : rgpr = COPY $r1
+ %0 : gpr = COPY $r0
+ %3 : rgpr = t2MUL %2, %2, 14, $noreg
+ %4 : rgpr = t2MUL %1, %1, 14, $noreg
%5 : rgpr = t2MOVi32imm -858993459
- %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, %noreg
- %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, %noreg
- t2STRi12 %1, %0, 0, 14, %noreg :: (store 4)
- %10 : rgpr = t2LSLri %2, 1, 14, %noreg, %noreg
- t2STRi12 killed %10, %0, 4, 14, %noreg :: (store 4)
+ %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, $noreg
+ %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, $noreg
+ t2STRi12 %1, %0, 0, 14, $noreg :: (store 4)
+ %10 : rgpr = t2LSLri %2, 1, 14, $noreg, $noreg
+ t2STRi12 killed %10, %0, 4, 14, $noreg :: (store 4)
; Make sure we move the paired stores next to each other, and
; insert them in an appropriate location.
@@ -44,38 +44,38 @@
; CHECK-NEXT: t2MOVi
; CHECK-NEXT: t2ADDrs
- %11 : rgpr = t2MOVi 55, 14, %noreg, %noreg
- %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, %noreg, %noreg
- t2STRi12 killed %12, %0, 16, 14, %noreg :: (store 4)
- %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, %noreg, %noreg
- t2STRi12 killed %13, %0, 20, 14, %noreg :: (store 4)
+ %11 : rgpr = t2MOVi 55, 14, $noreg, $noreg
+ %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, $noreg, $noreg
+ t2STRi12 killed %12, %0, 16, 14, $noreg :: (store 4)
+ %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, $noreg, $noreg
+ t2STRi12 killed %13, %0, 20, 14, $noreg :: (store 4)
; Make sure we move the paired stores next to each other.
; CHECK: t2STRi12 killed %12,
; CHECK-NEXT: t2STRi12 killed %13,
- tBX_RET 14, %noreg
+ tBX_RET 14, $noreg
---
# CHECK-LABEL: name: b
name: b
alignment: 1
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
- - { reg: '%r1', virtual-reg: '%1' }
- - { reg: '%r2', virtual-reg: '%2' }
+ - { reg: '$r0', virtual-reg: '%0' }
+ - { reg: '$r1', virtual-reg: '%1' }
+ - { reg: '$r2', virtual-reg: '%2' }
body: |
bb.0.entry:
- liveins: %r0, %r1, %r2
+ liveins: $r0, $r1, $r2
- %2 : rgpr = COPY %r2
- %1 : rgpr = COPY %r1
- %0 : gpr = COPY %r0
- t2STRi12 %1, %0, 0, 14, %noreg :: (store 4)
- %10 : rgpr = t2LSLri %2, 1, 14, %noreg, %noreg
- t2STRi12 killed %10, %0, 4, 14, %noreg :: (store 4)
- %3 : rgpr = t2MUL %2, %2, 14, %noreg
- t2STRi12 %3, %0, 8, 14, %noreg :: (store 4)
+ %2 : rgpr = COPY $r2
+ %1 : rgpr = COPY $r1
+ %0 : gpr = COPY $r0
+ t2STRi12 %1, %0, 0, 14, $noreg :: (store 4)
+ %10 : rgpr = t2LSLri %2, 1, 14, $noreg, $noreg
+ t2STRi12 killed %10, %0, 4, 14, $noreg :: (store 4)
+ %3 : rgpr = t2MUL %2, %2, 14, $noreg
+ t2STRi12 %3, %0, 8, 14, $noreg :: (store 4)
; Make sure we move the paired stores next to each other, and
; insert them in an appropriate location.
@@ -85,21 +85,21 @@ ; CHECK-NEXT: t2MUL ; CHECK-NEXT: t2MOVi32imm - %4 : rgpr = t2MUL %1, %1, 14, %noreg + %4 : rgpr = t2MUL %1, %1, 14, $noreg %5 : rgpr = t2MOVi32imm -858993459 - %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, %noreg - %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, %noreg - %10 : rgpr = t2LSLri %2, 1, 14, %noreg, %noreg - %11 : rgpr = t2MOVi 55, 14, %noreg, %noreg - %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, %noreg, %noreg - t2STRi12 killed %12, %0, 16, 14, %noreg :: (store 4) - %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, %noreg, %noreg - t2STRi12 killed %13, %0, 20, 14, %noreg :: (store 4) + %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, $noreg + %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, $noreg + %10 : rgpr = t2LSLri %2, 1, 14, $noreg, $noreg + %11 : rgpr = t2MOVi 55, 14, $noreg, $noreg + %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, $noreg, $noreg + t2STRi12 killed %12, %0, 16, 14, $noreg :: (store 4) + %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, $noreg, $noreg + t2STRi12 killed %13, %0, 20, 14, $noreg :: (store 4) ; Make sure we move the paired stores next to each other. ; CHECK: t2STRi12 {{.*}}, 16 ; CHECK-NEXT: t2STRi12 {{.*}}, 20 - tBX_RET 14, %noreg + tBX_RET 14, $noreg ... Index: test/CodeGen/ARM/scavenging.mir =================================================================== --- test/CodeGen/ARM/scavenging.mir +++ test/CodeGen/ARM/scavenging.mir @@ -3,64 +3,64 @@ # CHECK-LABEL: name: scavengebug0 # Make sure we are not spilling/using a physreg used in the very last # instruction of the scavenging range. -# CHECK-NOT: tSTRi {{.*}}%r0,{{.*}}%r0 -# CHECK-NOT: tSTRi {{.*}}%r1,{{.*}}%r1 -# CHECK-NOT: tSTRi {{.*}}%r2,{{.*}}%r2 -# CHECK-NOT: tSTRi {{.*}}%r3,{{.*}}%r3 -# CHECK-NOT: tSTRi {{.*}}%r4,{{.*}}%r4 -# CHECK-NOT: tSTRi {{.*}}%r5,{{.*}}%r5 -# CHECK-NOT: tSTRi {{.*}}%r6,{{.*}}%r6 -# CHECK-NOT: tSTRi {{.*}}%r7,{{.*}}%r7 +# CHECK-NOT: tSTRi {{.*}}$r0,{{.*}}$r0 +# CHECK-NOT: tSTRi {{.*}}$r1,{{.*}}$r1 +# CHECK-NOT: tSTRi {{.*}}$r2,{{.*}}$r2 +# CHECK-NOT: tSTRi {{.*}}$r3,{{.*}}$r3 +# CHECK-NOT: tSTRi {{.*}}$r4,{{.*}}$r4 +# CHECK-NOT: tSTRi {{.*}}$r5,{{.*}}$r5 +# CHECK-NOT: tSTRi {{.*}}$r6,{{.*}}$r6 +# CHECK-NOT: tSTRi {{.*}}$r7,{{.*}}$r7 name: scavengebug0 body: | bb.0: ; Bring up register pressure to force emergency spilling - %r0 = IMPLICIT_DEF - %r1 = IMPLICIT_DEF - %r2 = IMPLICIT_DEF - %r3 = IMPLICIT_DEF - %r4 = IMPLICIT_DEF - %r5 = IMPLICIT_DEF - %r6 = IMPLICIT_DEF - %r7 = IMPLICIT_DEF + $r0 = IMPLICIT_DEF + $r1 = IMPLICIT_DEF + $r2 = IMPLICIT_DEF + $r3 = IMPLICIT_DEF + $r4 = IMPLICIT_DEF + $r5 = IMPLICIT_DEF + $r6 = IMPLICIT_DEF + $r7 = IMPLICIT_DEF %0 : tgpr = IMPLICIT_DEF - %0 = tADDhirr %0, %sp, 14, %noreg - tSTRi %r0, %0, 0, 14, %noreg + %0 = tADDhirr %0, $sp, 14, $noreg + tSTRi $r0, %0, 0, 14, $noreg %1 : tgpr = IMPLICIT_DEF - %1 = tADDhirr %1, %sp, 14, %noreg - tSTRi %r1, %1, 0, 14, %noreg + %1 = tADDhirr %1, $sp, 14, $noreg + tSTRi $r1, %1, 0, 14, $noreg %2 : tgpr = IMPLICIT_DEF - %2 = tADDhirr %2, %sp, 14, %noreg - tSTRi %r2, %2, 0, 14, %noreg + %2 = tADDhirr %2, $sp, 14, $noreg + tSTRi $r2, %2, 0, 14, $noreg %3 : tgpr = IMPLICIT_DEF - %3 = tADDhirr %3, %sp, 14, %noreg - tSTRi %r3, %3, 0, 14, %noreg + %3 = tADDhirr %3, $sp, 14, $noreg + tSTRi $r3, %3, 0, 14, $noreg %4 : tgpr = IMPLICIT_DEF - %4 = tADDhirr %4, %sp, 14, %noreg - tSTRi %r4, %4, 0, 14, %noreg + %4 = tADDhirr %4, $sp, 14, $noreg + tSTRi $r4, %4, 0, 14, $noreg %5 : tgpr = IMPLICIT_DEF - %5 = tADDhirr %5, %sp, 14, %noreg - tSTRi %r5, %5, 0, 14, %noreg + %5 = 
tADDhirr %5, $sp, 14, $noreg + tSTRi $r5, %5, 0, 14, $noreg %6 : tgpr = IMPLICIT_DEF - %6 = tADDhirr %6, %sp, 14, %noreg - tSTRi %r6, %6, 0, 14, %noreg + %6 = tADDhirr %6, $sp, 14, $noreg + tSTRi $r6, %6, 0, 14, $noreg %7 : tgpr = IMPLICIT_DEF - %7 = tADDhirr %7, %sp, 14, %noreg - tSTRi %r7, %7, 0, 14, %noreg + %7 = tADDhirr %7, $sp, 14, $noreg + tSTRi $r7, %7, 0, 14, $noreg - KILL %r0 - KILL %r1 - KILL %r2 - KILL %r3 - KILL %r4 - KILL %r5 - KILL %r6 - KILL %r7 + KILL $r0 + KILL $r1 + KILL $r2 + KILL $r3 + KILL $r4 + KILL $r5 + KILL $r6 + KILL $r7 Index: test/CodeGen/ARM/sched-it-debug-nodes.mir =================================================================== --- test/CodeGen/ARM/sched-it-debug-nodes.mir +++ test/CodeGen/ARM/sched-it-debug-nodes.mir @@ -32,9 +32,9 @@ ; debug value as KILL'ed, resulting in a DEBUG_VALUE node changing codegen! (or ; hopefully, triggering an assert). - ; CHECK: BUNDLE implicit-def dead %itstate - ; CHECK: * DBG_VALUE debug-use %r1, debug-use %noreg, !"u" - ; CHECK-NOT: * DBG_VALUE killed %r1, %noreg, !"u" + ; CHECK: BUNDLE implicit-def dead $itstate + ; CHECK: * DBG_VALUE debug-use $r1, debug-use $noreg, !"u" + ; CHECK-NOT: * DBG_VALUE killed $r1, $noreg, !"u" declare arm_aapcscc void @g(%struct.s*, i8*, i32) #1 @@ -92,24 +92,24 @@ exposesReturnsTwice: false tracksRegLiveness: true liveins: - - { reg: '%r0' } - - { reg: '%r1' } - - { reg: '%r2' } - - { reg: '%r3' } -calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13', - '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4', - '%r5', '%r6', '%r7', '%r8', '%r9', '%r10', '%r11', - '%s16', '%s17', '%s18', '%s19', '%s20', '%s21', - '%s22', '%s23', '%s24', '%s25', '%s26', '%s27', - '%s28', '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', - '%d10_d12', '%d11_d13', '%d12_d14', '%d13_d15', - '%q4_q5', '%q5_q6', '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', - '%r6_r7', '%r8_r9', '%r10_r11', '%d8_d9_d10', '%d9_d10_d11', - '%d10_d11_d12', '%d11_d12_d13', '%d12_d13_d14', - '%d13_d14_d15', '%d8_d10_d12', '%d9_d11_d13', '%d10_d12_d14', - '%d11_d13_d15', '%d8_d10_d12_d14', '%d9_d11_d13_d15', - '%d9_d10', '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', - '%d11_d12_d13_d14' ] + - { reg: '$r0' } + - { reg: '$r1' } + - { reg: '$r2' } + - { reg: '$r3' } +calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13', + '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4', + '$r5', '$r6', '$r7', '$r8', '$r9', '$r10', '$r11', + '$s16', '$s17', '$s18', '$s19', '$s20', '$s21', + '$s22', '$s23', '$s24', '$s25', '$s26', '$s27', + '$s28', '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11', + '$d10_d12', '$d11_d13', '$d12_d14', '$d13_d15', + '$q4_q5', '$q5_q6', '$q6_q7', '$q4_q5_q6_q7', '$r4_r5', + '$r6_r7', '$r8_r9', '$r10_r11', '$d8_d9_d10', '$d9_d10_d11', + '$d10_d11_d12', '$d11_d12_d13', '$d12_d13_d14', + '$d13_d14_d15', '$d8_d10_d12', '$d9_d11_d13', '$d10_d12_d14', + '$d11_d13_d15', '$d8_d10_d12_d14', '$d9_d11_d13_d15', + '$d9_d10', '$d11_d12', '$d13_d14', '$d9_d10_d11_d12', + '$d11_d12_d13_d14' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -125,33 +125,33 @@ hasVAStart: false hasMustTailInVarArgFunc: false stack: - - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr', callee-saved-restored: false } - - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' } + - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr', callee-saved-restored: false } + - { id: 1, type: spill-slot, offset: 
-8, size: 4, alignment: 4, callee-saved-register: '$r7' } body: | bb.0.entry: - liveins: %r0, %r1, %r2, %r3, %lr, %r7 + liveins: $r0, $r1, $r2, $r3, $lr, $r7 - DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28 - DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28 - t2CMPri %r3, 4, 14, %noreg, implicit-def %cpsr, debug-location !31 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - %r0 = t2MOVi -1, 3, %cpsr, %noreg, implicit undef %r0 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - tBX_RET 3, %cpsr, implicit %r0, debug-location !34 - %sp = frame-setup t2STMDB_UPD %sp, 14, %noreg, killed %r7, killed %lr + DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28 + DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28 + t2CMPri $r3, 4, 14, $noreg, implicit-def $cpsr, debug-location !31 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + $r0 = t2MOVi -1, 3, $cpsr, $noreg, implicit undef $r0 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + tBX_RET 3, $cpsr, implicit $r0, debug-location !34 + $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r7, killed $lr frame-setup CFI_INSTRUCTION def_cfa_offset 8 - frame-setup CFI_INSTRUCTION offset %lr, -4 - frame-setup CFI_INSTRUCTION offset %r7, -8 - DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28 - DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28 - DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28 - DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28 - %r1 = tMOVr killed %r2, 14, %noreg, debug-location !32 - %r2 = tMOVr killed %r3, 14, %noreg, debug-location !32 - tBL 14, %noreg, @g, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit-def %sp, debug-location !32 - %r0 = t2MOVi 0, 14, %noreg, %noreg - %sp = t2LDMIA_RET %sp, 14, %noreg, def %r7, def %pc, implicit %r0 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28 + DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28 + DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28 + DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28 + $r1 = tMOVr killed $r2, 14, $noreg, debug-location !32 + $r2 = tMOVr killed $r3, 14, $noreg, debug-location !32 + tBL 14, $noreg, @g, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit-def $sp, debug-location !32 + $r0 = t2MOVi 0, 14, $noreg, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r7, def $pc, implicit $r0 ... 
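The test updates in this patch all follow one mechanical rule: physical registers and reserved names (`$r0`, `$sp`, `$pc`, `$cpsr`, `$noreg`, `$itstate`, ...) gain the `$` sigil, while virtual registers (`%0`, `%1`), block references (`%bb.1`), stack slots (`%stack.0`), and IR value references (`%ir.a`) keep `%`. A minimal sketch of that rewrite, for orientation only — this is a hypothetical helper, not the script used for the actual migration, and the PhysRegs set stands in for a TargetRegisterInfo name lookup:

#include <regex>
#include <string>
#include <unordered_set>

std::string renamePhysRegs(std::string Line,
                           const std::unordered_set<std::string> &PhysRegs) {
  static const std::regex Tok("%([a-zA-Z_][a-zA-Z0-9_]*)");
  std::string Out;
  std::smatch M;
  while (std::regex_search(Line, M, Tok)) {
    Out += M.prefix().str();
    // Only known physreg names switch sigils; everything else is kept.
    Out += PhysRegs.count(M[1].str()) ? "$" + M[1].str() : M[0].str();
    Line = M.suffix().str();
  }
  return Out + Line;
}

For example, renamePhysRegs("liveins: %r0, %r1", {"r0", "r1"}) yields "liveins: $r0, $r1", while "%0 = COPY %r0" becomes "%0 = COPY $r0" — the virtual register `%0` never matches the identifier pattern, so it survives untouched.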
Index: test/CodeGen/ARM/single-issue-r52.mir =================================================================== --- test/CodeGen/ARM/single-issue-r52.mir +++ test/CodeGen/ARM/single-issue-r52.mir @@ -20,13 +20,13 @@ # CHECK: ********** MI Scheduling ********** # CHECK: ScheduleDAGMILive::schedule starting -# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, %noreg; mem:LD32[%A](align=8) +# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, $noreg; mem:LD32[%A](align=8) # CHECK: Latency : 8 # CHECK: Single Issue : true; -# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0:qqpr, %1.dsub_1:qqpr, 14, %noreg +# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0:qqpr, %1.dsub_1:qqpr, 14, $noreg # CHECK: Latency : 5 # CHECK: Single Issue : false; -# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4:dpr, 14, %noreg +# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4:dpr, 14, $noreg # CHECK: Latency : 4 # CHECK: Single Issue : false; @@ -56,7 +56,7 @@ - { id: 5, class: gpr } - { id: 6, class: gpr } liveins: - - { reg: '%r0', virtual-reg: '%0' } + - { reg: '$r0', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -73,14 +73,14 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %r0 + liveins: $r0 - %0 = COPY %r0 - %1 = VLD4d8Pseudo %0, 8, 14, %noreg :: (load 32 from %ir.A, align 8) - %4 = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, %noreg - %5, %6 = VMOVRRD %4, 14, %noreg - %r0 = COPY %5 - %r1 = COPY %6 - BX_RET 14, %noreg, implicit %r0, implicit killed %r1 + %0 = COPY $r0 + %1 = VLD4d8Pseudo %0, 8, 14, $noreg :: (load 32 from %ir.A, align 8) + %4 = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, $noreg + %5, %6 = VMOVRRD %4, 14, $noreg + $r0 = COPY %5 + $r1 = COPY %6 + BX_RET 14, $noreg, implicit $r0, implicit killed $r1 ... Index: test/CodeGen/ARM/tail-dup-bundle.mir =================================================================== --- test/CodeGen/ARM/tail-dup-bundle.mir +++ test/CodeGen/ARM/tail-dup-bundle.mir @@ -2,35 +2,35 @@ --- # CHECK-LABEL: name: func # Make sure the bundle gets duplicated correctly -# CHECK: BUNDLE implicit-def dead %itstate, implicit-def %cpsr, implicit killed %r0, implicit killed %cpsr { -# CHECK: t2IT 1, 24, implicit-def %itstate -# CHECK: t2CMPri killed %r0, 9, 1, killed %cpsr, implicit-def %cpsr, implicit internal killed %itstate +# CHECK: BUNDLE implicit-def dead $itstate, implicit-def $cpsr, implicit killed $r0, implicit killed $cpsr { +# CHECK: t2IT 1, 24, implicit-def $itstate +# CHECK: t2CMPri killed $r0, 9, 1, killed $cpsr, implicit-def $cpsr, implicit internal killed $itstate # CHECK: } -# CHECK: BUNDLE implicit-def dead %itstate, implicit-def %cpsr, implicit killed %r0, implicit killed %cpsr { -# CHECK: t2IT 1, 24, implicit-def %itstate -# CHECK: t2CMPri killed %r0, 9, 1, killed %cpsr, implicit-def %cpsr, implicit internal killed %itstate +# CHECK: BUNDLE implicit-def dead $itstate, implicit-def $cpsr, implicit killed $r0, implicit killed $cpsr { +# CHECK: t2IT 1, 24, implicit-def $itstate +# CHECK: t2CMPri killed $r0, 9, 1, killed $cpsr, implicit-def $cpsr, implicit internal killed $itstate # CHECK: } name: func tracksRegLiveness: true body: | bb.0: - liveins: %r0, %lr, %r7 + liveins: $r0, $lr, $r7 bb.1: - liveins: %r0 + liveins: $r0 - t2CMPri %r0, 32, 14, %noreg, implicit-def %cpsr - BUNDLE implicit-def dead %itstate, implicit-def %cpsr, implicit killed %r0, implicit killed %cpsr { - t2IT 1, 24, implicit-def %itstate - t2CMPri killed %r0, 9, 1, killed %cpsr, implicit-def %cpsr, implicit internal killed %itstate + t2CMPri $r0, 32, 14, $noreg, 
implicit-def $cpsr + BUNDLE implicit-def dead $itstate, implicit-def $cpsr, implicit killed $r0, implicit killed $cpsr { + t2IT 1, 24, implicit-def $itstate + t2CMPri killed $r0, 9, 1, killed $cpsr, implicit-def $cpsr, implicit internal killed $itstate } - t2Bcc %bb.3, 1, killed %cpsr + t2Bcc %bb.3, 1, killed $cpsr bb.2: - %r0 = IMPLICIT_DEF - t2B %bb.1, 14, %noreg + $r0 = IMPLICIT_DEF + t2B %bb.1, 14, $noreg bb.3: - %r0 = IMPLICIT_DEF - t2B %bb.1, 14, %noreg + $r0 = IMPLICIT_DEF + t2B %bb.1, 14, $noreg ... Index: test/CodeGen/ARM/thumb1-ldst-opt.ll =================================================================== --- test/CodeGen/ARM/thumb1-ldst-opt.ll +++ test/CodeGen/ARM/thumb1-ldst-opt.ll @@ -22,6 +22,6 @@ declare void @g(i32) ; CHECK-LABEL: name: foo -; CHECK: [[BASE:%r[0-7]]], {{.*}} tADDi8 +; CHECK: [[BASE:\$r[0-7]]], {{.*}} tADDi8 ; CHECK-NOT: [[BASE]] = tLDMIA_UPD {{.*}} [[BASE]] ; CHECK: tLDMIA killed [[BASE]], {{.*}} def [[BASE]] Index: test/CodeGen/ARM/v6-jumptable-clobber.mir =================================================================== --- test/CodeGen/ARM/v6-jumptable-clobber.mir +++ test/CodeGen/ARM/v6-jumptable-clobber.mir @@ -12,7 +12,7 @@ # CHECK: JUMPTABLE_ADDRS # CHECK-LABEL: name: bar -# CHECK: tTBB_JT %pc, killed %r1 +# CHECK: tTBB_JT $pc, killed $r1 --- | ; ModuleID = 'simple.ll' @@ -195,8 +195,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%r0' } - - { reg: '%r1' } + - { reg: '$r0' } + - { reg: '$r1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -229,24 +229,24 @@ body: | bb.0 (%ir-block.0): successors: %bb.2.d1(0x03c3c3c4), %bb.1(0x7c3c3c3c) - liveins: %r0, %r1 + liveins: $r0, $r1 - %r2 = tLDRpci %const.0, 14, %noreg - tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.addr) - dead %r1 = SPACE 980, undef %r0 - %r0 = tUXTB killed %r0, 14, %noreg - %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, %noreg - tCMPi8 %r1, 25, 14, %noreg, implicit-def %cpsr - tBcc %bb.2.d1, 8, killed %cpsr + $r2 = tLDRpci %const.0, 14, $noreg + tSTRi killed $r2, killed $r1, 0, 14, $noreg :: (store 4 into %ir.addr) + dead $r1 = SPACE 980, undef $r0 + $r0 = tUXTB killed $r0, 14, $noreg + $r1, dead $cpsr = tSUBi3 killed $r0, 1, 14, $noreg + tCMPi8 $r1, 25, 14, $noreg, implicit-def $cpsr + tBcc %bb.2.d1, 8, killed $cpsr bb.1 (%ir-block.0): successors: %bb.3.d2(0x07c549d2), %bb.9.d8(0x07c549d2), %bb.4.d3(0x07c549d2), %bb.5.d4(0x07c549d2), %bb.6.d5(0x07c549d2), %bb.7.d6(0x07c549d2), %bb.8.d7(0x07c549d2), %bb.10.d9(0x07c549d2), %bb.11.d10(0x07c549d2), %bb.2.d1(0x03ab62db), %bb.12.d11(0x07c549d2), %bb.13.d12(0x07c549d2), %bb.14.d13(0x07c549d2), %bb.15.d14(0x07c549d2), %bb.16.d15(0x07c549d2), %bb.17.d16(0x07c549d2), %bb.18.d17(0x07c549d2) - liveins: %r1 + liveins: $r1 - %r0, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg - %r1 = tLEApcrelJT %jump-table.0, 14, %noreg - %r0 = tLDRr killed %r1, killed %r0, 14, %noreg :: (load 4 from jump-table) - tBR_JTr killed %r0, %jump-table.0 + $r0, dead $cpsr = tLSLri killed $r1, 2, 14, $noreg + $r1 = tLEApcrelJT %jump-table.0, 14, $noreg + $r0 = tLDRr killed $r1, killed $r0, 14, $noreg :: (load 4 from jump-table) + tBR_JTr killed $r0, %jump-table.0 bb.3.d2: @@ -293,8 +293,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%r0' } - - { reg: '%r1' } + - { reg: '$r0' } + - { reg: '$r1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -327,23 +327,23 @@ body: | bb.0 (%ir-block.0): successors: %bb.2.d1(0x03c3c3c4), %bb.1(0x7c3c3c3c) - liveins: %r0, %r1 + liveins: $r0, 
$r1 - %r2 = tLDRpci %const.0, 14, %noreg - tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.addr) - %r0 = tUXTB killed %r0, 14, %noreg - %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, %noreg - tCMPi8 %r1, 25, 14, %noreg, implicit-def %cpsr - tBcc %bb.2.d1, 8, killed %cpsr + $r2 = tLDRpci %const.0, 14, $noreg + tSTRi killed $r2, killed $r1, 0, 14, $noreg :: (store 4 into %ir.addr) + $r0 = tUXTB killed $r0, 14, $noreg + $r1, dead $cpsr = tSUBi3 killed $r0, 1, 14, $noreg + tCMPi8 $r1, 25, 14, $noreg, implicit-def $cpsr + tBcc %bb.2.d1, 8, killed $cpsr bb.1 (%ir-block.0): successors: %bb.3.d2(0x07c549d2), %bb.9.d8(0x07c549d2), %bb.4.d3(0x07c549d2), %bb.5.d4(0x07c549d2), %bb.6.d5(0x07c549d2), %bb.7.d6(0x07c549d2), %bb.8.d7(0x07c549d2), %bb.10.d9(0x07c549d2), %bb.11.d10(0x07c549d2), %bb.2.d1(0x03ab62db), %bb.12.d11(0x07c549d2), %bb.13.d12(0x07c549d2), %bb.14.d13(0x07c549d2), %bb.15.d14(0x07c549d2), %bb.16.d15(0x07c549d2), %bb.17.d16(0x07c549d2), %bb.18.d17(0x07c549d2) - liveins: %r1 + liveins: $r1 - %r0, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg - %r1 = tLEApcrelJT %jump-table.0, 14, %noreg - %r0 = tLDRr killed %r1, killed %r0, 14, %noreg :: (load 4 from jump-table) - tBR_JTr killed %r0, %jump-table.0 + $r0, dead $cpsr = tLSLri killed $r1, 2, 14, $noreg + $r1 = tLEApcrelJT %jump-table.0, 14, $noreg + $r0 = tLDRr killed $r1, killed $r0, 14, $noreg :: (load 4 from jump-table) + tBR_JTr killed $r0, %jump-table.0 bb.3.d2: Index: test/CodeGen/ARM/virtregrewriter-subregliveness.mir =================================================================== --- test/CodeGen/ARM/virtregrewriter-subregliveness.mir +++ test/CodeGen/ARM/virtregrewriter-subregliveness.mir @@ -22,18 +22,18 @@ - { id: 0, class: gprpair } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; That copy is being coalesced so we should use a KILL ; placeholder. If that's not a kill that means we probably - ; not coalescing %0 and %r0_r1 and thus we are not testing + ; not coalescing %0 and $r0_r1 and thus we are not testing ; the problematic code anymore. ; - ; CHECK: %r0 = KILL %r0, implicit killed %r0_r1, implicit-def %r0_r1 - ; CHECK-NEXT: %r1 = KILL %r1, implicit killed %r0_r1 - undef %0.gsub_0 = COPY %r0 - %0.gsub_1 = COPY %r1 - tBX_RET 14, %noreg, implicit %0 + ; CHECK: $r0 = KILL $r0, implicit killed $r0_r1, implicit-def $r0_r1 + ; CHECK-NEXT: $r1 = KILL $r1, implicit killed $r0_r1 + undef %0.gsub_0 = COPY $r0 + %0.gsub_1 = COPY $r1 + tBX_RET 14, $noreg, implicit %0 ... @@ -48,14 +48,14 @@ - { id: 0, class: gprpair } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; r1 is not live through so check we are not implicitly using ; the big register. - ; CHECK: %r0 = KILL %r0, implicit-def %r0_r1 + ; CHECK: $r0 = KILL $r0, implicit-def $r0_r1 ; CHECK-NEXT: tBX_RET - undef %0.gsub_0 = COPY %r0 - tBX_RET 14, %noreg, implicit %0 + undef %0.gsub_0 = COPY $r0 + tBX_RET 14, $noreg, implicit %0 ... @@ -71,14 +71,14 @@ - { id: 0, class: gprpair } body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 ; r1 is not live through so check we are not implicitly using ; the big register. - ; CHECK: %r0 = KILL %r0, implicit-def %r1, implicit-def %r0_r1 + ; CHECK: $r0 = KILL $r0, implicit-def $r1, implicit-def $r0_r1 ; CHECK-NEXT: tBX_RET - undef %0.gsub_0 = COPY %r0, implicit-def %r1 - tBX_RET 14, %noreg, implicit %0 + undef %0.gsub_0 = COPY $r0, implicit-def $r1 + tBX_RET 14, $noreg, implicit %0 ... 
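A note on the FileCheck updates (e.g. the thumb1-ldst-opt.ll hunk above): plain CHECK text is matched literally, so `$r0` needs no escaping, but the bodies of `[[...]]` and `{{...}}` are regular expressions, where `$` is an anchor and must be written `\$`, as in `[[BASE:\$r[0-7]]]`. A small std::regex analogue of the two matching modes (illustration only):

#include <cassert>
#include <regex>
#include <string>

int main() {
  std::string MIR = "$r3, {{...}} tADDi8";
  // Literal mode: direct substring search, '$' is just a character.
  assert(MIR.find("$r3") == 0);
  // Regex mode: '$' must be escaped to match the sigil itself.
  assert(std::regex_search(MIR, std::regex("\\$r[0-7]")));
  return 0;
}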
Index: test/CodeGen/ARM/vldm-liveness.mir =================================================================== --- test/CodeGen/ARM/vldm-liveness.mir +++ test/CodeGen/ARM/vldm-liveness.mir @@ -21,20 +21,20 @@ name: foo alignment: 1 liveins: - - { reg: '%r0' } + - { reg: '$r0' } body: | bb.0 (%ir-block.0): - liveins: %r0 + liveins: $r0 - %s1 = VLDRS %r0, 1, 14, %noreg, implicit-def %q0 :: (load 4) - %s3 = VLDRS %r0, 2, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4) - ; CHECK: %s3 = VLDRS %r0, 2, 14, %noreg, implicit killed undef %q0, implicit-def %q0 :: (load 4) + $s1 = VLDRS $r0, 1, 14, $noreg, implicit-def $q0 :: (load 4) + $s3 = VLDRS $r0, 2, 14, $noreg, implicit killed $q0, implicit-def $q0 :: (load 4) + ; CHECK: $s3 = VLDRS $r0, 2, 14, $noreg, implicit killed undef $q0, implicit-def $q0 :: (load 4) - %s0 = VLDRS %r0, 0, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4) - ; CHECK: VLDMSIA %r0, 14, %noreg, def %s0, def %s1, implicit-def %noreg + $s0 = VLDRS $r0, 0, 14, $noreg, implicit killed $q0, implicit-def $q0 :: (load 4) + ; CHECK: VLDMSIA $r0, 14, $noreg, def $s0, def $s1, implicit-def $noreg - %s2 = VLDRS killed %r0, 4, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4) - ; CHECK: %s2 = VLDRS killed %r0, 4, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4) + $s2 = VLDRS killed $r0, 4, 14, $noreg, implicit killed $q0, implicit-def $q0 :: (load 4) + ; CHECK: $s2 = VLDRS killed $r0, 4, 14, $noreg, implicit killed $q0, implicit-def $q0 :: (load 4) - tBX_RET 14, %noreg, implicit %q0 + tBX_RET 14, $noreg, implicit $q0 ... Index: test/CodeGen/BPF/sockex2.ll =================================================================== --- test/CodeGen/BPF/sockex2.ll +++ test/CodeGen/BPF/sockex2.ll @@ -311,7 +311,7 @@ ; CHECK-LABEL: bpf_prog2: ; CHECK: r0 = *(u16 *)skb[12] # encoding: [0x28,0x00,0x00,0x00,0x0c,0x00,0x00,0x00] ; CHECK: r0 = *(u16 *)skb[16] # encoding: [0x28,0x00,0x00,0x00,0x10,0x00,0x00,0x00] -; CHECK: implicit-def: %r1 +; CHECK: implicit-def: $r1 ; CHECK: r1 = ; CHECK: call 1 # encoding: [0x85,0x00,0x00,0x00,0x01,0x00,0x00,0x00] ; CHECK: call 2 # encoding: [0x85,0x00,0x00,0x00,0x02,0x00,0x00,0x00] Index: test/CodeGen/Hexagon/addrmode-globoff.mir =================================================================== --- test/CodeGen/Hexagon/addrmode-globoff.mir +++ test/CodeGen/Hexagon/addrmode-globoff.mir @@ -13,13 +13,13 @@ body: | bb.0: - liveins: %r0 + liveins: $r0 ; Make sure that the offset in @g0 is 8. - ; CHECK: S4_storerh_ur killed %r0, 2, @g0 + 8, %r0 + ; CHECK: S4_storerh_ur killed $r0, 2, @g0 + 8, $r0 - %r1 = A2_tfrsi @g0+4 - %r2 = S2_addasl_rrri %r1, %r0, 2 - S2_storerh_io %r2, 4, %r0 + $r1 = A2_tfrsi @g0+4 + $r2 = S2_addasl_rrri $r1, $r0, 2 + S2_storerh_io $r2, 4, $r0 ... Index: test/CodeGen/Hexagon/addrmode-keepdeadphis.mir =================================================================== --- test/CodeGen/Hexagon/addrmode-keepdeadphis.mir +++ test/CodeGen/Hexagon/addrmode-keepdeadphis.mir @@ -14,17 +14,17 @@ body: | bb.0: - liveins: %p0 - %r0 = A2_tfrsi @g - %r1 = A2_tfrsi 1 - %r2 = S2_addasl_rrri %r0, %r1, 1 - J2_jumpt %p0, %bb.2, implicit-def %pc + liveins: $p0 + $r0 = A2_tfrsi @g + $r1 = A2_tfrsi 1 + $r2 = S2_addasl_rrri $r0, $r1, 1 + J2_jumpt $p0, %bb.2, implicit-def $pc bb.1: - liveins: %r0, %r2 - %r1 = A2_tfrsi 2 + liveins: $r0, $r2 + $r1 = A2_tfrsi 2 bb.2: - liveins: %r0, %r2 - %r3 = L2_loadri_io %r2, 0 + liveins: $r0, $r2 + $r3 = L2_loadri_io $r2, 0 ... 
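The addrmode-globoff test above folds `$r1 = A2_tfrsi @g0+4; $r2 = S2_addasl_rrri $r1, $r0, 2; S2_storerh_io $r2, 4, ...` into a single `S4_storerh_ur ... @g0 + 8`: the +4 inside the global and the +4 store offset combine. A quick arithmetic check of that folding; `0x1000` is a stand-in address for @g0:

#include <cassert>

static unsigned storeAddr(unsigned G0, unsigned R0) {
  unsigned R1 = G0 + 4;          // A2_tfrsi @g0+4
  unsigned R2 = R1 + (R0 << 2);  // S2_addasl_rrri $r1, $r0, 2
  return R2 + 4;                 // S2_storerh_io $r2, 4, ...
}

int main() {
  for (unsigned R0 = 0; R0 < 16; ++R0)
    assert(storeAddr(0x1000, R0) == 0x1000 + 8 + (R0 << 2)); // @g0 + 8 form
  return 0;
}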
Index: test/CodeGen/Hexagon/addrmode-rr-to-io.mir =================================================================== --- test/CodeGen/Hexagon/addrmode-rr-to-io.mir +++ test/CodeGen/Hexagon/addrmode-rr-to-io.mir @@ -1,7 +1,7 @@ # RUN: llc -march=hexagon -run-pass amode-opt %s -o - | FileCheck %s # This testcase used to crash. -# CHECK: S2_storerb_io killed %r0, @var_i8, killed %r2 +# CHECK: S2_storerb_io killed $r0, @var_i8, killed $r2 --- | define void @fred() { ret void } @@ -13,10 +13,10 @@ tracksRegLiveness: true body: | bb.0: - liveins: %r0 - %r1 = A2_tfrsi @var_i8 - %r2 = A2_tfrsi 255 - S4_storerb_rr killed %r0, killed %r1, 0, killed %r2 - PS_jmpret %r31, implicit-def %pc + liveins: $r0 + $r1 = A2_tfrsi @var_i8 + $r2 = A2_tfrsi 255 + S4_storerb_rr killed $r0, killed $r1, 0, killed $r2 + PS_jmpret $r31, implicit-def $pc ... Index: test/CodeGen/Hexagon/anti-dep-partial.mir =================================================================== --- test/CodeGen/Hexagon/anti-dep-partial.mir +++ test/CodeGen/Hexagon/anti-dep-partial.mir @@ -13,22 +13,22 @@ body: | bb.0: successors: - liveins: %r0, %r1, %d1, %d2, %r16, %r17, %r19, %r22, %r23 - %r2 = A2_add %r23, killed %r17 - %r6 = M2_mpyi %r16, %r16 - %r22 = M2_accii %r22, killed %r2, 2 - %r7 = A2_tfrsi 12345678 - %r3 = A2_tfr killed %r16 - %d2 = A2_tfrp killed %d0 - %r2 = L2_loadri_io %r29, 28 - %r2 = M2_mpyi killed %r6, killed %r2 - %r23 = S2_asr_i_r %r22, 31 - S2_storeri_io killed %r29, 0, killed %r7 + liveins: $r0, $r1, $d1, $d2, $r16, $r17, $r19, $r22, $r23 + $r2 = A2_add $r23, killed $r17 + $r6 = M2_mpyi $r16, $r16 + $r22 = M2_accii $r22, killed $r2, 2 + $r7 = A2_tfrsi 12345678 + $r3 = A2_tfr killed $r16 + $d2 = A2_tfrp killed $d0 + $r2 = L2_loadri_io $r29, 28 + $r2 = M2_mpyi killed $r6, killed $r2 + $r23 = S2_asr_i_r $r22, 31 + S2_storeri_io killed $r29, 0, killed $r7 ; The anti-dependency on r23 between the first A2_add and the ; S2_asr_i_r was causing d11 to be renamed, while r22 remained ; unchanged. Check that the renaming of d11 does not happen. ; CHECK: d11 - %d0 = A2_tfrp killed %d11 - J2_call @check, implicit-def %d0, implicit-def %d1, implicit-def %d2, implicit %d0, implicit %d1, implicit %d2 + $d0 = A2_tfrp killed $d11 + J2_call @check, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit $d0, implicit $d1, implicit $d2 ... Index: test/CodeGen/Hexagon/bank-conflict-load.mir =================================================================== --- test/CodeGen/Hexagon/bank-conflict-load.mir +++ test/CodeGen/Hexagon/bank-conflict-load.mir @@ -1,11 +1,11 @@ # RUN: llc -march=hexagon -run-pass post-RA-sched %s -o - | FileCheck %s -# The two loads from %a (%r0) can cause a bank conflict. Check that they +# The two loads from %a ($r0) can cause a bank conflict. Check that they # are not scheduled next to each other. -# CHECK: L2_loadri_io %r0, 8 -# CHECK: L2_loadri_io killed %r1, 0 -# CHECK: L2_loadri_io killed %r0, 12 +# CHECK: L2_loadri_io $r0, 8 +# CHECK: L2_loadri_io killed $r1, 0 +# CHECK: L2_loadri_io killed $r0, 12 --- | define void @foo(i32* %a, i32* %b) { @@ -19,10 +19,10 @@ body: | bb.0: - liveins: %r0, %r1 + liveins: $r0, $r1 - %r2 = L2_loadri_io %r0, 8 :: (load 4 from %ir.a) - %r3 = L2_loadri_io killed %r0, 12 :: (load 4 from %ir.a) - %r4 = L2_loadri_io killed %r1, 0 :: (load 4 from %ir.b) + $r2 = L2_loadri_io $r0, 8 :: (load 4 from %ir.a) + $r3 = L2_loadri_io killed $r0, 12 :: (load 4 from %ir.a) + $r4 = L2_loadri_io killed $r1, 0 :: (load 4 from %ir.b) ... 
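The bank-conflict test above keeps the two loads from the same base ($r0 at offsets 8 and 12) from being scheduled back to back. A toy model of the conflict predicate follows; which address bits actually select a bank is a placeholder assumption here, not the Hexagon rule:

// Assumed, for illustration only: bit 3 of the address selects the bank.
static inline bool mayBankConflict(unsigned AddrA, unsigned AddrB) {
  return ((AddrA >> 3) & 1) == ((AddrB >> 3) & 1); // same assumed bank
}

Under that assumption, mayBankConflict(base + 8, base + 12) holds for any 8-aligned base, which is why the scheduler interposes the unrelated load from $r1.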
Index: test/CodeGen/Hexagon/branch-folder-hoist-kills.mir =================================================================== --- test/CodeGen/Hexagon/branch-folder-hoist-kills.mir +++ test/CodeGen/Hexagon/branch-folder-hoist-kills.mir @@ -21,10 +21,10 @@ # J2_jumpr %R31, implicit dead %PC # -# CHECK: %r1 = A2_sxth killed %r0 -# CHECK: %r0 = C2_cmoveit %p0, 2 -# CHECK-NOT: implicit-def %r0 -# CHECK: %r0 = C2_cmoveif killed %p0, 1, implicit killed %r0 +# CHECK: $r1 = A2_sxth killed $r0 +# CHECK: $r0 = C2_cmoveit $p0, 2 +# CHECK-NOT: implicit-def $r0 +# CHECK: $r0 = C2_cmoveif killed $p0, 1, implicit killed $r0 --- name: fred @@ -32,28 +32,28 @@ body: | bb.0: - liveins: %r0 + liveins: $r0 successors: %bb.1, %bb.2 - A2_nop implicit-def %p0 - J2_jumpt killed %p0, %bb.2, implicit-def dead %pc + A2_nop implicit-def $p0 + J2_jumpt killed $p0, %bb.2, implicit-def dead $pc bb.1: successors: %bb.3 - liveins: %r0 - %r1 = A2_sxth killed %r0 - %r0 = A2_tfrsi 1 - J2_jump %bb.3, implicit-def %pc + liveins: $r0 + $r1 = A2_sxth killed $r0 + $r0 = A2_tfrsi 1 + J2_jump %bb.3, implicit-def $pc bb.2: successors: %bb.3 - liveins: %r0 - %r1 = A2_sxth killed %r0 - %r0 = A2_tfrsi 2 + liveins: $r0 + $r1 = A2_sxth killed $r0 + $r0 = A2_tfrsi 2 bb.3: - liveins: %r0, %r1 - %r0 = A2_add killed %r0, killed %r1 - J2_jumpr %r31, implicit-def dead %pc + liveins: $r0, $r1 + $r0 = A2_add killed $r0, killed $r1 + J2_jumpr $r31, implicit-def dead $pc ... Index: test/CodeGen/Hexagon/branchfolder-insert-impdef.mir =================================================================== --- test/CodeGen/Hexagon/branchfolder-insert-impdef.mir +++ test/CodeGen/Hexagon/branchfolder-insert-impdef.mir @@ -10,11 +10,11 @@ # CHECK-LABEL: name: func0 # CHECK-LABEL: bb.0: -# CHECK: %r0 = IMPLICIT_DEF +# CHECK: $r0 = IMPLICIT_DEF # CHECK-LABEL: bb.1: # CHECK-LABEL: bb.2: -# CHECK: liveins: %r0 -# CHECK: PS_storerhabs 0, %r0 +# CHECK: liveins: $r0 +# CHECK: PS_storerhabs 0, $r0 # CHECK: PS_jmpret --- @@ -23,36 +23,36 @@ body: | bb.0: - liveins: %r31 + liveins: $r31 successors: %bb.1, %bb.2 - J2_jumpt undef %p0, %bb.2, implicit-def %pc - J2_jump %bb.1, implicit-def %pc + J2_jumpt undef $p0, %bb.2, implicit-def $pc + J2_jump %bb.1, implicit-def $pc bb.1: - liveins: %r31 + liveins: $r31 successors: %bb.3 - %r0 = L2_loadruh_io undef %r1, 0 - PS_storerhabs 0, killed %r0 - J2_jump %bb.3, implicit-def %pc + $r0 = L2_loadruh_io undef $r1, 0 + PS_storerhabs 0, killed $r0 + J2_jump %bb.3, implicit-def $pc bb.2: - liveins: %r31 + liveins: $r31 successors: %bb.3 - PS_storerhabs 0, undef %r0 - J2_jump %bb.3, implicit-def %pc + PS_storerhabs 0, undef $r0 + J2_jump %bb.3, implicit-def $pc bb.3: - liveins: %r31 - PS_jmpret killed %r31, implicit-def %pc + liveins: $r31 + PS_jmpret killed $r31, implicit-def $pc ... 
--- # CHECK-LABEL: name: func1 # CHECK-LABEL: bb.1: -# CHECK: %r0 = IMPLICIT_DEF +# CHECK: $r0 = IMPLICIT_DEF # CHECK-LABEL: bb.2: # CHECK-LABEL: bb.3: -# CHECK: liveins: %r0 -# CHECK: PS_storerhabs 0, killed %r0 +# CHECK: liveins: $r0 +# CHECK: PS_storerhabs 0, killed $r0 # CHECK: PS_jmpret name: func1 @@ -60,28 +60,28 @@ body: | bb.0: - liveins: %r31 + liveins: $r31 successors: %bb.1, %bb.2 - J2_jumpt undef %p0, %bb.2, implicit-def %pc - J2_jump %bb.1, implicit-def %pc + J2_jumpt undef $p0, %bb.2, implicit-def $pc + J2_jump %bb.1, implicit-def $pc bb.1: - liveins: %r31 + liveins: $r31 successors: %bb.3 - %r1 = A2_tfrsi 1 - PS_storerhabs 0, undef %r0 - %r0 = A2_tfrsi 1 - J2_jump %bb.3, implicit-def %pc + $r1 = A2_tfrsi 1 + PS_storerhabs 0, undef $r0 + $r0 = A2_tfrsi 1 + J2_jump %bb.3, implicit-def $pc bb.2: - liveins: %r31 + liveins: $r31 successors: %bb.3 - %r0 = L2_loadruh_io undef %r1, 0 - PS_storerhabs 0, killed %r0 - %r0 = A2_tfrsi 1 - J2_jump %bb.3, implicit-def %pc + $r0 = L2_loadruh_io undef $r1, 0 + PS_storerhabs 0, killed $r0 + $r0 = A2_tfrsi 1 + J2_jump %bb.3, implicit-def $pc bb.3: - liveins: %r31 - PS_jmpret killed %r31, implicit undef %r0, implicit-def %pc + liveins: $r31 + PS_jmpret killed $r31, implicit undef $r0, implicit-def $pc ... Index: test/CodeGen/Hexagon/cext-opt-basic.mir =================================================================== --- test/CodeGen/Hexagon/cext-opt-basic.mir +++ test/CodeGen/Hexagon/cext-opt-basic.mir @@ -26,7 +26,7 @@ ... # CHECK-LABEL: name: test1 -# CHECK: [[C:%[0-9]+]]:intregs = COPY %r0 +# CHECK: [[C:%[0-9]+]]:intregs = COPY $r0 # CHECK: [[B:%[0-9]+]]:intregs = A2_addi [[C]], @global_address # CHECK: L2_loadri_io [[B]], 0 # CHECK: L2_loadri_io [[B]], 4 @@ -40,20 +40,20 @@ - { id: 3, class: intregs } body: | bb.0: - liveins: %r0 - %0 = COPY %r0 + liveins: $r0 + %0 = COPY $r0 %1 = L4_loadri_ur %0, 0, @global_address %2 = L4_loadri_ur %0, 0, @global_address+4 %3 = L4_loadri_ur %0, 0, @global_address+8 ... # CHECK-LABEL: name: test2 -# CHECK: [[C:%[0-9]+]]:intregs = COPY %r0 +# CHECK: [[C:%[0-9]+]]:intregs = COPY $r0 # CHECK: [[B:%[0-9]+]]:intregs = A2_tfrsi @global_address + 4 # CHECK: [[T0:%[0-9]+]]:intregs = A2_addi [[B]], -4 -# CHECK: %r0 = COPY [[T0]] +# CHECK: $r0 = COPY [[T0]] # CHECK: [[T1:%[0-9]+]]:intregs = A2_addi [[B]], -2 -# CHECK: %r1 = COPY [[T1]] +# CHECK: $r1 = COPY [[T1]] # CHECK: L4_loadri_rr [[B]], [[C]], 0 --- name: test2 @@ -64,11 +64,11 @@ - { id: 3, class: intregs } body: | bb.0: - liveins: %r0 - %0 = COPY %r0 + liveins: $r0 + %0 = COPY $r0 %1 = A2_tfrsi @global_address - %r0 = COPY %1 + $r0 = COPY %1 %2 = A2_tfrsi @global_address+2 - %r1 = COPY %2 + $r1 = COPY %2 %3 = L4_loadri_ur %0, 0, @global_address+4 ... Index: test/CodeGen/Hexagon/cext-opt-numops.mir =================================================================== --- test/CodeGen/Hexagon/cext-opt-numops.mir +++ test/CodeGen/Hexagon/cext-opt-numops.mir @@ -28,9 +28,9 @@ body: | bb.0: - liveins: %r0, %r1 - %0 = COPY %r1 - %1 = COPY %r0 + liveins: $r0, $r1 + %0 = COPY $r1 + %1 = COPY $r0 %2 = A2_tfrsi @array %3 = IMPLICIT_DEF %4 = A2_tfrsi @array+424 @@ -41,5 +41,5 @@ %8 = A2_tfrsi @array+144 %9 = C2_mux %3, %4, %8 S4_storeiri_io %9, 0, 0 - PS_jmpret %r31, implicit-def %pc + PS_jmpret $r31, implicit-def $pc ... 
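The cext-opt tests above (cext-opt-basic, cext-opt-numops) check that repeated extended constants such as @global_address, @global_address+4, @global_address+8 are rewritten to one extended base plus short immediate offsets, so only a single constant extender is needed. A scalar sketch of that rebasing — a hypothetical helper, not the pass itself:

#include <cstdint>
#include <utility>
#include <vector>

// Given absolute addresses, return {base, offset} pairs sharing one base,
// so only the base requires a 32-bit extender (A2_tfrsi/A2_addi once).
std::vector<std::pair<uint64_t, int64_t>>
rebase(const std::vector<uint64_t> &Addrs) {
  std::vector<std::pair<uint64_t, int64_t>> Out;
  if (Addrs.empty())
    return Out;
  uint64_t Base = Addrs.front();
  for (uint64_t A : Addrs)
    Out.push_back({Base, static_cast<int64_t>(A - Base)}); // folded offset
  return Out;
}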
Index: test/CodeGen/Hexagon/cext-opt-range-assert.mir =================================================================== --- test/CodeGen/Hexagon/cext-opt-range-assert.mir +++ test/CodeGen/Hexagon/cext-opt-range-assert.mir @@ -22,12 +22,12 @@ %6:intregs = A2_tfrsi @G %7:intregs = A2_addi killed %6, 2 %8:intregs = A2_tfrsi 127 - ADJCALLSTACKDOWN 0, 0, implicit-def %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29 - %r0 = COPY %7 - %r1 = COPY %8 + ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + $r0 = COPY %7 + $r1 = COPY %8 %9:intregs = IMPLICIT_DEF - J2_callr killed %9, implicit-def dead %pc, implicit-def dead %r31, implicit %r29, implicit %r0, implicit %r1, implicit-def %r29 - ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29 + J2_callr killed %9, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit $r1, implicit-def $r29 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 %5:intregs = A2_tfrsi 8 %10:intregs = A2_tfrsi @G + 8 %4:intregs = A2_addi killed %10, 2 @@ -39,16 +39,16 @@ %11:predregs = C2_cmpgtui %1, 127 %2:intregs = A2_addi %1, 8 %3:intregs = A2_addi %0, 16 - J2_jumpf %11, %bb.1, implicit-def %pc + J2_jumpf %11, %bb.1, implicit-def $pc bb.2: %13:intregs = A2_tfrsi @G %14:intregs = A2_addi killed %13, 2 %15:intregs = A2_tfrsi 127 - ADJCALLSTACKDOWN 0, 0, implicit-def %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29 - %r0 = COPY %14 - %r1 = COPY %15 + ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + $r0 = COPY %14 + $r1 = COPY %15 %16:intregs = IMPLICIT_DEF - PS_callr_nr killed %16, implicit %r0, implicit %r1, implicit-def %r29 - ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29 + PS_callr_nr killed %16, implicit $r0, implicit $r1, implicit-def $r29 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 ... Index: test/CodeGen/Hexagon/cext-opt-range-offset.mir =================================================================== --- test/CodeGen/Hexagon/cext-opt-range-offset.mir +++ test/CodeGen/Hexagon/cext-opt-range-offset.mir @@ -29,8 +29,8 @@ bb.2: successors: %bb.3, %bb.4 %4 = IMPLICIT_DEF - J2_jumpt %4, %bb.4, implicit-def %pc - J2_jump %bb.3, implicit-def %pc + J2_jumpt %4, %bb.4, implicit-def $pc + J2_jump %bb.3, implicit-def $pc bb.3: successors: %bb.4 Index: test/CodeGen/Hexagon/duplex-addi-global-imm.mir =================================================================== --- test/CodeGen/Hexagon/duplex-addi-global-imm.mir +++ test/CodeGen/Hexagon/duplex-addi-global-imm.mir @@ -15,8 +15,8 @@ body: | bb.0: - liveins: %r0 - %r0 = A2_addi %r0, @g - %r1 = A2_tfrsi 0 + liveins: $r0 + $r0 = A2_addi $r0, @g + $r1 = A2_tfrsi 0 ... Index: test/CodeGen/Hexagon/early-if-debug.mir =================================================================== --- test/CodeGen/Hexagon/early-if-debug.mir +++ test/CodeGen/Hexagon/early-if-debug.mir @@ -3,14 +3,14 @@ # if-converted. 
# CHECK-LABEL: bb.0: -# CHECK: %0:intregs = COPY %r0 +# CHECK: %0:intregs = COPY $r0 # CHECK: %1:predregs = C2_cmpeqi %0, 0 # CHECK: %2:intregs = A2_tfrsi 123 -# CHECK: DBG_VALUE debug-use %0, debug-use %noreg -# CHECK: DBG_VALUE debug-use %0, debug-use %noreg -# CHECK: DBG_VALUE debug-use %0, debug-use %noreg -# CHECK: DBG_VALUE debug-use %0, debug-use %noreg -# CHECK: DBG_VALUE debug-use %0, debug-use %noreg +# CHECK: DBG_VALUE debug-use %0, debug-use $noreg +# CHECK: DBG_VALUE debug-use %0, debug-use $noreg +# CHECK: DBG_VALUE debug-use %0, debug-use $noreg +# CHECK: DBG_VALUE debug-use %0, debug-use $noreg +# CHECK: DBG_VALUE debug-use %0, debug-use $noreg # CHECK: %3:intregs = A2_tfrsi 321 # CHECK: %5:intregs = C2_mux %1, %2, %3 @@ -31,20 +31,20 @@ - { id: 4, class: intregs } body: | bb.0: - liveins: %r0 + liveins: $r0 - %0 = COPY %r0 + %0 = COPY $r0 %1 = C2_cmpeqi %0, 0 %2 = A2_tfrsi 123 - J2_jumpt %1, %bb.2, implicit-def dead %pc - J2_jump %bb.1, implicit-def dead %pc + J2_jumpt %1, %bb.2, implicit-def dead $pc + J2_jump %bb.1, implicit-def dead $pc bb.1: - DBG_VALUE debug-use %0, debug-use %noreg, !1, !1 - DBG_VALUE debug-use %0, debug-use %noreg, !1, !1 - DBG_VALUE debug-use %0, debug-use %noreg, !1, !1 - DBG_VALUE debug-use %0, debug-use %noreg, !1, !1 - DBG_VALUE debug-use %0, debug-use %noreg, !1, !1 + DBG_VALUE debug-use %0, debug-use $noreg, !1, !1 + DBG_VALUE debug-use %0, debug-use $noreg, !1, !1 + DBG_VALUE debug-use %0, debug-use $noreg, !1, !1 + DBG_VALUE debug-use %0, debug-use $noreg, !1, !1 + DBG_VALUE debug-use %0, debug-use $noreg, !1, !1 %3 = A2_tfrsi 321 bb.2: Index: test/CodeGen/Hexagon/expand-condsets-def-undef.mir =================================================================== --- test/CodeGen/Hexagon/expand-condsets-def-undef.mir +++ test/CodeGen/Hexagon/expand-condsets-def-undef.mir @@ -21,16 +21,16 @@ - { id: 2, class: doubleregs } - { id: 3, class: intregs } liveins: - - { reg: '%p0', virtual-reg: '%0' } - - { reg: '%r0', virtual-reg: '%1' } - - { reg: '%d0', virtual-reg: '%2' } + - { reg: '$p0', virtual-reg: '%0' } + - { reg: '$r0', virtual-reg: '%1' } + - { reg: '$d0', virtual-reg: '%2' } body: | bb.0: - liveins: %r0, %d0, %p0 - %0 = COPY %p0 - %1 = COPY %r0 - %2 = COPY %d0 + liveins: $r0, $d0, $p0 + %0 = COPY $p0 + %1 = COPY $r0 + %2 = COPY $d0 ; Check that this instruction is unchanged (remains unpredicated) ; CHECK: %3:intregs = A2_addi %2.isub_hi, 1 %3 = A2_addi %2.isub_hi, 1 Index: test/CodeGen/Hexagon/expand-condsets-imm.mir =================================================================== --- test/CodeGen/Hexagon/expand-condsets-imm.mir +++ test/CodeGen/Hexagon/expand-condsets-imm.mir @@ -17,5 +17,5 @@ bb.1: %1 = IMPLICIT_DEF %1 = C2_muxir undef %0, %1, @G - %r0 = COPY %1 + $r0 = COPY %1 ... 
Index: test/CodeGen/Hexagon/expand-condsets-impuse.mir =================================================================== --- test/CodeGen/Hexagon/expand-condsets-impuse.mir +++ test/CodeGen/Hexagon/expand-condsets-impuse.mir @@ -28,17 +28,17 @@ - { id: 14, class: intregs } - { id: 99, class: intregs } liveins: - - { reg: '%r0', virtual-reg: '%99' } + - { reg: '$r0', virtual-reg: '%99' } body: | bb.0: - liveins: %r0 + liveins: $r0 successors: %bb.298, %bb.301 - %99 = COPY %r0 - J2_jumpr %99, implicit-def %pc + %99 = COPY $r0 + J2_jumpr %99, implicit-def $pc bb.298: - liveins: %r0 + liveins: $r0 successors: %bb.299, %bb.301, %bb.309 %0 = A2_tfrsi 123 %1 = A2_tfrsi -1 @@ -46,7 +46,7 @@ %4 = C2_cmpeqi %3, 33 %5 = A2_tfrsi -2 %6 = C2_mux %4, %5, %1 - J2_jumpr %6, implicit-def %pc + J2_jumpr %6, implicit-def $pc bb.299: successors: %bb.300, %bb.309 @@ -55,12 +55,12 @@ %9 = A2_tfrsi -999 ; CHECK: %10:intregs = C2_cmoveit killed %8, -999, implicit %10 %10 = C2_mux %8, %9, %1 - J2_jumpr %10, implicit-def %pc + J2_jumpr %10, implicit-def $pc bb.300: successors: %bb.309 S2_storeri_io %99, 0, %0 - J2_jump %bb.309, implicit-def %pc + J2_jump %bb.309, implicit-def $pc bb.301: successors: %bb.299, %bb.309 @@ -70,7 +70,7 @@ %12 = C2_cmpeqi %11, 33 %13 = A2_tfrsi -2 %14 = C2_mux %12, %13, %1 - J2_jumpr %14, implicit-def %pc + J2_jumpr %14, implicit-def $pc bb.309: Index: test/CodeGen/Hexagon/expand-condsets-rm-reg.mir =================================================================== --- test/CodeGen/Hexagon/expand-condsets-rm-reg.mir +++ test/CodeGen/Hexagon/expand-condsets-rm-reg.mir @@ -27,23 +27,23 @@ - { id: 3, class: intregs } - { id: 4, class: intregs } liveins: - - { reg: '%r0', virtual-reg: '%0' } - - { reg: '%r1', virtual-reg: '%1' } - - { reg: '%p0', virtual-reg: '%2' } + - { reg: '$r0', virtual-reg: '%0' } + - { reg: '$r1', virtual-reg: '%1' } + - { reg: '$p0', virtual-reg: '%2' } body: | bb.0: - liveins: %r0, %r1, %p0 - %0 = COPY %r0 - %0 = COPY %r0 ; Force isSSA = false. - %1 = COPY %r1 - %2 = COPY %p0 + liveins: $r0, $r1, $p0 + %0 = COPY $r0 + %0 = COPY $r0 ; Force isSSA = false. + %1 = COPY $r1 + %2 = COPY $p0 ; Check that %3 was coalesced into %4. ; CHECK: %4:intregs = A2_abs %1 ; CHECK: %4:intregs = A2_tfrt killed %2, killed %0, implicit %4 %3 = A2_abs %1 %4 = C2_mux %2, %0, %3 - %r0 = COPY %4 - J2_jumpr %r31, implicit %r0, implicit-def %pc + $r0 = COPY %4 + J2_jumpr $r31, implicit $r0, implicit-def $pc ... Index: test/CodeGen/Hexagon/expand-condsets-same-inputs.mir =================================================================== --- test/CodeGen/Hexagon/expand-condsets-same-inputs.mir +++ test/CodeGen/Hexagon/expand-condsets-same-inputs.mir @@ -18,15 +18,15 @@ body: | bb.0: - liveins: %r0, %r1, %r2, %p0 - %0 = COPY %p0 - %0 = COPY %p0 ; Cheat: convince MIR parser that this is not SSA. - %1 = COPY %r1 + liveins: $r0, $r1, $r2, $p0 + %0 = COPY $p0 + %0 = COPY $p0 ; Cheat: convince MIR parser that this is not SSA. + %1 = COPY $r1 ; Make sure we do not expand/predicate a mux with identical inputs. ; CHECK-NOT: A2_paddit %2 = A2_addi %1, 1 %3 = C2_mux %0, killed %2, %2 - %r0 = COPY %3 + $r0 = COPY %3 ... 
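The expand-condsets tests above split a mux, `rd = C2_mux(p, rs, rt)`, into predicated transfers (A2_tfrt/A2_tfrf), and hexagon-gen-mux (the mux-kill tests further down) performs the reverse merge. Both rest on the equivalence modeled here — a behavioral sketch only, not either pass:

static inline int c2_mux(bool P, int Rs, int Rt) { return P ? Rs : Rt; }

static inline int expandedMux(bool P, int Rs, int Rt, int Rd) {
  if (P)
    Rd = Rs;  // A2_tfrt: transfer when the predicate is true
  if (!P)
    Rd = Rt;  // A2_tfrf: transfer when the predicate is false
  return Rd;  // equals c2_mux(P, Rs, Rt) for any incoming Rd
}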
Index: test/CodeGen/Hexagon/hwloop-redef-imm.mir =================================================================== --- test/CodeGen/Hexagon/hwloop-redef-imm.mir +++ test/CodeGen/Hexagon/hwloop-redef-imm.mir @@ -40,11 +40,11 @@ - { id: 8, class: predregs } body: | bb.0.b0: - liveins: %r0 + liveins: $r0 successors: %bb.1 %0 = A2_tfrsi 0 %1 = A2_tfrsi 0 - %2 = COPY %r0 + %2 = COPY $r0 bb.1.b1: successors: %bb.1, %bb.2 @@ -56,8 +56,8 @@ ; This definition of %7 should not prevent conversion to hardware loop. %7 = A2_tfrsi 3840 %8 = C2_cmpeq %5, %7 - J2_jumpf %8, %bb.1, implicit-def %pc - J2_jump %bb.2, implicit-def %pc + J2_jumpf %8, %bb.1, implicit-def $pc + J2_jump %bb.2, implicit-def $pc bb.2.b2: ... Index: test/CodeGen/Hexagon/ifcvt-common-kill.mir =================================================================== --- test/CodeGen/Hexagon/ifcvt-common-kill.mir +++ test/CodeGen/Hexagon/ifcvt-common-kill.mir @@ -1,7 +1,7 @@ # RUN: llc -march=hexagon -run-pass if-converter -o - %s -verify-machineinstrs | FileCheck %s -# CHECK: %r26 = A2_tfr %r1 -# CHECK: S2_pstorerhf_io undef %p0, undef %r0, 0, killed %r1 +# CHECK: $r26 = A2_tfr $r1 +# CHECK: S2_pstorerhf_io undef $p0, undef $r0, 0, killed $r1 --- name: foo @@ -9,26 +9,26 @@ body: | bb.0: successors: %bb.1, %bb.2 - liveins: %r0, %r1 - J2_jumpf undef %p0, %bb.2, implicit-def %pc + liveins: $r0, $r1 + J2_jumpf undef $p0, %bb.2, implicit-def $pc bb.1: successors: %bb.3 - liveins: %r1 + liveins: $r1 ; This flag should be cleared. It didn't use to be, because ; this instruction is treated as a duplicate of the corresponding ; instruction from the "false" block bb.2. Clearing of the ; flags was limited to the non-common part of the "true" block. - %r26 = A2_tfr killed %r1 - J2_jump %bb.3, implicit-def %pc + $r26 = A2_tfr killed $r1 + J2_jump %bb.3, implicit-def $pc bb.2: successors: %bb.3 - liveins: %r1 - %r26 = A2_tfr %r1 - S2_storerh_io undef %r0, 0, killed %r1 - J2_jump %bb.3, implicit-def %pc + liveins: $r1 + $r26 = A2_tfr $r1 + S2_storerh_io undef $r0, 0, killed $r1 + J2_jump %bb.3, implicit-def $pc bb.3: - liveins: %r26 + liveins: $r26 ... Index: test/CodeGen/Hexagon/ifcvt-impuse-livein.mir =================================================================== --- test/CodeGen/Hexagon/ifcvt-impuse-livein.mir +++ test/CodeGen/Hexagon/ifcvt-impuse-livein.mir @@ -17,26 +17,26 @@ body: | bb.0: successors: %bb.1, %bb.2 - liveins: %r0, %r2, %p1 - J2_jumpf %p1, %bb.1, implicit-def %pc - J2_jump %bb.2, implicit-def %pc + liveins: $r0, $r2, $p1 + J2_jumpf $p1, %bb.1, implicit-def $pc + J2_jump %bb.2, implicit-def $pc bb.1: successors: %bb.3 - liveins: %r2 - %r0 = A2_tfrsi 2 - J2_jump %bb.3, implicit-def %pc + liveins: $r2 + $r0 = A2_tfrsi 2 + J2_jump %bb.3, implicit-def $pc bb.2: successors: %bb.3 - liveins: %r0 + liveins: $r0 ; Even though r2 was not live on entry to this block, it was live across ; block bb.1 in the original diamond. After if-conversion, the diamond ; became a single block, and so r2 is now live on entry to the instructions ; originating from bb.2. - ; CHECK: %r2 = C2_cmoveit %p1, 1, implicit killed %r2 - %r2 = A2_tfrsi 1 + ; CHECK: $r2 = C2_cmoveit $p1, 1, implicit killed $r2 + $r2 = A2_tfrsi 1 bb.3: - liveins: %r0, %r2 - %r0 = A2_add %r0, %r2 - J2_jumpr %r31, implicit-def %pc + liveins: $r0, $r2 + $r0 = A2_add $r0, $r2 + J2_jumpr $r31, implicit-def $pc ... 
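The ifcvt-impuse-livein test above expects `$r2 = C2_cmoveit $p1, 1, implicit killed $r2` because a predicated definition only overwrites its output when the predicate is true; on the false path the old value flows through and must therefore be live. A behavioral model of that semantics (sketch only):

static inline int c2_cmoveit(bool P, int Imm, int OldRd) {
  return P ? Imm : OldRd; // the old $r2 is consumed on the false path
}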
Index: test/CodeGen/Hexagon/ifcvt-live-subreg.mir =================================================================== --- test/CodeGen/Hexagon/ifcvt-live-subreg.mir +++ test/CodeGen/Hexagon/ifcvt-live-subreg.mir @@ -6,10 +6,10 @@ # Verify the predicated block: # CHECK-LABEL: bb.0: -# CHECK: liveins: %r0, %r1, %p0, %d8 -# CHECK: %d8 = A2_combinew killed %r0, killed %r1 -# CHECK: %d8 = L2_ploadrdf_io %p0, %r29, 0, implicit killed %d8 -# CHECK: J2_jumprf killed %p0, %r31, implicit-def %pc, implicit-def %pc, implicit %d8 +# CHECK: liveins: $r0, $r1, $p0, $d8 +# CHECK: $d8 = A2_combinew killed $r0, killed $r1 +# CHECK: $d8 = L2_ploadrdf_io $p0, $r29, 0, implicit killed $d8 +# CHECK: J2_jumprf killed $p0, $r31, implicit-def $pc, implicit-def $pc, implicit $d8 --- | define void @foo() { @@ -23,28 +23,28 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%r0' } - - { reg: '%r1' } - - { reg: '%p0' } - - { reg: '%d8' } + - { reg: '$r0' } + - { reg: '$r1' } + - { reg: '$p0' } + - { reg: '$d8' } body: | bb.0: successors: %bb.1, %bb.2 - liveins: %r0, %r1, %p0, %d8 - %d8 = A2_combinew killed %r0, killed %r1 - J2_jumpf killed %p0, %bb.2, implicit-def %pc + liveins: $r0, $r1, $p0, $d8 + $d8 = A2_combinew killed $r0, killed $r1 + J2_jumpf killed $p0, %bb.2, implicit-def $pc bb.1: - liveins: %r17 - %r0 = A2_tfrsi 0 - %r1 = A2_tfrsi 0 + liveins: $r17 + $r0 = A2_tfrsi 0 + $r1 = A2_tfrsi 0 A2_nop ; non-predicable - J2_jumpr killed %r31, implicit-def dead %pc, implicit killed %d0 + J2_jumpr killed $r31, implicit-def dead $pc, implicit killed $d0 bb.2: ; Predicate this block. - %d8 = L2_loadrd_io %r29, 0 - J2_jumpr killed %r31, implicit-def dead %pc, implicit killed %d8 + $d8 = L2_loadrd_io $r29, 0 + J2_jumpr killed $r31, implicit-def dead $pc, implicit killed $d8 ... Index: test/CodeGen/Hexagon/invalid-dotnew-attempt.mir =================================================================== --- test/CodeGen/Hexagon/invalid-dotnew-attempt.mir +++ test/CodeGen/Hexagon/invalid-dotnew-attempt.mir @@ -10,8 +10,8 @@ tracksRegLiveness: true body: | bb.0: - liveins: %d0 - %p0 = C2_tfrrp %r0 - J2_jumpr %r31, implicit-def %pc, implicit %p0 + liveins: $d0 + $p0 = C2_tfrrp $r0 + J2_jumpr $r31, implicit-def $pc, implicit $p0 ... Index: test/CodeGen/Hexagon/livephysregs-add-pristines.mir =================================================================== --- test/CodeGen/Hexagon/livephysregs-add-pristines.mir +++ test/CodeGen/Hexagon/livephysregs-add-pristines.mir @@ -2,7 +2,7 @@ # The register r23 is live on the path bb.0->bb.2->bb.3. Make sure we add # an implicit use of r23 to the predicated redefinition: -# CHECK: %r23 = A2_tfrt killed %p0, killed %r1, implicit killed %r23 +# CHECK: $r23 = A2_tfrt killed $p0, killed $r1, implicit killed $r23 # LivePhysRegs::addPristines could accidentally remove a callee-saved # register, if it determined that it wasn't pristine. 
Doing that caused @@ -13,25 +13,25 @@ name: foo tracksRegLiveness: true fixedStack: - - { id: 0, offset: 0, size: 4, alignment: 4, callee-saved-register: '%r23' } + - { id: 0, offset: 0, size: 4, alignment: 4, callee-saved-register: '$r23' } body: | bb.0: successors: %bb.1, %bb.2 - liveins: %r0, %r1, %r23 - %p0 = C2_cmpgti killed %r0, 0 - J2_jumpf killed %p0, %bb.2, implicit-def %pc + liveins: $r0, $r1, $r23 + $p0 = C2_cmpgti killed $r0, 0 + J2_jumpf killed $p0, %bb.2, implicit-def $pc bb.1: successors: %bb.3 - liveins: %r1 - %r23 = A2_tfr killed %r1 - J2_jump %bb.3, implicit-def %pc + liveins: $r1 + $r23 = A2_tfr killed $r1 + J2_jump %bb.3, implicit-def $pc bb.2: successors: %bb.3 - liveins: %r1, %r23 - %r0 = A2_tfr %r1 + liveins: $r1, $r23 + $r0 = A2_tfr $r1 bb.3: - liveins: %r23 + liveins: $r23 ... Index: test/CodeGen/Hexagon/livephysregs-lane-masks.mir =================================================================== --- test/CodeGen/Hexagon/livephysregs-lane-masks.mir +++ test/CodeGen/Hexagon/livephysregs-lane-masks.mir @@ -1,9 +1,9 @@ # RUN: llc -march=hexagon -run-pass if-converter -verify-machineinstrs -o - %s | FileCheck %s # CHECK-LABEL: name: foo -# CHECK: %p0 = C2_cmpeqi %r16, 0 +# CHECK: $p0 = C2_cmpeqi $r16, 0 # Make sure there is no implicit use of r1. -# CHECK: %r1 = L2_ploadruhf_io %p0, %r29, 6 +# CHECK: $r1 = L2_ploadruhf_io $p0, $r29, 6 --- | define void @foo() { @@ -18,23 +18,23 @@ body: | bb.0: - liveins: %r16 + liveins: $r16 successors: %bb.1, %bb.2 - %p0 = C2_cmpeqi %r16, 0 - J2_jumpt %p0, %bb.2, implicit-def %pc + $p0 = C2_cmpeqi $r16, 0 + J2_jumpt $p0, %bb.2, implicit-def $pc bb.1: - ; The lane mask %d0:0002 is equivalent to %r0. LivePhysRegs would ignore - ; it and treat it as the whole %d0, which is a pair %r1, %r0. The extra - ; %r1 would cause an (undefined) implicit use to be added during + ; The lane mask $d0:0002 is equivalent to $r0. LivePhysRegs would ignore + ; it and treat it as the whole $d0, which is a pair $r1, $r0. The extra + ; $r1 would cause an (undefined) implicit use to be added during ; if-conversion. 
- liveins: %d0:0x00000002, %d15:0x00000001, %r16 + liveins: $d0:0x00000002, $d15:0x00000001, $r16 successors: %bb.2 - %r1 = L2_loadruh_io %r29, 6 - S2_storeri_io killed %r16, 0, %r1 + $r1 = L2_loadruh_io $r29, 6 + S2_storeri_io killed $r16, 0, $r1 bb.2: - liveins: %r0 - %d8 = L2_loadrd_io %r29, 8 - %d15 = L4_return %r29, implicit-def %r29, implicit-def %pc, implicit %r30, implicit %framekey + liveins: $r0 + $d8 = L2_loadrd_io $r29, 8 + $d15 = L4_return $r29, implicit-def $r29, implicit-def $pc, implicit $r30, implicit $framekey Index: test/CodeGen/Hexagon/livephysregs-lane-masks2.mir =================================================================== --- test/CodeGen/Hexagon/livephysregs-lane-masks2.mir +++ test/CodeGen/Hexagon/livephysregs-lane-masks2.mir @@ -13,27 +13,27 @@ body: | bb.0: - liveins: %p0:0x1, %p2, %r0 + liveins: $p0:0x1, $p2, $r0 successors: %bb.1, %bb.2 - J2_jumpt killed %p2, %bb.1, implicit-def %pc - J2_jump %bb.2, implicit-def %pc + J2_jumpt killed $p2, %bb.1, implicit-def $pc + J2_jump %bb.2, implicit-def $pc bb.1: - liveins: %p0:0x1, %r0, %r19 + liveins: $p0:0x1, $r0, $r19 successors: %bb.3 - %r2 = A2_tfrsi 4 - %r1 = COPY %r19 - %r0 = S2_asl_r_r killed %r0, killed %r2 - %r0 = A2_asrh killed %r0 - J2_jump %bb.3, implicit-def %pc + $r2 = A2_tfrsi 4 + $r1 = COPY $r19 + $r0 = S2_asl_r_r killed $r0, killed $r2 + $r0 = A2_asrh killed $r0 + J2_jump %bb.3, implicit-def $pc bb.2: - liveins: %p0:0x1, %r0, %r18 + liveins: $p0:0x1, $r0, $r18 successors: %bb.3 - %r2 = A2_tfrsi 5 - %r1 = L2_loadrh_io %r18, 0 - %r0 = S2_asl_r_r killed %r0, killed %r2 - %r0 = A2_asrh killed %r0 + $r2 = A2_tfrsi 5 + $r1 = L2_loadrh_io $r18, 0 + $r0 = S2_asl_r_r killed $r0, killed $r2 + $r0 = A2_asrh killed $r0 bb.3: ; A live-in register without subregs, but with a lane mask that is not ~0 @@ -41,15 +41,15 @@ ; (through tail merging). ; ; CHECK: bb.3: - ; CHECK: liveins:{{.*}}%p0 - ; CHECK: %r0 = S2_asl_r_r killed %r0, killed %r2 - ; CHECK: %r0 = A2_asrh killed %r0 - ; CHECK: %r0 = C2_cmoveit killed %p0, 1 - ; CHECK: J2_jumpr %r31, implicit-def %pc, implicit %r0 + ; CHECK: liveins:{{.*}}$p0 + ; CHECK: $r0 = S2_asl_r_r killed $r0, killed $r2 + ; CHECK: $r0 = A2_asrh killed $r0 + ; CHECK: $r0 = C2_cmoveit killed $p0, 1 + ; CHECK: J2_jumpr $r31, implicit-def $pc, implicit $r0 ; - liveins: %p0:0x1 - %r0 = C2_cmoveit killed %p0, 1 - J2_jumpr %r31, implicit-def %pc, implicit %r0 + liveins: $p0:0x1 + $r0 = C2_cmoveit killed $p0, 1 + J2_jumpr $r31, implicit-def $pc, implicit $r0 ... Index: test/CodeGen/Hexagon/mux-kill1.mir =================================================================== --- test/CodeGen/Hexagon/mux-kill1.mir +++ test/CodeGen/Hexagon/mux-kill1.mir @@ -1,15 +1,15 @@ # RUN: llc -march=hexagon -run-pass hexagon-gen-mux -o - %s -verify-machineinstrs | FileCheck %s -# CHECK: %r2 = C2_mux killed %p0, killed %r0, %r1 +# CHECK: $r2 = C2_mux killed $p0, killed $r0, $r1 --- name: fred tracksRegLiveness: true body: | bb.0: - liveins: %d0, %p0 + liveins: $d0, $p0 - %r2 = A2_tfrt %p0, %r0 - %r0 = A2_tfr %r1 - %r2 = A2_tfrf %p0, killed %r1 + $r2 = A2_tfrt $p0, $r0 + $r0 = A2_tfr $r1 + $r2 = A2_tfrf $p0, killed $r1 ... 
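The livephysregs-lane-masks tests above hinge on a live-in like `$d0:0x00000002` naming only the lanes of $d0 covered by the mask ($r0 here), not the whole $r1:$r0 pair. A toy model of mask-aware liveness; the types and the lane assignment for $r1 are assumptions for illustration, not LLVM's LivePhysRegs/LaneBitmask API:

#include <cstdint>
#include <set>
#include <string>
#include <vector>

struct SubReg { std::string Name; uint32_t LaneMask; };

void addLiveIn(std::set<std::string> &Live, uint32_t Mask,
               const std::vector<SubReg> &Subs) {
  for (const SubReg &S : Subs)
    if (S.LaneMask & Mask)  // only lanes covered by the mask become live
      Live.insert(S.Name);
}

int main() {
  std::set<std::string> Live;
  // "$d0:0x00000002" with the assumed lane assignment $r0=0x2, $r1=0x1:
  // only $r0 becomes live; ignoring the mask would wrongly add $r1 too.
  addLiveIn(Live, 0x2, {{"$r0", 0x2}, {"$r1", 0x1}});
  return Live.count("$r1"); // 0: $r1 stays dead
}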
Index: test/CodeGen/Hexagon/mux-kill2.mir =================================================================== --- test/CodeGen/Hexagon/mux-kill2.mir +++ test/CodeGen/Hexagon/mux-kill2.mir @@ -1,17 +1,17 @@ # RUN: llc -march=hexagon -run-pass hexagon-gen-mux -o - -verify-machineinstrs %s | FileCheck %s -# CHECK: %r1 = C2_muxri %p0, 123, %r0 -# CHECK: %r2 = C2_muxir killed %p0, killed %r0, 321 +# CHECK: $r1 = C2_muxri $p0, 123, $r0 +# CHECK: $r2 = C2_muxir killed $p0, killed $r0, 321 --- name: fred tracksRegLiveness: true body: | bb.0: - liveins: %r0, %p0 + liveins: $r0, $p0 - %r2 = A2_tfrt %p0, %r0 - %r1 = C2_cmoveit %p0, 123 - %r1 = A2_tfrf %p0, killed %r0, implicit killed %r1 - %r2 = C2_cmoveif killed %p0, 321, implicit killed %r2 + $r2 = A2_tfrt $p0, $r0 + $r1 = C2_cmoveit $p0, 123 + $r1 = A2_tfrf $p0, killed $r0, implicit killed $r1 + $r2 = C2_cmoveif killed $p0, 321, implicit killed $r2 ... Index: test/CodeGen/Hexagon/mux-kill3.mir =================================================================== --- test/CodeGen/Hexagon/mux-kill3.mir +++ test/CodeGen/Hexagon/mux-kill3.mir @@ -1,31 +1,31 @@ # RUN: llc -march=hexagon -run-pass hexagon-gen-mux -o - %s -verify-machineinstrs | FileCheck %s # Make sure this verifies correctly. -# CHECK: PS_jmpret killed %r31, implicit-def %pc +# CHECK: PS_jmpret killed $r31, implicit-def $pc --- name: fred tracksRegLiveness: true body: | bb.0: - liveins: %d0, %d1, %d2, %d3 + liveins: $d0, $d1, $d2, $d3 - %p0 = C2_cmpeqi killed %r4, 128 - %d4 = A2_tfrpi 0 - %r3 = A2_tfrsi 0 - %r4 = A2_tfrsi 0 - %r7 = A2_tfrt %p0, %r0 - %p1 = C2_cmpeqp %d0, killed %d4 - %r8 = A2_tfrt %p0, killed %r0 - %r9 = A2_tfrt %p0, killed %r1 - %r7 = A2_tfrf %p0, %r3, implicit killed %r7 - %r9 = A2_tfrf %p0, killed %r3, implicit killed %r9 - %r8 = C2_cmoveif killed %p0, 1, implicit killed %r8 - %d0 = A4_combineri killed %r4, 0 - %r2 = A2_tfrt %p1, killed %r7, implicit killed %r2 - %r3 = A2_tfr killed %r9 - %r2 = A2_tfrf killed %p1, killed %r8, implicit killed %r2 - S2_storerd_io killed %r6, 0, killed %d1 - S2_storerd_io killed %r5, 0, killed %d0 - PS_jmpret %r31, implicit-def %pc + $p0 = C2_cmpeqi killed $r4, 128 + $d4 = A2_tfrpi 0 + $r3 = A2_tfrsi 0 + $r4 = A2_tfrsi 0 + $r7 = A2_tfrt $p0, $r0 + $p1 = C2_cmpeqp $d0, killed $d4 + $r8 = A2_tfrt $p0, killed $r0 + $r9 = A2_tfrt $p0, killed $r1 + $r7 = A2_tfrf $p0, $r3, implicit killed $r7 + $r9 = A2_tfrf $p0, killed $r3, implicit killed $r9 + $r8 = C2_cmoveif killed $p0, 1, implicit killed $r8 + $d0 = A4_combineri killed $r4, 0 + $r2 = A2_tfrt $p1, killed $r7, implicit killed $r2 + $r3 = A2_tfr killed $r9 + $r2 = A2_tfrf killed $p1, killed $r8, implicit killed $r2 + S2_storerd_io killed $r6, 0, killed $d1 + S2_storerd_io killed $r5, 0, killed $d0 + PS_jmpret $r31, implicit-def $pc ... Index: test/CodeGen/Hexagon/newvaluejump-c4.mir =================================================================== --- test/CodeGen/Hexagon/newvaluejump-c4.mir +++ test/CodeGen/Hexagon/newvaluejump-c4.mir @@ -2,46 +2,46 @@ --- # CHECK-LABEL: name: test0 -# CHECK: J4_cmpeqi_f_jumpnv_t killed %r1, 0 +# CHECK: J4_cmpeqi_f_jumpnv_t killed $r1, 0 name: test0 tracksRegLiveness: true body: | bb.0: - liveins: %r0 - %r1 = A2_addi %r0, -1 - %p0 = C4_cmpneqi killed %r1, 0 - J2_jumpt killed %p0, %bb.1, implicit-def %pc + liveins: $r0 + $r1 = A2_addi $r0, -1 + $p0 = C4_cmpneqi killed $r1, 0 + J2_jumpt killed $p0, %bb.1, implicit-def $pc bb.1: ... 
--- # CHECK-LABEL: name: test1 -# CHECK: J4_cmpgti_f_jumpnv_t killed %r1, 27 +# CHECK: J4_cmpgti_f_jumpnv_t killed $r1, 27 name: test1 tracksRegLiveness: true body: | bb.0: - liveins: %r0 - %r1 = A2_addi %r0, -1 - %p0 = C4_cmpltei killed %r1, 27 - J2_jumpt killed %p0, %bb.1, implicit-def %pc + liveins: $r0 + $r1 = A2_addi $r0, -1 + $p0 = C4_cmpltei killed $r1, 27 + J2_jumpt killed $p0, %bb.1, implicit-def $pc bb.1: ... --- # CHECK-LABEL: name: test2 -# CHECK: J4_cmpgtui_f_jumpnv_t killed %r1, 31 +# CHECK: J4_cmpgtui_f_jumpnv_t killed $r1, 31 name: test2 tracksRegLiveness: true body: | bb.0: - liveins: %r0 - %r1 = A2_addi %r0, -1 - %p0 = C4_cmplteui killed %r1, 31 - J2_jumpt killed %p0, %bb.1, implicit-def %pc + liveins: $r0 + $r1 = A2_addi $r0, -1 + $p0 = C4_cmplteui killed $r1, 31 + J2_jumpt killed $p0, %bb.1, implicit-def $pc bb.1: ... Index: test/CodeGen/Hexagon/newvaluejump-kill2.mir =================================================================== --- test/CodeGen/Hexagon/newvaluejump-kill2.mir +++ test/CodeGen/Hexagon/newvaluejump-kill2.mir @@ -1,5 +1,5 @@ # RUN: llc -march=hexagon -run-pass hexagon-nvj -verify-machineinstrs %s -o - | FileCheck %s -# CHECK: J4_cmpgtu_t_jumpnv_t killed %r3, killed %r1, %bb.1, implicit-def %pc +# CHECK: J4_cmpgtu_t_jumpnv_t killed $r3, killed $r1, %bb.1, implicit-def $pc --- name: fred @@ -7,12 +7,12 @@ body: | bb.0: - liveins: %r0 - %r1 = A2_addi %r0, -1 - %r2 = A2_tfrsi -1431655765 - %r3 = A2_tfrsi 2 - %p0 = C2_cmpgtu killed %r3, %r1 - %r2 = S4_subaddi killed %r1, 1, killed %r2 - J2_jumpt killed %p0, %bb.1, implicit-def %pc + liveins: $r0 + $r1 = A2_addi $r0, -1 + $r2 = A2_tfrsi -1431655765 + $r3 = A2_tfrsi 2 + $p0 = C2_cmpgtu killed $r3, $r1 + $r2 = S4_subaddi killed $r1, 1, killed $r2 + J2_jumpt killed $p0, %bb.1, implicit-def $pc bb.1: ... Index: test/CodeGen/Hexagon/newvaluejump-solo.mir =================================================================== --- test/CodeGen/Hexagon/newvaluejump-solo.mir +++ test/CodeGen/Hexagon/newvaluejump-solo.mir @@ -10,10 +10,10 @@ body: | bb.0: successors: %bb.1 - %r0 = A2_tfrsi 0 - %r0 = V6_extractw killed undef %v0, %r0 - %p0 = C2_cmpeqi killed %r0, 1 - J2_jumpf killed %p0, %bb.1, implicit-def %pc + $r0 = A2_tfrsi 0 + $r0 = V6_extractw killed undef $v0, $r0 + $p0 = C2_cmpeqi killed $r0, 1 + J2_jumpf killed $p0, %bb.1, implicit-def $pc bb.1: ... Index: test/CodeGen/Hexagon/packetize-load-store-aliasing.mir =================================================================== --- test/CodeGen/Hexagon/packetize-load-store-aliasing.mir +++ test/CodeGen/Hexagon/packetize-load-store-aliasing.mir @@ -14,9 +14,9 @@ - { id: 1, type: default, size: 4, alignment: 4 } body: | bb.0: - liveins: %r0 - S2_storeri_io %r29, 0, %r0 :: (store 4 into %stack.0) - %r1 = L2_loadri_io %r29, 4 :: (load 4 from %stack.1) + liveins: $r0 + S2_storeri_io $r29, 0, $r0 :: (store 4 into %stack.0) + $r1 = L2_loadri_io $r29, 4 :: (load 4 from %stack.1) ... @@ -24,8 +24,8 @@ # if these instructions are aliased. 
# CHECK-LABEL: name: sammy # CHECK-NOT: BUNDLE -# CHECK: S2_storeri_io %r29, 0, %r0 -# CHECK: %r1 = L2_loadri_io %r29, 0 +# CHECK: S2_storeri_io $r29, 0, $r0 +# CHECK: $r1 = L2_loadri_io $r29, 0 --- name: sammy @@ -34,8 +34,8 @@ - { id: 0, type: default, size: 4, alignment: 4 } body: | bb.0: - liveins: %r0 - S2_storeri_io %r29, 0, %r0 :: (store 4 into %stack.0) - %r1 = L2_loadri_io %r29, 0 :: (load 4 from %stack.0) + liveins: $r0 + S2_storeri_io $r29, 0, $r0 :: (store 4 into %stack.0) + $r1 = L2_loadri_io $r29, 0 :: (load 4 from %stack.0) ... Index: test/CodeGen/Hexagon/packetize-nvj-no-prune.mir =================================================================== --- test/CodeGen/Hexagon/packetize-nvj-no-prune.mir +++ test/CodeGen/Hexagon/packetize-nvj-no-prune.mir @@ -7,8 +7,8 @@ # CHECK-LABEL: name: fred # CHECK: BUNDLE -# CHECK-NEXT: %r3 = L2_loadri_io %r1, 0 -# CHECK-NEXT: J4_cmpgtu_f_jumpnv_t internal killed %r3 +# CHECK-NEXT: $r3 = L2_loadri_io $r1, 0 +# CHECK-NEXT: J4_cmpgtu_f_jumpnv_t internal killed $r3 --- | @@ -22,10 +22,10 @@ body: | bb.0: successors: %bb.1 - %r1 = A2_tfrsi @array - %r2, %r1 = L2_loadri_pi %r1, 4 - %r3 = L2_loadri_io %r1, 0 - J4_cmpgtu_f_jumpnv_t killed %r3, killed %r2, %bb.1, implicit-def %pc + $r1 = A2_tfrsi @array + $r2, $r1 = L2_loadri_pi $r1, 4 + $r3 = L2_loadri_io $r1, 0 + J4_cmpgtu_f_jumpnv_t killed $r3, killed $r2, %bb.1, implicit-def $pc bb.1: ... Index: test/CodeGen/Hexagon/post-ra-kill-update.mir =================================================================== --- test/CodeGen/Hexagon/post-ra-kill-update.mir +++ test/CodeGen/Hexagon/post-ra-kill-update.mir @@ -6,8 +6,8 @@ # CHECK-LABEL: name: foo # Check for no-kill of r9 in the first instruction, after reordering: -# CHECK: %d7 = S2_lsr_r_p_or killed %d7, killed %d1, %r9 -# CHECK: %d13 = S2_lsr_r_p killed %d0, killed %r9 +# CHECK: $d7 = S2_lsr_r_p_or killed $d7, killed $d1, $r9 +# CHECK: $d13 = S2_lsr_r_p killed $d0, killed $r9 --- | define void @foo() { @@ -21,15 +21,15 @@ body: | bb.0: successors: %bb.1 - liveins: %d0, %d1, %r9, %r13 + liveins: $d0, $d1, $r9, $r13 - %d7 = S2_asl_r_p %d0, %r13 - %d5 = S2_asl_r_p %d1, killed %r13 - %d6 = S2_lsr_r_p killed %d0, %r9 - %d7 = S2_lsr_r_p_or killed %d7, killed %d1, killed %r9 - %d1 = A2_combinew killed %r11, killed %r10 - %d0 = A2_combinew killed %r15, killed %r14 - J2_jump %bb.1, implicit-def %pc + $d7 = S2_asl_r_p $d0, $r13 + $d5 = S2_asl_r_p $d1, killed $r13 + $d6 = S2_lsr_r_p killed $d0, $r9 + $d7 = S2_lsr_r_p_or killed $d7, killed $d1, killed $r9 + $d1 = A2_combinew killed $r11, killed $r10 + $d0 = A2_combinew killed $r15, killed $r14 + J2_jump %bb.1, implicit-def $pc bb.1: A2_nop Index: test/CodeGen/Hexagon/postinc-baseoffset.mir =================================================================== --- test/CodeGen/Hexagon/postinc-baseoffset.mir +++ test/CodeGen/Hexagon/postinc-baseoffset.mir @@ -17,6 +17,6 @@ body: | bb.0: - liveins: %r0 - S4_storeiri_io %r0, 0, -1 :: (store 4 into %ir.a) - %r1, %r0 = L2_loadri_pi %r0, 8 :: (load 4 from %ir.a) + liveins: $r0 + S4_storeiri_io $r0, 0, -1 :: (store 4 into %ir.a) + $r1, $r0 = L2_loadri_pi $r0, 8 :: (load 4 from %ir.a) Index: test/CodeGen/Hexagon/rdf-ehlabel-live.mir =================================================================== --- test/CodeGen/Hexagon/rdf-ehlabel-live.mir +++ test/CodeGen/Hexagon/rdf-ehlabel-live.mir @@ -11,7 +11,7 @@ body: | bb.0: - %r0 = A2_tfrsi 0 + $r0 = A2_tfrsi 0 EH_LABEL 0 ... 
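The Hexagon hunks above, and every test hunk that follows, apply one mechanical rule: physical registers such as %r0, %d7, and %pc become $r0, $d7, and $pc, while virtual registers (%0, %29), block references (%bb.1), and the stack/IR names inside memory operands (%stack.0, %ir.a) keep their % sigil. As an illustration only (a minimal sketch, not the tool used to produce this patch), the rewrite can be expressed in a few lines of Python; the KEEP set below is an assumption inferred from the %-prefixed entities visible in these tests and is not claimed to be exhaustive.

    import re
    import sys

    # Non-register '%' entities that keep their sigil in these tests:
    # basic blocks (%bb.1) and the names used in memory operands
    # (%stack.0, %ir.a). Assumed from the hunks here, not exhaustive.
    KEEP = {"bb", "stack", "ir", "subreg", "const"}

    # Physical register names start with a letter; virtual registers are
    # numeric (%0, %29) and so never match. A '.' ends the match, which is
    # why %bb.1 is compared against KEEP as just "bb".
    PHYSREG = re.compile(r"%([a-z][a-zA-Z0-9_]*)")

    def convert(line: str) -> str:
        def repl(m: re.Match) -> str:
            name = m.group(1)
            return m.group(0) if name in KEEP else "$" + name
        return PHYSREG.sub(repl, line)

    if __name__ == "__main__":
        for line in sys.stdin:
            sys.stdout.write(convert(line))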
Index: test/CodeGen/Hexagon/regalloc-bad-undef.mir =================================================================== --- test/CodeGen/Hexagon/regalloc-bad-undef.mir +++ test/CodeGen/Hexagon/regalloc-bad-undef.mir @@ -153,51 +153,51 @@ %13 = S2_asl_r_p_acc %13, %47, %8.isub_lo %51 = A2_tfrpi 0 - ; CHECK: %d2 = S2_extractup undef renamable %d0, 6, 25 - ; CHECK: %d0 = A2_tfrpi 2 - ; CHECK: %d13 = A2_tfrpi -1 - ; CHECK-NOT: undef %r4 + ; CHECK: $d2 = S2_extractup undef renamable $d0, 6, 25 + ; CHECK: $d0 = A2_tfrpi 2 + ; CHECK: $d13 = A2_tfrpi -1 + ; CHECK-NOT: undef $r4 bb.1.for.body: successors: %bb.3.for.end, %bb.2.if.end82 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29 - J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3, implicit-def %r0 - ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29 - undef %29.isub_lo = COPY killed %r0 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + J2_call @lrand48, implicit-def dead $d0, implicit-def dead $d1, implicit-def dead $d2, implicit-def dead $d3, implicit-def dead $d4, implicit-def dead $d5, implicit-def dead $d6, implicit-def dead $d7, implicit-def dead $r28, implicit-def dead $r31, implicit-def dead $p0, implicit-def dead $p1, implicit-def dead $p2, implicit-def dead $p3, implicit-def dead $m0, implicit-def dead $m1, implicit-def dead $lc0, implicit-def dead $lc1, implicit-def dead $sa0, implicit-def dead $sa1, implicit-def dead $usr, implicit-def $usr_ovf, implicit-def dead $cs0, implicit-def dead $cs1, implicit-def dead $w0, implicit-def dead $w1, implicit-def dead $w2, implicit-def dead $w3, implicit-def dead $w4, implicit-def dead $w5, implicit-def dead $w6, implicit-def dead $w7, implicit-def dead $w8, implicit-def dead $w9, implicit-def dead $w10, implicit-def dead $w11, implicit-def dead $w12, implicit-def dead $w13, implicit-def dead $w14, implicit-def dead $w15, implicit-def dead $q0, implicit-def dead $q1, implicit-def dead $q2, implicit-def dead $q3, implicit-def $r0 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 + undef %29.isub_lo = COPY killed $r0 %29.isub_hi = S2_asr_i_r %29.isub_lo, 31 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29 - J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead 
%d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3, implicit-def %r0 - ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29 - %32.isub_lo = COPY killed %r0 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + J2_call @lrand48, implicit-def dead $d0, implicit-def dead $d1, implicit-def dead $d2, implicit-def dead $d3, implicit-def dead $d4, implicit-def dead $d5, implicit-def dead $d6, implicit-def dead $d7, implicit-def dead $r28, implicit-def dead $r31, implicit-def dead $p0, implicit-def dead $p1, implicit-def dead $p2, implicit-def dead $p3, implicit-def dead $m0, implicit-def dead $m1, implicit-def dead $lc0, implicit-def dead $lc1, implicit-def dead $sa0, implicit-def dead $sa1, implicit-def dead $usr, implicit-def $usr_ovf, implicit-def dead $cs0, implicit-def dead $cs1, implicit-def dead $w0, implicit-def dead $w1, implicit-def dead $w2, implicit-def dead $w3, implicit-def dead $w4, implicit-def dead $w5, implicit-def dead $w6, implicit-def dead $w7, implicit-def dead $w8, implicit-def dead $w9, implicit-def dead $w10, implicit-def dead $w11, implicit-def dead $w12, implicit-def dead $w13, implicit-def dead $w14, implicit-def dead $w15, implicit-def dead $q0, implicit-def dead $q1, implicit-def dead $q2, implicit-def dead $q3, implicit-def $r0 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 + %32.isub_lo = COPY killed $r0 %7 = S2_extractup %32, 22, 9 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29 - J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, 
implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3, implicit-def %r0 - ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29 - undef %43.isub_lo = COPY killed %r0 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + J2_call @lrand48, implicit-def dead $d0, implicit-def dead $d1, implicit-def dead $d2, implicit-def dead $d3, implicit-def dead $d4, implicit-def dead $d5, implicit-def dead $d6, implicit-def dead $d7, implicit-def dead $r28, implicit-def dead $r31, implicit-def dead $p0, implicit-def dead $p1, implicit-def dead $p2, implicit-def dead $p3, implicit-def dead $m0, implicit-def dead $m1, implicit-def dead $lc0, implicit-def dead $lc1, implicit-def dead $sa0, implicit-def dead $sa1, implicit-def dead $usr, implicit-def $usr_ovf, implicit-def dead $cs0, implicit-def dead $cs1, implicit-def dead $w0, implicit-def dead $w1, implicit-def dead $w2, implicit-def dead $w3, implicit-def dead $w4, implicit-def dead $w5, implicit-def dead $w6, implicit-def dead $w7, implicit-def dead $w8, implicit-def dead $w9, implicit-def dead $w10, implicit-def dead $w11, implicit-def dead $w12, implicit-def dead $w13, implicit-def dead $w14, implicit-def dead $w15, implicit-def dead $q0, implicit-def dead $q1, implicit-def dead $q2, implicit-def dead $q3, implicit-def $r0 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 + undef %43.isub_lo = COPY killed $r0 %43.isub_hi = COPY %32.isub_hi %16 = S2_extractup %43, 6, 25 %18 = A2_tfrpi -1 %18 = S2_asl_r_p_acc %18, %47, %16.isub_lo - ADJCALLSTACKDOWN 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit %r31, implicit %r30, implicit %r29 - J2_call @lrand48, implicit-def dead %d0, implicit-def dead %d1, implicit-def dead %d2, implicit-def dead %d3, implicit-def dead %d4, implicit-def dead %d5, implicit-def dead %d6, implicit-def dead %d7, implicit-def dead %r28, implicit-def dead %r31, implicit-def dead %p0, implicit-def dead %p1, implicit-def dead %p2, implicit-def dead %p3, implicit-def dead %m0, implicit-def dead %m1, implicit-def dead %lc0, implicit-def dead %lc1, implicit-def dead %sa0, implicit-def dead %sa1, implicit-def dead %usr, implicit-def %usr_ovf, implicit-def dead %cs0, implicit-def dead %cs1, implicit-def dead %w0, implicit-def dead %w1, implicit-def dead %w2, implicit-def dead %w3, implicit-def dead %w4, implicit-def dead %w5, implicit-def dead %w6, implicit-def dead %w7, implicit-def dead %w8, implicit-def dead %w9, implicit-def dead %w10, implicit-def dead %w11, implicit-def dead %w12, implicit-def dead %w13, implicit-def dead %w14, implicit-def dead %w15, implicit-def dead %q0, implicit-def dead %q1, implicit-def dead %q2, implicit-def dead %q3 - ADJCALLSTACKUP 0, 0, implicit-def dead %r29, implicit-def dead %r30, implicit-def dead %r31, implicit %r29 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + J2_call @lrand48, implicit-def dead $d0, implicit-def dead $d1, implicit-def dead $d2, implicit-def dead $d3, implicit-def dead $d4, implicit-def dead $d5, implicit-def dead $d6, implicit-def dead $d7, implicit-def dead $r28, implicit-def dead $r31, implicit-def dead $p0, implicit-def dead $p1, implicit-def dead $p2, implicit-def dead $p3, implicit-def dead $m0, implicit-def dead $m1, implicit-def dead $lc0, implicit-def dead $lc1, implicit-def dead $sa0, implicit-def dead 
$sa1, implicit-def dead $usr, implicit-def $usr_ovf, implicit-def dead $cs0, implicit-def dead $cs1, implicit-def dead $w0, implicit-def dead $w1, implicit-def dead $w2, implicit-def dead $w3, implicit-def dead $w4, implicit-def dead $w5, implicit-def dead $w6, implicit-def dead $w7, implicit-def dead $w8, implicit-def dead $w9, implicit-def dead $w10, implicit-def dead $w11, implicit-def dead $w12, implicit-def dead $w13, implicit-def dead $w14, implicit-def dead $w15, implicit-def dead $q0, implicit-def dead $q1, implicit-def dead $q2, implicit-def dead $q3 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 %22 = S2_asl_r_p %18, %8.isub_lo %21 = COPY %13 %21 = S2_lsr_i_p_and %21, %29, 9 %22 = S2_asl_i_p_and %22, %7, 42 S2_storerd_io undef %23, 0, %22 :: (store 8 into `i64* undef`) %25 = C2_cmpeqp %21, %51 - J2_jumpt %25, %bb.3.for.end, implicit-def dead %pc - J2_jump %bb.2.if.end82, implicit-def dead %pc + J2_jumpt %25, %bb.3.for.end, implicit-def dead $pc + J2_jump %bb.2.if.end82, implicit-def dead $pc bb.2.if.end82: successors: %bb.3.for.end, %bb.1.for.body %59 = A2_addi %59, -1 %26 = C2_cmpeqi %59, 0 - J2_jumpf %26, %bb.1.for.body, implicit-def dead %pc - J2_jump %bb.3.for.end, implicit-def dead %pc + J2_jumpf %26, %bb.1.for.body, implicit-def dead $pc + J2_jump %bb.3.for.end, implicit-def dead $pc bb.3.for.end: Index: test/CodeGen/Hexagon/regalloc-liveout-undef.mir =================================================================== --- test/CodeGen/Hexagon/regalloc-liveout-undef.mir +++ test/CodeGen/Hexagon/regalloc-liveout-undef.mir @@ -19,10 +19,10 @@ - { id: 3, class: doubleregs } body: | bb.0: - liveins: %d0 + liveins: $d0 successors: %bb.1 %0 = IMPLICIT_DEF - %1 = COPY %d0 + %1 = COPY $d0 bb.1: successors: %bb.1 @@ -30,5 +30,5 @@ %3 = COPY %1 %1 = COPY %3 undef %1.isub_lo = A2_addi %1.isub_lo, 1 - J2_jump %bb.1, implicit-def %pc + J2_jump %bb.1, implicit-def $pc ... Index: test/CodeGen/Hexagon/target-flag-ext.mir =================================================================== --- test/CodeGen/Hexagon/target-flag-ext.mir +++ test/CodeGen/Hexagon/target-flag-ext.mir @@ -13,12 +13,12 @@ ; testing this is not possible otherwise. ; CHECK: BUNDLE - ; CHECK-DAG: %r0 = A2_tfrsi - ; CHECK-DAG: %r1 = A2_tfrsi - ; CHECK-DAG: %r2 = A2_tfrsi + ; CHECK-DAG: $r0 = A2_tfrsi + ; CHECK-DAG: $r1 = A2_tfrsi + ; CHECK-DAG: $r2 = A2_tfrsi ; CHECK: } - %r0 = A2_tfrsi target-flags (hexagon-pcrel) 0 - %r1 = A2_tfrsi target-flags (hexagon-pcrel) 0 - %r2 = A2_tfrsi target-flags (hexagon-pcrel) 0 + $r0 = A2_tfrsi target-flags (hexagon-pcrel) 0 + $r1 = A2_tfrsi target-flags (hexagon-pcrel) 0 + $r2 = A2_tfrsi target-flags (hexagon-pcrel) 0 ... Index: test/CodeGen/Hexagon/unreachable-mbb-phi-subreg.mir =================================================================== --- test/CodeGen/Hexagon/unreachable-mbb-phi-subreg.mir +++ test/CodeGen/Hexagon/unreachable-mbb-phi-subreg.mir @@ -5,11 +5,11 @@ tracksRegLiveness: true body: | bb.0: - liveins: %d0 + liveins: $d0 successors: %bb.2 - %0 : doubleregs = COPY %d0 - J2_jump %bb.2, implicit-def %pc + %0 : doubleregs = COPY $d0 + J2_jump %bb.2, implicit-def $pc bb.1: successors: %bb.2 @@ -18,7 +18,7 @@ bb.2: ; Make sure that the subregister from the PHI operand is preserved. ; CHECK: %[[REG:[0-9]+]]:intregs = COPY %0.isub_lo - ; CHECK: %r0 = COPY %[[REG]] + ; CHECK: $r0 = COPY %[[REG]] %1 : intregs = PHI %0.isub_lo, %bb.0, %0.isub_hi, %bb.1 - %r0 = COPY %1 + $r0 = COPY %1 ... 
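The regalloc and PHI-subreg tests above are the stress case for that rule: a single operand list can mix virtual registers, block references, and physical registers. Applying the sketch shown earlier to two lines from these hunks demonstrates the split (doctest-style, under the same assumptions):

    >>> convert("undef %29.isub_lo = COPY killed %r0")
    'undef %29.isub_lo = COPY killed $r0'
    >>> convert("J2_jumpt %25, %bb.3.for.end, implicit-def dead %pc")
    'J2_jumpt %25, %bb.3.for.end, implicit-def dead $pc'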
Index: test/CodeGen/Hexagon/vextract-basic.mir =================================================================== --- test/CodeGen/Hexagon/vextract-basic.mir +++ test/CodeGen/Hexagon/vextract-basic.mir @@ -6,10 +6,10 @@ body: | bb.0: - liveins: %r0, %r1, %v0 - %0:hvxvr = COPY %v0 - %1:intregs = COPY %r0 - %2:intregs = COPY %r1 + liveins: $r0, $r1, $v0 + %0:hvxvr = COPY $v0 + %1:intregs = COPY $r0 + %2:intregs = COPY $r1 %3:intregs = A2_tfrsi 5 %4:intregs = V6_extractw %0, %1 ; CHECK: %[[A0:[0-9]+]]:intregs = A2_andir %{{[0-9]+}}, -4 Index: test/CodeGen/Lanai/peephole-compare.mir =================================================================== --- test/CodeGen/Lanai/peephole-compare.mir +++ test/CodeGen/Lanai/peephole-compare.mir @@ -4,31 +4,31 @@ # CHECK-LABEL: name: test0a # TODO: Enhance combiner to handle this case. This expands into: -# sub %r7, %r6, %r3 -# sub.f %r7, %r6, %r0 -# sel.eq %r18, %r3, %rv +# sub $r7, $r6, $r3 +# sub.f $r7, $r6, $r0 +# sel.eq $r18, $r3, $rv # This is different from the pattern currently matched. If the lowered form had -# been sub.f %r3, 0, %r0 then it would have matched. +# been sub.f $r3, 0, $r0 then it would have matched. # CHECK-LABEL: name: test1a -# CHECK: [[IN1:%.*]]:gpr = COPY %r7 -# CHECK: [[IN2:%.*]]:gpr = COPY %r6 -# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def %sr +# CHECK: [[IN1:%.*]]:gpr = COPY $r7 +# CHECK: [[IN2:%.*]]:gpr = COPY $r6 +# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def $sr # CHECK-LABEL: name: test1b -# CHECK: [[IN1:%.*]]:gpr = COPY %r7 -# CHECK: [[IN2:%.*]]:gpr = COPY %r6 -# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def %sr +# CHECK: [[IN1:%.*]]:gpr = COPY $r7 +# CHECK: [[IN2:%.*]]:gpr = COPY $r6 +# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def $sr # CHECK-LABEL: name: test2a -# CHECK: [[IN1:%.*]]:gpr = COPY %r7 -# CHECK: [[IN2:%.*]]:gpr = COPY %r6 -# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def %sr +# CHECK: [[IN1:%.*]]:gpr = COPY $r7 +# CHECK: [[IN2:%.*]]:gpr = COPY $r6 +# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def $sr # CHECK-LABEL: name: test2b -# CHECK: [[IN1:%.*]]:gpr = COPY %r7 -# CHECK: [[IN2:%.*]]:gpr = COPY %r6 -# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def %sr +# CHECK: [[IN1:%.*]]:gpr = COPY $r7 +# CHECK: [[IN2:%.*]]:gpr = COPY $r6 +# CHECK: SUB_F_R [[IN1]], [[IN2]], 0, implicit-def $sr # CHECK-LABEL: name: test3 # CHECK: AND_F_R @@ -184,9 +184,9 @@ - { id: 4, class: gpr } - { id: 5, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -203,16 +203,16 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18 + liveins: $r6, $r7, $r18 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 %4 = SUB_R %1, %0, 0 - SFSUB_F_RI_LO %4, 0, implicit-def %sr - %5 = SELECT %2, %4, 7, implicit %sr - %rv = COPY %5 - RET implicit %rca, implicit %rv + SFSUB_F_RI_LO %4, 0, implicit-def $sr + %5 = SELECT %2, %4, 7, implicit $sr + $rv = COPY %5 + RET implicit $rca, implicit $rv ... 
--- @@ -227,9 +227,9 @@ - { id: 3, class: gpr } - { id: 4, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -246,15 +246,15 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18 + liveins: $r6, $r7, $r18 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 - SFSUB_F_RR %1, %0, implicit-def %sr - %4 = SELECT %2, %1, 7, implicit %sr - %rv = COPY %4 - RET implicit %rca, implicit %rv + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 + SFSUB_F_RR %1, %0, implicit-def $sr + %4 = SELECT %2, %1, 7, implicit $sr + $rv = COPY %4 + RET implicit $rca, implicit $rv ... --- @@ -270,10 +270,10 @@ - { id: 4, class: gpr } - { id: 5, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } - - { reg: '%r19', virtual-reg: '%3' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } + - { reg: '$r19', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -290,17 +290,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18, %r19 + liveins: $r6, $r7, $r18, $r19 - %3 = COPY %r19 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 + %3 = COPY $r19 + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 %4 = SUB_R %1, %0, 0 - SFSUB_F_RI_LO killed %4, 0, implicit-def %sr - %5 = SELECT %2, %3, 11, implicit %sr - %rv = COPY %5 - RET implicit %rca, implicit %rv + SFSUB_F_RI_LO killed %4, 0, implicit-def $sr + %5 = SELECT %2, %3, 11, implicit $sr + $rv = COPY %5 + RET implicit $rca, implicit $rv ... --- @@ -316,10 +316,10 @@ - { id: 4, class: gpr } - { id: 5, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } - - { reg: '%r19', virtual-reg: '%3' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } + - { reg: '$r19', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -336,17 +336,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18, %r19 + liveins: $r6, $r7, $r18, $r19 - %3 = COPY %r19 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 + %3 = COPY $r19 + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 %4 = SUB_R %1, %0, 0 - SFSUB_F_RI_LO killed %4, 0, implicit-def %sr - %5 = SELECT %2, %3, 11, implicit %sr - %rv = COPY %5 - RET implicit %rca, implicit %rv + SFSUB_F_RI_LO killed %4, 0, implicit-def $sr + %5 = SELECT %2, %3, 11, implicit $sr + $rv = COPY %5 + RET implicit $rca, implicit $rv ... 
--- @@ -362,10 +362,10 @@ - { id: 4, class: gpr } - { id: 5, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } - - { reg: '%r19', virtual-reg: '%3' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } + - { reg: '$r19', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -382,17 +382,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18, %r19 + liveins: $r6, $r7, $r18, $r19 - %3 = COPY %r19 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 + %3 = COPY $r19 + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 %4 = SUB_R %1, %0, 0 - SFSUB_F_RI_LO killed %4, 0, implicit-def %sr - %5 = SELECT %2, %3, 10, implicit %sr - %rv = COPY %5 - RET implicit %rca, implicit %rv + SFSUB_F_RI_LO killed %4, 0, implicit-def $sr + %5 = SELECT %2, %3, 10, implicit $sr + $rv = COPY %5 + RET implicit $rca, implicit $rv ... --- @@ -408,10 +408,10 @@ - { id: 4, class: gpr } - { id: 5, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } - - { reg: '%r19', virtual-reg: '%3' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } + - { reg: '$r19', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -428,17 +428,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18, %r19 + liveins: $r6, $r7, $r18, $r19 - %3 = COPY %r19 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 + %3 = COPY $r19 + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 %4 = SUB_R %1, %0, 0 - SFSUB_F_RI_LO killed %4, 0, implicit-def %sr - %5 = SELECT %2, %3, 10, implicit %sr - %rv = COPY %5 - RET implicit %rca, implicit %rv + SFSUB_F_RI_LO killed %4, 0, implicit-def $sr + %5 = SELECT %2, %3, 10, implicit $sr + $rv = COPY %5 + RET implicit $rca, implicit $rv ... --- @@ -454,10 +454,10 @@ - { id: 4, class: gpr } - { id: 5, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%0' } - - { reg: '%r7', virtual-reg: '%1' } - - { reg: '%r18', virtual-reg: '%2' } - - { reg: '%r19', virtual-reg: '%3' } + - { reg: '$r6', virtual-reg: '%0' } + - { reg: '$r7', virtual-reg: '%1' } + - { reg: '$r18', virtual-reg: '%2' } + - { reg: '$r19', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -474,17 +474,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %r6, %r7, %r18, %r19 + liveins: $r6, $r7, $r18, $r19 - %3 = COPY %r19 - %2 = COPY %r18 - %1 = COPY %r7 - %0 = COPY %r6 + %3 = COPY $r19 + %2 = COPY $r18 + %1 = COPY $r7 + %0 = COPY $r6 %4 = SUB_R %1, %0, 0 - SFSUB_F_RI_LO killed %4, 1, implicit-def %sr - %5 = SELECT %2, %3, 13, implicit %sr - %rv = COPY %5 - RET implicit %rca, implicit %rv + SFSUB_F_RI_LO killed %4, 1, implicit-def $sr + %5 = SELECT %2, %3, 13, implicit $sr + $rv = COPY %5 + RET implicit $rca, implicit $rv ... 
--- @@ -517,10 +517,10 @@ - { id: 21, class: gpr } - { id: 22, class: gpr } liveins: - - { reg: '%r6', virtual-reg: '%1' } - - { reg: '%r7', virtual-reg: '%2' } - - { reg: '%r18', virtual-reg: '%3' } - - { reg: '%r19', virtual-reg: '%4' } + - { reg: '$r6', virtual-reg: '%1' } + - { reg: '$r7', virtual-reg: '%2' } + - { reg: '$r18', virtual-reg: '%3' } + - { reg: '$r19', virtual-reg: '%4' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -538,63 +538,63 @@ body: | bb.0.entry: successors: %bb.4.return, %bb.1.if.end - liveins: %r6, %r7, %r18, %r19 - - %4 = COPY %r19 - %3 = COPY %r18 - %2 = COPY %r7 - %1 = COPY %r6 - SFSUB_F_RI_LO %1, 0, implicit-def %sr - %5 = SCC 6, implicit %sr - SFSUB_F_RR %1, %2, implicit-def %sr - %6 = SCC 4, implicit %sr + liveins: $r6, $r7, $r18, $r19 + + %4 = COPY $r19 + %3 = COPY $r18 + %2 = COPY $r7 + %1 = COPY $r6 + SFSUB_F_RI_LO %1, 0, implicit-def $sr + %5 = SCC 6, implicit $sr + SFSUB_F_RR %1, %2, implicit-def $sr + %6 = SCC 4, implicit $sr %7 = AND_R killed %5, killed %6, 0 %8 = SLI 1 %9 = AND_R killed %7, %8, 0 - SFSUB_F_RI_LO killed %9, 0, implicit-def %sr - BRCC %bb.4.return, 6, implicit %sr + SFSUB_F_RI_LO killed %9, 0, implicit-def $sr + BRCC %bb.4.return, 6, implicit $sr BT %bb.1.if.end bb.1.if.end: successors: %bb.4.return, %bb.2.if.end6 - SFSUB_F_RI_LO %2, 0, implicit-def %sr - %10 = SCC 6, implicit %sr - SFSUB_F_RR %2, %3, implicit-def %sr - %11 = SCC 4, implicit %sr + SFSUB_F_RI_LO %2, 0, implicit-def $sr + %10 = SCC 6, implicit $sr + SFSUB_F_RR %2, %3, implicit-def $sr + %11 = SCC 4, implicit $sr %12 = AND_R killed %10, killed %11, 0 %14 = AND_R killed %12, %8, 0 - SFSUB_F_RI_LO killed %14, 0, implicit-def %sr - BRCC %bb.4.return, 6, implicit %sr + SFSUB_F_RI_LO killed %14, 0, implicit-def $sr + BRCC %bb.4.return, 6, implicit $sr BT %bb.2.if.end6 bb.2.if.end6: successors: %bb.4.return, %bb.3.if.end11 - SFSUB_F_RI_LO %3, 0, implicit-def %sr - %15 = SCC 6, implicit %sr - SFSUB_F_RR %3, %4, implicit-def %sr - %16 = SCC 4, implicit %sr + SFSUB_F_RI_LO %3, 0, implicit-def $sr + %15 = SCC 6, implicit $sr + SFSUB_F_RR %3, %4, implicit-def $sr + %16 = SCC 4, implicit $sr %17 = AND_R killed %15, killed %16, 0 %18 = SLI 1 %19 = AND_R killed %17, killed %18, 0 - SFSUB_F_RI_LO killed %19, 0, implicit-def %sr - BRCC %bb.4.return, 6, implicit %sr + SFSUB_F_RI_LO killed %19, 0, implicit-def $sr + BRCC %bb.4.return, 6, implicit $sr BT %bb.3.if.end11 bb.3.if.end11: %20 = SLI 21 - SFSUB_F_RR %4, %1, implicit-def %sr - %21 = SELECT %2, %20, 4, implicit %sr - SFSUB_F_RI_LO %4, 0, implicit-def %sr - %22 = SELECT killed %21, %20, 6, implicit %sr - %rv = COPY %22 - RET implicit %rca, implicit %rv + SFSUB_F_RR %4, %1, implicit-def $sr + %21 = SELECT %2, %20, 4, implicit $sr + SFSUB_F_RI_LO %4, 0, implicit-def $sr + %22 = SELECT killed %21, %20, 6, implicit $sr + $rv = COPY %22 + RET implicit $rca, implicit $rv bb.4.return: %0 = PHI %3, %bb.0.entry, %4, %bb.1.if.end, %1, %bb.2.if.end6 - %rv = COPY %0 - RET implicit %rca, implicit %rv + $rv = COPY %0 + RET implicit $rca, implicit $rv ... 
--- @@ -637,16 +637,16 @@ %5 = OR_I_LO killed %4, target-flags(lanai-lo) @b %6 = LDW_RI killed %5, 0, 0 :: (load 4 from @b, !tbaa !0) %0 = SUB_R killed %6, killed %3, 0 - SFSUB_F_RI_LO %0, 0, implicit-def %sr - BRCC %bb.3.if.end, 10, implicit %sr + SFSUB_F_RI_LO %0, 0, implicit-def $sr + BRCC %bb.3.if.end, 10, implicit $sr BT %bb.1.if.then bb.1.if.then: successors: %bb.2.while.body - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp - CALL @g, csr, implicit-def dead %rca, implicit %sp, implicit-def %sp, implicit-def %rv - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + CALL @g, csr, implicit-def dead $rca, implicit $sp, implicit-def $sp, implicit-def $rv + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp bb.2.while.body: successors: %bb.2.while.body @@ -655,17 +655,17 @@ bb.3.if.end: successors: %bb.4.if.then4, %bb.6.if.end7 - liveins: %sr + liveins: $sr - BRCC %bb.6.if.end7, 14, implicit %sr + BRCC %bb.6.if.end7, 14, implicit $sr BT %bb.4.if.then4 bb.4.if.then4: successors: %bb.5.while.body6 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp - CALL @g, csr, implicit-def dead %rca, implicit %sp, implicit-def %sp, implicit-def %rv - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + CALL @g, csr, implicit-def dead $rca, implicit $sp, implicit-def $sp, implicit-def $rv + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp bb.5.while.body6: successors: %bb.5.while.body6 @@ -673,6 +673,6 @@ BT %bb.5.while.body6 bb.6.if.end7: - RET implicit %rca + RET implicit $rca ... Index: test/CodeGen/MIR/AArch64/addrspace-memoperands.mir =================================================================== --- test/CodeGen/MIR/AArch64/addrspace-memoperands.mir +++ test/CodeGen/MIR/AArch64/addrspace-memoperands.mir @@ -13,14 +13,14 @@ bb.0: ; CHECK-LABEL: name: addrspace_memoperands - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, addrspace 1) ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4, align 2, addrspace 3) ; CHECK: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store 8, addrspace 1) ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: (store 4, align 2, addrspace 3) ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: (store 4) ; CHECK: RET_ReallyLR - %0:_(p0) = COPY %x0 + %0:_(p0) = COPY $x0 %1:_(s64) = G_LOAD %0(p0) :: (load 8, addrspace 1) %2:_(s32) = G_LOAD %0(p0) :: (load 4, align 2, addrspace 3) G_STORE %1(s64), %0(p0) :: (store 8, addrspace 1) Index: test/CodeGen/MIR/AArch64/atomic-memoperands.mir =================================================================== --- test/CodeGen/MIR/AArch64/atomic-memoperands.mir +++ test/CodeGen/MIR/AArch64/atomic-memoperands.mir @@ -14,7 +14,7 @@ bb.0: ; CHECK-LABEL: name: atomic_memoperands - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load unordered 8) ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load monotonic 4) ; CHECK: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load acquire 2) @@ -22,7 +22,7 @@ ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: (store acq_rel 4) ; CHECK: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store syncscope("singlethread") seq_cst 8) ; CHECK: RET_ReallyLR - %0:_(p0) = COPY %x0 + %0:_(p0) = COPY $x0 %1:_(s64) = G_LOAD %0(p0) :: (load 
unordered 8) %2:_(s32) = G_LOAD %0(p0) :: (load monotonic 4) %3:_(s16) = G_LOAD %0(p0) :: (load acquire 2) Index: test/CodeGen/MIR/AArch64/cfi.mir =================================================================== --- test/CodeGen/MIR/AArch64/cfi.mir +++ test/CodeGen/MIR/AArch64/cfi.mir @@ -17,26 +17,26 @@ # CHECK-LABEL: name: trivial_fp_func body: | bb.0.entry: - ; CHECK: CFI_INSTRUCTION def_cfa %w29, 16 - frame-setup CFI_INSTRUCTION def_cfa %w29, 16 - ; CHECK: CFI_INSTRUCTION def_cfa_register %w29 - frame-setup CFI_INSTRUCTION def_cfa_register %w29 + ; CHECK: CFI_INSTRUCTION def_cfa $w29, 16 + frame-setup CFI_INSTRUCTION def_cfa $w29, 16 + ; CHECK: CFI_INSTRUCTION def_cfa_register $w29 + frame-setup CFI_INSTRUCTION def_cfa_register $w29 ; CHECK: CFI_INSTRUCTION def_cfa_offset -8 frame-setup CFI_INSTRUCTION def_cfa_offset -8 - ; CHECK: CFI_INSTRUCTION offset %w30, -8 - frame-setup CFI_INSTRUCTION offset %w30, -8 - ; CHECK: CFI_INSTRUCTION rel_offset %w30, -8 - frame-setup CFI_INSTRUCTION rel_offset %w30, -8 + ; CHECK: CFI_INSTRUCTION offset $w30, -8 + frame-setup CFI_INSTRUCTION offset $w30, -8 + ; CHECK: CFI_INSTRUCTION rel_offset $w30, -8 + frame-setup CFI_INSTRUCTION rel_offset $w30, -8 ; CHECK: CFI_INSTRUCTION adjust_cfa_offset -8 frame-setup CFI_INSTRUCTION adjust_cfa_offset -8 - CFI_INSTRUCTION restore %w30 - ; CHECK: CFI_INSTRUCTION restore %w30 - CFI_INSTRUCTION undefined %w30 - ; CHECK: CFI_INSTRUCTION undefined %w30 - CFI_INSTRUCTION same_value %w29 - ; CHECK: CFI_INSTRUCTION same_value %w29 - CFI_INSTRUCTION register %w20, %w30 - ; CHECK: CFI_INSTRUCTION register %w20, %w30 + CFI_INSTRUCTION restore $w30 + ; CHECK: CFI_INSTRUCTION restore $w30 + CFI_INSTRUCTION undefined $w30 + ; CHECK: CFI_INSTRUCTION undefined $w30 + CFI_INSTRUCTION same_value $w29 + ; CHECK: CFI_INSTRUCTION same_value $w29 + CFI_INSTRUCTION register $w20, $w30 + ; CHECK: CFI_INSTRUCTION register $w20, $w30 CFI_INSTRUCTION remember_state ; CHECK: CFI_INSTRUCTION remember_state CFI_INSTRUCTION restore_state Index: test/CodeGen/MIR/AArch64/expected-target-flag-name.mir =================================================================== --- test/CodeGen/MIR/AArch64/expected-target-flag-name.mir +++ test/CodeGen/MIR/AArch64/expected-target-flag-name.mir @@ -16,8 +16,8 @@ name: sub_small body: | bb.0.entry: - %x8 = ADRP target-flags(aarch64-page) @var_i32 + $x8 = ADRP target-flags(aarch64-page) @var_i32 ; CHECK: [[@LINE+1]]:60: expected the name of the target flag - %w0 = LDRWui killed %x8, target-flags(aarch64-pageoff, ) @var_i32 - RET_ReallyLR implicit %w0 + $w0 = LDRWui killed $x8, target-flags(aarch64-pageoff, ) @var_i32 + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/MIR/AArch64/generic-virtual-registers-error.mir =================================================================== --- test/CodeGen/MIR/AArch64/generic-virtual-registers-error.mir +++ test/CodeGen/MIR/AArch64/generic-virtual-registers-error.mir @@ -14,8 +14,8 @@ - { id: 0, class: _ } body: | bb.0: - liveins: %w0 + liveins: $w0 ; ERR: generic virtual registers must have a type ; ERR-NEXT: %0 - %0 = G_ADD i32 %w0, %w0 + %0 = G_ADD i32 $w0, $w0 ... 
Index: test/CodeGen/MIR/AArch64/generic-virtual-registers-with-regbank-error.mir =================================================================== --- test/CodeGen/MIR/AArch64/generic-virtual-registers-with-regbank-error.mir +++ test/CodeGen/MIR/AArch64/generic-virtual-registers-with-regbank-error.mir @@ -15,8 +15,8 @@ - { id: 0, class: gpr } body: | bb.0: - liveins: %w0 + liveins: $w0 ; ERR: generic virtual registers must have a type ; ERR-NEXT: %0 - %0 = G_ADD i32 %w0, %w0 + %0 = G_ADD i32 $w0, $w0 ... Index: test/CodeGen/MIR/AArch64/intrinsics.mir =================================================================== --- test/CodeGen/MIR/AArch64/intrinsics.mir +++ test/CodeGen/MIR/AArch64/intrinsics.mir @@ -9,10 +9,10 @@ ... --- # Completely invalid code, but it checks that intrinsics round-trip properly. -# CHECK: %x0 = COPY intrinsic(@llvm.returnaddress) +# CHECK: $x0 = COPY intrinsic(@llvm.returnaddress) name: use_intrin body: | bb.0: - %x0 = COPY intrinsic(@llvm.returnaddress) + $x0 = COPY intrinsic(@llvm.returnaddress) RET_ReallyLR ... Index: test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir =================================================================== --- test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir +++ test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir @@ -16,8 +16,8 @@ name: sub_small body: | bb.0.entry: - %x8 = ADRP target-flags(aarch64-page) @var_i32 + $x8 = ADRP target-flags(aarch64-page) @var_i32 ; CHECK: [[@LINE+1]]:60: use of undefined target flag 'ncc' - %w0 = LDRWui killed %x8, target-flags(aarch64-pageoff, ncc) @var_i32 - RET_ReallyLR implicit %w0 + $w0 = LDRWui killed $x8, target-flags(aarch64-pageoff, ncc) @var_i32 + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir =================================================================== --- test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir +++ test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir @@ -12,7 +12,7 @@ body: | bb.0: - %0:_(p0) = COPY %x0 + %0:_(p0) = COPY $x0 ; CHECK: [[@LINE+1]]:35: use of undefined target MMO flag 'aarch64-invalid' %1:_(s64) = G_LOAD %0(p0) :: ("aarch64-invalid" load 8) RET_ReallyLR Index: test/CodeGen/MIR/AArch64/multiple-lhs-operands.mir =================================================================== --- test/CodeGen/MIR/AArch64/multiple-lhs-operands.mir +++ test/CodeGen/MIR/AArch64/multiple-lhs-operands.mir @@ -17,12 +17,12 @@ name: trivial_fp_func body: | bb.0.entry: - liveins: %lr, %fp, %lr, %fp + liveins: $lr, $fp, $lr, $fp - %sp = frame-setup STPXpre killed %fp, killed %lr, %sp, -2 - %fp = frame-setup ADDXri %sp, 0, 0 - BL @foo, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - ; CHECK: %sp, %fp, %lr = LDPXpost %sp, 2 - %sp, %fp, %lr = LDPXpost %sp, 2 + $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 + $fp = frame-setup ADDXri $sp, 0, 0 + BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK: $sp, $fp, $lr = LDPXpost $sp, 2 + $sp, $fp, $lr = LDPXpost $sp, 2 RET_ReallyLR ... Index: test/CodeGen/MIR/AArch64/register-operand-bank.mir =================================================================== --- test/CodeGen/MIR/AArch64/register-operand-bank.mir +++ test/CodeGen/MIR/AArch64/register-operand-bank.mir @@ -12,9 +12,9 @@ name: func body: | bb.0: - %0 : gpr(s64) = COPY %x9 - %x9 = COPY %0 + %0 : gpr(s64) = COPY $x9 + $x9 = COPY %0 - %3 : fpr(s64) = COPY %d0 - %d1 = COPY %3 : fpr + %3 : fpr(s64) = COPY $d0 + $d1 = COPY %3 : fpr ... 
Index: test/CodeGen/MIR/AArch64/swp.mir =================================================================== --- test/CodeGen/MIR/AArch64/swp.mir +++ test/CodeGen/MIR/AArch64/swp.mir @@ -18,16 +18,16 @@ - { id: 1, class: gpr32 } - { id: 2, class: gpr32 } liveins: - - { reg: '%x0', virtual-reg: '%0' } + - { reg: '$x0', virtual-reg: '%0' } body: | bb.0.entry: - liveins: %x0 + liveins: $x0 ; CHECK-LABEL: swp ; CHECK: {{[0-9]+}}:gpr32 = SWPW killed %1, %0 :: (volatile load store monotonic 4 on %ir.addr) - %0:gpr64common = COPY %x0 + %0:gpr64common = COPY $x0 %1:gpr32 = MOVi32imm 1 %2:gpr32 = SWPW killed %1, %0 :: (volatile load store monotonic 4 on %ir.addr) - %w0 = COPY %2 - RET_ReallyLR implicit %w0 + $w0 = COPY %2 + RET_ReallyLR implicit $w0 ... Index: test/CodeGen/MIR/AArch64/target-flags.mir =================================================================== --- test/CodeGen/MIR/AArch64/target-flags.mir +++ test/CodeGen/MIR/AArch64/target-flags.mir @@ -21,19 +21,19 @@ name: sub_small body: | bb.0.entry: - ; CHECK: %x8 = ADRP target-flags(aarch64-page) @var_i32 - ; CHECK-NEXT: %x9 = ADRP target-flags(aarch64-page) @var_i64 - ; CHECK-NEXT: %w10 = LDRWui %x8, target-flags(aarch64-pageoff, aarch64-nc) @var_i32 - ; CHECK-NEXT: %x11 = LDRXui %x9, target-flags(aarch64-pageoff, aarch64-got, aarch64-nc) @var_i64 - ; CHECK: STRWui killed %w10, killed %x8, target-flags(aarch64-nc) @var_i32 - ; CHECK: STRXui killed %x11, killed %x9, target-flags(aarch64-pageoff, aarch64-nc) @var_i64 - %x8 = ADRP target-flags(aarch64-page) @var_i32 - %x9 = ADRP target-flags(aarch64-page) @var_i64 - %w10 = LDRWui %x8, target-flags(aarch64-pageoff, aarch64-nc) @var_i32 - %x11 = LDRXui %x9, target-flags(aarch64-pageoff, aarch64-got, aarch64-nc) @var_i64 - %w10 = SUBWri killed %w10, 4095, 0 - %x11 = SUBXri killed %x11, 52, 0 - STRWui killed %w10, killed %x8, target-flags(aarch64-nc) @var_i32 - STRXui killed %x11, killed %x9, target-flags(aarch64-pageoff, aarch64-nc) @var_i64 + ; CHECK: $x8 = ADRP target-flags(aarch64-page) @var_i32 + ; CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page) @var_i64 + ; CHECK-NEXT: $w10 = LDRWui $x8, target-flags(aarch64-pageoff, aarch64-nc) @var_i32 + ; CHECK-NEXT: $x11 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got, aarch64-nc) @var_i64 + ; CHECK: STRWui killed $w10, killed $x8, target-flags(aarch64-nc) @var_i32 + ; CHECK: STRXui killed $x11, killed $x9, target-flags(aarch64-pageoff, aarch64-nc) @var_i64 + $x8 = ADRP target-flags(aarch64-page) @var_i32 + $x9 = ADRP target-flags(aarch64-page) @var_i64 + $w10 = LDRWui $x8, target-flags(aarch64-pageoff, aarch64-nc) @var_i32 + $x11 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got, aarch64-nc) @var_i64 + $w10 = SUBWri killed $w10, 4095, 0 + $x11 = SUBXri killed $x11, 52, 0 + STRWui killed $w10, killed $x8, target-flags(aarch64-nc) @var_i32 + STRXui killed $x11, killed $x9, target-flags(aarch64-pageoff, aarch64-nc) @var_i64 RET_ReallyLR ... 
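Note that the rewrite also reaches the YAML metadata: in the liveins blocks above, the quoted 'reg' value is a physical register and flips to $, while the quoted 'virtual-reg' value stays %. The earlier sketch handles this case as well, since it keys on the token shape rather than on where the token appears:

    >>> convert("  - { reg: '%x0', virtual-reg: '%0' }")
    "  - { reg: '$x0', virtual-reg: '%0' }"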
Index: test/CodeGen/MIR/AArch64/target-memoperands.mir =================================================================== --- test/CodeGen/MIR/AArch64/target-memoperands.mir +++ test/CodeGen/MIR/AArch64/target-memoperands.mir @@ -14,13 +14,13 @@ bb.0: ; CHECK-LABEL: name: target_memoperands - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: ("aarch64-suppress-pair" load 8) ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: ("aarch64-strided-access" load 4) ; CHECK: G_STORE [[LOAD]](s64), [[COPY]](p0) :: ("aarch64-suppress-pair" store 8) ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: ("aarch64-strided-access" store 4) ; CHECK: RET_ReallyLR - %0:_(p0) = COPY %x0 + %0:_(p0) = COPY $x0 %1:_(s64) = G_LOAD %0(p0) :: ("aarch64-suppress-pair" load 8) %2:_(s32) = G_LOAD %0(p0) :: ("aarch64-strided-access" load 4) G_STORE %1(s64), %0(p0) :: ("aarch64-suppress-pair" store 8) Index: test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir =================================================================== --- test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir +++ test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir @@ -20,30 +20,30 @@ --- name: float liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: maxAlignment: 8 body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr2_sgpr3 = S_GETPC_B64 + $sgpr2_sgpr3 = S_GETPC_B64 ; CHECK: [[@LINE+1]]:45: expected the name of the target index - %sgpr2 = S_ADD_U32 %sgpr2, target-index(0), implicit-def %scc, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr3, 0, implicit-def %scc, implicit %scc, implicit-def %scc, implicit %scc - %sgpr4_sgpr5 = S_LSHR_B64 %sgpr2_sgpr3, 32, implicit-def dead %scc - %sgpr6 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 11 - %sgpr7 = S_ASHR_I32 %sgpr6, 31, implicit-def dead %scc - %sgpr6_sgpr7 = S_LSHL_B64 %sgpr6_sgpr7, 2, implicit-def dead %scc - %sgpr2 = S_ADD_U32 %sgpr2, @float_gv, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr4, 0, implicit-def dead %scc, implicit %scc - %sgpr4 = S_ADD_U32 %sgpr2, %sgpr6, implicit-def %scc - %sgpr5 = S_ADDC_U32 %sgpr3, %sgpr7, implicit-def dead %scc, implicit %scc - %sgpr2 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 0 - %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 9 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = V_MOV_B32_e32 killed %sgpr2, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec + $sgpr2 = S_ADD_U32 $sgpr2, target-index(0), implicit-def $scc, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr3, 0, implicit-def $scc, implicit $scc, implicit-def $scc, implicit $scc + $sgpr4_sgpr5 = S_LSHR_B64 $sgpr2_sgpr3, 32, implicit-def dead $scc + $sgpr6 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 11 + $sgpr7 = S_ASHR_I32 $sgpr6, 31, implicit-def dead $scc + $sgpr6_sgpr7 = S_LSHL_B64 $sgpr6_sgpr7, 2, implicit-def dead $scc + $sgpr2 = S_ADD_U32 $sgpr2, @float_gv, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr4, 0, implicit-def dead $scc, implicit $scc + $sgpr4 = S_ADD_U32 $sgpr2, $sgpr6, implicit-def $scc + $sgpr5 = S_ADDC_U32 $sgpr3, $sgpr7, implicit-def dead $scc, implicit $scc + $sgpr2 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0 + $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 9 + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
Index: test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir =================================================================== --- test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir +++ test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir @@ -20,30 +20,30 @@ --- name: float liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: maxAlignment: 8 body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr2_sgpr3 = S_GETPC_B64 + $sgpr2_sgpr3 = S_GETPC_B64 ; CHECK: [[@LINE+1]]:45: use of undefined target index 'constdata-start' - %sgpr2 = S_ADD_U32 %sgpr2, target-index(constdata-start), implicit-def %scc, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr3, 0, implicit-def %scc, implicit %scc, implicit-def %scc, implicit %scc - %sgpr4_sgpr5 = S_LSHR_B64 %sgpr2_sgpr3, 32, implicit-def dead %scc - %sgpr6 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 11 - %sgpr7 = S_ASHR_I32 %sgpr6, 31, implicit-def dead %scc - %sgpr6_sgpr7 = S_LSHL_B64 %sgpr6_sgpr7, 2, implicit-def dead %scc - %sgpr2 = S_ADD_U32 %sgpr2, @float_gv, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr4, 0, implicit-def dead %scc, implicit %scc - %sgpr4 = S_ADD_U32 %sgpr2, %sgpr6, implicit-def %scc - %sgpr5 = S_ADDC_U32 %sgpr3, %sgpr7, implicit-def dead %scc, implicit %scc - %sgpr2 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 0 - %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 9 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = V_MOV_B32_e32 killed %sgpr2, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec + $sgpr2 = S_ADD_U32 $sgpr2, target-index(constdata-start), implicit-def $scc, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr3, 0, implicit-def $scc, implicit $scc, implicit-def $scc, implicit $scc + $sgpr4_sgpr5 = S_LSHR_B64 $sgpr2_sgpr3, 32, implicit-def dead $scc + $sgpr6 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 11 + $sgpr7 = S_ASHR_I32 $sgpr6, 31, implicit-def dead $scc + $sgpr6_sgpr7 = S_LSHL_B64 $sgpr6_sgpr7, 2, implicit-def dead $scc + $sgpr2 = S_ADD_U32 $sgpr2, @float_gv, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr4, 0, implicit-def dead $scc, implicit $scc + $sgpr4 = S_ADD_U32 $sgpr2, $sgpr6, implicit-def $scc + $sgpr5 = S_ADDC_U32 $sgpr3, $sgpr7, implicit-def dead $scc, implicit $scc + $sgpr2 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0 + $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 9 + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
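One detail worth calling out in the parser-error tests above: their expectations pin exact columns ([[@LINE+1]]:45 here, :60 in the earlier target-flag tests), and those columns survive the rewrite untouched because '$' and '%' are both single characters, so no operand shifts position. A quick check under the same sketch:

    >>> old = "%sgpr2 = S_ADD_U32 %sgpr2, target-index(0)"
    >>> len(convert(old)) == len(old)
    True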
Index: test/CodeGen/MIR/AMDGPU/syncscopes.mir =================================================================== --- test/CodeGen/MIR/AMDGPU/syncscopes.mir +++ test/CodeGen/MIR/AMDGPU/syncscopes.mir @@ -42,9 +42,9 @@ !0 = !{i32 1} # GCN-LABEL: name: syncscopes -# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4) -# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4) -# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4) +# GCN: FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4) +# GCN: FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4) +# GCN: FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4) ... --- name: syncscopes @@ -55,7 +55,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%sgpr4_sgpr5' } + - { reg: '$sgpr4_sgpr5' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -71,30 +71,30 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %sgpr4_sgpr5 + liveins: $sgpr4_sgpr5 S_WAITCNT 0 - %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr4_sgpr5, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %sgpr6 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) - %sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM %sgpr4_sgpr5, 24, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %sgpr7 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 16, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) - %sgpr8 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 32, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr4_sgpr5, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + $sgpr6 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + $sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM $sgpr4_sgpr5, 24, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + $sgpr7 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 16, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + $sgpr8 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 32, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) S_WAITCNT 127 - %vgpr0 = V_MOV_B32_e32 %sgpr0, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr0_sgpr1 - %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 40, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) - %vgpr1 = V_MOV_B32_e32 killed %sgpr1, implicit %exec, implicit killed %sgpr0_sgpr1, implicit %sgpr0_sgpr1, implicit %exec - 
%vgpr2 = V_MOV_B32_e32 killed %sgpr6, implicit %exec, implicit %exec - FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out) + $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 + $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr4_sgpr5, 40, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit killed $sgpr0_sgpr1, implicit $sgpr0_sgpr1, implicit $exec + $vgpr2 = V_MOV_B32_e32 killed $sgpr6, implicit $exec, implicit $exec + FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out) S_WAITCNT 112 - %vgpr0 = V_MOV_B32_e32 %sgpr2, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr2_sgpr3 - %vgpr1 = V_MOV_B32_e32 killed %sgpr3, implicit %exec, implicit killed %sgpr2_sgpr3, implicit %sgpr2_sgpr3, implicit %exec - %vgpr2 = V_MOV_B32_e32 killed %sgpr7, implicit %exec, implicit %exec - FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out) + $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr2_sgpr3 + $vgpr1 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit killed $sgpr2_sgpr3, implicit $sgpr2_sgpr3, implicit $exec + $vgpr2 = V_MOV_B32_e32 killed $sgpr7, implicit $exec, implicit $exec + FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out) S_WAITCNT 112 - %vgpr0 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr4_sgpr5 - %vgpr1 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit killed %sgpr4_sgpr5, implicit %sgpr4_sgpr5, implicit %exec - %vgpr2 = V_MOV_B32_e32 killed %sgpr8, implicit %exec, implicit %exec - FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out) + $vgpr0 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr4_sgpr5 + $vgpr1 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit killed $sgpr4_sgpr5, implicit $sgpr4_sgpr5, implicit $exec + $vgpr2 = V_MOV_B32_e32 killed $sgpr8, implicit $exec, implicit $exec + FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, -1, 0, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out) S_ENDPGM ... 
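The syncscopes.mir hunk above also shows that CHECK/GCN lines must be rewritten in lockstep with the input MIR, since FileCheck matches them literally. The same converter works on comment lines because it matches the %name token wherever it occurs; alternatively, as a hypothetical transition aid that this patch does not use, a check line could be made sigil-agnostic with a FileCheck regex block such as {{[%$]}}vgpr0.

    >>> convert("# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, implicit %exec, implicit %flat_scr")
    '# GCN: FLAT_STORE_DWORD killed $vgpr0_vgpr1, implicit $exec, implicit $flat_scr'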
Index: test/CodeGen/MIR/AMDGPU/target-flags.mir =================================================================== --- test/CodeGen/MIR/AMDGPU/target-flags.mir +++ test/CodeGen/MIR/AMDGPU/target-flags.mir @@ -12,7 +12,7 @@ name: flags liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: maxAlignment: 8 registers: @@ -20,12 +20,12 @@ - { id: 1, class: sreg_64, preferred-register: '' } body: | bb.0: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 ; CHECK-LABEL: name: flags - ; CHECK: [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @foo + 4, target-flags(amdgpu-rel32-hi) @foo + 4, implicit-def dead %scc + ; CHECK: [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @foo + 4, target-flags(amdgpu-rel32-hi) @foo + 4, implicit-def dead $scc ; CHECK: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 target-flags(amdgpu-gotprel) @foo ; CHECK: S_ENDPGM - %0 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @foo + 4, target-flags(amdgpu-rel32-hi) @foo + 4, implicit-def dead %scc + %0 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @foo + 4, target-flags(amdgpu-rel32-hi) @foo + 4, implicit-def dead $scc %1 = S_MOV_B64 target-flags(amdgpu-gotprel) @foo S_ENDPGM Index: test/CodeGen/MIR/AMDGPU/target-index-operands.mir =================================================================== --- test/CodeGen/MIR/AMDGPU/target-index-operands.mir +++ test/CodeGen/MIR/AMDGPU/target-index-operands.mir @@ -28,60 +28,60 @@ --- name: float liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: maxAlignment: 8 body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr2_sgpr3 = S_GETPC_B64 - ; CHECK: %sgpr2 = S_ADD_U32 %sgpr2, target-index(amdgpu-constdata-start), implicit-def %scc, implicit-def %scc - %sgpr2 = S_ADD_U32 %sgpr2, target-index(amdgpu-constdata-start), implicit-def %scc, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr3, 0, implicit-def %scc, implicit %scc, implicit-def %scc, implicit %scc - %sgpr4_sgpr5 = S_LSHR_B64 %sgpr2_sgpr3, 32, implicit-def dead %scc - %sgpr6 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 11, 0 - %sgpr7 = S_ASHR_I32 %sgpr6, 31, implicit-def dead %scc - %sgpr6_sgpr7 = S_LSHL_B64 %sgpr6_sgpr7, 2, implicit-def dead %scc - %sgpr2 = S_ADD_U32 %sgpr2, @float_gv, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr4, 0, implicit-def dead %scc, implicit %scc - %sgpr4 = S_ADD_U32 %sgpr2, %sgpr6, implicit-def %scc - %sgpr5 = S_ADDC_U32 %sgpr3, %sgpr7, implicit-def dead %scc, implicit %scc - %sgpr2 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 0, 0 - %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 9, 0 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = V_MOV_B32_e32 killed %sgpr2, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec + $sgpr2_sgpr3 = S_GETPC_B64 + ; CHECK: $sgpr2 = S_ADD_U32 $sgpr2, target-index(amdgpu-constdata-start), implicit-def $scc, implicit-def $scc + $sgpr2 = S_ADD_U32 $sgpr2, target-index(amdgpu-constdata-start), implicit-def $scc, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr3, 0, implicit-def $scc, implicit $scc, implicit-def $scc, implicit $scc + $sgpr4_sgpr5 = S_LSHR_B64 $sgpr2_sgpr3, 32, implicit-def dead $scc + $sgpr6 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 11, 0 + $sgpr7 = S_ASHR_I32 $sgpr6, 31, implicit-def dead $scc + $sgpr6_sgpr7 = S_LSHL_B64 $sgpr6_sgpr7, 2, implicit-def dead $scc + $sgpr2 = S_ADD_U32 $sgpr2, @float_gv, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr4, 0, 
implicit-def dead $scc, implicit $scc + $sgpr4 = S_ADD_U32 $sgpr2, $sgpr6, implicit-def $scc + $sgpr5 = S_ADDC_U32 $sgpr3, $sgpr7, implicit-def dead $scc, implicit $scc + $sgpr2 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0, 0 + $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 9, 0 + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... --- name: float2 liveins: - - { reg: '%sgpr0_sgpr1' } + - { reg: '$sgpr0_sgpr1' } frameInfo: maxAlignment: 8 body: | bb.0.entry: - liveins: %sgpr0_sgpr1 + liveins: $sgpr0_sgpr1 - %sgpr2_sgpr3 = S_GETPC_B64 - ; CHECK: %sgpr2 = S_ADD_U32 %sgpr2, target-index(amdgpu-constdata-start) + 1, implicit-def %scc, implicit-def %scc - %sgpr2 = S_ADD_U32 %sgpr2, target-index(amdgpu-constdata-start) + 1, implicit-def %scc, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr3, 0, implicit-def %scc, implicit %scc, implicit-def %scc, implicit %scc - %sgpr4_sgpr5 = S_LSHR_B64 %sgpr2_sgpr3, 32, implicit-def dead %scc - %sgpr6 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 11, 0 - %sgpr7 = S_ASHR_I32 %sgpr6, 31, implicit-def dead %scc - %sgpr6_sgpr7 = S_LSHL_B64 %sgpr6_sgpr7, 2, implicit-def dead %scc - %sgpr2 = S_ADD_U32 %sgpr2, @float_gv, implicit-def %scc - %sgpr3 = S_ADDC_U32 %sgpr4, 0, implicit-def dead %scc, implicit %scc - %sgpr4 = S_ADD_U32 %sgpr2, %sgpr6, implicit-def %scc - %sgpr5 = S_ADDC_U32 %sgpr3, %sgpr7, implicit-def dead %scc, implicit %scc - %sgpr2 = S_LOAD_DWORD_IMM %sgpr4_sgpr5, 0, 0 - %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 9, 0 - %sgpr7 = S_MOV_B32 61440 - %sgpr6 = S_MOV_B32 -1 - %vgpr0 = V_MOV_B32_e32 killed %sgpr2, implicit %exec - BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec + $sgpr2_sgpr3 = S_GETPC_B64 + ; CHECK: $sgpr2 = S_ADD_U32 $sgpr2, target-index(amdgpu-constdata-start) + 1, implicit-def $scc, implicit-def $scc + $sgpr2 = S_ADD_U32 $sgpr2, target-index(amdgpu-constdata-start) + 1, implicit-def $scc, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr3, 0, implicit-def $scc, implicit $scc, implicit-def $scc, implicit $scc + $sgpr4_sgpr5 = S_LSHR_B64 $sgpr2_sgpr3, 32, implicit-def dead $scc + $sgpr6 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 11, 0 + $sgpr7 = S_ASHR_I32 $sgpr6, 31, implicit-def dead $scc + $sgpr6_sgpr7 = S_LSHL_B64 $sgpr6_sgpr7, 2, implicit-def dead $scc + $sgpr2 = S_ADD_U32 $sgpr2, @float_gv, implicit-def $scc + $sgpr3 = S_ADDC_U32 $sgpr4, 0, implicit-def dead $scc, implicit $scc + $sgpr4 = S_ADD_U32 $sgpr2, $sgpr6, implicit-def $scc + $sgpr5 = S_ADDC_U32 $sgpr3, $sgpr7, implicit-def dead $scc, implicit $scc + $sgpr2 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0, 0 + $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 9, 0 + $sgpr7 = S_MOV_B32 61440 + $sgpr6 = S_MOV_B32 -1 + $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec + BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec S_ENDPGM ... 
Index: test/CodeGen/MIR/ARM/bundled-instructions.mir =================================================================== --- test/CodeGen/MIR/ARM/bundled-instructions.mir +++ test/CodeGen/MIR/ARM/bundled-instructions.mir @@ -23,53 +23,53 @@ name: test1 tracksRegLiveness: true liveins: - - { reg: '%r0' } + - { reg: '$r0' } body: | bb.0.entry: - liveins: %r0 + liveins: $r0 ; CHECK-LABEL: name: test1 - ; CHECK: %r1 = t2MOVi 0, 14, %noreg, %noreg - ; CHECK-NEXT: t2CMNri killed %r0, 78, 14, %noreg, implicit-def %cpsr - ; CHECK-NEXT: BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { - ; CHECK-NEXT: t2IT 12, 8, implicit-def %itstate - ; CHECK-NEXT: %r1 = t2MOVi 1, 12, killed %cpsr, %noreg, implicit internal killed %itstate + ; CHECK: $r1 = t2MOVi 0, 14, $noreg, $noreg + ; CHECK-NEXT: t2CMNri killed $r0, 78, 14, $noreg, implicit-def $cpsr + ; CHECK-NEXT: BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $cpsr { + ; CHECK-NEXT: t2IT 12, 8, implicit-def $itstate + ; CHECK-NEXT: $r1 = t2MOVi 1, 12, killed $cpsr, $noreg, implicit internal killed $itstate ; CHECK-NEXT: } - ; CHECK-NEXT: %r0 = tMOVr killed %r1, 14, %noreg - ; CHECK-NEXT: tBX_RET 14, %noreg, implicit killed %r0 - %r1 = t2MOVi 0, 14, _, _ - t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr - BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { - t2IT 12, 8, implicit-def %itstate - %r1 = t2MOVi 1, 12, killed %cpsr, _, implicit internal killed %itstate + ; CHECK-NEXT: $r0 = tMOVr killed $r1, 14, $noreg + ; CHECK-NEXT: tBX_RET 14, $noreg, implicit killed $r0 + $r1 = t2MOVi 0, 14, _, _ + t2CMNri killed $r0, 78, 14, _, implicit-def $cpsr + BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $cpsr { + t2IT 12, 8, implicit-def $itstate + $r1 = t2MOVi 1, 12, killed $cpsr, _, implicit internal killed $itstate } - %r0 = tMOVr killed %r1, 14, _ - tBX_RET 14, _, implicit killed %r0 + $r0 = tMOVr killed $r1, 14, _ + tBX_RET 14, _, implicit killed $r0 ... --- name: test2 tracksRegLiveness: true liveins: - - { reg: '%r0' } + - { reg: '$r0' } body: | bb.0.entry: - liveins: %r0 + liveins: $r0 ; Verify that the next machine instruction can be on the same line as ; '{' or '}'. 
; CHECK-LABEL: name: test2 - ; CHECK: %r1 = t2MOVi 0, 14, %noreg, %noreg - ; CHECK-NEXT: t2CMNri killed %r0, 78, 14, %noreg, implicit-def %cpsr - ; CHECK-NEXT: BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { - ; CHECK-NEXT: t2IT 12, 8, implicit-def %itstate - ; CHECK-NEXT: %r1 = t2MOVi 1, 12, killed %cpsr, %noreg, implicit internal killed %itstate + ; CHECK: $r1 = t2MOVi 0, 14, $noreg, $noreg + ; CHECK-NEXT: t2CMNri killed $r0, 78, 14, $noreg, implicit-def $cpsr + ; CHECK-NEXT: BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $cpsr { + ; CHECK-NEXT: t2IT 12, 8, implicit-def $itstate + ; CHECK-NEXT: $r1 = t2MOVi 1, 12, killed $cpsr, $noreg, implicit internal killed $itstate ; CHECK-NEXT: } - ; CHECK-NEXT: %r0 = tMOVr killed %r1, 14, %noreg - ; CHECK-NEXT: tBX_RET 14, %noreg, implicit killed %r0 - %r1 = t2MOVi 0, 14, _, _ - t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr - BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { t2IT 12, 8, implicit-def %itstate - %r1 = t2MOVi 1, 12, killed %cpsr, _, internal implicit killed %itstate - } %r0 = tMOVr killed %r1, 14, _ - tBX_RET 14, _, implicit killed %r0 + ; CHECK-NEXT: $r0 = tMOVr killed $r1, 14, $noreg + ; CHECK-NEXT: tBX_RET 14, $noreg, implicit killed $r0 + $r1 = t2MOVi 0, 14, _, _ + t2CMNri killed $r0, 78, 14, _, implicit-def $cpsr + BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $cpsr { t2IT 12, 8, implicit-def $itstate + $r1 = t2MOVi 1, 12, killed $cpsr, _, internal implicit killed $itstate + } $r0 = tMOVr killed $r1, 14, _ + tBX_RET 14, _, implicit killed $r0 ... Index: test/CodeGen/MIR/ARM/cfi-same-value.mir =================================================================== --- test/CodeGen/MIR/ARM/cfi-same-value.mir +++ test/CodeGen/MIR/ARM/cfi-same-value.mir @@ -23,58 +23,58 @@ stack: - { id: 0, name: mem, offset: -48, size: 40, alignment: 4 } - { id: 1, type: spill-slot, offset: -4, size: 4, alignment: 4, - callee-saved-register: '%lr' } + callee-saved-register: '$lr' } - { id: 2, type: spill-slot, offset: -8, size: 4, alignment: 4, - callee-saved-register: '%r11' } + callee-saved-register: '$r11' } body: | bb.0: successors: %bb.2, %bb.1 - liveins: %r11, %lr + liveins: $r11, $lr - %sp = STMDB_UPD %sp, 14, _, %r4, %r5 + $sp = STMDB_UPD $sp, 14, _, $r4, $r5 CFI_INSTRUCTION def_cfa_offset 8 - CFI_INSTRUCTION offset %r5, -4 - CFI_INSTRUCTION offset %r4, -8 - %r5 = MOVr %sp, 14, _, _ - %r4 = MRC 15, 0, 13, 0, 3, 14, _ - %r4 = LDRi12 %r4, 4, 14, _ - CMPrr %r4, %r5, 14, _, implicit-def %cpsr - Bcc %bb.2, 3, %cpsr + CFI_INSTRUCTION offset $r5, -4 + CFI_INSTRUCTION offset $r4, -8 + $r5 = MOVr $sp, 14, _, _ + $r4 = MRC 15, 0, 13, 0, 3, 14, _ + $r4 = LDRi12 $r4, 4, 14, _ + CMPrr $r4, $r5, 14, _, implicit-def $cpsr + Bcc %bb.2, 3, $cpsr bb.1: successors: %bb.2 - liveins: %r11, %lr + liveins: $r11, $lr - %r4 = MOVi 48, 14, _, _ - %r5 = MOVi 0, 14, _, _ - %sp = STMDB_UPD %sp, 14, _, %lr + $r4 = MOVi 48, 14, _, _ + $r5 = MOVi 0, 14, _, _ + $sp = STMDB_UPD $sp, 14, _, $lr CFI_INSTRUCTION def_cfa_offset 12 - CFI_INSTRUCTION offset %lr, -12 - BL &__morestack, implicit-def %lr, implicit %sp - %sp = LDMIA_UPD %sp, 14, _, %lr - %sp = LDMIA_UPD %sp, 14, _, %r4, %r5 + CFI_INSTRUCTION offset $lr, -12 + BL &__morestack, implicit-def $lr, implicit $sp + $sp = LDMIA_UPD $sp, 14, _, $lr + $sp = LDMIA_UPD $sp, 14, _, $r4, $r5 CFI_INSTRUCTION def_cfa_offset 0 BX_RET 14, _ bb.2: - liveins: %r11, %lr + liveins: $r11, $lr - %sp = LDMIA_UPD %sp, 14, _, %r4, %r5 + $sp = 
LDMIA_UPD $sp, 14, _, $r4, $r5 CFI_INSTRUCTION def_cfa_offset 0 - ; CHECK: CFI_INSTRUCTION same_value %r4 - ; CHECK-NEXT: CFI_INSTRUCTION same_value %r5 - CFI_INSTRUCTION same_value %r4 - CFI_INSTRUCTION same_value %r5 - %sp = frame-setup STMDB_UPD %sp, 14, _, killed %r11, killed %lr + ; CHECK: CFI_INSTRUCTION same_value $r4 + ; CHECK-NEXT: CFI_INSTRUCTION same_value $r5 + CFI_INSTRUCTION same_value $r4 + CFI_INSTRUCTION same_value $r5 + $sp = frame-setup STMDB_UPD $sp, 14, _, killed $r11, killed $lr frame-setup CFI_INSTRUCTION def_cfa_offset 8 - frame-setup CFI_INSTRUCTION offset %lr, -4 - frame-setup CFI_INSTRUCTION offset %r11, -8 - %sp = frame-setup SUBri killed %sp, 40, 14, _, _ + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r11, -8 + $sp = frame-setup SUBri killed $sp, 40, 14, _, _ frame-setup CFI_INSTRUCTION def_cfa_offset 48 - %r0 = MOVr %sp, 14, _, _ - %r1 = MOVi 10, 14, _, _ - BL @dummy_use, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit killed %r1, implicit-def %sp - %sp = ADDri killed %sp, 40, 14, _, _ - %sp = LDMIA_UPD %sp, 14, _, %r4, %r5 + $r0 = MOVr $sp, 14, _, _ + $r1 = MOVi 10, 14, _, _ + BL @dummy_use, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit killed $r1, implicit-def $sp + $sp = ADDri killed $sp, 40, 14, _, _ + $sp = LDMIA_UPD $sp, 14, _, $r4, $r5 MOVPCLR 14, _ ... Index: test/CodeGen/MIR/ARM/expected-closing-brace.mir =================================================================== --- test/CodeGen/MIR/ARM/expected-closing-brace.mir +++ test/CodeGen/MIR/ARM/expected-closing-brace.mir @@ -25,26 +25,26 @@ name: test1 tracksRegLiveness: true liveins: - - { reg: '%r0' } + - { reg: '$r0' } body: | bb.0.entry: successors: %bb.1.foo - liveins: %r0 + liveins: $r0 bb.1.foo: successors: %bb.2.if.then, %bb.1.foo - liveins: %r0 + liveins: $r0 - t2CMNri %r0, 78, 14, _, implicit-def %cpsr - %r1 = t2MOVi 0, 14, _, _ - BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { - t2IT 12, 8, implicit-def %itstate - %r1 = t2MOVi 1, 12, killed %cpsr, _, implicit killed %itstate - t2CMNri %r0, 77, 14, _, implicit-def %cpsr - t2Bcc %bb.1.foo, 11, killed %cpsr + t2CMNri $r0, 78, 14, _, implicit-def $cpsr + $r1 = t2MOVi 0, 14, _, _ + BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $cpsr { + t2IT 12, 8, implicit-def $itstate + $r1 = t2MOVi 1, 12, killed $cpsr, _, implicit killed $itstate + t2CMNri $r0, 77, 14, _, implicit-def $cpsr + t2Bcc %bb.1.foo, 11, killed $cpsr ; CHECK: [[@LINE+1]]:3: expected '}' bb.2.if.then: - liveins: %r1 + liveins: $r1 - %r0 = tMOVr killed %r1, 14, _ - tBX_RET 14, _, implicit killed %r0 + $r0 = tMOVr killed $r1, 14, _ + tBX_RET 14, _, implicit killed $r0 ... Index: test/CodeGen/MIR/ARM/extraneous-closing-brace-error.mir =================================================================== --- test/CodeGen/MIR/ARM/extraneous-closing-brace-error.mir +++ test/CodeGen/MIR/ARM/extraneous-closing-brace-error.mir @@ -10,11 +10,11 @@ name: test1 tracksRegLiveness: true liveins: - - { reg: '%r0' } + - { reg: '$r0' } body: | bb.0.entry: - liveins: %r0 - tBX_RET 14, _, implicit killed %r0 + liveins: $r0 + tBX_RET 14, _, implicit killed $r0 ; CHECK: [[@LINE+1]]:5: extraneous closing brace ('}') } ... 
Index: test/CodeGen/MIR/ARM/nested-instruction-bundle-error.mir =================================================================== --- test/CodeGen/MIR/ARM/nested-instruction-bundle-error.mir +++ test/CodeGen/MIR/ARM/nested-instruction-bundle-error.mir @@ -12,19 +12,19 @@ name: test1 tracksRegLiveness: true liveins: - - { reg: '%r0' } + - { reg: '$r0' } body: | bb.0.entry: - liveins: %r0 - %r1 = t2MOVi 0, 14, _, _ - t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr - BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { - t2IT 12, 8, implicit-def %itstate - %r1 = t2MOVi 1, 12, killed %cpsr, _ + liveins: $r0 + $r1 = t2MOVi 0, 14, _, _ + t2CMNri killed $r0, 78, 14, _, implicit-def $cpsr + BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $cpsr { + t2IT 12, 8, implicit-def $itstate + $r1 = t2MOVi 1, 12, killed $cpsr, _ ; CHECK: [[@LINE+1]]:14: nested instruction bundles are not allowed BUNDLE { } } - %r0 = tMOVr killed %r1, 14, _ - tBX_RET 14, _, implicit killed %r0 + $r0 = tMOVr killed $r1, 14, _ + tBX_RET 14, _, implicit killed $r0 ... Index: test/CodeGen/MIR/Hexagon/parse-lane-masks.mir =================================================================== --- test/CodeGen/MIR/Hexagon/parse-lane-masks.mir +++ test/CodeGen/MIR/Hexagon/parse-lane-masks.mir @@ -3,7 +3,7 @@ # CHECK-LABEL: name: foo # CHECK: bb.0: -# CHECK: liveins: %d0:0x00000002, %d1, %d2:0x00000010 +# CHECK: liveins: $d0:0x00000002, $d1, $d2:0x00000010 --- | define void @foo() { @@ -17,7 +17,7 @@ body: | bb.0: - liveins: %d0:0x00002, %d1, %d2:16 + liveins: $d0:0x00002, $d1, $d2:16 A2_nop ... Index: test/CodeGen/MIR/Hexagon/target-flags.mir =================================================================== --- test/CodeGen/MIR/Hexagon/target-flags.mir +++ test/CodeGen/MIR/Hexagon/target-flags.mir @@ -6,31 +6,31 @@ bb.0: ; CHECK: target-flags(hexagon-pcrel) - %r0 = A2_tfrsi target-flags (hexagon-pcrel) 0 + $r0 = A2_tfrsi target-flags (hexagon-pcrel) 0 ; CHECK: target-flags(hexagon-got) - %r0 = A2_tfrsi target-flags (hexagon-got) 0 + $r0 = A2_tfrsi target-flags (hexagon-got) 0 ; CHECK: target-flags(hexagon-lo16) - %r0 = A2_tfrsi target-flags (hexagon-lo16) 0 + $r0 = A2_tfrsi target-flags (hexagon-lo16) 0 ; CHECK: target-flags(hexagon-hi16) - %r0 = A2_tfrsi target-flags (hexagon-hi16) 0 + $r0 = A2_tfrsi target-flags (hexagon-hi16) 0 ; CHECK: target-flags(hexagon-gprel) - %r0 = A2_tfrsi target-flags (hexagon-gprel) 0 + $r0 = A2_tfrsi target-flags (hexagon-gprel) 0 ; CHECK: target-flags(hexagon-gdgot) - %r0 = A2_tfrsi target-flags (hexagon-gdgot) 0 + $r0 = A2_tfrsi target-flags (hexagon-gdgot) 0 ; CHECK: target-flags(hexagon-gdplt) - %r0 = A2_tfrsi target-flags (hexagon-gdplt) 0 + $r0 = A2_tfrsi target-flags (hexagon-gdplt) 0 ; CHECK: target-flags(hexagon-ie) - %r0 = A2_tfrsi target-flags (hexagon-ie) 0 + $r0 = A2_tfrsi target-flags (hexagon-ie) 0 ; CHECK: target-flags(hexagon-iegot) - %r0 = A2_tfrsi target-flags (hexagon-iegot) 0 + $r0 = A2_tfrsi target-flags (hexagon-iegot) 0 ; CHECK: target-flags(hexagon-tprel) - %r0 = A2_tfrsi target-flags (hexagon-tprel) 0 + $r0 = A2_tfrsi target-flags (hexagon-tprel) 0 ; CHECK: target-flags(hexagon-ext) - %r0 = A2_tfrsi target-flags (hexagon-ext) 0 + $r0 = A2_tfrsi target-flags (hexagon-ext) 0 ; CHECK: target-flags(hexagon-pcrel, hexagon-ext) - %r0 = A2_tfrsi target-flags (hexagon-pcrel,hexagon-ext) 0 + $r0 = A2_tfrsi target-flags (hexagon-pcrel,hexagon-ext) 0 ; CHECK: target-flags(hexagon-ie, hexagon-ext) - %r0 = A2_tfrsi target-flags 
(hexagon-ie,hexagon-ext) 0 + $r0 = A2_tfrsi target-flags (hexagon-ie,hexagon-ext) 0 ... Index: test/CodeGen/MIR/Mips/expected-global-value-or-symbol-after-call-entry.mir =================================================================== --- test/CodeGen/MIR/Mips/expected-global-value-or-symbol-after-call-entry.mir +++ test/CodeGen/MIR/Mips/expected-global-value-or-symbol-after-call-entry.mir @@ -12,7 +12,7 @@ name: test tracksRegLiveness: true liveins: - - { reg: '%a0' } + - { reg: '$a0' } frameInfo: stackSize: 24 maxAlignment: 4 @@ -21,21 +21,21 @@ maxCallFrameSize: 16 stack: - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, - callee-saved-register: '%ra' } + callee-saved-register: '$ra' } body: | bb.0.entry: - liveins: %a0, %ra + liveins: $a0, $ra - Save16 %ra, 24, implicit-def %sp, implicit %sp - %v0, %v1 = GotPrologue16 &_gp_disp, &_gp_disp - %v0 = SllX16 killed %v0, 16 - %v0 = AdduRxRyRz16 killed %v1, killed %v0 + Save16 $ra, 24, implicit-def $sp, implicit $sp + $v0, $v1 = GotPrologue16 &_gp_disp, &_gp_disp + $v0 = SllX16 killed $v0, 16 + $v0 = AdduRxRyRz16 killed $v1, killed $v0 ; CHECK: [[@LINE+1]]:67: expected a global value or an external symbol after 'call-entry' - %v1 = LwRxRyOffMemX16 %v0, @foo, 0 :: (load 4 from call-entry foo) - %t9 = COPY %v1 - %gp = COPY killed %v0 - JumpLinkReg16 killed %v1, csr_o32, implicit-def %ra, implicit killed %t9, implicit %a0, implicit killed %gp, implicit-def %sp, implicit-def dead %v0 - %v0 = LiRxImmX16 0 - %ra = Restore16 24, implicit-def %sp, implicit %sp - RetRA16 implicit %v0 + $v1 = LwRxRyOffMemX16 $v0, @foo, 0 :: (load 4 from call-entry foo) + $t9 = COPY $v1 + $gp = COPY killed $v0 + JumpLinkReg16 killed $v1, csr_o32, implicit-def $ra, implicit killed $t9, implicit $a0, implicit killed $gp, implicit-def $sp, implicit-def dead $v0 + $v0 = LiRxImmX16 0 + $ra = Restore16 24, implicit-def $sp, implicit $sp + RetRA16 implicit $v0 ... 
Index: test/CodeGen/MIR/Mips/memory-operands.mir =================================================================== --- test/CodeGen/MIR/Mips/memory-operands.mir +++ test/CodeGen/MIR/Mips/memory-operands.mir @@ -29,7 +29,7 @@ name: test tracksRegLiveness: true liveins: - - { reg: '%a0' } + - { reg: '$a0' } frameInfo: stackSize: 24 maxAlignment: 4 @@ -38,26 +38,26 @@ maxCallFrameSize: 16 stack: - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, - callee-saved-register: '%ra' } + callee-saved-register: '$ra' } body: | bb.0.entry: - liveins: %a0, %ra + liveins: $a0, $ra - Save16 %ra, 24, implicit-def %sp, implicit %sp + Save16 $ra, 24, implicit-def $sp, implicit $sp CFI_INSTRUCTION def_cfa_offset 24 - CFI_INSTRUCTION offset %ra_64, -4 - %v0, %v1 = GotPrologue16 &_gp_disp, &_gp_disp - %v0 = SllX16 killed %v0, 16 - %v0 = AdduRxRyRz16 killed %v1, killed %v0 + CFI_INSTRUCTION offset $ra_64, -4 + $v0, $v1 = GotPrologue16 &_gp_disp, &_gp_disp + $v0 = SllX16 killed $v0, 16 + $v0 = AdduRxRyRz16 killed $v1, killed $v0 ; CHECK-LABEL: name: test - ; CHECK: %v1 = LwRxRyOffMemX16 %v0, @foo :: (load 4 from call-entry @foo) - %v1 = LwRxRyOffMemX16 %v0, @foo :: (load 4 from call-entry @foo) - %t9 = COPY %v1 - %gp = COPY killed %v0 - JumpLinkReg16 killed %v1, csr_o32, implicit-def %ra, implicit killed %t9, implicit %a0, implicit killed %gp, implicit-def %sp, implicit-def dead %v0 - %v0 = LiRxImmX16 0 - %ra = Restore16 24, implicit-def %sp, implicit %sp - RetRA16 implicit %v0 + ; CHECK: $v1 = LwRxRyOffMemX16 $v0, @foo :: (load 4 from call-entry @foo) + $v1 = LwRxRyOffMemX16 $v0, @foo :: (load 4 from call-entry @foo) + $t9 = COPY $v1 + $gp = COPY killed $v0 + JumpLinkReg16 killed $v1, csr_o32, implicit-def $ra, implicit killed $t9, implicit $a0, implicit killed $gp, implicit-def $sp, implicit-def dead $v0 + $v0 = LiRxImmX16 0 + $ra = Restore16 24, implicit-def $sp, implicit $sp + RetRA16 implicit $v0 ... 
--- name: test2 @@ -70,33 +70,33 @@ maxCallFrameSize: 16 stack: - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, - callee-saved-register: '%ra' } + callee-saved-register: '$ra' } - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, - callee-saved-register: '%s2' } + callee-saved-register: '$s2' } - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, - callee-saved-register: '%s0' } + callee-saved-register: '$s0' } body: | bb.0.entry: - liveins: %ra, %s2, %s0, %ra, %s2, %s0 + liveins: $ra, $s2, $s0, $ra, $s2, $s0 - SaveX16 %s0, %ra, %s2, 32, implicit-def %sp, implicit %sp + SaveX16 $s0, $ra, $s2, 32, implicit-def $sp, implicit $sp CFI_INSTRUCTION def_cfa_offset 32 - CFI_INSTRUCTION offset %ra_64, -4 - CFI_INSTRUCTION offset %s2_64, -8 - CFI_INSTRUCTION offset %s0_64, -12 - %v0, %v1 = GotPrologue16 &_gp_disp, &_gp_disp - %v0 = SllX16 killed %v0, 16 - %s0 = AdduRxRyRz16 killed %v1, killed %v0 - %v0 = LwRxRyOffMemX16 %s0, @g :: (load 4 from call-entry @g) + CFI_INSTRUCTION offset $ra_64, -4 + CFI_INSTRUCTION offset $s2_64, -8 + CFI_INSTRUCTION offset $s0_64, -12 + $v0, $v1 = GotPrologue16 &_gp_disp, &_gp_disp + $v0 = SllX16 killed $v0, 16 + $s0 = AdduRxRyRz16 killed $v1, killed $v0 + $v0 = LwRxRyOffMemX16 $s0, @g :: (load 4 from call-entry @g) ; CHECK-LABEL: test2 - ; CHECK: %v1 = LwRxRyOffMemX16 %s0, &__mips16_call_stub_sf_0 :: (load 4 from call-entry &__mips16_call_stub_sf_0) - %v1 = LwRxRyOffMemX16 %s0, &__mips16_call_stub_sf_0 :: (load 4 from call-entry &__mips16_call_stub_sf_0) - %gp = COPY %s0 - JumpLinkReg16 killed %v1, csr_o32, implicit-def %ra, implicit %v0, implicit killed %gp, implicit-def %sp, implicit-def %v0 - %v1 = LwRxRyOffMemX16 %s0, @__mips16_ret_sf :: (load 4 from call-entry @__mips16_ret_sf) - %t9 = COPY %v1 - %gp = COPY killed %s0 - JumpLinkReg16 killed %v1, csr_mips16rethelper, implicit-def %ra, implicit killed %t9, implicit %v0, implicit killed %gp, implicit-def %sp - %s0, %ra, %s2 = RestoreX16 32, implicit-def %sp, implicit %sp - RetRA16 implicit %v0 + ; CHECK: $v1 = LwRxRyOffMemX16 $s0, &__mips16_call_stub_sf_0 :: (load 4 from call-entry &__mips16_call_stub_sf_0) + $v1 = LwRxRyOffMemX16 $s0, &__mips16_call_stub_sf_0 :: (load 4 from call-entry &__mips16_call_stub_sf_0) + $gp = COPY $s0 + JumpLinkReg16 killed $v1, csr_o32, implicit-def $ra, implicit $v0, implicit killed $gp, implicit-def $sp, implicit-def $v0 + $v1 = LwRxRyOffMemX16 $s0, @__mips16_ret_sf :: (load 4 from call-entry @__mips16_ret_sf) + $t9 = COPY $v1 + $gp = COPY killed $s0 + JumpLinkReg16 killed $v1, csr_mips16rethelper, implicit-def $ra, implicit killed $t9, implicit $v0, implicit killed $gp, implicit-def $sp + $s0, $ra, $s2 = RestoreX16 32, implicit-def $sp, implicit $sp + RetRA16 implicit $v0 ... 
Index: test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir =================================================================== --- test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir +++ test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir @@ -28,17 +28,17 @@ - { id: 3, class: gprc } - { id: 4, class: g8rc } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = LWZ 0, %0 :: (load 4 from %ir.p) %2 = LI 0 %3 = RLWIMI %2, killed %1, 0, 0, 31 %4 = EXTSW_32_64 killed %3 - %x3 = COPY %4 - ; CHECK: BLR8 implicit %lr8, implicit %rm, implicit %x3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + ; CHECK: BLR8 implicit $lr8, implicit $rm, implicit $x3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... Index: test/CodeGen/MIR/X86/auto-successor.mir =================================================================== --- test/CodeGen/MIR/X86/auto-successor.mir +++ test/CodeGen/MIR/X86/auto-successor.mir @@ -4,31 +4,31 @@ # CHECK-LABEL: name: func0 # CHECK: bb.0: # CHECK-NOT: successors -# CHECK: JE_1 %bb.1, implicit undef %eflags +# CHECK: JE_1 %bb.1, implicit undef $eflags # CHECK: JMP_1 %bb.3 # CHECK: bb.1: # CHECK-NOT: successors # CHECK: bb.2: # CHECK-NOT: successors -# CHECK: JE_1 %bb.1, implicit undef %eflags +# CHECK: JE_1 %bb.1, implicit undef $eflags # CHECK: bb.3: -# CHECK: RETQ undef %eax +# CHECK: RETQ undef $eax name: func0 body: | bb.0: - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags JMP_1 %bb.3 bb.1: bb.2: - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags bb.3: - JE_1 %bb.4, implicit undef %eflags ; condjump+fallthrough to same block + JE_1 %bb.4, implicit undef $eflags ; condjump+fallthrough to same block bb.4: - RETQ undef %eax + RETQ undef $eax ... --- # Some cases that need explicit successors: @@ -39,23 +39,23 @@ ; CHECK: bb.0: ; CHECK: successors: %bb.3, %bb.1 successors: %bb.3, %bb.1 ; different order than operands - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags JMP_1 %bb.3 bb.1: ; CHECK: bb.1: ; CHECK: successors: %bb.2, %bb.1 successors: %bb.2, %bb.1 ; different order (fallthrough variant) - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags bb.2: ; CHECK: bb.2: ; CHECK: successors: %bb.1(0x60000000), %bb.3(0x20000000) successors: %bb.1(3), %bb.3(1) ; branch probabilities not normalized - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags bb.3: ; CHECK: bb.3: - ; CHECK: RETQ undef %eax - RETQ undef %eax + ; CHECK: RETQ undef $eax + RETQ undef $eax ... Index: test/CodeGen/MIR/X86/basic-block-liveins.mir =================================================================== --- test/CodeGen/MIR/X86/basic-block-liveins.mir +++ test/CodeGen/MIR/X86/basic-block-liveins.mir @@ -26,12 +26,12 @@ tracksRegLiveness: true body: | ; CHECK-LABEL: bb.0.body: - ; CHECK-NEXT: liveins: %edi, %esi + ; CHECK-NEXT: liveins: $edi, $esi bb.0.body: - liveins: %edi, %esi + liveins: $edi, $esi - %eax = LEA64_32r killed %rdi, 1, killed %rsi, 0, _ - RETQ %eax + $eax = LEA64_32r killed $rdi, 1, killed $rsi, 0, _ + RETQ $eax ... --- name: test2 @@ -41,13 +41,13 @@ ; Verify that we can have multiple lists of liveins that will be merged into ; one. 
; CHECK: bb.0.body: - ; CHECK-NEXT: liveins: %edi, %esi + ; CHECK-NEXT: liveins: $edi, $esi bb.0.body: - liveins: %edi - liveins: %esi + liveins: $edi + liveins: $esi - %eax = LEA64_32r killed %rdi, 1, killed %rsi, 0, _ - RETQ %eax + $eax = LEA64_32r killed $rdi, 1, killed $rsi, 0, _ + RETQ $eax ... --- name: test3 @@ -56,10 +56,10 @@ ; Verify that we can have an empty list of liveins. ; CHECK-LABEL: name: test3 ; CHECK: bb.0.body: - ; CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags + ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags bb.0.body: liveins: - %eax = MOV32r0 implicit-def dead %eflags - RETQ killed %eax + $eax = MOV32r0 implicit-def dead $eflags + RETQ killed $eax ... Index: test/CodeGen/MIR/X86/basic-block-not-at-start-of-line-error.mir =================================================================== --- test/CodeGen/MIR/X86/basic-block-not-at-start-of-line-error.mir +++ test/CodeGen/MIR/X86/basic-block-not-at-start-of-line-error.mir @@ -19,23 +19,23 @@ name: foo tracksRegLiveness: true liveins: - - { reg: '%edi' } + - { reg: '$edi' } body: | bb.0.entry: successors: %bb.1.less, %bb.2.exit - liveins: %edi 44 + liveins: $edi 44 - CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + CMP32ri8 $edi, 10, implicit-def $eflags + JG_1 %bb.2.exit, implicit killed $eflags ; CHECK: [[@LINE+1]]:8: basic block definition should be located at the start of the line less bb.1: - %eax = MOV32r0 implicit-def dead %eflags - RETQ killed %eax + $eax = MOV32r0 implicit-def dead $eflags + RETQ killed $eax bb.2.exit: - liveins: %edi + liveins: $edi - %eax = COPY killed %edi - RETQ killed %eax + $eax = COPY killed $edi + RETQ killed $eax ... Index: test/CodeGen/MIR/X86/block-address-operands.mir =================================================================== --- test/CodeGen/MIR/X86/block-address-operands.mir +++ test/CodeGen/MIR/X86/block-address-operands.mir @@ -57,10 +57,10 @@ body: | bb.0.entry: successors: %bb.1.block - ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test, %ir-block.block), %noreg - %rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.block), _ - MOV64mr %rip, 1, _, @addr, _, killed %rax - JMP64m %rip, 1, _, @addr, _ + ; CHECK: $rax = LEA64r $rip, 1, $noreg, blockaddress(@test, %ir-block.block), $noreg + $rax = LEA64r $rip, 1, _, blockaddress(@test, %ir-block.block), _ + MOV64mr $rip, 1, _, @addr, _, killed $rax + JMP64m $rip, 1, _, @addr, _ bb.1.block (address-taken): RETQ @@ -71,10 +71,10 @@ body: | bb.0.entry: successors: %bb.1 - ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test2, %ir-block."quoted block"), %noreg - %rax = LEA64r %rip, 1, _, blockaddress(@test2, %ir-block."quoted block"), _ - MOV64mr %rip, 1, _, @addr, _, killed %rax - JMP64m %rip, 1, _, @addr, _ + ; CHECK: $rax = LEA64r $rip, 1, $noreg, blockaddress(@test2, %ir-block."quoted block"), $noreg + $rax = LEA64r $rip, 1, _, blockaddress(@test2, %ir-block."quoted block"), _ + MOV64mr $rip, 1, _, @addr, _, killed $rax + JMP64m $rip, 1, _, @addr, _ bb.1 (address-taken): RETQ @@ -84,11 +84,11 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %rdi + liveins: $rdi ; CHECK-LABEL: name: slot_in_other_function - ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test3, %ir-block.0), %noreg - %rax = LEA64r %rip, 1, _, blockaddress(@test3, %ir-block.0), _ - MOV64mr killed %rdi, 1, _, 0, _, killed %rax + ; CHECK: $rax = LEA64r $rip, 1, $noreg, blockaddress(@test3, %ir-block.0), $noreg + $rax = LEA64r $rip, 1, _, blockaddress(@test3, %ir-block.0), _ + 
MOV64mr killed $rdi, 1, _, 0, _, killed $rax RETQ ... --- @@ -98,10 +98,10 @@ bb.0.entry: successors: %bb.1 ; CHECK-LABEL: name: test3 - ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test3, %ir-block.0), %noreg - %rax = LEA64r %rip, 1, _, blockaddress(@test3, %ir-block.0), _ - MOV64mr %rip, 1, _, @addr, _, killed %rax - JMP64m %rip, 1, _, @addr, _ + ; CHECK: $rax = LEA64r $rip, 1, $noreg, blockaddress(@test3, %ir-block.0), $noreg + $rax = LEA64r $rip, 1, _, blockaddress(@test3, %ir-block.0), _ + MOV64mr $rip, 1, _, @addr, _, killed $rax + JMP64m $rip, 1, _, @addr, _ bb.1 (address-taken): RETQ @@ -111,10 +111,10 @@ body: | bb.0.entry: successors: %bb.1.block - ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test, %ir-block.block) + 2, %noreg - %rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.block) + 2, _ - MOV64mr %rip, 1, _, @addr, _, killed %rax - JMP64m %rip, 1, _, @addr, _ + ; CHECK: $rax = LEA64r $rip, 1, $noreg, blockaddress(@test, %ir-block.block) + 2, $noreg + $rax = LEA64r $rip, 1, _, blockaddress(@test, %ir-block.block) + 2, _ + MOV64mr $rip, 1, _, @addr, _, killed $rax + JMP64m $rip, 1, _, @addr, _ bb.1.block (address-taken): RETQ Index: test/CodeGen/MIR/X86/branch-probabilities.mir =================================================================== --- test/CodeGen/MIR/X86/branch-probabilities.mir +++ test/CodeGen/MIR/X86/branch-probabilities.mir @@ -8,11 +8,11 @@ body: | bb.0: successors: %bb.1(4), %bb.2(1) - JE_1 %bb.2, implicit undef %eflags + JE_1 %bb.2, implicit undef $eflags bb.1: NOOP bb.2: - RETQ undef %eax + RETQ undef $eax ... Index: test/CodeGen/MIR/X86/callee-saved-info.mir =================================================================== --- test/CodeGen/MIR/X86/callee-saved-info.mir +++ test/CodeGen/MIR/X86/callee-saved-info.mir @@ -36,10 +36,10 @@ tracksRegLiveness: true body: | bb.0.body: - liveins: %edi + liveins: $edi - %eax = COPY killed %edi - RETQ killed %eax + $eax = COPY killed $edi + RETQ killed $eax ... 
--- name: func @@ -50,47 +50,47 @@ adjustsStack: true hasCalls: true # CHECK: fixedStack: -# CHECK: callee-saved-register: '%rbx', callee-saved-restored: true } +# CHECK: callee-saved-register: '$rbx', callee-saved-restored: true } fixedStack: - - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%rbx' } + - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$rbx' } # CHECK: stack: # CHECK-NEXT: - { id: 0 -# CHECK: callee-saved-register: '%edi', callee-saved-restored: false +# CHECK: callee-saved-register: '$edi', callee-saved-restored: false stack: - { id: 0, name: b, offset: -20, size: 4, alignment: 4 } - - { id: 1, offset: -24, size: 4, alignment: 4, callee-saved-register: '%edi', + - { id: 1, offset: -24, size: 4, alignment: 4, callee-saved-register: '$edi', callee-saved-restored: false } body: | bb.0.entry: successors: %bb.1.check - liveins: %edi, %rbx + liveins: $edi, $rbx - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp - %rsp = frame-setup SUB64ri8 %rsp, 16, implicit-def dead %eflags - %ebx = COPY %edi - MOV32mr %rsp, 1, _, 12, _, %ebx + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp + $rsp = frame-setup SUB64ri8 $rsp, 16, implicit-def dead $eflags + $ebx = COPY $edi + MOV32mr $rsp, 1, _, 12, _, $ebx bb.1.check: successors: %bb.2.loop, %bb.3.exit - liveins: %ebx + liveins: $ebx - CMP32ri8 %ebx, 10, implicit-def %eflags - JG_1 %bb.3.exit, implicit killed %eflags + CMP32ri8 $ebx, 10, implicit-def $eflags + JG_1 %bb.3.exit, implicit killed $eflags JMP_1 %bb.2.loop bb.2.loop: successors: %bb.1.check - liveins: %ebx + liveins: $ebx - %edi = MOV32rm %rsp, 1, _, 12, _ - CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax - %eax = DEC32r killed %eax, implicit-def dead %eflags - MOV32mr %rsp, 1, _, 12, _, killed %eax + $edi = MOV32rm $rsp, 1, _, 12, _ + CALL64pcrel32 @compute, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax + $eax = DEC32r killed $eax, implicit-def dead $eflags + MOV32mr $rsp, 1, _, 12, _, killed $eax JMP_1 %bb.1.check bb.3.exit: - %eax = MOV32r0 implicit-def dead %eflags - %rsp = ADD64ri8 %rsp, 16, implicit-def dead %eflags - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + $eax = MOV32r0 implicit-def dead $eflags + $rsp = ADD64ri8 $rsp, 16, implicit-def dead $eflags + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax ... Index: test/CodeGen/MIR/X86/cfi-def-cfa-offset.mir =================================================================== --- test/CodeGen/MIR/X86/cfi-def-cfa-offset.mir +++ test/CodeGen/MIR/X86/cfi-def-cfa-offset.mir @@ -20,10 +20,10 @@ - { id: 0, name: tmp, offset: -4176, size: 4168, alignment: 4 } body: | bb.0.entry: - %rsp = SUB64ri32 %rsp, 4040, implicit-def dead %eflags + $rsp = SUB64ri32 $rsp, 4040, implicit-def dead $eflags ; CHECK: CFI_INSTRUCTION def_cfa_offset 4048 CFI_INSTRUCTION def_cfa_offset 4048 - %rsp = ADD64ri32 %rsp, 4040, implicit-def dead %eflags + $rsp = ADD64ri32 $rsp, 4040, implicit-def dead $eflags RETQ ... 
Index: test/CodeGen/MIR/X86/cfi-def-cfa-register.mir =================================================================== --- test/CodeGen/MIR/X86/cfi-def-cfa-register.mir +++ test/CodeGen/MIR/X86/cfi-def-cfa-register.mir @@ -21,12 +21,12 @@ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 } body: | bb.0.entry: - liveins: %rbp + liveins: $rbp - PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp + PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbp, -16 - %rbp = MOV64rr %rsp - ; CHECK: CFI_INSTRUCTION def_cfa_register %rbp - CFI_INSTRUCTION def_cfa_register %rbp + CFI_INSTRUCTION offset $rbp, -16 + $rbp = MOV64rr $rsp + ; CHECK: CFI_INSTRUCTION def_cfa_register $rbp + CFI_INSTRUCTION def_cfa_register $rbp ... Index: test/CodeGen/MIR/X86/cfi-offset.mir =================================================================== --- test/CodeGen/MIR/X86/cfi-offset.mir +++ test/CodeGen/MIR/X86/cfi-offset.mir @@ -28,20 +28,20 @@ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 } body: | bb.0.entry: - liveins: %ecx, %edi, %edx, %esi, %rbx + liveins: $ecx, $edi, $edx, $esi, $rbx - PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp + PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - ; CHECK: CFI_INSTRUCTION offset %rbx, -16 - CFI_INSTRUCTION offset %rbx, -16 - %ebx = COPY %edi, implicit-def %rbx - %ebx = ADD32rr %ebx, killed %esi, implicit-def dead %eflags - %ebx = ADD32rr %ebx, killed %edx, implicit-def dead %eflags - %ebx = ADD32rr %ebx, killed %ecx, implicit-def dead %eflags - %edi = COPY %ebx - CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp - %eax = LEA64_32r killed %rbx, 1, %rbx, 0, _ - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + ; CHECK: CFI_INSTRUCTION offset $rbx, -16 + CFI_INSTRUCTION offset $rbx, -16 + $ebx = COPY $edi, implicit-def $rbx + $ebx = ADD32rr $ebx, killed $esi, implicit-def dead $eflags + $ebx = ADD32rr $ebx, killed $edx, implicit-def dead $eflags + $ebx = ADD32rr $ebx, killed $ecx, implicit-def dead $eflags + $edi = COPY $ebx + CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp + $eax = LEA64_32r killed $rbx, 1, $rbx, 0, _ + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax ... Index: test/CodeGen/MIR/X86/constant-pool.mir =================================================================== --- test/CodeGen/MIR/X86/constant-pool.mir +++ test/CodeGen/MIR/X86/constant-pool.mir @@ -61,13 +61,13 @@ alignment: 4 body: | bb.0.entry: - ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg - ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, %noreg, %const.1, %noreg - %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ - %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _ - %xmm1 = CVTSS2SDrr killed %xmm1 - %xmm0 = MULSDrr killed %xmm0, killed %xmm1 - RETQ %xmm0 + ; CHECK: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.0, $noreg + ; CHECK-NEXT: $xmm1 = ADDSSrm killed $xmm1, $rip, 1, $noreg, %const.1, $noreg + $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ + $xmm1 = ADDSSrm killed $xmm1, $rip, 1, _, %const.1, _ + $xmm1 = CVTSS2SDrr killed $xmm1 + $xmm0 = MULSDrr killed $xmm0, killed $xmm1 + RETQ $xmm0 ... 
--- # Verify that alignment can be inferred: @@ -89,11 +89,11 @@ value: 'float 6.250000e+00' body: | bb.0.entry: - %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ - %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _ - %xmm1 = CVTSS2SDrr killed %xmm1 - %xmm0 = MULSDrr killed %xmm0, killed %xmm1 - RETQ %xmm0 + $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ + $xmm1 = ADDSSrm killed $xmm1, $rip, 1, _, %const.1, _ + $xmm1 = CVTSS2SDrr killed $xmm1 + $xmm0 = MULSDrr killed $xmm0, killed $xmm1 + RETQ $xmm0 ... --- # Verify that the non-standard alignments are respected: @@ -117,13 +117,13 @@ alignment: 1 body: | bb.0.entry: - ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg - ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, %noreg, %const.1, %noreg - %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ - %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _ - %xmm1 = CVTSS2SDrr killed %xmm1 - %xmm0 = MULSDrr killed %xmm0, killed %xmm1 - RETQ %xmm0 + ; CHECK: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.0, $noreg + ; CHECK-NEXT: $xmm1 = ADDSSrm killed $xmm1, $rip, 1, $noreg, %const.1, $noreg + $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ + $xmm1 = ADDSSrm killed $xmm1, $rip, 1, _, %const.1, _ + $xmm1 = CVTSS2SDrr killed $xmm1 + $xmm0 = MULSDrr killed $xmm0, killed $xmm1 + RETQ $xmm0 ... --- # CHECK: name: test4 @@ -135,11 +135,11 @@ value: 'float 6.250000e+00' body: | bb.0.entry: - ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.1 - 12, %noreg - ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, %noreg, %const.0 + 8, %noreg - %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.1 - 12, _ - %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.0 + 8, _ - %xmm1 = CVTSS2SDrr killed %xmm1 - %xmm0 = MULSDrr killed %xmm0, killed %xmm1 - RETQ %xmm0 + ; CHECK: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.1 - 12, $noreg + ; CHECK-NEXT: $xmm1 = ADDSSrm killed $xmm1, $rip, 1, $noreg, %const.0 + 8, $noreg + $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.1 - 12, _ + $xmm1 = ADDSSrm killed $xmm1, $rip, 1, _, %const.0 + 8, _ + $xmm1 = CVTSS2SDrr killed $xmm1 + $xmm0 = MULSDrr killed $xmm0, killed $xmm1 + RETQ $xmm0 ... Index: test/CodeGen/MIR/X86/dead-register-flag.mir =================================================================== --- test/CodeGen/MIR/X86/dead-register-flag.mir +++ test/CodeGen/MIR/X86/dead-register-flag.mir @@ -18,7 +18,7 @@ body: | ; CHECK: bb.0.body: bb.0.body: - ; CHECK: %eax = IMUL32rri8 %edi, 11, implicit-def dead %eflags - %eax = IMUL32rri8 %edi, 11, implicit-def dead %eflags - RETQ %eax + ; CHECK: $eax = IMUL32rri8 $edi, 11, implicit-def dead $eflags + $eax = IMUL32rri8 $edi, 11, implicit-def dead $eflags + RETQ $eax ... 
Index: test/CodeGen/MIR/X86/def-register-already-tied-error.mir =================================================================== --- test/CodeGen/MIR/X86/def-register-already-tied-error.mir +++ test/CodeGen/MIR/X86/def-register-already-tied-error.mir @@ -12,13 +12,13 @@ name: test tracksRegLiveness: true liveins: - - { reg: '%rdi' } + - { reg: '$rdi' } body: | bb.0.entry: - liveins: %rdi + liveins: $rdi ; CHECK: [[@LINE+1]]:83: the tied-def operand #3 is already tied with another register operand - INLINEASM &"$foo", 1, 2818058, def %rdi, 2147483657, killed %rdi(tied-def 3), killed %rdi(tied-def 3) - %rax = COPY killed %rdi - RETQ killed %rax + INLINEASM &"$foo", 1, 2818058, def $rdi, 2147483657, killed $rdi(tied-def 3), killed $rdi(tied-def 3) + $rax = COPY killed $rdi + RETQ killed $rax ... Index: test/CodeGen/MIR/X86/diexpr-win32.mir =================================================================== --- test/CodeGen/MIR/X86/diexpr-win32.mir +++ test/CodeGen/MIR/X86/diexpr-win32.mir @@ -179,7 +179,7 @@ restorePoint: '' fixedStack: - { id: 0, type: spill-slot, offset: -8, size: 4, alignment: 4, stack-id: 0, - callee-saved-register: '%esi' } + callee-saved-register: '$esi' } - { id: 1, type: default, offset: 4, size: 4, alignment: 4, stack-id: 0, isImmutable: true, isAliased: false, callee-saved-register: '' } - { id: 2, type: default, offset: 0, size: 4, alignment: 4, stack-id: 0, @@ -188,24 +188,24 @@ constants: body: | bb.0.entry: - liveins: %esi + liveins: $esi - frame-setup PUSH32r killed %esi, implicit-def %esp, implicit %esp + frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp CFI_INSTRUCTION def_cfa_offset 8 - CFI_INSTRUCTION offset %esi, -8 - %esi = MOV32rm %esp, 1, _, 8, _ :: (load 4 from %fixed-stack.2) - DBG_VALUE %esp, 0, !26, !10, debug-location !25 - DBG_VALUE %esp, 0, !23, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref), debug-location !25 - CALLpcrel32 @getString, csr_32, implicit %esp, implicit-def %esp, implicit-def %eax, debug-location !29 - %ecx = MOV32rm %eax, 1, _, 0, _, debug-location !29 :: (dereferenceable load 4 from %ir.1) - %edx = MOV32rm %eax, 1, _, 4, _, debug-location !29 :: (dereferenceable load 4 from %ir.1 + 4) - MOV32mr %esi, 1, _, 0, _, killed %ecx, debug-location !29 :: (store 4 into %ir.0) - MOV32mr %esi, 1, _, 4, _, killed %edx, debug-location !29 :: (store 4 into %ir.0 + 4) - %eax = MOV32rm killed %eax, 1, _, 8, _, debug-location !29 :: (dereferenceable load 4 from %ir.1 + 8) - MOV32mr %esi, 1, _, 8, _, killed %eax, debug-location !29 :: (store 4 into %ir.0 + 8) - %eax = COPY killed %esi, debug-location !30 - %esi = POP32r implicit-def %esp, implicit %esp, debug-location !30 - RET 0, %eax, debug-location !30 + CFI_INSTRUCTION offset $esi, -8 + $esi = MOV32rm $esp, 1, _, 8, _ :: (load 4 from %fixed-stack.2) + DBG_VALUE $esp, 0, !26, !10, debug-location !25 + DBG_VALUE $esp, 0, !23, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref), debug-location !25 + CALLpcrel32 @getString, csr_32, implicit $esp, implicit-def $esp, implicit-def $eax, debug-location !29 + $ecx = MOV32rm $eax, 1, _, 0, _, debug-location !29 :: (dereferenceable load 4 from %ir.1) + $edx = MOV32rm $eax, 1, _, 4, _, debug-location !29 :: (dereferenceable load 4 from %ir.1 + 4) + MOV32mr $esi, 1, _, 0, _, killed $ecx, debug-location !29 :: (store 4 into %ir.0) + MOV32mr $esi, 1, _, 4, _, killed $edx, debug-location !29 :: (store 4 into %ir.0 + 4) + $eax = MOV32rm killed $eax, 1, _, 8, _, debug-location !29 :: (dereferenceable load 4 from %ir.1 + 8) + MOV32mr $esi, 1, 
_, 8, _, killed $eax, debug-location !29 :: (store 4 into %ir.0 + 8) + $eax = COPY killed $esi, debug-location !30 + $esi = POP32r implicit-def $esp, implicit $esp, debug-location !30 + RET 0, $eax, debug-location !30 ... --- @@ -244,10 +244,10 @@ constants: body: | bb.0.entry: - %eax = MOV32rm %esp, 1, _, 4, _ :: (load 4 from %fixed-stack.1) - %eax = MOV32rm killed %eax, 1, _, 0, _, debug-location !34 :: (load 4 from %ir.0) - DBG_VALUE debug-use %eax, 0, !35, !DIExpression(DW_OP_constu, 4, DW_OP_minus), debug-location !34 - %eax = ADD32rm killed %eax, %esp, 1, _, 8, _, implicit-def dead %eflags, debug-location !36 :: (load 4 from %fixed-stack.0) - RET 0, %eax, debug-location !36 + $eax = MOV32rm $esp, 1, _, 4, _ :: (load 4 from %fixed-stack.1) + $eax = MOV32rm killed $eax, 1, _, 0, _, debug-location !34 :: (load 4 from %ir.0) + DBG_VALUE debug-use $eax, 0, !35, !DIExpression(DW_OP_constu, 4, DW_OP_minus), debug-location !34 + $eax = ADD32rm killed $eax, $esp, 1, _, 8, _, implicit-def dead $eflags, debug-location !36 :: (load 4 from %fixed-stack.0) + RET 0, $eax, debug-location !36 ... Index: test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir =================================================================== --- test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir +++ test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir @@ -15,13 +15,13 @@ name: volatile_inc tracksRegLiveness: true liveins: - - { reg: '%rdi' } + - { reg: '$rdi' } body: | bb.0.entry: - liveins: %rdi + liveins: $rdi ; CHECK: [[@LINE+1]]:50: duplicate 'volatile' memory operand flag - %eax = MOV32rm %rdi, 1, _, 0, _ :: (volatile volatile load 4 from %ir.x) - %eax = INC32r killed %eax, implicit-def dead %eflags - MOV32mr killed %rdi, 1, _, 0, _, %eax :: (volatile store 4 into %ir.x) - RETQ %eax + $eax = MOV32rm $rdi, 1, _, 0, _ :: (volatile volatile load 4 from %ir.x) + $eax = INC32r killed $eax, implicit-def dead $eflags + MOV32mr killed $rdi, 1, _, 0, _, $eax :: (volatile store 4 into %ir.x) + RETQ $eax ... Index: test/CodeGen/MIR/X86/duplicate-register-flag-error.mir =================================================================== --- test/CodeGen/MIR/X86/duplicate-register-flag-error.mir +++ test/CodeGen/MIR/X86/duplicate-register-flag-error.mir @@ -21,15 +21,15 @@ bb.0.entry: successors: %bb.1.less, %bb.2.exit - CMP32ri8 %edi, 10, implicit-def %eflags + CMP32ri8 $edi, 10, implicit-def $eflags ; CHECK: [[@LINE+1]]:31: duplicate 'implicit' register flag - JG_1 %bb.2.exit, implicit implicit %eflags + JG_1 %bb.2.exit, implicit implicit $eflags bb.1.less: - %eax = MOV32r0 implicit-def %eflags - RETQ %eax + $eax = MOV32r0 implicit-def $eflags + RETQ $eax bb.2.exit: - %eax = COPY %edi - RETQ %eax + $eax = COPY $edi + RETQ $eax ... 
Index: test/CodeGen/MIR/X86/early-clobber-register-flag.mir =================================================================== --- test/CodeGen/MIR/X86/early-clobber-register-flag.mir +++ test/CodeGen/MIR/X86/early-clobber-register-flag.mir @@ -21,24 +21,24 @@ name: test tracksRegLiveness: true liveins: - - { reg: '%edi' } - - { reg: '%esi' } + - { reg: '$edi' } + - { reg: '$esi' } frameInfo: stackSize: 8 adjustsStack: true hasCalls: true body: | bb.0.entry: - liveins: %edi, %esi + liveins: $edi, $esi - frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - %ecx = COPY %edi - %ecx = ADD32rr killed %ecx, killed %esi, implicit-def dead %eflags - ; CHECK: INLINEASM &nop, 1, 12, implicit-def dead early-clobber %ax, 12, implicit-def dead early-clobber %di - INLINEASM &nop, 1, 12, implicit-def dead early-clobber %ax, 12, implicit-def dead early-clobber %di - %edi = COPY killed %ecx - CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp - %rax = POP64r implicit-def %rsp, implicit %rsp + $ecx = COPY $edi + $ecx = ADD32rr killed $ecx, killed $esi, implicit-def dead $eflags + ; CHECK: INLINEASM &nop, 1, 12, implicit-def dead early-clobber $ax, 12, implicit-def dead early-clobber $di + INLINEASM &nop, 1, 12, implicit-def dead early-clobber $ax, 12, implicit-def dead early-clobber $di + $edi = COPY killed $ecx + CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp + $rax = POP64r implicit-def $rsp, implicit $rsp RETQ ... Index: test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir =================================================================== --- test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir +++ test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir @@ -15,16 +15,16 @@ name: memory_alignment tracksRegLiveness: true liveins: - - { reg: '%rdi' } + - { reg: '$rdi' } body: | bb.0.entry: - liveins: %rdi + liveins: $rdi ; CHECK: [[@LINE+1]]:65: expected 'align' - %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec, 32) - %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32) - %xmm2 = FsFLD0SS - %xmm1 = MOVSSrr killed %xmm1, killed %xmm2 - MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec, align 32) - MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32) + $xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, 32) + $xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32) + $xmm2 = FsFLD0SS + $xmm1 = MOVSSrr killed $xmm1, killed $xmm2 + MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32) + MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32) RETQ ... 
Index: test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir =================================================================== --- test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir +++ test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir @@ -15,16 +15,16 @@ name: memory_alignment tracksRegLiveness: true liveins: - - { reg: '%rdi' } + - { reg: '$rdi' } body: | bb.0.entry: - liveins: %rdi + liveins: $rdi ; CHECK: [[@LINE+1]]:70: expected an integer literal after 'align' - %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align) - %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32) - %xmm2 = FsFLD0SS - %xmm1 = MOVSSrr killed %xmm1, killed %xmm2 - MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec, align 32) - MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32) + $xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align) + $xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32) + $xmm2 = FsFLD0SS + $xmm1 = MOVSSrr killed $xmm1, killed $xmm2 + MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32) + MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32) RETQ ... Index: test/CodeGen/MIR/X86/expected-basic-block-at-start-of-body.mir =================================================================== --- test/CodeGen/MIR/X86/expected-basic-block-at-start-of-body.mir +++ test/CodeGen/MIR/X86/expected-basic-block-at-start-of-body.mir @@ -19,22 +19,22 @@ name: foo tracksRegLiveness: true liveins: - - { reg: '%edi' } + - { reg: '$edi' } body: | ; CHECK: [[@LINE+1]]:3: expected a basic block definition before instructions successors: %bb.1.less, %bb.2.exit - liveins: %edi 44 + liveins: $edi 44 - CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + CMP32ri8 $edi, 10, implicit-def $eflags + JG_1 %bb.2.exit, implicit killed $eflags bb.1.less: - %eax = MOV32r0 implicit-def dead %eflags - RETQ killed %eax + $eax = MOV32r0 implicit-def dead $eflags + RETQ killed $eax bb.2.exit: - liveins: %edi + liveins: $edi - %eax = COPY killed %edi - RETQ killed %eax + $eax = COPY killed $edi + RETQ killed $eax ... 
Index: test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir =================================================================== --- test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir +++ test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir @@ -21,9 +21,9 @@ bb.0.entry: successors: %bb.1.block ; CHECK: [[@LINE+1]]:51: expected an IR block reference - %rax = LEA64r %rip, 1, _, blockaddress(@test, _), _ - MOV64mr %rip, 1, _, @addr, _, killed %rax - JMP64m %rip, 1, _, @addr, _ + $rax = LEA64r $rip, 1, _, blockaddress(@test, _), _ + MOV64mr $rip, 1, _, @addr, _, killed $rax + JMP64m $rip, 1, _, @addr, _ bb.1.block (address-taken): RETQ Index: test/CodeGen/MIR/X86/expected-comma-after-cfi-register.mir =================================================================== --- test/CodeGen/MIR/X86/expected-comma-after-cfi-register.mir +++ test/CodeGen/MIR/X86/expected-comma-after-cfi-register.mir @@ -26,17 +26,17 @@ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 } body: | bb.0.entry: - PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp + PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 ; CHECK: [[@LINE+1]]:33: expected ',' - CFI_INSTRUCTION offset %rbx -16 - %ebx = COPY %edi, implicit-def %rbx - %ebx = ADD32rr %ebx, killed %esi, implicit-def dead %eflags - %ebx = ADD32rr %ebx, killed %edx, implicit-def dead %eflags - %ebx = ADD32rr %ebx, killed %ecx, implicit-def dead %eflags - %edi = COPY %ebx - CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp - %eax = LEA64_32r killed %rbx, 1, %rbx, 0, _ - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + CFI_INSTRUCTION offset $rbx -16 + $ebx = COPY $edi, implicit-def $rbx + $ebx = ADD32rr $ebx, killed $esi, implicit-def dead $eflags + $ebx = ADD32rr $ebx, killed $edx, implicit-def dead $eflags + $ebx = ADD32rr $ebx, killed $ecx, implicit-def dead $eflags + $edi = COPY $ebx + CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp + $eax = LEA64_32r killed $rbx, 1, $rbx, 0, _ + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax ... Index: test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir =================================================================== --- test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir +++ test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir @@ -15,11 +15,11 @@ name: test tracksRegLiveness: true liveins: - - { reg: '%rdi' } + - { reg: '$rdi' } body: | bb.0.entry2: - liveins: %rdi + liveins: $rdi ; CHECK: [[@LINE+1]]:87: expected ',' before the next machine memory operand - INC32m killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (store 4 into %ir.a) (load 4 from %ir.a) + INC32m killed $rdi, 1, _, 0, _, implicit-def dead $eflags :: (store 4 into %ir.a) (load 4 from %ir.a) RETQ ... 
Index: test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
+++ test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
@@ -21,14 +21,14 @@
 name: foo
 body: |
   bb.0.entry:
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
-  ; CHECK: [[@LINE+1]]:35: missing implicit register operand 'implicit %eflags'
-    JG_1 %bb.2.exit, implicit %eax
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
+  ; CHECK: [[@LINE+1]]:35: missing implicit register operand 'implicit $eflags'
+    JG_1 %bb.2.exit, implicit $eax

   bb.1.less:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags

   bb.2.exit:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
+++ test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
@@ -21,14 +21,14 @@
 name: foo
 body: |
   bb.0.entry:
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
-  ; CHECK: [[@LINE+1]]:42: missing implicit register operand 'implicit %eflags'
-    JG_1 %bb.2.exit, implicit-def %eflags
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
+  ; CHECK: [[@LINE+1]]:42: missing implicit register operand 'implicit $eflags'
+    JG_1 %bb.2.exit, implicit-def $eflags

   bb.1.less:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags

   bb.2.exit:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
+++ test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
@@ -21,9 +21,9 @@
   bb.0.entry:
     successors: %bb.1.block
   ; CHECK: [[@LINE+1]]:44: expected an IR function reference
-    %rax = LEA64r %rip, 1, _, blockaddress(@addr, %ir-block.block), _
-    MOV64mr %rip, 1, _, @addr, _, killed %rax
-    JMP64m %rip, 1, _, @addr, _
+    $rax = LEA64r $rip, 1, _, blockaddress(@addr, %ir-block.block), _
+    MOV64mr $rip, 1, _, @addr, _, killed $rax
+    JMP64m $rip, 1, _, @addr, _

   bb.1.block (address-taken):
     RETQ
Index: test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
+++ test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
@@ -21,9 +21,9 @@
   bb.0.entry:
     successors: %bb.1.block
   ; CHECK: [[@LINE+1]]:44: expected a global value
-    %rax = LEA64r %rip, 1, _, blockaddress(0, %ir-block.block), _
-    MOV64mr %rip, 1, _, @addr, _, killed %rax
-    JMP64m %rip, 1, _, @addr, _
+    $rax = LEA64r $rip, 1, _, blockaddress(0, %ir-block.block), _
+    MOV64mr $rip, 1, _, @addr, _, killed $rax
+    JMP64m $rip, 1, _, @addr, _

   bb.1.block (address-taken):
     RETQ
Index: test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
+++ test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
@@ -17,8 +17,8 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:37: expected an integer literal after '+'
-    %rax = MOV64rm %rip, 1, _, @G + , _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax, implicit-def %eflags
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @G + , _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax, implicit-def $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-integer-after-tied-def.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-integer-after-tied-def.mir
+++ test/CodeGen/MIR/X86/expected-integer-after-tied-def.mir
@@ -12,13 +12,13 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:78: expected tied-def or low-level type after '('
-    INLINEASM &"$foo", 1, 2818058, def %rdi, 2147483657, killed %rdi(tied-def)
-    %rax = COPY killed %rdi
-    RETQ killed %rax
+    INLINEASM &"$foo", 1, 2818058, def $rdi, 2147483657, killed $rdi(tied-def)
+    $rax = COPY killed $rdi
+    RETQ killed $rax
 ...
Index: test/CodeGen/MIR/X86/expected-integer-in-successor-weight.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-integer-in-successor-weight.mir
+++ test/CodeGen/MIR/X86/expected-integer-in-successor-weight.mir
@@ -21,18 +21,18 @@
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:29: expected an integer literal after '('
     successors: %bb.1.less (_), %bb.2.exit(32)
-    liveins: %edi
+    liveins: $edi
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2.exit, implicit killed %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2.exit, implicit killed $eflags
 
   bb.1.less:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
 
   bb.2.exit:
-    liveins: %edi
+    liveins: $edi
 
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
+++ test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
@@ -13,11 +13,11 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
   ; CHECK: [[@LINE+1]]:48: expected 'load' or 'store' memory operation
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (4 from %ir.a)
-    RETQ %eax
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (4 from %ir.a)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-machine-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-machine-operand.mir
+++ test/CodeGen/MIR/X86/expected-machine-operand.mir
@@ -13,7 +13,7 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:20: expected a machine operand
-    %eax = XOR32rr =
-    RETQ %eax
+    $eax = XOR32rr =
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
+++ test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
@@ -48,10 +48,10 @@
   - { id: 0, name: x.addr, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
   ; CHECK: [[@LINE+1]]:46: expected a metadata node after 'debug-location'
     DBG_VALUE _, 0, !12, !13, debug-location 14
     MOV32mr %stack.x.addr, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
+++ test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
@@ -48,10 +48,10 @@
   - { id: 0, name: x.addr, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
   ; CHECK: [[@LINE+1]]:28: expected metadata id after '!'
     DBG_VALUE _, 0, !12, ! _
     MOV32mr %stack.0.x.addr, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
+++ test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
@@ -11,15 +11,15 @@
 ---
 name: test
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 stack:
 # CHECK: [[@LINE+1]]:74: expected a metadata node
   - { id: 0, name: xa, offset: -12, size: 4, alignment: 4, di-variable: '0' }
 body: |
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
 
-    MOV32mr %rsp, 1, _, -4, _, %edi :: (store 4 into %ir.xa)
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    MOV32mr $rsp, 1, _, -4, _, $edi :: (store 4 into %ir.xa)
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
+++ test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
@@ -15,16 +15,16 @@
 registers:
   - { id: 0, class: gr32 }
   # CHECK: - { id: 1, class: gr32, preferred-register: '%0' }
-  # CHECK: - { id: 2, class: gr32, preferred-register: '%edi' }
+  # CHECK: - { id: 2, class: gr32, preferred-register: '$edi' }
   - { id: 1, class: gr32, preferred-register: '%0' }
-  - { id: 2, class: gr32, preferred-register: '%edi' }
+  - { id: 2, class: gr32, preferred-register: '$edi' }
 body: |
   bb.0.body:
-    liveins: %edi, %esi
+    liveins: $edi, $esi
 
-    %1 = COPY %esi
-    %2 = COPY %edi
-    %2 = IMUL32rr %2, %1, implicit-def dead %eflags
-    %eax = COPY %2
-    RETQ killed %eax
+    %1 = COPY $esi
+    %2 = COPY $edi
+    %2 = IMUL32rr %2, %1, implicit-def dead $eflags
+    $eax = COPY %2
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
+++ test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
@@ -34,10 +34,10 @@
 tracksRegLiveness: true
 body: |
   bb.0.body:
-    liveins: %edi
+    liveins: $edi
 
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
 ---
 name: func
@@ -55,34 +55,34 @@
 body: |
   bb.0.entry:
     successors: %bb.1.check
-    liveins: %edi, %rbx
+    liveins: $edi, $rbx
 
-    frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
-    %rsp = frame-setup SUB64ri8 %rsp, 16, implicit-def dead %eflags
-    %ebx = COPY %edi
-    MOV32mr %rsp, 1, _, 12, _, %ebx
+    frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+    $rsp = frame-setup SUB64ri8 $rsp, 16, implicit-def dead $eflags
+    $ebx = COPY $edi
+    MOV32mr $rsp, 1, _, 12, _, $ebx
 
   bb.1.check:
     successors: %bb.2.loop, %bb.3.exit
-    liveins: %ebx
+    liveins: $ebx
 
-    CMP32ri8 %ebx, 10, implicit-def %eflags
-    JG_1 %bb.3.exit, implicit killed %eflags
+    CMP32ri8 $ebx, 10, implicit-def $eflags
+    JG_1 %bb.3.exit, implicit killed $eflags
     JMP_1 %bb.2.loop
 
   bb.2.loop:
     successors: %bb.1.check
-    liveins: %ebx
+    liveins: $ebx
 
-    %edi = MOV32rm %rsp, 1, _, 12, _
-    CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
-    %eax = DEC32r killed %eax, implicit-def dead %eflags
-    MOV32mr %rsp, 1, _, 12, _, killed %eax
+    $edi = MOV32rm $rsp, 1, _, 12, _
+    CALL64pcrel32 @compute, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax
+    $eax = DEC32r killed $eax, implicit-def dead $eflags
+    MOV32mr $rsp, 1, _, 12, _, killed $eax
     JMP_1 %bb.1.check
 
   bb.3.exit:
-    %eax = MOV32r0 implicit-def dead %eflags
-    %rsp = ADD64ri8 %rsp, 16, implicit-def dead %eflags
-    %rbx = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    $rsp = ADD64ri8 $rsp, 16, implicit-def dead $eflags
+    $rbx = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-named-register-livein.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-named-register-livein.mir
+++ test/CodeGen/MIR/X86/expected-named-register-livein.mir
@@ -15,6 +15,6 @@
   ; CHECK: [[@LINE+1]]:14: expected a named register
     liveins: %0
 
-    %eax = COPY %edi
-    RETQ %eax
+    $eax = COPY $edi
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-newline-at-end-of-list.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-newline-at-end-of-list.mir
+++ test/CodeGen/MIR/X86/expected-newline-at-end-of-list.mir
@@ -19,23 +19,23 @@
 name: foo
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0.entry:
     successors: %bb.1.less, %bb.2.exit
   ; CHECK: [[@LINE+1]]:19: expected line break at the end of a list
-    liveins: %edi 44
+    liveins: $edi 44
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2.exit, implicit killed %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2.exit, implicit killed $eflags
 
   bb.1.less:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
 
   bb.2.exit:
-    liveins: %edi
+    liveins: $edi
 
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/expected-number-after-bb.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-number-after-bb.mir
+++ test/CodeGen/MIR/X86/expected-number-after-bb.mir
@@ -20,14 +20,14 @@
 name: foo
 body: |
   bb.0.entry:
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
   ; CHECK: [[@LINE+1]]:14: expected a number after '%bb.'
-    JG_1 %bb.nah, implicit %eflags
+    JG_1 %bb.nah, implicit $eflags
 
   bb.1.true:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.2.nah:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-offset-after-cfi-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-offset-after-cfi-operand.mir
+++ test/CodeGen/MIR/X86/expected-offset-after-cfi-operand.mir
@@ -18,10 +18,10 @@
   - { id: 0, name: tmp, offset: -4176, size: 4168, alignment: 4 }
 body: |
   bb.0.entry:
-    %rsp = SUB64ri32 %rsp, 4040, implicit-def dead %eflags
+    $rsp = SUB64ri32 $rsp, 4040, implicit-def dead $eflags
   ; CHECK: [[@LINE+1]]:36: expected a cfi offset
     CFI_INSTRUCTION def_cfa_offset _
-    %rsp = ADD64ri32 %rsp, 4040, implicit-def dead %eflags
+    $rsp = ADD64ri32 $rsp, 4040, implicit-def dead $eflags
     RETQ
 ...
Index: test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
+++ test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
@@ -13,12 +13,12 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:60: expected a pointer IR value
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.b)
-    RETQ %eax
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load 4 from %ir.b)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
+++ test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
@@ -15,16 +15,16 @@
 name: memory_alignment
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
   ; CHECK: [[@LINE+1]]:71: expected an integer literal after 'align'
-    %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align -32)
-    %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
-    %xmm2 = FsFLD0SS
-    %xmm1 = MOVSSrr killed %xmm1, killed %xmm2
-    MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec, align 32)
-    MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32)
+    $xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align -32)
+    $xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
+    $xmm2 = FsFLD0SS
+    $xmm1 = MOVSSrr killed $xmm1, killed $xmm2
+    MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
+    MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
     RETQ
 ...
Index: test/CodeGen/MIR/X86/expected-register-after-cfi-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-register-after-cfi-operand.mir
+++ test/CodeGen/MIR/X86/expected-register-after-cfi-operand.mir
@@ -26,17 +26,17 @@
   - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 }
 body: |
   bb.0.entry:
-    PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
+    PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
     CFI_INSTRUCTION def_cfa_offset 16
   ; CHECK: [[@LINE+1]]:28: expected a cfi register
     CFI_INSTRUCTION offset %0, -16
-    %ebx = COPY %edi, implicit-def %rbx
-    %ebx = ADD32rr %ebx, killed %esi, implicit-def dead %eflags
-    %ebx = ADD32rr %ebx, killed %edx, implicit-def dead %eflags
-    %ebx = ADD32rr %ebx, killed %ecx, implicit-def dead %eflags
-    %edi = COPY %ebx
-    CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp
-    %eax = LEA64_32r killed %rbx, 1, %rbx, 0, _
-    %rbx = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    $ebx = COPY $edi, implicit-def $rbx
+    $ebx = ADD32rr $ebx, killed $esi, implicit-def dead $eflags
+    $ebx = ADD32rr $ebx, killed $edx, implicit-def dead $eflags
+    $ebx = ADD32rr $ebx, killed $ecx, implicit-def dead $eflags
+    $edi = COPY $ebx
+    CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp
+    $eax = LEA64_32r killed $rbx, 1, $rbx, 0, _
+    $rbx = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-register-after-flags.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-register-after-flags.mir
+++ test/CodeGen/MIR/X86/expected-register-after-flags.mir
@@ -15,6 +15,6 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:33: expected a register after register flags
-    %eax = MOV32r0 implicit-def 2
-    RETQ %eax
+    $eax = MOV32r0 implicit-def 2
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
+++ test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
@@ -13,12 +13,12 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:53: expected an atomic scope, ordering or a size integer literal
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load from %ir.a)
-    RETQ %eax
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load from %ir.a)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-stack-object.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-stack-object.mir
+++ test/CodeGen/MIR/X86/expected-stack-object.mir
@@ -42,26 +42,26 @@
 stackProtector: '0'
 fixedStack:
   - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16,
-      callee-saved-register: '%rbx' }
+      callee-saved-register: '$rbx' }
 stack:
   - { id: 0, name: StackGuardSlot, offset: -24, size: 8, alignment: 8 }
   - { id: 1, name: test, offset: -40, size: 8, alignment: 8 }
   - { id: 2, name: a, offset: -29, size: 5, alignment: 1 }
 body: |
   bb.0.entry:
-    liveins: %rbx, %rbx
+    liveins: $rbx, $rbx
 
-    frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
-    %rsp = frame-setup SUB64ri8 %rsp, 32, implicit-def dead %eflags
-    %rbx = LOAD_STACK_GUARD :: (invariant load 8 from %ir.__stack_chk_guard)
-    MOV64mr %rsp, 1, _, 24, _, %rbx
-    %rsi = LEA64r %rsp, 1, _, 19, _
-    MOV64mr %rsp, 1, _, 8, _, %rsi
-    %rdi = LEA64r %rip, 1, _, @.str, _
-    dead %eax = MOV32r0 implicit-def dead %eflags, implicit-def %al
-    CALL64pcrel32 @printf, csr_64, implicit %rsp, implicit %rdi, implicit %rsi, implicit %al, implicit-def %rsp, implicit-def %eax
-    CMP64rm killed %rbx, %rsp, 1, _, 24, _, implicit-def %eflags
-    %rsp = ADD64ri8 %rsp, 32, implicit-def dead %eflags
-    %rbx = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+    $rsp = frame-setup SUB64ri8 $rsp, 32, implicit-def dead $eflags
+    $rbx = LOAD_STACK_GUARD :: (invariant load 8 from %ir.__stack_chk_guard)
+    MOV64mr $rsp, 1, _, 24, _, $rbx
+    $rsi = LEA64r $rsp, 1, _, 19, _
+    MOV64mr $rsp, 1, _, 8, _, $rsi
+    $rdi = LEA64r $rip, 1, _, @.str, _
+    dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al
+    CALL64pcrel32 @printf, csr_64, implicit $rsp, implicit $rdi, implicit $rsi, implicit $al, implicit-def $rsp, implicit-def $eax
+    CMP64rm killed $rbx, $rsp, 1, _, 24, _, implicit-def $eflags
+    $rsp = ADD64ri8 $rsp, 32, implicit-def dead $eflags
+    $rbx = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-subregister-after-colon.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-subregister-after-colon.mir
+++ test/CodeGen/MIR/X86/expected-subregister-after-colon.mir
@@ -17,10 +17,10 @@
   - { id: 2, class: gr8 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
   ; CHECK: [[@LINE+1]]:20: expected a subregister index after '.'
     %1 = COPY %0 . 42
-    %2 = AND8ri %1, 1, implicit-def %eflags
-    %al = COPY %2
-    RETQ %al
+    %2 = AND8ri %1, 1, implicit-def $eflags
+    $al = COPY %2
+    RETQ $al
 ...
Index: test/CodeGen/MIR/X86/expected-target-flag-name.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-target-flag-name.mir
+++ test/CodeGen/MIR/X86/expected-target-flag-name.mir
@@ -17,8 +17,8 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:46: expected the name of the target flag
-    %rax = MOV64rm %rip, 1, _, target-flags( ) @G, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, target-flags( ) @G, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-tied-def-after-lparen.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-tied-def-after-lparen.mir
+++ test/CodeGen/MIR/X86/expected-tied-def-after-lparen.mir
@@ -12,13 +12,13 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:70: expected tied-def or low-level type after '('
-    INLINEASM &"$foo", 1, 2818058, def %rdi, 2147483657, killed %rdi(3)
-    %rax = COPY killed %rdi
-    RETQ killed %rax
+    INLINEASM &"$foo", 1, 2818058, def $rdi, 2147483657, killed $rdi(3)
+    $rax = COPY killed $rdi
+    RETQ killed $rax
 ...
Index: test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
+++ test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
@@ -13,12 +13,12 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:60: expected an IR value reference
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from a)
-    RETQ %eax
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load 4 from a)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/expected-virtual-register-in-functions-livein.mir
===================================================================
--- test/CodeGen/MIR/X86/expected-virtual-register-in-functions-livein.mir
+++ test/CodeGen/MIR/X86/expected-virtual-register-in-functions-livein.mir
@@ -15,12 +15,12 @@
   - { id: 0, class: gr32 }
 liveins:
   # CHECK: [[@LINE+1]]:34: expected a virtual register
-  - { reg: '%edi', virtual-reg: '%edi' }
+  - { reg: '$edi', virtual-reg: '$edi' }
 body: |
   bb.0.body:
-    liveins: %edi
+    liveins: $edi
 
-    %0 = COPY %edi
-    %eax = COPY %0
-    RETQ %eax
+    %0 = COPY $edi
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/external-symbol-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/external-symbol-operands.mir
+++ test/CodeGen/MIR/X86/external-symbol-operands.mir
@@ -32,21 +32,21 @@
 body: |
   bb.0.entry:
     successors: %bb.1.entry, %bb.2.entry
-    liveins: %edi
+    liveins: $edi
 
-    %rsp = SUB64ri32 %rsp, 520, implicit-def %eflags
-    %rcx = LOAD_STACK_GUARD
-    MOV64mr %rsp, 1, _, 512, _, %rcx
-    %rax = MOVSX64rr32 %edi
-    %eax = MOV32rm %rsp, 4, %rax, 0, _
-    CMP64rm %rcx, %rsp, 1, _, 512, _, implicit-def %eflags
-    JNE_1 %bb.2.entry, implicit %eflags
+    $rsp = SUB64ri32 $rsp, 520, implicit-def $eflags
+    $rcx = LOAD_STACK_GUARD
+    MOV64mr $rsp, 1, _, 512, _, $rcx
+    $rax = MOVSX64rr32 $edi
+    $eax = MOV32rm $rsp, 4, $rax, 0, _
+    CMP64rm $rcx, $rsp, 1, _, 512, _, implicit-def $eflags
+    JNE_1 %bb.2.entry, implicit $eflags
 
   bb.1.entry:
-    liveins: %eax
+    liveins: $eax
 
-    %rsp = ADD64ri32 %rsp, 520, implicit-def %eflags
-    RETQ %eax
+    $rsp = ADD64ri32 $rsp, 520, implicit-def $eflags
+    RETQ $eax
 
   bb.2.entry:
     ; CHECK: CALL64pcrel32 &__stack_chk_fail,
@@ -55,10 +55,10 @@
     ; CHECK-NEXT: CALL64pcrel32 &"$Quoted \09 External symbol \11 ",
     ; CHECK-NEXT: CALL64pcrel32 &__stack_chk_fail + 2,
    ; CHECK-NEXT: CALL64pcrel32 &" check stack - 20" - 20,
-    CALL64pcrel32 &__stack_chk_fail, csr_64, implicit %rsp, implicit-def %rsp
-    CALL64pcrel32 &__stack_chk_fail.09-_, csr_64, implicit %rsp, implicit-def %rsp
-    CALL64pcrel32 &__stack_chk_fail$, csr_64, implicit %rsp, implicit-def %rsp
-    CALL64pcrel32 &"$Quoted \09 External symbol \11 ", csr_64, implicit %rsp, implicit-def %rsp
-    CALL64pcrel32 &__stack_chk_fail + 2, csr_64, implicit %rsp, implicit-def %rsp
-    CALL64pcrel32 &" check stack - 20" - 20, csr_64, implicit %rsp, implicit-def %rsp
+    CALL64pcrel32 &__stack_chk_fail, csr_64, implicit $rsp, implicit-def $rsp
+    CALL64pcrel32 &__stack_chk_fail.09-_, csr_64, implicit $rsp, implicit-def $rsp
+    CALL64pcrel32 &__stack_chk_fail$, csr_64, implicit $rsp, implicit-def $rsp
+    CALL64pcrel32 &"$Quoted \09 External symbol \11 ", csr_64, implicit $rsp, implicit-def $rsp
+    CALL64pcrel32 &__stack_chk_fail + 2, csr_64, implicit $rsp, implicit-def $rsp
+    CALL64pcrel32 &" check stack - 20" - 20, csr_64, implicit $rsp, implicit-def $rsp
 ...
Index: test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
+++ test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
@@ -28,12 +28,12 @@
   - { id: 0, name: b, offset: -8, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    frame-setup PUSH32r undef %eax, implicit-def %esp, implicit %esp
+    frame-setup PUSH32r undef $eax, implicit-def $esp, implicit $esp
     CFI_INSTRUCTION def_cfa_offset 8
     ; CHECK: name: test
-    ; CHECK: %eax = MOV32rm %esp, 1, %noreg, 8, %noreg :: (load 4 from %fixed-stack.0, align 16)
-    %eax = MOV32rm %esp, 1, _, 8, _ :: (load 4 from %fixed-stack.0, align 16)
-    MOV32mr %esp, 1, _, 0, _, %eax :: (store 4 into %ir.b)
-    %edx = POP32r implicit-def %esp, implicit %esp
-    RETL %eax
+    ; CHECK: $eax = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load 4 from %fixed-stack.0, align 16)
+    $eax = MOV32rm $esp, 1, _, 8, _ :: (load 4 from %fixed-stack.0, align 16)
+    MOV32mr $esp, 1, _, 0, _, $eax :: (store 4 into %ir.b)
+    $edx = POP32r implicit-def $esp, implicit $esp
+    RETL $eax
 ...
Index: test/CodeGen/MIR/X86/fixed-stack-object-redefinition-error.mir
===================================================================
--- test/CodeGen/MIR/X86/fixed-stack-object-redefinition-error.mir
+++ test/CodeGen/MIR/X86/fixed-stack-object-redefinition-error.mir
@@ -22,7 +22,7 @@
   - { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
 body: |
   bb.0.entry:
-    %eax = MOV32rm %esp, 1, _, 4, _
-    %eax = ADD32rm killed %eax, %esp, 1, _, 8, _, implicit-def dead %eflags
-    RETL %eax
+    $eax = MOV32rm $esp, 1, _, 4, _
+    $eax = ADD32rm killed $eax, $esp, 1, _, 8, _, implicit-def dead $eflags
+    RETL $eax
 ...
Index: test/CodeGen/MIR/X86/fixed-stack-objects.mir
===================================================================
--- test/CodeGen/MIR/X86/fixed-stack-objects.mir
+++ test/CodeGen/MIR/X86/fixed-stack-objects.mir
@@ -28,7 +28,7 @@
   - { id: 0, offset: -8, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    %eax = MOV32rm %esp, 1, _, 8, _
-    MOV32mr %esp, 1, _, 0, _, %eax
-    RETL %eax
+    $eax = MOV32rm $esp, 1, _, 8, _
+    MOV32mr $esp, 1, _, 0, _, $eax
+    RETL $eax
 ...
Index: test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
===================================================================
--- test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
+++ test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
@@ -27,8 +27,8 @@
 name: foo
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
-  - { reg: '%esi' }
+  - { reg: '$edi' }
+  - { reg: '$esi' }
 # CHECK: frameInfo:
 # CHECK: savePoint: '%bb.2'
 # CHECK-NEXT: restorePoint: '%bb.2'
@@ -43,31 +43,31 @@
 body: |
   bb.0:
     successors: %bb.2, %bb.1
-    liveins: %edi, %esi
+    liveins: $edi, $esi
 
-    %eax = COPY %edi
-    CMP32rr %eax, killed %esi, implicit-def %eflags
-    JL_1 %bb.2, implicit killed %eflags
+    $eax = COPY $edi
+    CMP32rr $eax, killed $esi, implicit-def $eflags
+    JL_1 %bb.2, implicit killed $eflags
 
   bb.1:
     successors: %bb.3
-    liveins: %eax
+    liveins: $eax
 
     JMP_1 %bb.3
 
  bb.2.true:
     successors: %bb.3
-    liveins: %eax
+    liveins: $eax
 
-    MOV32mr %stack.0.tmp, 1, _, 0, _, killed %eax
-    ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %ssp, implicit-def dead %eflags, implicit %rsp, implicit %ssp
-    %rsi = LEA64r %stack.0.tmp, 1, _, 0, _
-    %edi = MOV32r0 implicit-def dead %eflags
-    CALL64pcrel32 @doSomething, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %rsi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
-    ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %ssp, implicit-def dead %eflags, implicit %rsp, implicit %ssp
+    MOV32mr %stack.0.tmp, 1, _, 0, _, killed $eax
+    ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $ssp, implicit-def dead $eflags, implicit $rsp, implicit $ssp
+    $rsi = LEA64r %stack.0.tmp, 1, _, 0, _
+    $edi = MOV32r0 implicit-def dead $eflags
+    CALL64pcrel32 @doSomething, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $rsi, implicit-def $rsp, implicit-def $ssp, implicit-def $eax
+    ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $ssp, implicit-def dead $eflags, implicit $rsp, implicit $ssp
 
   bb.3.false:
-    liveins: %eax
+    liveins: $eax
 
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/frame-info-stack-references.mir
===================================================================
--- test/CodeGen/MIR/X86/frame-info-stack-references.mir
+++ test/CodeGen/MIR/X86/frame-info-stack-references.mir
@@ -45,7 +45,7 @@
 stackProtector: '%stack.0.StackGuardSlot'
 fixedStack:
   - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16,
-      callee-saved-register: '%rbx' }
+      callee-saved-register: '$rbx' }
 stack:
   - { id: 0, name: StackGuardSlot, offset: -24, size: 8, alignment: 8 }
   - { id: 1, name: test, offset: -40, size: 8, alignment: 8 }
@@ -53,27 +53,27 @@
 body: |
   bb.0.entry:
     successors: %bb.1.entry, %bb.2.entry
-    liveins: %rbx, %rbx
+    liveins: $rbx, $rbx
 
-    frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
-    %rsp = frame-setup SUB64ri8 %rsp, 32, implicit-def dead %eflags
-    %rbx = LOAD_STACK_GUARD :: (invariant load 8 from @__stack_chk_guard)
-    MOV64mr %rsp, 1, _, 24, _, %rbx
-    %rsi = LEA64r %rsp, 1, _, 19, _
-    MOV64mr %rsp, 1, _, 8, _, %rsi
-    %rdi = LEA64r %rip, 1, _, @.str, _
-    dead %eax = MOV32r0 implicit-def dead %eflags, implicit-def %al
-    CALL64pcrel32 @printf, csr_64, implicit %rsp, implicit %rdi, implicit %rsi, implicit %al, implicit-def %rsp, implicit-def %eax
-    CMP64rm killed %rbx, %rsp, 1, _, 24, _, implicit-def %eflags
-    JNE_1 %bb.2.entry, implicit %eflags
+    frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+    $rsp = frame-setup SUB64ri8 $rsp, 32, implicit-def dead $eflags
+    $rbx = LOAD_STACK_GUARD :: (invariant load 8 from @__stack_chk_guard)
+    MOV64mr $rsp, 1, _, 24, _, $rbx
+    $rsi = LEA64r $rsp, 1, _, 19, _
+    MOV64mr $rsp, 1, _, 8, _, $rsi
+    $rdi = LEA64r $rip, 1, _, @.str, _
+    dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al
+    CALL64pcrel32 @printf, csr_64, implicit $rsp, implicit $rdi, implicit $rsi, implicit $al, implicit-def $rsp, implicit-def $eax
+    CMP64rm killed $rbx, $rsp, 1, _, 24, _, implicit-def $eflags
+    JNE_1 %bb.2.entry, implicit $eflags
 
   bb.1.entry:
-    liveins: %eax
+    liveins: $eax
 
-    %rsp = ADD64ri8 %rsp, 32, implicit-def dead %eflags
-    %rbx = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    $rsp = ADD64ri8 $rsp, 32, implicit-def dead $eflags
+    $rbx = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 
   bb.2.entry:
-    CALL64pcrel32 &__stack_chk_fail, csr_64, implicit %rsp, implicit-def %rsp
+    CALL64pcrel32 &__stack_chk_fail, csr_64, implicit $rsp, implicit-def $rsp
 ...
Index: test/CodeGen/MIR/X86/frame-setup-instruction-flag.mir
===================================================================
--- test/CodeGen/MIR/X86/frame-setup-instruction-flag.mir
+++ test/CodeGen/MIR/X86/frame-setup-instruction-flag.mir
@@ -20,17 +20,17 @@
 name: compute
 body: |
   bb.0.body:
-    %eax = IMUL32rri8 %edi, 11, implicit-def %eflags
-    RETQ %eax
+    $eax = IMUL32rri8 $edi, 11, implicit-def $eflags
+    RETQ $eax
 ...
 ---
 name: foo
 body: |
   bb.0.entry:
-    ; CHECK: frame-setup PUSH64r %rax
-    frame-setup PUSH64r %rax, implicit-def %rsp, implicit %rsp
-    CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
-    ; CHECK: %rdx = frame-destroy POP64r
-    %rdx = frame-destroy POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    ; CHECK: frame-setup PUSH64r $rax
+    frame-setup PUSH64r $rax, implicit-def $rsp, implicit $rsp
+    CALL64pcrel32 @compute, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax
+    ; CHECK: $rdx = frame-destroy POP64r
+    $rdx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/function-liveins.mir
===================================================================
--- test/CodeGen/MIR/X86/function-liveins.mir
+++ test/CodeGen/MIR/X86/function-liveins.mir
@@ -19,18 +19,18 @@
   - { id: 1, class: gr32 }
   - { id: 2, class: gr32 }
 # CHECK: liveins:
-# CHECK-NEXT: - { reg: '%edi', virtual-reg: '%0' }
-# CHECK-NEXT: - { reg: '%esi', virtual-reg: '%1' }
+# CHECK-NEXT: - { reg: '$edi', virtual-reg: '%0' }
+# CHECK-NEXT: - { reg: '$esi', virtual-reg: '%1' }
 liveins:
-  - { reg: '%edi', virtual-reg: '%0' }
-  - { reg: '%esi', virtual-reg: '%1' }
+  - { reg: '$edi', virtual-reg: '%0' }
+  - { reg: '$esi', virtual-reg: '%1' }
 body: |
   bb.0.body:
-    liveins: %edi, %esi
+    liveins: $edi, $esi
 
-    %1 = COPY %esi
-    %0 = COPY %edi
-    %2 = ADD32rr %0, %1, implicit-def dead %eflags
-    %eax = COPY %2
-    RETQ %eax
+    %1 = COPY $esi
+    %0 = COPY $edi
+    %2 = ADD32rr %0, %1, implicit-def dead $eflags
+    $eax = COPY %2
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/generic-instr-type.mir
===================================================================
--- test/CodeGen/MIR/X86/generic-instr-type.mir
+++ test/CodeGen/MIR/X86/generic-instr-type.mir
@@ -36,11 +36,11 @@
   - { id: 8, class: _ }
 body: |
   bb.0:
-    liveins: %edi, %xmm0
+    liveins: $edi, $xmm0
 
     ; CHECK: %1:_(s32) = G_ADD %0
-    %0(s32) = COPY %edi
-    %6(<4 x s32>) = COPY %xmm0
-    %7(s64) = COPY %rdi
+    %0(s32) = COPY $edi
+    %6(<4 x s32>) = COPY $xmm0
+    %7(s64) = COPY $rdi
     %1(s32) = G_ADD %0, %0
 
     ; CHECK: %2:_(<4 x s32>) = G_ADD %6, %6
Index: test/CodeGen/MIR/X86/global-value-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/global-value-operands.mir
+++ test/CodeGen/MIR/X86/global-value-operands.mir
@@ -64,22 +64,22 @@
 name: inc
 body: |
   bb.0.entry:
-    ; CHECK: %rax = MOV64rm %rip, 1, %noreg, @G, %noreg
-    %rax = MOV64rm %rip, 1, _, @G, _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax, implicit-def %eflags
-    RETQ %eax
+    ; CHECK: $rax = MOV64rm $rip, 1, $noreg, @G, $noreg
+    $rax = MOV64rm $rip, 1, _, @G, _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax, implicit-def $eflags
+    RETQ $eax
 ...
 ---
 # CHECK: name: inc2
 name: inc2
 body: |
   bb.0.entry:
-    ; CHECK: %rax = MOV64rm %rip, 1, %noreg, @0, %noreg
-    %rax = MOV64rm %rip, 1, _, @0, _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax, implicit-def %eflags
-    RETQ %eax
+    ; CHECK: $rax = MOV64rm $rip, 1, $noreg, @0, $noreg
+    $rax = MOV64rm $rip, 1, _, @0, _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax, implicit-def $eflags
+    RETQ $eax
 ...
 ---
 name: test
@@ -89,24 +89,24 @@
 body: |
   bb.0.entry:
     ; CHECK: , @.$0,
     ; CHECK: , @-_-,
     ; CHECK: , @_-_a,
     ; CHECK: , @"$.-B",
-    %rax = MOV64rm %rip, 1, _, @.$0, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %rcx = MOV64rm %rip, 1, _, @-_-, _
-    MOV32mr killed %rcx, 1, _, 0, _, killed %eax
-    %rax = MOV64rm %rip, 1, _, @_-_a, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %rcx = MOV64rm %rip, 1, _, @$.-B, _
-    MOV32mr killed %rcx, 1, _, 0, _, %eax
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @.$0, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $rcx = MOV64rm $rip, 1, _, @-_-, _
+    MOV32mr killed $rcx, 1, _, 0, _, killed $eax
+    $rax = MOV64rm $rip, 1, _, @_-_a, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $rcx = MOV64rm $rip, 1, _, @$.-B, _
+    MOV32mr killed $rcx, 1, _, 0, _, $eax
+    RETQ $eax
 ...
 ---
 name: test2
 body: |
   bb.0.entry:
     ; CHECK: , @"\01Hello@$%09 \5C World,",
-    %rax = MOV64rm %rip, 1, _, @"\01Hello@$%09 \\ World,", _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @"\01Hello@$%09 \\ World,", _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    RETQ $eax
 ...
 ---
 # CHECK: name: test3
@@ -117,24 +117,24 @@
     ; CHECK: , @.$0,
     ; CHECK: , @-_-,
     ; CHECK: , @_-_a + 4,
     ; CHECK: , @"$.-B" - 8,
-    %rax = MOV64rm %rip, 1, _, @.$0 + 0, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %rcx = MOV64rm %rip, 1, _, @-_- - 0, _
-    MOV32mr killed %rcx, 1, _, 0, _, killed %eax
-    %rax = MOV64rm %rip, 1, _, @_-_a + 4, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %rcx = MOV64rm %rip, 1, _, @$.-B - 8, _
-    MOV32mr killed %rcx, 1, _, 0, _, %eax
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @.$0 + 0, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $rcx = MOV64rm $rip, 1, _, @-_- - 0, _
+    MOV32mr killed $rcx, 1, _, 0, _, killed $eax
+    $rax = MOV64rm $rip, 1, _, @_-_a + 4, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $rcx = MOV64rm $rip, 1, _, @$.-B - 8, _
+    MOV32mr killed $rcx, 1, _, 0, _, $eax
+    RETQ $eax
 ...
 ---
 # CHECK: name: tf
 name: tf
 body: |
   bb.0.entry:
-    ; CHECK: %rax = MOV64rm %rip, 1, %noreg, target-flags(x86-gotpcrel) @G, %noreg
-    %rax = MOV64rm %rip, 1, _, target-flags(x86-gotpcrel) @G, _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax, implicit-def %eflags
-    RETQ %eax
+    ; CHECK: $rax = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @G, $noreg
+    $rax = MOV64rm $rip, 1, _, target-flags(x86-gotpcrel) @G, _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax, implicit-def $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/immediate-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/immediate-operands.mir
+++ test/CodeGen/MIR/X86/immediate-operands.mir
@@ -19,18 +19,18 @@
 name: foo
 body: |
   bb.0.entry:
-    ; CHECK: %eax = MOV32ri 42
-    ; CHECK-NEXT: RETQ %eax
-    %eax = MOV32ri 42
-    RETQ %eax
+    ; CHECK: $eax = MOV32ri 42
+    ; CHECK-NEXT: RETQ $eax
+    $eax = MOV32ri 42
+    RETQ $eax
 ...
 ---
 # CHECK: name: bar
 name: bar
 body: |
   bb.0.entry:
-    ; CHECK: %eax = MOV32ri -11
-    ; CHECK-NEXT: RETQ %eax
-    %eax = MOV32ri -11
-    RETQ %eax
+    ; CHECK: $eax = MOV32ri -11
+    ; CHECK-NEXT: RETQ $eax
+    $eax = MOV32ri -11
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/implicit-register-flag.mir
===================================================================
--- test/CodeGen/MIR/X86/implicit-register-flag.mir
+++ test/CodeGen/MIR/X86/implicit-register-flag.mir
@@ -32,19 +32,19 @@
 body: |
   bb.0.entry:
     successors: %bb.1, %bb.2
-    ; CHECK: CMP32ri8 %edi, 10, implicit-def %eflags
-    ; CHECK-NEXT: JG_1 %bb.2, implicit %eflags
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2, implicit %eflags
+    ; CHECK: CMP32ri8 $edi, 10, implicit-def $eflags
+    ; CHECK-NEXT: JG_1 %bb.2, implicit $eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2, implicit $eflags
 
   bb.1.less:
-    ; CHECK: %eax = MOV32r0 implicit-def %eflags
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ %eax
+    ; CHECK: $eax = MOV32r0 implicit-def $eflags
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ $eax
 
   bb.2.exit:
-    %eax = COPY %edi
-    RETQ %eax
+    $eax = COPY $edi
+    RETQ $eax
 ...
 ---
 name: implicit_subregister1
@@ -53,16 +53,16 @@
 body: |
   bb.0.entry:
     ; Verify that the implicit register verifier won't report an error on implicit
     ; subregisters.
     ; CHECK-LABEL: name: implicit_subregister1
-    ; CHECK: dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al
-    dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al
-    RETQ killed %al
+    ; CHECK: dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
+    dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
+    RETQ killed $al
 ...
 ---
 name: implicit_subregister2
 body: |
   bb.0.entry:
     ; CHECK-LABEL: name: implicit_subregister2
-    ; CHECK: dead %r15 = XOR64rr undef %r15, undef %r15, implicit-def dead %eflags, implicit-def %r15w
-    dead %r15 = XOR64rr undef %r15, undef %r15, implicit-def dead %eflags, implicit-def %r15w
-    RETQ killed %r15w
+    ; CHECK: dead $r15 = XOR64rr undef $r15, undef $r15, implicit-def dead $eflags, implicit-def $r15w
+    dead $r15 = XOR64rr undef $r15, undef $r15, implicit-def dead $eflags, implicit-def $r15w
+    RETQ killed $r15w
 ...
Index: test/CodeGen/MIR/X86/inline-asm-registers.mir
===================================================================
--- test/CodeGen/MIR/X86/inline-asm-registers.mir
+++ test/CodeGen/MIR/X86/inline-asm-registers.mir
@@ -21,32 +21,32 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
-  - { reg: '%rsi' }
+  - { reg: '$rdi' }
+  - { reg: '$rsi' }
 body: |
   bb.0.entry:
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi
 
     ; CHECK-LABEL: name: test
-    ; CHECK: INLINEASM &foo, 0, 2818058, def %rsi, 2818058, def dead %rdi,
-    INLINEASM &foo, 0, 2818058, def %rsi, 2818058, def dead %rdi, 2147549193, killed %rdi, 2147483657, killed %rsi, 12, implicit-def dead early-clobber %eflags
-    %rax = MOV64rr killed %rsi
-    RETQ killed %rax
+    ; CHECK: INLINEASM &foo, 0, 2818058, def $rsi, 2818058, def dead $rdi,
+    INLINEASM &foo, 0, 2818058, def $rsi, 2818058, def dead $rdi, 2147549193, killed $rdi, 2147483657, killed $rsi, 12, implicit-def dead early-clobber $eflags
+    $rax = MOV64rr killed $rsi
+    RETQ killed $rax
 ...
 ---
 name: test2
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
-  - { reg: '%rsi' }
+  - { reg: '$rdi' }
+  - { reg: '$rsi' }
 body: |
   bb.0.entry:
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi
 
     ; Verify that the register ties are preserved.
     ; CHECK-LABEL: name: test2
-    ; CHECK: INLINEASM &foo, 0, 2818058, def %rsi, 2818058, def dead %rdi, 2147549193, killed %rdi(tied-def 5), 2147483657, killed %rsi(tied-def 3), 12, implicit-def dead early-clobber %eflags
-    INLINEASM &foo, 0, 2818058, def %rsi, 2818058, def dead %rdi, 2147549193, killed %rdi(tied-def 5), 2147483657, killed %rsi(tied-def 3), 12, implicit-def dead early-clobber %eflags
-    %rax = MOV64rr killed %rsi
-    RETQ killed %rax
+    ; CHECK: INLINEASM &foo, 0, 2818058, def $rsi, 2818058, def dead $rdi, 2147549193, killed $rdi(tied-def 5), 2147483657, killed $rsi(tied-def 3), 12, implicit-def dead early-clobber $eflags
+    INLINEASM &foo, 0, 2818058, def $rsi, 2818058, def dead $rdi, 2147549193, killed $rdi(tied-def 5), 2147483657, killed $rsi(tied-def 3), 12, implicit-def dead early-clobber $eflags
+    $rax = MOV64rr killed $rsi
+    RETQ killed $rax
 ...
Index: test/CodeGen/MIR/X86/instructions-debug-location.mir
===================================================================
--- test/CodeGen/MIR/X86/instructions-debug-location.mir
+++ test/CodeGen/MIR/X86/instructions-debug-location.mir
@@ -58,15 +58,15 @@
   - { id: 0, name: x.addr, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    liveins: %edi
-    ; CHECK: DBG_VALUE debug-use %noreg, 0, !11, !DIExpression(), debug-location !12
-    ; CHECK: %eax = COPY %0, debug-location !13
-    ; CHECK: RETQ %eax, debug-location !13
-    %0 = COPY %edi
+    liveins: $edi
+    ; CHECK: DBG_VALUE debug-use $noreg, 0, !11, !DIExpression(), debug-location !12
+    ; CHECK: $eax = COPY %0, debug-location !13
+    ; CHECK: RETQ $eax, debug-location !13
+    %0 = COPY $edi
     DBG_VALUE debug-use _, 0, !12, !DIExpression(), debug-location !13
     MOV32mr %stack.0.x.addr, 1, _, 0, _, %0
-    %eax = COPY %0, debug-location !14
-    RETQ %eax, debug-location !14
+    $eax = COPY %0, debug-location !14
+    RETQ $eax, debug-location !14
 ...
 ---
 name: test_typed_immediates
@@ -79,16 +79,16 @@
   - { id: 0, name: x.addr, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
 
-    %0 = COPY %edi
-    ; CHECK: DBG_VALUE %noreg, i32 0, !DIExpression(), !12
-    ; CHECK-NEXT: DBG_VALUE %noreg, i64 -22, !DIExpression(), !12
-    ; CHECK-NEXT: DBG_VALUE %noreg, i128 123492148938512984928424384934328985928, !DIExpression(), !12
+    %0 = COPY $edi
+    ; CHECK: DBG_VALUE $noreg, i32 0, !DIExpression(), !12
+    ; CHECK-NEXT: DBG_VALUE $noreg, i64 -22, !DIExpression(), !12
+    ; CHECK-NEXT: DBG_VALUE $noreg, i128 123492148938512984928424384934328985928, !DIExpression(), !12
     DBG_VALUE _, i32 0, !DIExpression(), !13
     DBG_VALUE _, i64 -22, !DIExpression(), !13
     DBG_VALUE _, i128 123492148938512984928424384934328985928, !DIExpression(), !13
     MOV32mr %stack.0.x.addr, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/invalid-constant-pool-item.mir
===================================================================
--- test/CodeGen/MIR/X86/invalid-constant-pool-item.mir
+++ test/CodeGen/MIR/X86/invalid-constant-pool-item.mir
@@ -19,7 +19,7 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:47: use of undefined constant '%const.10'
-    %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.10, _
-    RETQ %xmm0
+    $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.10, _
+    RETQ $xmm0
 ...
Index: test/CodeGen/MIR/X86/invalid-target-flag-name.mir
===================================================================
--- test/CodeGen/MIR/X86/invalid-target-flag-name.mir
+++ test/CodeGen/MIR/X86/invalid-target-flag-name.mir
@@ -17,8 +17,8 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:45: use of undefined target flag 'x86-test'
-    %rax = MOV64rm %rip, 1, _, target-flags(x86-test) @G, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, target-flags(x86-test) @G, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/invalid-tied-def-index-error.mir
===================================================================
--- test/CodeGen/MIR/X86/invalid-tied-def-index-error.mir
+++ test/CodeGen/MIR/X86/invalid-tied-def-index-error.mir
@@ -12,13 +12,13 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:58: use of invalid tied-def operand index '300'; instruction has only 6 operands
-    INLINEASM &"$foo", 1, 2818058, def %rdi, 2147483657, killed %rdi(tied-def 300)
-    %rax = COPY killed %rdi
-    RETQ killed %rax
+    INLINEASM &"$foo", 1, 2818058, def $rdi, 2147483657, killed $rdi(tied-def 300)
+    $rax = COPY killed $rdi
+    RETQ killed $rax
 ...
Index: test/CodeGen/MIR/X86/jump-table-info.mir
===================================================================
--- test/CodeGen/MIR/X86/jump-table-info.mir
+++ test/CodeGen/MIR/X86/jump-table-info.mir
@@ -72,37 +72,37 @@
   bb.0.entry:
     successors: %bb.2, %bb.1
 
-    %eax = MOV32rr %edi, implicit-def %rax
-    CMP32ri8 %edi, 3, implicit-def %eflags
-    JA_1 %bb.2, implicit %eflags
+    $eax = MOV32rr $edi, implicit-def $rax
+    CMP32ri8 $edi, 3, implicit-def $eflags
+    JA_1 %bb.2, implicit $eflags
 
   bb.1.entry:
     successors: %bb.3, %bb.4, %bb.5, %bb.6
 
-    ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg
-    %rcx = LEA64r %rip, 1, _, %jump-table.0, _
-    %rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
-    %rax = ADD64rr %rax, %rcx, implicit-def %eflags
-    JMP64r %rax
+    ; CHECK: $rcx = LEA64r $rip, 1, $noreg, %jump-table.0, $noreg
+    $rcx = LEA64r $rip, 1, _, %jump-table.0, _
+    $rax = MOVSX64rm32 $rcx, 4, $rax, 0, _
+    $rax = ADD64rr $rax, $rcx, implicit-def $eflags
+    JMP64r $rax
 
   bb.2.def:
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ %eax
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ $eax
 
   bb.3.lbl1:
-    %eax = MOV32ri 1
-    RETQ %eax
+    $eax = MOV32ri 1
+    RETQ $eax
 
   bb.4.lbl2:
-    %eax = MOV32ri 2
-    RETQ %eax
+    $eax = MOV32ri 2
+    RETQ $eax
 
   bb.5.lbl3:
-    %eax = MOV32ri 4
-    RETQ %eax
+    $eax = MOV32ri 4
+    RETQ $eax
 
   bb.6.lbl4:
-    %eax = MOV32ri 8
-    RETQ %eax
+    $eax = MOV32ri 8
+    RETQ $eax
 ...
 ---
 name: test_jumptable2
@@ -115,36 +115,36 @@
   bb.0.entry:
     successors: %bb.2, %bb.1
 
-    %eax = MOV32rr %edi, implicit-def %rax
-    CMP32ri8 %edi, 3, implicit-def %eflags
-    JA_1 %bb.2, implicit %eflags
+    $eax = MOV32rr $edi, implicit-def $rax
+    CMP32ri8 $edi, 3, implicit-def $eflags
+    JA_1 %bb.2, implicit $eflags
 
   bb.1.entry:
     successors: %bb.3, %bb.4, %bb.5, %bb.6
 
     ; Verify that the printer will use an id of 0 for this jump table:
-    ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg
-    %rcx = LEA64r %rip, 1, _, %jump-table.1, _
-    %rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
-    %rax = ADD64rr %rax, %rcx, implicit-def %eflags
-    JMP64r %rax
+    ; CHECK: $rcx = LEA64r $rip, 1, $noreg, %jump-table.0, $noreg
+    $rcx = LEA64r $rip, 1, _, %jump-table.1, _
+    $rax = MOVSX64rm32 $rcx, 4, $rax, 0, _
+    $rax = ADD64rr $rax, $rcx, implicit-def $eflags
+    JMP64r $rax
 
   bb.2.def:
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ %eax
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ $eax
 
   bb.3.lbl1:
-    %eax = MOV32ri 1
-    RETQ %eax
+    $eax = MOV32ri 1
+    RETQ $eax
 
   bb.4.lbl2:
-    %eax = MOV32ri 2
-    RETQ %eax
+    $eax = MOV32ri 2
+    RETQ $eax
 
   bb.5.lbl3:
-    %eax = MOV32ri 4
-    RETQ %eax
+    $eax = MOV32ri 4
+    RETQ $eax
 
   bb.6.lbl4:
-    %eax = MOV32ri 8
-    RETQ %eax
+    $eax = MOV32ri 8
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/jump-table-redefinition-error.mir
===================================================================
--- test/CodeGen/MIR/X86/jump-table-redefinition-error.mir
+++ test/CodeGen/MIR/X86/jump-table-redefinition-error.mir
@@ -42,35 +42,35 @@
   bb.0.entry:
     successors: %bb.2.def, %bb.1.entry
 
-    %eax = MOV32rr %edi, implicit-def %rax
-    CMP32ri8 %edi, 3, implicit-def %eflags
-    JA_1 %bb.2.def, implicit %eflags
+    $eax = MOV32rr $edi, implicit-def $rax
+    CMP32ri8 $edi, 3, implicit-def $eflags
+    JA_1 %bb.2.def, implicit $eflags
 
   bb.1.entry:
     successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
 
-    %rcx = LEA64r %rip, 1, _, %jump-table.0, _
-    %rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
-    %rax = ADD64rr %rax, %rcx, implicit-def %eflags
-    JMP64r %rax
+    $rcx = LEA64r $rip, 1, _, %jump-table.0, _
+    $rax = MOVSX64rm32 $rcx, 4, $rax, 0, _
+    $rax = ADD64rr $rax, $rcx, implicit-def $eflags
+    JMP64r $rax
 
   bb.2.def:
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ %eax
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ $eax
 
   bb.3.lbl1:
-    %eax = MOV32ri 1
-    RETQ %eax
+    $eax = MOV32ri 1
+    RETQ $eax
 
   bb.4.lbl2:
-    %eax = MOV32ri 2
-    RETQ %eax
+    $eax = MOV32ri 2
+    RETQ $eax
 
   bb.5.lbl3:
-    %eax = MOV32ri 4
-    RETQ %eax
+    $eax = MOV32ri 4
+    RETQ $eax
 
   bb.6.lbl4:
-    %eax = MOV32ri 8
-    RETQ %eax
+    $eax = MOV32ri 8
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/killed-register-flag.mir
===================================================================
--- test/CodeGen/MIR/X86/killed-register-flag.mir
+++ test/CodeGen/MIR/X86/killed-register-flag.mir
@@ -23,18 +23,18 @@
   bb.0.entry:
     successors: %bb.1.less, %bb.2.exit
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2.exit, implicit %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2.exit, implicit $eflags
 
   bb.1.less:
-    ; CHECK: %eax = MOV32r0
-    ; CHECK-NEXT: RETQ killed %eax
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ killed %eax
+    ; CHECK: $eax = MOV32r0
+    ; CHECK-NEXT: RETQ killed $eax
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ killed $eax
 
   bb.2.exit:
-    ; CHECK: %eax = COPY killed %edi
-    ; CHECK-NEXT: RETQ killed %eax
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    ; CHECK: $eax = COPY killed $edi
+    ; CHECK-NEXT: RETQ killed $eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/large-cfi-offset-number-error.mir
===================================================================
--- test/CodeGen/MIR/X86/large-cfi-offset-number-error.mir
+++ test/CodeGen/MIR/X86/large-cfi-offset-number-error.mir
@@ -18,10 +18,10 @@
   - { id: 0, name: tmp, offset: -4176, size: 4168, alignment: 4 }
 body: |
   bb.0.entry:
-    %rsp = SUB64ri32 %rsp, 4040, implicit-def dead %eflags
+    $rsp = SUB64ri32 $rsp, 4040, implicit-def dead $eflags
   ; CHECK: [[@LINE+1]]:36: expected a 32 bit integer (the cfi offset is too large)
     CFI_INSTRUCTION def_cfa_offset 123456789123456
-    %rsp = ADD64ri32 %rsp, 4040, implicit-def dead %eflags
+    $rsp = ADD64ri32 $rsp, 4040, implicit-def dead $eflags
     RETQ
 ...
Index: test/CodeGen/MIR/X86/large-immediate-operand-error.mir
===================================================================
--- test/CodeGen/MIR/X86/large-immediate-operand-error.mir
+++ test/CodeGen/MIR/X86/large-immediate-operand-error.mir
@@ -13,6 +13,6 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:20: integer literal is too large to be an immediate operand
-    %eax = MOV32ri 12346127502983478823754212949184914
-    RETQ %eax
+    $eax = MOV32ri 12346127502983478823754212949184914
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/large-index-number-error.mir
===================================================================
--- test/CodeGen/MIR/X86/large-index-number-error.mir
+++ test/CodeGen/MIR/X86/large-index-number-error.mir
@@ -20,14 +20,14 @@
 name: foo
 body: |
   bb.0.entry:
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
   ; CHECK: [[@LINE+1]]:10: expected 32-bit integer (too large)
-    JG_1 %bb.123456789123456, implicit %eflags
+    JG_1 %bb.123456789123456, implicit $eflags
 
   bb.1:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.2:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/large-offset-number-error.mir
===================================================================
--- test/CodeGen/MIR/X86/large-offset-number-error.mir
+++ test/CodeGen/MIR/X86/large-offset-number-error.mir
@@ -17,8 +17,8 @@
 body: |
   bb.0.entry:
   ; CHECK: [[@LINE+1]]:37: expected 64-bit integer (too large)
-    %rax = MOV64rm %rip, 1, _, @G + 123456789123456789123456789, _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax implicit-def %eflags
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @G + 123456789123456789123456789, _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax implicit-def $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
===================================================================
--- test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
+++ test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
@@ -13,12 +13,12 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
   ; CHECK: [[@LINE+1]]:53: expected 64-bit integer (too large)
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 12345678912345678924218574857 from %ir.a)
-    RETQ %eax
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load 12345678912345678924218574857 from %ir.a)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/liveout-register-mask.mir
===================================================================
--- test/CodeGen/MIR/X86/liveout-register-mask.mir
+++ test/CodeGen/MIR/X86/liveout-register-mask.mir
@@ -17,8 +17,8 @@
 name: small_patchpoint_codegen
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
-  - { reg: '%rsi' }
+  - { reg: '$rdi' }
+  - { reg: '$rsi' }
 frameInfo:
   hasPatchPoint: true
   stackSize: 8
@@ -28,15 +28,15 @@
   - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 }
 body: |
   bb.0.entry:
-    liveins: %rdi, %rsi, %rbp
+    liveins: $rdi, $rsi, $rbp
 
-    frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+    frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
     CFI_INSTRUCTION def_cfa_offset 16
-    CFI_INSTRUCTION offset %rbp, -16
-    %rbp = frame-setup MOV64rr %rsp
-    CFI_INSTRUCTION def_cfa_register %rbp
-    ; CHECK: PATCHPOINT 5, 5, 0, 2, 0, %rdi, %rsi, csr_64, liveout(%esp, %rsp, %sp, %spl),
-    PATCHPOINT 5, 5, 0, 2, 0, %rdi, %rsi, csr_64, liveout(%esp, %rsp, %sp, %spl), implicit-def dead early-clobber %r11, implicit-def %rsp, implicit-def dead %rax
-    %rbp = POP64r implicit-def %rsp, implicit %rsp
+    CFI_INSTRUCTION offset $rbp, -16
+    $rbp = frame-setup MOV64rr $rsp
+    CFI_INSTRUCTION def_cfa_register $rbp
+    ; CHECK: PATCHPOINT 5, 5, 0, 2, 0, $rdi, $rsi, csr_64, liveout($esp, $rsp, $sp, $spl),
+    PATCHPOINT 5, 5, 0, 2, 0, $rdi, $rsi, csr_64, liveout($esp, $rsp, $sp, $spl), implicit-def dead early-clobber $r11, implicit-def $rsp, implicit-def dead $rax
+    $rbp = POP64r implicit-def $rsp, implicit $rsp
     RETQ
 ...
Index: test/CodeGen/MIR/X86/machine-basic-block-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/machine-basic-block-operands.mir
+++ test/CodeGen/MIR/X86/machine-basic-block-operands.mir
@@ -38,18 +38,18 @@
   bb.0.entry:
     successors: %bb.1, %bb.2
 
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    ; CHECK: CMP32ri8 %eax, 10
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    ; CHECK: CMP32ri8 $eax, 10
     ; CHECK-NEXT: JG_1 %bb.2
-    CMP32ri8 %eax, 10, implicit-def %eflags
-    JG_1 %bb.2, implicit %eflags
+    CMP32ri8 $eax, 10, implicit-def $eflags
+    JG_1 %bb.2, implicit $eflags
 
   ; CHECK: bb.1.less:
   bb.1.less:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.2.exit:
-    RETQ %eax
+    RETQ $eax
 ...
 ---
 # CHECK: name: bar
@@ -59,15 +59,15 @@
   bb.0.entry:
     successors: %bb.1, %bb.3
 
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    ; CHECK: CMP32ri8 %eax, 10
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    ; CHECK: CMP32ri8 $eax, 10
     ; CHECK-NEXT: JG_1 %bb.2
-    CMP32ri8 %eax, 10, implicit-def %eflags
-    JG_1 %bb.3, implicit %eflags
+    CMP32ri8 $eax, 10, implicit-def $eflags
+    JG_1 %bb.3, implicit $eflags
 
   bb.1:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.3:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/machine-instructions.mir
===================================================================
--- test/CodeGen/MIR/X86/machine-instructions.mir
+++ test/CodeGen/MIR/X86/machine-instructions.mir
@@ -18,6 +18,6 @@
   bb.0.entry:
     ; CHECK: MOV32rr
     ; CHECK-NEXT: RETQ
-    %eax = MOV32rr %eax
-    RETQ %eax
+    $eax = MOV32rr $eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/machine-verifier.mir
===================================================================
--- test/CodeGen/MIR/X86/machine-verifier.mir
+++ test/CodeGen/MIR/X86/machine-verifier.mir
@@ -14,7 +14,7 @@
 tracksRegLiveness: true
 body: |
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
     ; CHECK: *** Bad machine code: Too few operands ***
     ; CHECK: instruction: COPY
     ; CHECK: 2 operands expected, but 0 given.
Index: test/CodeGen/MIR/X86/memory-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/memory-operands.mir
+++ test/CodeGen/MIR/X86/memory-operands.mir
@@ -194,151 +194,151 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
-    ; CHECK: %eax = MOV32rm %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.a)
-    ; CHECK-NEXT: MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 42 :: (store 4 into %ir.a)
-    %eax = MOV32rm %rdi, 1, _, 0, _ :: (load 4 from %ir.a)
-    MOV32mi killed %rdi, 1, _, 0, _, 42 :: (store 4 into %ir.a)
-    RETQ %eax
+    liveins: $rdi
+    ; CHECK: $eax = MOV32rm $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.a)
+    ; CHECK-NEXT: MOV32mi killed $rdi, 1, $noreg, 0, $noreg, 42 :: (store 4 into %ir.a)
+    $eax = MOV32rm $rdi, 1, _, 0, _ :: (load 4 from %ir.a)
+    MOV32mi killed $rdi, 1, _, 0, _, 42 :: (store 4 into %ir.a)
+    RETQ $eax
 ...
 ---
 name: test2
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry2:
-    liveins: %rdi
-    ; CHECK: INC32m killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
-    INC32m killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
+    liveins: $rdi
+    ; CHECK: INC32m killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
+    INC32m killed $rdi, 1, _, 0, _, implicit-def dead $eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
     RETQ
 ...
 ---
 name: test3
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 frameInfo:
   maxAlignment: 4
 stack:
   - { id: 0, offset: -12, size: 4, alignment: 4 }
 body: |
   bb.0.entry3:
-    liveins: %rdi
+    liveins: $rdi
 
     ; Verify that the unnamed local values can be serialized.
     ; CHECK-LABEL: name: test3
-    ; CHECK: %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.0)
-    ; CHECK: MOV32mr %rsp, 1, %noreg, -4, %noreg, killed %eax :: (store 4 into %ir.1)
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.0)
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    MOV32mr %rsp, 1, _, -4, _, killed %eax :: (store 4 into %ir.1)
+    ; CHECK: $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.0)
+    ; CHECK: MOV32mr $rsp, 1, $noreg, -4, $noreg, killed $eax :: (store 4 into %ir.1)
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load 4 from %ir.0)
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    MOV32mr $rsp, 1, _, -4, _, killed $eax :: (store 4 into %ir.1)
    RETQ
 ...
 ---
 name: volatile_inc
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: name: volatile_inc
-    ; CHECK: %eax = MOV32rm %rdi, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.x)
-    ; CHECK: MOV32mr killed %rdi, 1, %noreg, 0, %noreg, %eax :: (volatile store 4 into %ir.x)
-    %eax = MOV32rm %rdi, 1, _, 0, _ :: (volatile load 4 from %ir.x)
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    MOV32mr killed %rdi, 1, _, 0, _, %eax :: (volatile store 4 into %ir.x)
-    RETQ %eax
+    ; CHECK: $eax = MOV32rm $rdi, 1, $noreg, 0, $noreg :: (volatile load 4 from %ir.x)
+    ; CHECK: MOV32mr killed $rdi, 1, $noreg, 0, $noreg, $eax :: (volatile store 4 into %ir.x)
+    $eax = MOV32rm $rdi, 1, _, 0, _ :: (volatile load 4 from %ir.x)
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    MOV32mr killed $rdi, 1, _, 0, _, $eax :: (volatile store 4 into %ir.x)
+    RETQ $eax
 ...
 ---
 name: non_temporal_store
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
-  - { reg: '%esi' }
+  - { reg: '$rdi' }
+  - { reg: '$esi' }
 body: |
   bb.0.entry:
-    liveins: %esi, %rdi
+    liveins: $esi, $rdi
     ; CHECK: name: non_temporal_store
-    ; CHECK: MOVNTImr killed %rdi, 1, %noreg, 0, %noreg, killed %esi :: (non-temporal store 4 into %ir.a)
-    MOVNTImr killed %rdi, 1, _, 0, _, killed %esi :: (non-temporal store 4 into %ir.a)
+    ; CHECK: MOVNTImr killed $rdi, 1, $noreg, 0, $noreg, killed $esi :: (non-temporal store 4 into %ir.a)
+    MOVNTImr killed $rdi, 1, _, 0, _, killed $esi :: (non-temporal store 4 into %ir.a)
     RETQ
 ...
 ---
 name: invariant_load
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: name: invariant_load
-    ; CHECK: %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (invariant load 4 from %ir.x)
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (invariant load 4 from %ir.x)
-    RETQ %eax
+    ; CHECK: $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (invariant load 4 from %ir.x)
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (invariant load 4 from %ir.x)
+    RETQ $eax
 ...
 ---
 name: memory_offset
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: name: memory_offset
-    ; CHECK: %xmm0 = MOVAPSrm %rdi, 1, %noreg, 0, %noreg :: (load 16 from %ir.vec)
-    ; CHECK-NEXT: %xmm1 = MOVAPSrm %rdi, 1, %noreg, 16, %noreg :: (load 16 from %ir.vec + 16)
-    ; CHECK: MOVAPSmr %rdi, 1, %noreg, 0, %noreg, killed %xmm0 :: (store 16 into %ir.vec)
-    ; CHECK-NEXT: MOVAPSmr killed %rdi, 1, %noreg, 16, %noreg, killed %xmm1 :: (store 16 into %ir.vec + 16)
-    %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec)
-    %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16)
-    %xmm2 = FsFLD0SS
-    %xmm1 = MOVSSrr killed %xmm1, killed %xmm2
-    MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec)
-    MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16)
+    ; CHECK: $xmm0 = MOVAPSrm $rdi, 1, $noreg, 0, $noreg :: (load 16 from %ir.vec)
+    ; CHECK-NEXT: $xmm1 = MOVAPSrm $rdi, 1, $noreg, 16, $noreg :: (load 16 from %ir.vec + 16)
+    ; CHECK: MOVAPSmr $rdi, 1, $noreg, 0, $noreg, killed $xmm0 :: (store 16 into %ir.vec)
+    ; CHECK-NEXT: MOVAPSmr killed $rdi, 1, $noreg, 16, $noreg, killed $xmm1 :: (store 16 into %ir.vec + 16)
+    $xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec)
+    $xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16)
+    $xmm2 = FsFLD0SS
+    $xmm1 = MOVSSrr killed $xmm1, killed $xmm2
+    MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec)
+    MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16)
     RETQ
 ...
 ---
 name: memory_alignment
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: name: memory_alignment
-    ; CHECK: %xmm0 = MOVAPSrm %rdi, 1, %noreg, 0, %noreg :: (load 16 from %ir.vec, align 32)
-    ; CHECK-NEXT: %xmm1 = MOVAPSrm %rdi, 1, %noreg, 16, %noreg :: (load 16 from %ir.vec + 16, align 32)
-    ; CHECK: MOVAPSmr %rdi, 1, %noreg, 0, %noreg, killed %xmm0 :: (store 16 into %ir.vec, align 32)
-    ; CHECK-NEXT: MOVAPSmr killed %rdi, 1, %noreg, 16, %noreg, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32)
-    %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 32)
-    %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
-    %xmm2 = FsFLD0SS
-    %xmm1 = MOVSSrr killed %xmm1, killed %xmm2
-    MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec, align 32)
-    MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32)
+    ; CHECK: $xmm0 = MOVAPSrm $rdi, 1, $noreg, 0, $noreg :: (load 16 from %ir.vec, align 32)
+    ; CHECK-NEXT: $xmm1 = MOVAPSrm $rdi, 1, $noreg, 16, $noreg :: (load 16 from %ir.vec + 16, align 32)
+    ; CHECK: MOVAPSmr $rdi, 1, $noreg, 0, $noreg, killed $xmm0 :: (store 16 into %ir.vec, align 32)
+    ; CHECK-NEXT: MOVAPSmr killed $rdi, 1, $noreg, 16, $noreg, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
+    $xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 32)
+    $xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
+    $xmm2 = FsFLD0SS
+    $xmm1 = MOVSSrr killed $xmm1, killed $xmm2
+    MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
+    MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
    RETQ
 ...
 ---
 name: constant_pool_psv
 tracksRegLiveness: true
 liveins:
-  - { reg: '%xmm0' }
+  - { reg: '$xmm0' }
 constants:
   - id: 0
     value: 'double 3.250000e+00'
 body: |
   bb.0.entry:
-    liveins: %xmm0
+    liveins: $xmm0
     ; CHECK: name: constant_pool_psv
-    ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg :: (load 8 from constant-pool)
-    ; CHECK-NEXT: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg :: (load 8 from constant-pool + 8)
-    %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ :: (load 8 from constant-pool)
-    %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ :: (load 8 from constant-pool + 8)
-    RETQ %xmm0
+    ; CHECK: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.0, $noreg :: (load 8 from constant-pool)
+    ; CHECK-NEXT: $xmm0 = ADDSDrm killed $xmm0, $rip, 1, $noreg, %const.0, $noreg :: (load 8 from constant-pool + 8)
+    $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ :: (load 8 from constant-pool)
+    $xmm0 = ADDSDrm killed $xmm0, $rip, 1, _, %const.0, _ :: (load 8 from constant-pool + 8)
+    RETQ $xmm0
 ...
 ---
 name: stack_psv
@@ -353,14 +353,14 @@
   - { id: 0, offset: 0, size: 10, alignment: 16, isImmutable: true, isAliased: false }
 body: |
   bb.0.entry:
-    %rsp = frame-setup SUB64ri8 %rsp, 24, implicit-def dead %eflags
+    $rsp = frame-setup SUB64ri8 $rsp, 24, implicit-def dead $eflags
     CFI_INSTRUCTION def_cfa_offset 32
-    LD_F80m %rsp, 1, %noreg, 32, %noreg, implicit-def dead %fpsw
+    LD_F80m $rsp, 1, $noreg, 32, $noreg, implicit-def dead $fpsw
     ; CHECK: name: stack_psv
-    ; CHECK: ST_FP80m %rsp, 1, %noreg, 0, %noreg, implicit-def dead %fpsw :: (store 10 into stack, align 16)
-    ST_FP80m %rsp, 1, _, 0, _, implicit-def dead %fpsw :: (store 10 into stack, align 16)
-    CALL64pcrel32 &cosl, csr_64, implicit %rsp, implicit-def %rsp, implicit-def %fp0
-    %rsp = ADD64ri8 %rsp, 24, implicit-def dead %eflags
+    ; CHECK: ST_FP80m $rsp, 1, $noreg, 0, $noreg, implicit-def dead $fpsw :: (store 10 into stack, align 16)
+    ST_FP80m $rsp, 1, _, 0, _, implicit-def dead $fpsw :: (store 10 into stack, align 16)
+    CALL64pcrel32 &cosl, csr_64, implicit $rsp, implicit-def $rsp, implicit-def $fp0
+    $rsp = ADD64ri8 $rsp, 24, implicit-def dead $eflags
     RETQ
 ...
 ---
@@ -369,32 +369,32 @@
 body: |
   bb.0.entry:
     ; CHECK: name: got_psv
-    ; CHECK: %rax = MOV64rm %rip, 1, %noreg, @G, %noreg :: (load 8 from got)
-    %rax = MOV64rm %rip, 1, _, @G, _ :: (load 8 from got)
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    RETQ %eax
+    ; CHECK: $rax = MOV64rm $rip, 1, $noreg, @G, $noreg :: (load 8 from got)
+    $rax = MOV64rm $rip, 1, _, @G, _ :: (load 8 from got)
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    RETQ $eax
 ...
 ---
 name: global_value
 tracksRegLiveness: true
 body: |
   bb.0.entry:
-    %rax = MOV64rm %rip, 1, _, @G, _
+    $rax = MOV64rm $rip, 1, _, @G, _
     ; CHECK-LABEL: name: global_value
-    ; CHECK: %eax = MOV32rm killed %rax, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from @G)
-    ; CHECK: %ecx = MOV32rm killed %rcx, 1, %noreg, 0, %noreg, implicit-def %rcx :: (load 4 from @0)
-    %eax = MOV32rm killed %rax, 1, _, 0, _, implicit-def %rax :: (load 4 from @G)
-    %rcx = MOV64rm %rip, 1, _, @0, _
-    %ecx = MOV32rm killed %rcx, 1, _, 0, _, implicit-def %rcx :: (load 4 from @0)
-    %eax = LEA64_32r killed %rax, 1, killed %rcx, 1, _
-    RETQ %eax
+    ; CHECK: $eax = MOV32rm killed $rax, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from @G)
+    ; CHECK: $ecx = MOV32rm killed $rcx, 1, $noreg, 0, $noreg, implicit-def $rcx :: (load 4 from @0)
+    $eax = MOV32rm killed $rax, 1, _, 0, _, implicit-def $rax :: (load 4 from @G)
+    $rcx = MOV64rm $rip, 1, _, @0, _
+    $ecx = MOV32rm killed $rcx, 1, _, 0, _, implicit-def $rcx :: (load 4 from @0)
+    $eax = LEA64_32r killed $rax, 1, killed $rcx, 1, _
+    RETQ $eax
 ...
 ---
 name: jumptable_psv
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 jumpTable:
   kind: label-difference32
   entries:
@@ -403,100 +403,100 @@
 body: |
   bb.0.entry:
     successors: %bb.2.def, %bb.1.entry
-    liveins: %edi
+    liveins: $edi
 
-    %eax = MOV32rr %edi, implicit-def %rax
-    CMP32ri8 killed %edi, 3, implicit-def %eflags
-    JA_1 %bb.2.def, implicit killed %eflags
+    $eax = MOV32rr $edi, implicit-def $rax
+    CMP32ri8 killed $edi, 3, implicit-def $eflags
+    JA_1 %bb.2.def, implicit killed $eflags
 
   bb.1.entry:
     successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
-    liveins: %rax
+    liveins: $rax
 
-    %rcx = LEA64r %rip, 1, _, %jump-table.0, _
+    $rcx = LEA64r $rip, 1, _, %jump-table.0, _
     ; CHECK: name: jumptable_psv
-    ; CHECK: %rax = MOVSX64rm32 %rcx, 4, killed %rax, 0, %noreg :: (load 4 from jump-table, align 8)
-    %rax = MOVSX64rm32 %rcx, 4, killed %rax, 0, _ :: (load 4 from jump-table, align 8)
-    %rax = ADD64rr killed %rax, killed %rcx, implicit-def dead %eflags
-    JMP64r killed %rax
+    ; CHECK: $rax = MOVSX64rm32 $rcx, 4, killed $rax, 0, $noreg :: (load 4 from jump-table, align 8)
+    $rax = MOVSX64rm32 $rcx, 4, killed $rax, 0, _ :: (load 4 from jump-table, align 8)
+    $rax = ADD64rr killed $rax, killed $rcx, implicit-def dead $eflags
+    JMP64r killed $rax
 
   bb.2.def:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ $eax
 
   bb.3.lbl1:
-    %eax = MOV32ri 1
-    RETQ %eax
+    $eax = MOV32ri 1
+    RETQ $eax
 
   bb.4.lbl2:
-    %eax = MOV32ri 2
-    RETQ %eax
+    $eax = MOV32ri 2
+    RETQ $eax
 
  bb.5.lbl3:
-    %eax = MOV32ri 4
-    RETQ %eax
+    $eax = MOV32ri 4
+    RETQ $eax
 
  bb.6.lbl4:
-    %eax = MOV32ri 8
-    RETQ %eax
+    $eax = MOV32ri 8
+    RETQ $eax
 ...
 ---
 name: tbaa_metadata
 tracksRegLiveness: true
 body: |
   bb.0.entry:
-    %rax = MOV64rm %rip, 1, _, @a, _ :: (load 8 from got)
+    $rax = MOV64rm $rip, 1, _, @a, _ :: (load 8 from got)
     ; CHECK-LABEL: name: tbaa_metadata
-    ; CHECK: %eax = MOV32rm killed %rax, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from @a, !tbaa !2)
-    ; CHECK-NEXT: %eax = MOV32rm killed %rax, 1, %noreg, 0, %noreg :: (load 4 from %ir.total_len2, !tbaa !6)
-    %eax = MOV32rm killed %rax, 1, _, 0, _, implicit-def %rax :: (load 4 from @a, !tbaa !2)
-    %eax = MOV32rm killed %rax, 1, _, 0, _ :: (load 4 from %ir.total_len2, !tbaa !6)
-    RETQ %eax
+    ; CHECK: $eax = MOV32rm killed $rax, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from @a, !tbaa !2)
+    ; CHECK-NEXT: $eax = MOV32rm killed $rax, 1, $noreg, 0, $noreg :: (load 4 from %ir.total_len2, !tbaa !6)
+    $eax = MOV32rm killed $rax, 1, _, 0, _, implicit-def $rax :: (load 4 from @a, !tbaa !2)
+    $eax = MOV32rm killed $rax, 1, _, 0, _ :: (load 4 from %ir.total_len2, !tbaa !6)
+    RETQ $eax
 ...
 ---
 name: aa_scope
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
-  - { reg: '%rsi' }
+  - { reg: '$rdi' }
+  - { reg: '$rsi' }
 body: |
   bb.0.entry:
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi
     ; CHECK-LABEL: name: aa_scope
-    ; CHECK: %xmm0 = MOVSSrm %rsi, 1, %noreg, 0, %noreg :: (load 4 from %ir.c, !alias.scope !9)
-    %xmm0 = MOVSSrm %rsi, 1, _, 0, _ :: (load 4 from %ir.c, !alias.scope !9)
-    ; CHECK-NEXT: MOVSSmr %rdi, 1, %noreg, 20, %noreg, killed %xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
-    MOVSSmr %rdi, 1, _, 20, _, killed %xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
-    %xmm0 = MOVSSrm killed %rsi, 1, _, 0, _ :: (load 4 from %ir.c)
-    MOVSSmr killed %rdi, 1, _, 28, _, killed %xmm0 :: (store 4 into %ir.arrayidx)
+    ; CHECK: $xmm0 = MOVSSrm $rsi, 1, $noreg, 0, $noreg :: (load 4 from %ir.c, !alias.scope !9)
+    $xmm0 = MOVSSrm $rsi, 1, _, 0, _ :: (load 4 from %ir.c, !alias.scope !9)
+    ; CHECK-NEXT: MOVSSmr $rdi, 1, $noreg, 20, $noreg, killed $xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
+    MOVSSmr $rdi, 1, _, 20, _, killed $xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
+    $xmm0 = MOVSSrm killed $rsi, 1, _, 0, _ :: (load 4 from %ir.c)
+    MOVSSmr killed $rdi, 1, _, 28, _, killed $xmm0 :: (store 4 into %ir.arrayidx)
     RETQ
 ...
 ---
 name: range_metadata
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK-LABEL: name: range_metadata
-    ; CHECK: %al = MOV8rm killed %rdi, 1, %noreg, 0, %noreg :: (load 1 from %ir.x, !range !11)
-    %al = MOV8rm killed %rdi, 1, _, 0, _ :: (load 1 from %ir.x, !range !11)
-    RETQ %al
+    ; CHECK: $al = MOV8rm killed $rdi, 1, $noreg, 0, $noreg :: (load 1 from %ir.x, !range !11)
+    $al = MOV8rm killed $rdi, 1, _, 0, _ :: (load 1 from %ir.x, !range !11)
+    RETQ $al
 ...
 ---
 name: gep_value
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
-    %rax = MOV64rm %rip, 1, _, @values, _ :: (load 8 from got)
+    $rax = MOV64rm $rip, 1, _, @values, _ :: (load 8 from got)
     ; CHECK-LABEL: gep_value
-    ; CHECK: MOV32mr killed %rax, 1, %noreg, 0, %noreg, %edi, implicit killed %rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
-    MOV32mr killed %rax, 1, _, 0, _, %edi, implicit killed %rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
+    ; CHECK: MOV32mr killed $rax, 1, $noreg, 0, $noreg, $edi, implicit killed $rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
+    MOV32mr killed $rax, 1, _, 0, _, $edi, implicit killed $rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
     RETQ
 ...
 ---
@@ -505,32 +505,32 @@
 body: |
   bb.0.entry:
     ; CHECK-LABEL: name: undef_value
-    ; CHECK: %rax = MOV64rm undef %rax, 1, %noreg, 0, %noreg :: (load 8 from `i8** undef`)
-    %rax = MOV64rm undef %rax, 1, _, 0, _ :: (load 8 from `i8** undef`)
-    RETQ %rax
+    ; CHECK: $rax = MOV64rm undef $rax, 1, $noreg, 0, $noreg :: (load 8 from `i8** undef`)
+    $rax = MOV64rm undef $rax, 1, _, 0, _ :: (load 8 from `i8** undef`)
+    RETQ $rax
 ...
 ---
 # Test memory operand without associated value.
 # CHECK-LABEL: name: dummy0
-# CHECK: %rax = MOV64rm undef %rax, 1, %noreg, 0, %noreg :: (load 8)
+# CHECK: $rax = MOV64rm undef $rax, 1, $noreg, 0, $noreg :: (load 8)
 name: dummy0
 tracksRegLiveness: true
 body: |
   bb.0:
-    %rax = MOV64rm undef %rax, 1, _, 0, _ :: (load 8)
-    RETQ %rax
+    $rax = MOV64rm undef $rax, 1, _, 0, _ :: (load 8)
+    RETQ $rax
 ...
 ---
 # Test parsing of stack references in machine memory operands.
 # CHECK-LABEL: name: dummy1
-# CHECK: %rax = MOV64rm %rsp, 1, %noreg, 0, %noreg :: (load 8 from %stack.0)
+# CHECK: $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 name: dummy1
 tracksRegLiveness: true
 stack:
   - { id: 0, size: 4, alignment: 4 }
 body: |
   bb.0:
-    %rax = MOV64rm %rsp, 1, _, 0, _ :: (load 8 from %stack.0)
-    RETQ %rax
+    $rax = MOV64rm $rsp, 1, _, 0, _ :: (load 8 from %stack.0)
+    RETQ $rax
 ...
Index: test/CodeGen/MIR/X86/metadata-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/metadata-operands.mir
+++ test/CodeGen/MIR/X86/metadata-operands.mir
@@ -49,12 +49,12 @@
   - { id: 0, name: x.addr, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    liveins: %edi
-    ; CHECK: %0:gr32 = COPY %edi
-    ; CHECK-NEXT: DBG_VALUE %noreg, 0, !11, !DIExpression()
-    %0 = COPY %edi
+    liveins: $edi
+    ; CHECK: %0:gr32 = COPY $edi
+    ; CHECK-NEXT: DBG_VALUE $noreg, 0, !11, !DIExpression()
+    %0 = COPY $edi
     DBG_VALUE _, 0, !12, !DIExpression()
     MOV32mr %stack.0.x.addr, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/missing-closing-quote.mir
===================================================================
--- test/CodeGen/MIR/X86/missing-closing-quote.mir
+++ test/CodeGen/MIR/X86/missing-closing-quote.mir
@@ -16,7 +16,7 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:48: end of machine instruction reached before the closing '"'
-    %rax = MOV64rm %rip, 1, _, @"quoted name, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @"quoted name, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/missing-comma.mir
===================================================================
--- test/CodeGen/MIR/X86/missing-comma.mir
+++ test/CodeGen/MIR/X86/missing-comma.mir
@@ -13,7 +13,7 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:25: expected ',' before the next machine operand
-    %eax = XOR32rr %eax %eflags
-    RETQ %eax
+    $eax = XOR32rr $eax $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/missing-implicit-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/missing-implicit-operand.mir
+++ test/CodeGen/MIR/X86/missing-implicit-operand.mir
@@ -25,14 +25,14 @@
   bb.0.entry:
     successors: %bb.1.less, %bb.2.exit
 
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
-    ; CHECK: [[@LINE+1]]:20: missing implicit register operand 'implicit %eflags'
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
+    ; CHECK: [[@LINE+1]]:20: missing implicit register operand 'implicit $eflags'
     JG_1 %bb.2.exit
 
   bb.1.less:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.2.exit:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/named-registers.mir
===================================================================
--- test/CodeGen/MIR/X86/named-registers.mir
+++ test/CodeGen/MIR/X86/named-registers.mir
@@ -14,8 +14,8 @@
 name: foo
 body: |
   bb.0.entry:
-    ; CHECK: %eax = MOV32r0
-    ; CHECK-NEXT: RETQ %eax
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ %eax
+    ; CHECK: $eax = MOV32r0
+    ; CHECK-NEXT: RETQ $eax
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/newline-handling.mir
===================================================================
--- test/CodeGen/MIR/X86/newline-handling.mir
+++ test/CodeGen/MIR/X86/newline-handling.mir
@@ -31,79 +31,79 @@
 name: foo
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 # CHECK-LABEL: name: foo
 # CHECK: body: |
 # CHECK-NEXT: bb.0.entry:
 # CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-# CHECK-NEXT: liveins: %edi
-# CHECK: CMP32ri8 %edi, 10, implicit-def %eflags
-# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags
+# CHECK-NEXT: liveins: $edi
+# CHECK: CMP32ri8 $edi, 10, implicit-def $eflags
+# CHECK-NEXT: JG_1 %bb.2, implicit killed $eflags
 # CHECK: bb.1.less:
-# CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags
-# CHECK-NEXT: RETQ killed %eax
+# CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags
+# CHECK-NEXT: RETQ killed $eax
# CHECK: bb.2.exit:
-# CHECK-NEXT: liveins: %edi
-# CHECK: %eax = COPY killed %edi
-# CHECK-NEXT: RETQ killed %eax
+# CHECK-NEXT: liveins: $edi
+# CHECK: $eax = COPY killed $edi
+# CHECK-NEXT: RETQ killed $eax
 body: |
   bb.0.entry:
     successors: %bb.1, %bb.2
-    liveins: %edi
+    liveins: $edi
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
 
-    JG_1 %bb.2, implicit killed %eflags
+    JG_1 %bb.2, implicit killed $eflags
 
   bb.1.less:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
 
   bb.2.exit:
-    liveins: %edi
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    liveins: $edi
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
 ---
 name: bar
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 # CHECK-LABEL: name: bar
 # CHECK: body: |
 # CHECK-NEXT: bb.0.entry:
 # CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-# CHECK-NEXT: liveins: %edi
-# CHECK: CMP32ri8 %edi, 10, implicit-def %eflags
-# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags
+# CHECK-NEXT: liveins: $edi
+# CHECK: CMP32ri8 $edi, 10, implicit-def $eflags
+# CHECK-NEXT: JG_1 %bb.2, implicit killed $eflags
 # CHECK: bb.1.less:
-# CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags
-# CHECK-NEXT: RETQ killed %eax
+# CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags
+# CHECK-NEXT: RETQ killed $eax
 # CHECK: bb.2.exit:
-# CHECK-NEXT: liveins: %edi
-# CHECK: %eax = COPY killed %edi
-# CHECK-NEXT: RETQ killed %eax
+# CHECK-NEXT: liveins: $edi
+# CHECK: $eax = COPY killed $edi
+# CHECK-NEXT: RETQ killed $eax
 body: |
   bb.0.entry:
     successors: %bb.1, %bb.2
-    liveins: %edi
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2, implicit killed %eflags
-  bb.1.less: %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
-
-  bb.2.exit: liveins: %edi
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    liveins: $edi
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2, implicit killed $eflags
+  bb.1.less: $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
+
+  bb.2.exit: liveins: $edi
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/null-register-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/null-register-operands.mir
+++ test/CodeGen/MIR/X86/null-register-operands.mir
@@ -15,8 +15,8 @@
 name: deref
 body: |
   bb.0.entry:
-    ; CHECK: %eax = MOV32rm %rdi, 1, %noreg, 0, %noreg
-    ; CHECK-NEXT: RETQ %eax
-    %eax = MOV32rm %rdi, 1, _, 0, %noreg
-    RETQ %eax
+    ; CHECK: $eax = MOV32rm $rdi, 1, $noreg, 0, $noreg
+    ; CHECK-NEXT: RETQ $eax
+    $eax = MOV32rm $rdi, 1, _, 0, $noreg
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/register-mask-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/register-mask-operands.mir
+++ test/CodeGen/MIR/X86/register-mask-operands.mir
@@ -22,18 +22,18 @@
 name: compute
 body: |
   bb.0.body:
-    %eax = IMUL32rri8 %edi, 11, implicit-def %eflags
-    RETQ %eax
+    $eax = IMUL32rri8 $edi, 11, implicit-def $eflags
+    RETQ $eax
 ...
 ---
 # CHECK: name: foo
 name: foo
 body: |
   bb.0.entry:
-    ; CHECK: PUSH64r %rax
-    ; CHECK-NEXT: CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
-    PUSH64r %rax, implicit-def %rsp, implicit %rsp
-    CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
-    %rdx = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    ; CHECK: PUSH64r $rax
+    ; CHECK-NEXT: CALL64pcrel32 @compute, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax
+    PUSH64r $rax, implicit-def $rsp, implicit $rsp
+    CALL64pcrel32 @compute, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax
+    $rdx = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/register-operand-class-invalid0.mir
===================================================================
--- test/CodeGen/MIR/X86/register-operand-class-invalid0.mir
+++ test/CodeGen/MIR/X86/register-operand-class-invalid0.mir
@@ -9,5 +9,5 @@
 body: |
   bb.0:
     ; CHECK: [[@LINE+1]]:10: register class specification expects a virtual register
-    %eax : gr32 = COPY %rdx
+    $eax : gr32 = COPY $rdx
 ...
Index: test/CodeGen/MIR/X86/register-operand-class-invalid1.mir
===================================================================
--- test/CodeGen/MIR/X86/register-operand-class-invalid1.mir
+++ test/CodeGen/MIR/X86/register-operand-class-invalid1.mir
@@ -8,7 +8,7 @@
 name: t
 body: |
   bb.0:
-    %0 : gr32 = COPY %rdx
+    %0 : gr32 = COPY $rdx
     ; CHECK: [[@LINE+1]]:24: conflicting register classes, previously: GR32
     NOOP implicit %0 : gr32_abcd
 ...
Index: test/CodeGen/MIR/X86/register-operand-class.mir
===================================================================
--- test/CodeGen/MIR/X86/register-operand-class.mir
+++ test/CodeGen/MIR/X86/register-operand-class.mir
@@ -14,14 +14,14 @@
 name: func
 body: |
   bb.0:
-    %0 : gr32 = COPY %rax
-    %1.sub_32bit : gr64 = COPY %eax
-    %rdx = COPY %1
-    %2 = COPY %ecx
-    %ecx = COPY %2 : gr32
+    %0 : gr32 = COPY $rax
+    %1.sub_32bit : gr64 = COPY $eax
+    $rdx = COPY %1
+    %2 = COPY $ecx
+    $ecx = COPY %2 : gr32
 
-    %3 : gr16 = COPY %bx
-    %bx = COPY %3 : gr16
+    %3 : gr16 = COPY $bx
+    $bx = COPY %3 : gr16
 
-    %4 : _(s32) = COPY %edx
+    %4 : _(s32) = COPY $edx
 ...
Index: test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
===================================================================
--- test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
+++ test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
@@ -17,8 +17,8 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:42: register operands can't have target flags
-    %rax = MOV64rm target-flags(x86-got) %rip, 1, _, @G, _
-    %eax = MOV32rm killed %rax, 1, _, 0, _
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    RETQ %eax
+    $rax = MOV64rm target-flags(x86-got) $rip, 1, _, @G, _
+    $eax = MOV32rm killed $rax, 1, _, 0, _
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/renamable-register-flag.mir
===================================================================
--- test/CodeGen/MIR/X86/renamable-register-flag.mir
+++ test/CodeGen/MIR/X86/renamable-register-flag.mir
@@ -10,7 +10,7 @@
 body: |
   ; CHECK: bb.0:
   bb.0:
-    ; CHECK: renamable %eax = IMUL32rri8 %edi, 11, implicit-def dead %eflags
-    renamable %eax = IMUL32rri8 %edi, 11, implicit-def dead %eflags
-    RETQ %eax
+    ; CHECK: renamable $eax = IMUL32rri8 $edi, 11, implicit-def dead $eflags
+    renamable $eax = IMUL32rri8 $edi, 11, implicit-def dead $eflags
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/roundtrip.mir
===================================================================
--- test/CodeGen/MIR/X86/roundtrip.mir
+++ test/CodeGen/MIR/X86/roundtrip.mir
@@ -6,15 +6,15 @@
 # CHECK:   - { id: 1, class: gr32, preferred-register: '' }
 # CHECK: body: |
 # CHECK:   bb.0:
-# CHECK:     %0:gr32 = MOV32r0 implicit-def %eflags
+# CHECK:     %0:gr32 = MOV32r0 implicit-def $eflags
 # CHECK:     dead %1:gr32 = COPY %0
-# CHECK:     MOV32mr undef %rcx, 1, %noreg, 0, %noreg, killed %0 :: (volatile store 4)
-# CHECK:     RETQ undef %eax
+# CHECK:     MOV32mr undef $rcx, 1, $noreg, 0, $noreg, killed %0 :: (volatile store 4)
+# CHECK:     RETQ undef $eax
 name: func0
 body: |
   bb.0:
-    %0 : gr32 = MOV32r0 implicit-def %eflags
+    %0 : gr32 = MOV32r0 implicit-def $eflags
     dead %1 : gr32 = COPY %0
-    MOV32mr undef %rcx, 1, _, 0, _, killed %0 :: (volatile store 4)
-    RETQ undef %eax
+    MOV32mr undef $rcx, 1, _, 0, _, killed %0 :: (volatile store 4)
+    RETQ undef $eax
 ...
Index: test/CodeGen/MIR/X86/simple-register-allocation-hints.mir
===================================================================
--- test/CodeGen/MIR/X86/simple-register-allocation-hints.mir
+++ test/CodeGen/MIR/X86/simple-register-allocation-hints.mir
@@ -16,19 +16,19 @@
 tracksRegLiveness: true
 # CHECK: registers:
 # CHECK-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
-# CHECK-NEXT:   - { id: 1, class: gr32, preferred-register: '%esi' }
-# CHECK-NEXT:   - { id: 2, class: gr32, preferred-register: '%edi' }
+# CHECK-NEXT:   - { id: 1, class: gr32, preferred-register: '$esi' }
+# CHECK-NEXT:   - { id: 2, class: gr32, preferred-register: '$edi' }
 registers:
   - { id: 0, class: gr32 }
-  - { id: 1, class: gr32, preferred-register: '%esi' }
-  - { id: 2, class: gr32, preferred-register: '%edi' }
+  - { id: 1, class: gr32, preferred-register: '$esi' }
+  - { id: 2, class: gr32, preferred-register: '$edi' }
 body: |
   bb.0.body:
-    liveins: %edi, %esi
+    liveins: $edi, $esi
 
-    %1 = COPY %esi
-    %2 = COPY %edi
-    %2 = IMUL32rr %2, %1, implicit-def dead %eflags
-    %eax = COPY %2
-    RETQ killed %eax
+    %1 = COPY $esi
+    %2 = COPY $edi
+    %2 = IMUL32rr %2, %1, implicit-def dead $eflags
+    $eax = COPY %2
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
===================================================================
--- test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
+++ test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
@@ -27,7 +27,7 @@
   - { id: 0, offset: -12, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    MOV32mr %rsp, 1, _, -4, _, %edi
-    %eax = COPY %edi
-    RETQ %eax
+    MOV32mr $rsp, 1, _, -4, _, $edi
+    $eax = COPY $edi
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/stack-object-invalid-name.mir
===================================================================
--- test/CodeGen/MIR/X86/stack-object-invalid-name.mir
+++ test/CodeGen/MIR/X86/stack-object-invalid-name.mir
@@ -22,7 +22,7 @@
   - { id: 0, name: x, offset: -12, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    MOV32mr %rsp, 1, _, -4, _, %edi
-    %eax = MOV32rm %rsp, 1, _, -4, _
-    RETQ %eax
+    MOV32mr $rsp, 1, _, -4, _, $edi
+    $eax = MOV32rm $rsp, 1, _, -4, _
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
===================================================================
--- test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
+++ test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
@@ -24,9 +24,9 @@
   - { id: 0, name: b, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
     ; CHECK: [[@LINE+1]]:13: the name of the stack object '%stack.0' isn't 'x'
     MOV32mr %stack.0.x, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/stack-object-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/stack-object-operands.mir
+++ test/CodeGen/MIR/X86/stack-object-operands.mir
@@ -32,16 +32,16 @@
 body: |
   bb.0.entry:
     ; CHECK-LABEL: name: test
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg
-    ; CHECK: MOV32mr %stack.0.b, 1, %noreg, 0, %noreg, [[MOV32rm]]
-    ; CHECK: MOV32mi %stack.1, 1, %noreg, 0, %noreg, 2
-    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.0.b, 1, %noreg, 0, %noreg
-    ; CHECK: %eax = COPY [[MOV32rm1]]
-    ; CHECK: RETL %eax
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg
+    ; CHECK: MOV32mr %stack.0.b, 1, $noreg, 0, $noreg, [[MOV32rm]]
+    ; CHECK: MOV32mi %stack.1, 1, $noreg, 0, $noreg, 2
+    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.0.b, 1, $noreg, 0, $noreg
+    ; CHECK: $eax = COPY [[MOV32rm1]]
+    ; CHECK: RETL $eax
     %0 = MOV32rm %fixed-stack.0, 1, _, 0, _
     MOV32mr %stack.0.b, 1, _, 0, _, %0
     MOV32mi %stack.1, 1, _, 0, _, 2
     %1 = MOV32rm %stack.0, 1, _, 0, _
-    %eax = COPY %1
-    RETL %eax
+    $eax = COPY %1
+    RETL $eax
 ...
Index: test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
===================================================================
--- test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
+++ test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
@@ -19,7 +19,7 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 frameInfo:
   maxAlignment: 8
 stack:
@@ -28,10 +28,10 @@
   - { id: 0, name: x, offset: -24, size: 8, alignment: 8 }
 body: |
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
 
-    MOV32mr %rsp, 1, _, -4, _, killed %edi
-    MOV64mi32 %rsp, 1, _, -16, _, 2
-    %eax = MOV32rm %rsp, 1, _, -4, _
-    RETQ %eax
+    MOV32mr $rsp, 1, _, -4, _, killed $edi
+    MOV64mi32 $rsp, 1, _, -16, _, 2
+    $eax = MOV32rm $rsp, 1, _, -4, _
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/stack-objects.mir
===================================================================
--- test/CodeGen/MIR/X86/stack-objects.mir
+++ test/CodeGen/MIR/X86/stack-objects.mir
@@ -36,8 +36,8 @@
   - { id: 2, type: spill-slot, offset: -32, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    MOV32mr %rsp, 1, _, -4, _, %edi
-    MOV64mi32 %rsp, 1, _, -16, _, 2
-    %eax = MOV32rm %rsp, 1, _, -4, _
-    RETQ %eax
+    MOV32mr $rsp, 1, _, -4, _, $edi
+    MOV64mi32 $rsp, 1, _, -16, _, 2
+    $eax = MOV32rm $rsp, 1, _, -4, _
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/standalone-register-error.mir
===================================================================
--- test/CodeGen/MIR/X86/standalone-register-error.mir
+++ test/CodeGen/MIR/X86/standalone-register-error.mir
@@ -12,12 +12,12 @@
   - { id: 0, class: gr32 }
 liveins:
   # CHECK: [[@LINE+1]]:13: unknown register name 'register'
-  - { reg: '%register', virtual-reg: '%0' }
+  - { reg: '$register', virtual-reg: '%0' }
 body: |
   bb.0.body:
-    liveins: %edi
+    liveins: $edi
 
-    %0 = COPY %edi
-    %eax = COPY %0
-    RETQ %eax
+    %0 = COPY $edi
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/subreg-on-physreg.mir
===================================================================
--- test/CodeGen/MIR/X86/subreg-on-physreg.mir
+++ test/CodeGen/MIR/X86/subreg-on-physreg.mir
@@ -8,5 +8,5 @@
 body: |
   bb.0:
     ; CHECK: [[@LINE+1]]:19: subregister index expects a virtual register
-    %eax.sub_8bit = COPY %bl
+    $eax.sub_8bit = COPY $bl
 ...
Index: test/CodeGen/MIR/X86/subregister-index-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/subregister-index-operands.mir
+++ test/CodeGen/MIR/X86/subregister-index-operands.mir
@@ -19,16 +19,16 @@
   - { id: 1, class: gr8 }
 body: |
   bb.0.entry:
-    liveins: %edi, %eax
+    liveins: $edi, $eax
     ; CHECK-LABEL: name: t
-    ; CHECK: liveins: %edi, %eax
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG %edi, %al, %subreg.sub_8bit
-    ; CHECK: [[EXTRACT_SUBREG:%[0-9]+]]:gr8 = EXTRACT_SUBREG %eax, %subreg.sub_8bit_hi
-    ; CHECK: %ax = REG_SEQUENCE [[EXTRACT_SUBREG]], %subreg.sub_8bit, [[EXTRACT_SUBREG]], %subreg.sub_8bit_hi
-    ; CHECK: RETQ %ax
-    %0 = INSERT_SUBREG %edi, %al, %subreg.sub_8bit
-    %1 = EXTRACT_SUBREG %eax, %subreg.sub_8bit_hi
-    %ax = REG_SEQUENCE %1, %subreg.sub_8bit, %1, %subreg.sub_8bit_hi
-    RETQ %ax
+    ; CHECK: liveins: $edi, $eax
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG $edi, $al, %subreg.sub_8bit
+    ; CHECK: [[EXTRACT_SUBREG:%[0-9]+]]:gr8 = EXTRACT_SUBREG $eax, %subreg.sub_8bit_hi
+    ; CHECK: $ax = REG_SEQUENCE [[EXTRACT_SUBREG]], %subreg.sub_8bit, [[EXTRACT_SUBREG]], %subreg.sub_8bit_hi
+    ; CHECK: RETQ $ax
+    %0 = INSERT_SUBREG $edi, $al, %subreg.sub_8bit
+    %1 = EXTRACT_SUBREG $eax, %subreg.sub_8bit_hi
+    $ax = REG_SEQUENCE %1, %subreg.sub_8bit, %1, %subreg.sub_8bit_hi
+    RETQ $ax
 ...
Index: test/CodeGen/MIR/X86/subregister-operands.mir
===================================================================
--- test/CodeGen/MIR/X86/subregister-operands.mir
+++ test/CodeGen/MIR/X86/subregister-operands.mir
@@ -20,18 +20,18 @@
   - { id: 2, class: gr8 }
 body: |
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
     ; CHECK-LABEL: name: t
-    ; CHECK: liveins: %edi
-    ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+    ; CHECK: liveins: $edi
+    ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
     ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
-    ; CHECK: [[AND8ri:%[0-9]+]]:gr8 = AND8ri [[COPY1]], 1, implicit-def %eflags
-    ; CHECK: %al = COPY [[AND8ri]]
-    ; CHECK: RETQ %al
-    %0 = COPY %edi
+    ; CHECK: [[AND8ri:%[0-9]+]]:gr8 = AND8ri [[COPY1]], 1, implicit-def $eflags
+    ; CHECK: $al = COPY [[AND8ri]]
+    ; CHECK: RETQ $al
+    %0 = COPY $edi
     %1 = COPY %0.sub_8bit
-    %2 = AND8ri %1, 1, implicit-def %eflags
-    %al = COPY %2
-    RETQ %al
+    %2 = AND8ri %1, 1, implicit-def $eflags
+    $al = COPY %2
+    RETQ $al
 ...
Index: test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
===================================================================
--- test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
+++ test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
@@ -25,18 +25,18 @@
   ; CHECK-LABEL: bb.1.less:
   bb.0.entry:
     successors: %bb.1 (33), %bb.2(67)
-    liveins: %edi
+    liveins: $edi
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2, implicit killed %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2, implicit killed $eflags
 
   bb.1.less:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
 
   bb.2.exit:
-    liveins: %edi
+    liveins: $edi
 
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/successor-basic-blocks.mir
===================================================================
--- test/CodeGen/MIR/X86/successor-basic-blocks.mir
+++ test/CodeGen/MIR/X86/successor-basic-blocks.mir
@@ -35,20 +35,20 @@
   ; CHECK-LABEL: bb.1.less:
   bb.0.entry:
     successors: %bb.1.less, %bb.2.exit
-    liveins: %edi
+    liveins: $edi
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2.exit, implicit killed %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2.exit, implicit killed $eflags
 
   bb.1.less:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
 
   bb.2.exit:
-    liveins: %edi
+    liveins: $edi
 
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
 ---
 name: bar
@@ -59,24 +59,24 @@
   ; CHECK-LABEL: bb.0.entry:
   ; CHECK: successors: %bb.1(0x80000000), %bb.2(0x00000000)
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
     successors: %bb.1
     successors: %bb.2
 
-    CMP32ri8 %edi, 10, implicit-def %eflags
-    JG_1 %bb.2, implicit killed %eflags
+    CMP32ri8 $edi, 10, implicit-def $eflags
+    JG_1 %bb.2, implicit killed $eflags
 
   ; Verify that we can have an empty list of successors.
   ; CHECK-LABEL: bb.1:
-  ; CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags
+  ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags
   bb.1:
     successors:
-    %eax = MOV32r0 implicit-def dead %eflags
-    RETQ killed %eax
+    $eax = MOV32r0 implicit-def dead $eflags
+    RETQ killed $eax
 
   bb.2:
-    liveins: %edi
+    liveins: $edi
 
-    %eax = COPY killed %edi
-    RETQ killed %eax
+    $eax = COPY killed $edi
+    RETQ killed $eax
 ...
Index: test/CodeGen/MIR/X86/tied-def-operand-invalid.mir
===================================================================
--- test/CodeGen/MIR/X86/tied-def-operand-invalid.mir
+++ test/CodeGen/MIR/X86/tied-def-operand-invalid.mir
@@ -12,13 +12,13 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: [[@LINE+1]]:58: use of invalid tied-def operand index '0'; the operand #0 isn't a defined register
-    INLINEASM &"$foo", 1, 2818058, def %rdi, 2147483657, killed %rdi(tied-def 0)
-    %rax = COPY killed %rdi
-    RETQ killed %rax
+    INLINEASM &"$foo", 1, 2818058, def $rdi, 2147483657, killed $rdi(tied-def 0)
+    $rax = COPY killed $rdi
+    RETQ killed $rax
 ...
Index: test/CodeGen/MIR/X86/tied-physical-regs-match.mir
===================================================================
--- test/CodeGen/MIR/X86/tied-physical-regs-match.mir
+++ test/CodeGen/MIR/X86/tied-physical-regs-match.mir
@@ -14,9 +14,9 @@
 name: foo
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
 
     ; CHECK: Tied physical registers must match.
-    %rbx = AND64rm killed %rdx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags
-    RETQ %rbx
+    $rbx = AND64rm killed $rdx, killed $rdi, 1, _, 0, _, implicit-def dead $eflags
+    RETQ $rbx
 ...
Index: test/CodeGen/MIR/X86/undef-register-flag.mir
===================================================================
--- test/CodeGen/MIR/X86/undef-register-flag.mir
+++ test/CodeGen/MIR/X86/undef-register-flag.mir
@@ -23,16 +23,16 @@
 name: compute
 body: |
   bb.0.body:
-    %eax = IMUL32rri8 %edi, 11, implicit-def %eflags
-    RETQ %eax
+    $eax = IMUL32rri8 $edi, 11, implicit-def $eflags
+    RETQ $eax
 ...
 ---
 name: foo
 body: |
   bb.0.entry:
-    ; CHECK: PUSH64r undef %rax
-    PUSH64r undef %rax, implicit-def %rsp, implicit %rsp
-    CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
-    %rdx = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    ; CHECK: PUSH64r undef $rax
+    PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+    CALL64pcrel32 @compute, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax
+    $rdx = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
+++ test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
@@ -32,6 +32,6 @@
     MOV32mr %stack.0, 1, _, 0, _, %0
     MOV32mi %stack.1, 1, _, 0, _, 2
     %1 = MOV32rm %stack.0, 1, _, 0, _
-    %eax = COPY %1
-    RETL %eax
+    $eax = COPY %1
+    RETL $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-global-value.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-global-value.mir
+++ test/CodeGen/MIR/X86/undefined-global-value.mir
@@ -19,8 +19,8 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:32: use of undefined global value '@2'
-    %rax = MOV64rm %rip, 1, _, @2, _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @2, _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
+++ test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
@@ -21,9 +21,9 @@
   bb.0.entry:
     successors: %bb.1.block
     ; CHECK: [[@LINE+1]]:51: use of undefined IR block '%ir-block."block "'
-    %rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block."block "), _
-    MOV64mr %rip, 1, _, @addr, _, killed %rax
-    JMP64m %rip, 1, _, @addr, _
+    $rax = LEA64r $rip, 1, _, blockaddress(@test, %ir-block."block "), _
+    MOV64mr $rip, 1, _, @addr, _, killed $rax
+    JMP64m $rip, 1, _, @addr, _
 
   bb.1.block (address-taken):
     RETQ
Index: test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
+++ test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
@@ -20,9 +20,9 @@
   bb.0.entry:
     successors: %bb.1
     ; CHECK: [[@LINE+1]]:51: use of undefined IR block '%ir-block.1'
-    %rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.1), _
-    MOV64mr %rip, 1, _, @addr, _, killed %rax
-    JMP64m %rip, 1, _, @addr, _
+    $rax = LEA64r $rip, 1, _, blockaddress(@test, %ir-block.1), _
+    MOV64mr $rip, 1, _, @addr, _, killed $rax
+    JMP64m $rip, 1, _, @addr, _
 
   bb.1 (address-taken):
     RETQ
Index: test/CodeGen/MIR/X86/undefined-jump-table-id.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-jump-table-id.mir
+++ test/CodeGen/MIR/X86/undefined-jump-table-id.mir
@@ -39,35 +39,35 @@
   bb.0.entry:
     successors: %bb.2.def, %bb.1.entry
 
-    %eax = MOV32rr %edi, implicit-def %rax
-    CMP32ri8 %edi, 3, implicit-def %eflags
-    JA_1 %bb.2.def, implicit %eflags
+    $eax = MOV32rr $edi, implicit-def $rax
+    CMP32ri8 $edi, 3, implicit-def $eflags
+    JA_1 %bb.2.def, implicit $eflags
 
   bb.1.entry:
     successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
 
     ; CHECK: [[@LINE+1]]:31: use of undefined jump table '%jump-table.2'
-    %rcx = LEA64r %rip, 1, _, %jump-table.2, _
-    %rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
-    %rax = ADD64rr %rax, %rcx, implicit-def %eflags
-    JMP64r %rax
+    $rcx = LEA64r $rip, 1, _, %jump-table.2, _
+    $rax = MOVSX64rm32 $rcx, 4, $rax, 0, _
+    $rax = ADD64rr $rax, $rcx, implicit-def $eflags
+    JMP64r $rax
 
   bb.2.def:
-    %eax = MOV32r0 implicit-def %eflags
-    RETQ %eax
+    $eax = MOV32r0 implicit-def $eflags
+    RETQ $eax
 
   bb.3.lbl1:
-    %eax = MOV32ri 1
-    RETQ %eax
+    $eax = MOV32ri 1
+    RETQ $eax
 
   bb.4.lbl2:
-    %eax = MOV32ri 2
-    RETQ %eax
+    $eax = MOV32ri 2
+    RETQ $eax
 
   bb.5.lbl3:
-    %eax = MOV32ri 4
-    RETQ %eax
+    $eax = MOV32ri 4
+    RETQ $eax
 
   bb.6.lbl4:
-    %eax = MOV32ri 8
-    RETQ %eax
+    $eax = MOV32ri 8
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-named-global-value.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-named-global-value.mir
+++ test/CodeGen/MIR/X86/undefined-named-global-value.mir
@@ -19,8 +19,8 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:32: use of undefined global value '@GG'
-    %rax = MOV64rm %rip, 1, _, @GG, _
-    %eax = MOV32rm %rax, 1, _, 0, _
-    %eax = INC32r %eax
-    RETQ %eax
+    $rax = MOV64rm $rip, 1, _, @GG, _
+    $eax = MOV32rm $rax, 1, _, 0, _
+    $eax = INC32r $eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-stack-object.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-stack-object.mir
+++ test/CodeGen/MIR/X86/undefined-stack-object.mir
@@ -21,9 +21,9 @@
   - { id: 0, name: b, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
    ; CHECK: [[@LINE+1]]:13: use of undefined stack object '%stack.2'
     MOV32mr %stack.2, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
+++ test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
@@ -13,12 +13,12 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: [[@LINE+1]]:60: use of undefined IR value '%ir.c'
-    %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.c)
-    RETQ %eax
+    $eax = MOV32rm killed $rdi, 1, _, 0, _ :: (load 4 from %ir.c)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/undefined-virtual-register.mir
===================================================================
--- test/CodeGen/MIR/X86/undefined-virtual-register.mir
+++ test/CodeGen/MIR/X86/undefined-virtual-register.mir
@@ -17,9 +17,9 @@
   - { id: 0, class: gr32 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
     ; CHECK: Cannot determine class/bank of virtual register 1 in function 'test'
-    %eax = COPY %1
-    RETQ %eax
+    $eax = COPY %1
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/unexpected-type-phys.mir
===================================================================
--- test/CodeGen/MIR/X86/unexpected-type-phys.mir
+++ test/CodeGen/MIR/X86/unexpected-type-phys.mir
@@ -7,7 +7,7 @@
 registers:
 body: |
   bb.0.entry:
-    liveins: %edi
+    liveins: $edi
     ; CHECK: [[@LINE+1]]:10: unexpected type on physical register
-    %edi(s32) = G_ADD i32 %edi, %edi
+    $edi(s32) = G_ADD i32 $edi, $edi
 ...
Index: test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
+++ test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
@@ -23,14 +23,14 @@
 name: foo
 body: |
   bb.0.entry:
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
     ; CHECK: [[@LINE+1]]:10: use of undefined machine basic block #4
-    JG_1 %bb.4, implicit %eflags
+    JG_1 %bb.4, implicit $eflags
 
   bb.1:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.2:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
+++ test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
@@ -13,13 +13,13 @@
 name: inc
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 body: |
   bb.0.entry:
-    liveins: %rdi
+    liveins: $rdi
     ; CHECK: [[@LINE+1]]:60: use of unknown metadata keyword '!tba'
-    %eax = MOV32rm %rdi, 1, _, 0, _ :: (load 4 from %ir.x, !tba !0)
-    %eax = INC32r killed %eax, implicit-def dead %eflags
-    MOV32mr killed %rdi, 1, _, 0, _, %eax :: (store 4 into %ir.x)
-    RETQ %eax
+    $eax = MOV32rm $rdi, 1, _, 0, _ :: (load 4 from %ir.x, !tba !0)
+    $eax = INC32r killed $eax, implicit-def dead $eflags
+    MOV32mr killed $rdi, 1, _, 0, _, $eax :: (store 4 into %ir.x)
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/unknown-metadata-node.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-metadata-node.mir
+++ test/CodeGen/MIR/X86/unknown-metadata-node.mir
@@ -48,10 +48,10 @@
   - { id: 0, name: x.addr, size: 4, alignment: 4 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
     ; CHECK: [[@LINE+1]]:21: use of undefined metadata '!42'
     DBG_VALUE _, 0, !42, !13
     MOV32mr %stack.0.x.addr, 1, _, 0, _, %0
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
+++ test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
@@ -22,14 +22,14 @@
 name: foo
 body: |
   bb.0.entry:
-    %eax = MOV32rm %rdi, 1, _, 0, _
-    CMP32ri8 %eax, 10, implicit-def %eflags
+    $eax = MOV32rm $rdi, 1, _, 0, _
+    CMP32ri8 $eax, 10, implicit-def $eflags
     ; CHECK: [[@LINE+1]]:10: the name of machine basic block #2 isn't 'hit'
-    JG_1 %bb.2.hit, implicit %eflags
+    JG_1 %bb.2.hit, implicit $eflags
 
  bb.1.less:
-    %eax = MOV32r0 implicit-def %eflags
+    $eax = MOV32r0 implicit-def $eflags
 
   bb.2.exit:
-    RETQ %eax
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/unknown-register.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-register.mir
+++ test/CodeGen/MIR/X86/unknown-register.mir
@@ -15,6 +15,6 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:5: unknown register name 'xax'
-    %xax = MOV32r0
-    RETQ %xax
+    $xax = MOV32r0
+    RETQ $xax
 ...
Index: test/CodeGen/MIR/X86/unknown-subregister-index-op.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-subregister-index-op.mir
+++ test/CodeGen/MIR/X86/unknown-subregister-index-op.mir
@@ -20,6 +20,6 @@
 body: |
   bb.0.entry:
     ; CHECK: [[@LINE+1]]:35: unknown subregister index 'bit8'
-    %0 = INSERT_SUBREG %edi, %al, %subreg.bit8
+    %0 = INSERT_SUBREG $edi, $al, %subreg.bit8
     RETQ %0
 ...
Index: test/CodeGen/MIR/X86/unknown-subregister-index.mir
===================================================================
--- test/CodeGen/MIR/X86/unknown-subregister-index.mir
+++ test/CodeGen/MIR/X86/unknown-subregister-index.mir
@@ -19,10 +19,10 @@
   - { id: 2, class: gr8 }
 body: |
   bb.0.entry:
-    %0 = COPY %edi
+    %0 = COPY $edi
     ; CHECK: [[@LINE+1]]:18: use of unknown subregister index 'bit8'
     %1 = COPY %0.bit8
-    %2 = AND8ri %1, 1, implicit-def %eflags
-    %al = COPY %2
-    RETQ %al
+    %2 = AND8ri %1, 1, implicit-def $eflags
+    $al = COPY %2
+    RETQ $al
 ...
Index: test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
===================================================================
--- test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
+++ test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
@@ -37,8 +37,8 @@
   - { id: 2, name: y, type: variable-sized, offset: -32, alignment: 1 }
 body: |
   bb.0.entry:
-    MOV32mr %rsp, 1, _, -4, _, %edi
-    MOV64mi32 %rsp, 1, _, -16, _, 2
-    %eax = MOV32rm %rsp, 1, _, -4, _
-    RETQ %eax
+    MOV32mr $rsp, 1, _, -4, _, $edi
+    MOV64mi32 $rsp, 1, _, -16, _, 2
+    $eax = MOV32rm $rsp, 1, _, -4, _
+    RETQ $eax
 ...
Index: test/CodeGen/MIR/X86/virtual-registers.mir
===================================================================
--- test/CodeGen/MIR/X86/virtual-registers.mir
+++ test/CodeGen/MIR/X86/virtual-registers.mir
@@ -43,24 +43,24 @@
 body: |
   bb.0.entry:
     successors: %bb.2.exit, %bb.1.less
-    liveins: %edi
-    ; CHECK: %0:gr32 = COPY %edi
+    liveins: $edi
+    ; CHECK: %0:gr32 = COPY $edi
     ; CHECK-NEXT: %1:gr32 = SUB32ri8 %0, 10
-    %0 = COPY %edi
-    %1 = SUB32ri8 %0, 10, implicit-def %eflags
-    JG_1 %bb.2.exit, implicit %eflags
+    %0 = COPY $edi
+    %1 = SUB32ri8 %0, 10, implicit-def $eflags
+    JG_1 %bb.2.exit, implicit $eflags
     JMP_1 %bb.1.less
 
   bb.1.less:
     ; CHECK: %2:gr32 = MOV32r0
-    ; CHECK-NEXT: %eax = COPY %2
-    %2 = MOV32r0 implicit-def %eflags
-    %eax = COPY %2
-    RETQ %eax
+    ; CHECK-NEXT: $eax = COPY %2
+    %2 = MOV32r0 implicit-def $eflags
+    $eax = COPY %2
+    RETQ $eax
 
   bb.2.exit:
-    %eax = COPY %0
-    RETQ %eax
+    $eax = COPY %0
+    RETQ $eax
 ...
 ---
 name: foo
@@ -77,23 +77,23 @@
 body: |
   bb.0.entry:
     successors: %bb.2.exit, %bb.1.less
-    liveins: %edi
-    ; CHECK: %0:gr32 = COPY %edi
+    liveins: $edi
+    ; CHECK: %0:gr32 = COPY $edi
     ; CHECK-NEXT: %1:gr32 = SUB32ri8 %0, 10
-    %2 = COPY %edi
-    %0 = SUB32ri8 %2, 10, implicit-def %eflags
-    JG_1 %bb.2.exit, implicit %eflags
+    %2 = COPY $edi
+    %0 = SUB32ri8 %2, 10, implicit-def $eflags
+    JG_1 %bb.2.exit, implicit $eflags
     JMP_1 %bb.1.less
 
   bb.1.less:
     ; CHECK: %2:gr32 = MOV32r0
-    ; CHECK-NEXT: %eax = COPY %2
-    %10 = MOV32r0 implicit-def %eflags
-    %eax = COPY %10
-    RETQ %eax
+    ; CHECK-NEXT: $eax = COPY %2
+    %10 = MOV32r0 implicit-def $eflags
+    $eax = COPY %10
+    RETQ $eax
 
   bb.2.exit:
-    ; CHECK: %eax = COPY %0
-    %eax = COPY %2
-    RETQ %eax
+    ; CHECK: $eax = COPY %0
+    $eax = COPY %2
+    RETQ $eax
 ...
Index: test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
===================================================================
--- test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
+++ test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
@@ -62,17 +62,17 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%a0_64' }
-  - { reg: '%t9_64' }
-calleeSavedRegisters: [ '%fp', '%gp', '%ra', '%d12', '%d13', '%d14', '%d15',
-                        '%f24', '%f25', '%f26', '%f27', '%f28', '%f29',
-                        '%f30', '%f31', '%fp_64', '%f_hi24', '%f_hi25',
-                        '%f_hi26', '%f_hi27', '%f_hi28', '%f_hi29', '%f_hi30',
-                        '%f_hi31', '%gp_64', '%ra_64', '%s0', '%s1', '%s2',
-                        '%s3', '%s4', '%s5', '%s6', '%s7', '%d24_64', '%d25_64',
-                        '%d26_64', '%d27_64', '%d28_64', '%d29_64', '%d30_64',
-                        '%d31_64', '%s0_64', '%s1_64', '%s2_64', '%s3_64',
-                        '%s4_64', '%s5_64', '%s6_64', '%s7_64' ]
+  - { reg: '$a0_64' }
+  - { reg: '$t9_64' }
+calleeSavedRegisters: [ '$fp', '$gp', '$ra', '$d12', '$d13', '$d14', '$d15',
+                        '$f24', '$f25', '$f26', '$f27', '$f28', '$f29',
+                        '$f30', '$f31', '$fp_64', '$f_hi24', '$f_hi25',
+                        '$f_hi26', '$f_hi27', '$f_hi28', '$f_hi29', '$f_hi30',
+                        '$f_hi31', '$gp_64', '$ra_64', '$s0', '$s1', '$s2',
+                        '$s3', '$s4', '$s5', '$s6', '$s7', '$d24_64', '$d25_64',
+                        '$d26_64', '$d27_64', '$d28_64', '$d29_64', '$d30_64',
+                        '$d31_64', '$s0_64', '$s1_64', '$s2_64', '$s3_64',
+                        '$s4_64', '$s5_64', '$s6_64', '$s7_64' ]
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -90,69 +90,69 @@
 stack:
   - { id: 0, name: retval, offset: -28, size: 4, alignment: 4 }
   - { id: 1, name: a.addr, offset: -32, size: 4, alignment: 4 }
-  - { id: 2, type: spill-slot, offset: -8, size: 8, alignment: 8, callee-saved-register: '%ra_64' }
-  - { id: 3, type: spill-slot, offset: -16, size: 8, alignment: 8, callee-saved-register: '%fp_64' }
-  - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%gp_64' }
+  - { id: 2, type: spill-slot, offset: -8, size: 8, alignment: 8, callee-saved-register: '$ra_64' }
+  - { id: 3, type: spill-slot, offset: -16, size: 8, alignment: 8, callee-saved-register: '$fp_64' }
+  - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$gp_64' }
 body: |
   bb.0.entry:
     successors: %bb.1.if.then(0x40000000), %bb.5.if.else(0x40000000)
-    liveins: %a0_64, %t9_64, %ra_64, %fp_64, %gp_64
+    liveins: $a0_64, $t9_64, $ra_64, $fp_64, $gp_64
 
-    %sp_64 = DADDiu %sp_64, -32
+    $sp_64 = DADDiu $sp_64, -32
     CFI_INSTRUCTION def_cfa_offset 32
-    SD killed %ra_64, %sp_64, 24 :: (store 8 into %stack.2)
-    SD killed %fp_64, %sp_64, 16 :: (store 8 into %stack.3)
-    SD killed %gp_64, %sp_64, 8 :: (store 8 into %stack.4)
-    CFI_INSTRUCTION offset %ra_64, -8
-    CFI_INSTRUCTION offset %fp_64, -16
-    CFI_INSTRUCTION offset %gp_64, -24
-    CFI_INSTRUCTION def_cfa_register %fp_64
-    %at_64 = LUi64 @f
-    %v0_64 = DADDu killed %at_64, %t9_64
-    SW %a0, %sp_64, 0 :: (store 4 into %ir.a.addr)
-    BGTZC %a0, %bb.5.if.else, implicit-def %at
+    SD killed $ra_64, $sp_64, 24 :: (store 8 into %stack.2)
+    SD killed $fp_64, $sp_64, 16 :: (store 8 into %stack.3)
+    SD killed $gp_64, $sp_64, 8 :: (store 8 into %stack.4)
+    CFI_INSTRUCTION offset $ra_64, -8
+    CFI_INSTRUCTION offset $fp_64, -16
+    CFI_INSTRUCTION offset $gp_64, -24
+    CFI_INSTRUCTION def_cfa_register $fp_64
+    $at_64 = LUi64 @f
+    $v0_64 = DADDu killed $at_64, $t9_64
+    SW $a0, $sp_64, 0 :: (store 4 into %ir.a.addr)
+    BGTZC $a0, %bb.5.if.else, implicit-def $at
 
   bb.1.if.then:
     successors: %bb.6.return(0x40000000), %bb.2.if.then(0x40000000)
-    liveins: %a0
+    liveins: $a0
 
-    BLTZC %a0, %bb.6.return, implicit-def %at
+    BLTZC $a0, %bb.6.return, implicit-def $at
 
   bb.2.if.then:
     successors: %bb.3.if.else(0x80000000)
 
-    %t8 = IMPLICIT_DEF
+    $t8 = IMPLICIT_DEF
 
   bb.3.if.else:
     successors: %bb.6.return(0x40000000), %bb.4.if.else(0x40000000)
-    liveins: %t8
+    liveins: $t8
 
-    BLEZC %t8, %bb.6.return, implicit-def %at
+    BLEZC $t8, %bb.6.return, implicit-def $at
 
   bb.4.if.else:
     successors: %bb.6.return(0x80000000)
-    liveins: %t8
+    liveins: $t8
 
-    %at = LW %sp_64, 0 :: (dereferenceable load 4 from %ir.a.addr)
-    %at = ADDu killed %at, %t8
-    SW killed %at, %sp_64, 4 :: (store 4 into %ir.retval)
-    J %bb.6.return, implicit-def dead %at
+    $at = LW $sp_64, 0 :: (dereferenceable load 4 from %ir.a.addr)
+    $at = ADDu killed $at, $t8
+    SW killed $at, $sp_64, 4 :: (store 4 into %ir.retval)
+    J %bb.6.return, implicit-def dead $at
 
   bb.5.if.else:
     successors: %bb.6.return(0x80000000)
-    liveins: %v0_64
+    liveins: $v0_64
 
-    %gp_64 = DADDiu killed %v0_64, @f
-    %a0_64 = LW64 %sp_64, 0 :: (dereferenceable load 4 from %ir.a.addr)
-    %t9_64 = LD %gp_64, @g :: (load 8 from call-entry @g)
-    JALR64Pseudo %t9_64, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0
-    SW killed %v0, %sp_64, 4 :: (store 4 into %ir.retval)
+    $gp_64 = DADDiu killed $v0_64, @f
+    $a0_64 = LW64 $sp_64, 0 :: (dereferenceable load 4 from %ir.a.addr)
+    $t9_64 = LD $gp_64, @g :: (load 8 from call-entry @g)
+    JALR64Pseudo $t9_64, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0
+    SW killed $v0, $sp_64, 4 :: (store 4 into %ir.retval)
 
   bb.6.return:
-    %v0 = LW %sp_64, 4 :: (dereferenceable load 4 from %ir.retval)
-    %gp_64 = LD %sp_64, 8 :: (load 8 from %stack.4)
-    %fp_64 = LD %sp_64, 16 :: (load 8 from %stack.3)
-    %ra_64 = LD %sp_64, 24 :: (load 8 from %stack.2)
-    %sp_64 = DADDiu %sp_64, 32
-    PseudoReturn64 %ra_64
+    $v0 = LW $sp_64, 4 :: (dereferenceable load 4 from %ir.retval)
+    $gp_64 = LD $sp_64, 8 :: (load 8 from %stack.4)
+    $fp_64 = LD $sp_64, 16 :: (load 8 from %stack.3)
+    $ra_64 = LD $sp_64, 24 :: (load 8 from %stack.2)
+    $sp_64 = DADDiu $sp_64, 32
+    PseudoReturn64 $ra_64
 ...
Index: test/CodeGen/Mips/compactbranches/empty-block.mir =================================================================== --- test/CodeGen/Mips/compactbranches/empty-block.mir +++ test/CodeGen/Mips/compactbranches/empty-block.mir @@ -58,18 +58,18 @@ hasVAStart: false hasMustTailInVarArgFunc: false stack: - - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%ra' } + - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$ra' } body: | bb.0.entry: successors: %bb.1.if.then(0x50000000), %bb.4.if.end(0x30000000) - liveins: %ra + liveins: $ra - %sp = ADDiu %sp, -24 + $sp = ADDiu $sp, -24 CFI_INSTRUCTION def_cfa_offset 24 - SW killed %ra, %sp, 20 :: (store 4 into %stack.0) - CFI_INSTRUCTION offset %ra_64, -4 - JAL @k, csr_o32_fp64, implicit-def dead %ra, implicit-def %sp, implicit-def %v0 - BLEZ %v0, %bb.4.if.end, implicit-def %at + SW killed $ra, $sp, 20 :: (store 4 into %stack.0) + CFI_INSTRUCTION offset $ra_64, -4 + JAL @k, csr_o32_fp64, implicit-def dead $ra, implicit-def $sp, implicit-def $v0 + BLEZ $v0, %bb.4.if.end, implicit-def $at bb.1.if.then: successors: %bb.2.if.then(0x80000000) @@ -80,12 +80,12 @@ bb.3.if.then: successors: %bb.4.if.end(0x80000000) - %a0 = ADDiu %zero, 2 - JAL @f, csr_o32_fp64, implicit-def dead %ra, implicit killed %a0, implicit-def %sp + $a0 = ADDiu $zero, 2 + JAL @f, csr_o32_fp64, implicit-def dead $ra, implicit killed $a0, implicit-def $sp bb.4.if.end: - %ra = LW %sp, 20 :: (load 4 from %stack.0) - %sp = ADDiu %sp, 24 - PseudoReturn undef %ra + $ra = LW $sp, 20 :: (load 4 from %stack.0) + $sp = ADDiu $sp, 24 + PseudoReturn undef $ra ... Index: test/CodeGen/Mips/instverify/dext-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/dext-pos.mir +++ test/CodeGen/Mips/instverify/dext-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXT %0, 55, 10 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dext-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dext-size.mir +++ test/CodeGen/Mips/instverify/dext-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXT %0, 5, 50 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... 
Index: test/CodeGen/Mips/instverify/dextm-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dextm-pos-size.mir +++ test/CodeGen/Mips/instverify/dextm-pos-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTM %0, 3, 62 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dextm-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/dextm-pos.mir +++ test/CodeGen/Mips/instverify/dextm-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTM %0, 65, 5 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dextm-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dextm-size.mir +++ test/CodeGen/Mips/instverify/dextm-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTM %0, 31, 67 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dextu-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dextu-pos-size.mir +++ test/CodeGen/Mips/instverify/dextu-pos-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTU %0, 43, 30 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... 
Index: test/CodeGen/Mips/instverify/dextu-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/dextu-pos.mir +++ test/CodeGen/Mips/instverify/dextu-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTU %0, 64, 5 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dextu-size-valid.mir =================================================================== --- test/CodeGen/Mips/instverify/dextu-size-valid.mir +++ test/CodeGen/Mips/instverify/dextu-size-valid.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTU %0, 63, 1 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dextu-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dextu-size.mir +++ test/CodeGen/Mips/instverify/dextu-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DEXTU %0, 33, 67 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dins-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dins-pos-size.mir +++ test/CodeGen/Mips/instverify/dins-pos-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINS %0, 17, 17 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... 
Index: test/CodeGen/Mips/instverify/dins-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/dins-pos.mir +++ test/CodeGen/Mips/instverify/dins-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINS %0, 55, 10 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dins-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dins-size.mir +++ test/CodeGen/Mips/instverify/dins-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINS %0, 5, 50 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dinsm-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dinsm-pos-size.mir +++ test/CodeGen/Mips/instverify/dinsm-pos-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINSM %0, 20, 50 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dinsm-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/dinsm-pos.mir +++ test/CodeGen/Mips/instverify/dinsm-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINSM %0, 65, 5 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... 
Index: test/CodeGen/Mips/instverify/dinsm-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dinsm-size.mir +++ test/CodeGen/Mips/instverify/dinsm-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINSM %0, 31, 67 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dinsu-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dinsu-pos-size.mir +++ test/CodeGen/Mips/instverify/dinsu-pos-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINSU %0, 50, 20 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dinsu-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/dinsu-pos.mir +++ test/CodeGen/Mips/instverify/dinsu-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINSU %0, 65, 5 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... Index: test/CodeGen/Mips/instverify/dinsu-size.mir =================================================================== --- test/CodeGen/Mips/instverify/dinsu-size.mir +++ test/CodeGen/Mips/instverify/dinsu-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr64, preferred-register: '' } - { id: 1, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } + - { reg: '$a0_64', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0_64 + liveins: $a0_64 - %0 = COPY %a0_64 + %0 = COPY $a0_64 %1 = DINSU %0, 33, 67 - %v0_64 = COPY %1 - RetRA implicit %v0_64 + $v0_64 = COPY %1 + RetRA implicit $v0_64 ... 
Index: test/CodeGen/Mips/instverify/ext-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/ext-pos-size.mir +++ test/CodeGen/Mips/instverify/ext-pos-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr32, preferred-register: '' } - { id: 1, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } + - { reg: '$a0', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0 + liveins: $a0 - %0 = COPY %a0 + %0 = COPY $a0 %1 = EXT %0, 17, 17 - %v0 = COPY %1 - RetRA implicit %v0 + $v0 = COPY %1 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/instverify/ext-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/ext-pos.mir +++ test/CodeGen/Mips/instverify/ext-pos.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr32, preferred-register: '' } - { id: 1, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } + - { reg: '$a0', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0 + liveins: $a0 - %0 = COPY %a0 + %0 = COPY $a0 %1 = EXT %0, 44, 21 - %v0 = COPY %1 - RetRA implicit %v0 + $v0 = COPY %1 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/instverify/ext-size.mir =================================================================== --- test/CodeGen/Mips/instverify/ext-size.mir +++ test/CodeGen/Mips/instverify/ext-size.mir @@ -16,7 +16,7 @@ - { id: 0, class: gpr32, preferred-register: '' } - { id: 1, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } + - { reg: '$a0', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -39,11 +39,11 @@ constants: body: | bb.0.entry: - liveins: %a0 + liveins: $a0 - %0 = COPY %a0 + %0 = COPY $a0 %1 = EXT %0, 0, 33 - %v0 = COPY %1 - RetRA implicit %v0 + $v0 = COPY %1 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/instverify/ins-pos-size.mir =================================================================== --- test/CodeGen/Mips/instverify/ins-pos-size.mir +++ test/CodeGen/Mips/instverify/ins-pos-size.mir @@ -18,8 +18,8 @@ - { id: 2, class: gpr32, preferred-register: '' } - { id: 3, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } - - { reg: '%a1', virtual-reg: '%1' } + - { reg: '$a0', virtual-reg: '%0' } + - { reg: '$a1', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -42,13 +42,13 @@ constants: body: | bb.0.entry: - liveins: %a0, %a1 + liveins: $a0, $a1 - %1 = COPY %a1 - %0 = COPY %a0 + %1 = COPY $a1 + %0 = COPY $a0 %2 = ANDi %1, 15 %3 = INS killed %2, 17, 17, %0 - %v0 = COPY %3 - RetRA implicit %v0 + $v0 = COPY %3 + RetRA implicit $v0 ... 
Index: test/CodeGen/Mips/instverify/ins-pos.mir =================================================================== --- test/CodeGen/Mips/instverify/ins-pos.mir +++ test/CodeGen/Mips/instverify/ins-pos.mir @@ -18,8 +18,8 @@ - { id: 2, class: gpr32, preferred-register: '' } - { id: 3, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } - - { reg: '%a1', virtual-reg: '%1' } + - { reg: '$a0', virtual-reg: '%0' } + - { reg: '$a1', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -42,13 +42,13 @@ constants: body: | bb.0.entry: - liveins: %a0, %a1 + liveins: $a0, $a1 - %1 = COPY %a1 - %0 = COPY %a0 + %1 = COPY $a1 + %0 = COPY $a0 %2 = ANDi %1, 15 %3 = INS killed %2, 32, 4, %0 - %v0 = COPY %3 - RetRA implicit %v0 + $v0 = COPY %3 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/instverify/ins-size.mir =================================================================== --- test/CodeGen/Mips/instverify/ins-size.mir +++ test/CodeGen/Mips/instverify/ins-size.mir @@ -18,8 +18,8 @@ - { id: 2, class: gpr32, preferred-register: '' } - { id: 3, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } - - { reg: '%a1', virtual-reg: '%1' } + - { reg: '$a0', virtual-reg: '%0' } + - { reg: '$a1', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -42,13 +42,13 @@ constants: body: | bb.0.entry: - liveins: %a0, %a1 + liveins: $a0, $a1 - %1 = COPY %a1 - %0 = COPY %a0 + %1 = COPY $a1 + %0 = COPY $a0 %2 = ANDi %1, 15 %3 = INS killed %2, 0, 40, %0 - %v0 = COPY %3 - RetRA implicit %v0 + $v0 = COPY %3 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/llvm-ir/call.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/call.ll +++ test/CodeGen/Mips/llvm-ir/call.ll @@ -161,8 +161,8 @@ define hidden void @thunk_undef_double(i32 %this, double %volume) unnamed_addr align 2 { ; ALL-LABEL: thunk_undef_double: -; O32: # implicit-def: %a2 -; O32: # implicit-def: %a3 +; O32: # implicit-def: $a2 +; O32: # implicit-def: $a3 ; NOT-R6C: jr $[[TGT]] ; R6C: jrc $[[TGT]] Index: test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir =================================================================== --- test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir +++ test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir @@ -118,8 +118,8 @@ - { id: 57, class: gpr64, preferred-register: '' } - { id: 58, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%5' } - - { reg: '%t9_64', virtual-reg: '' } + - { reg: '$a0_64', virtual-reg: '%5' } + - { reg: '$t9_64', virtual-reg: '' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -143,21 +143,21 @@ body: | bb.0.entry: successors: %bb.1.entry._ZTW1k.exit_crit_edge(0x7fe00000), %bb.2.init.i.i(0x00200000) - liveins: %a0_64, %t9_64 + liveins: $a0_64, $t9_64 %57 = LUi64 target-flags(mips-gpoff-hi) @_Z2k1i - %58 = DADDu %57, %t9_64 + %58 = DADDu %57, $t9_64 %6 = DADDiu %58, target-flags(mips-gpoff-lo) @_Z2k1i - %5 = COPY %a0_64 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + %5 = COPY $a0_64 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %7 = LUi64 target-flags(mips-call-hi16) @_Z1gi %8 = DADDu killed %7, %6 %9 = LD killed %8, target-flags(mips-call-lo16) @_Z1gi :: (load 8 from call-entry @_Z1gi) - %a0_64 = COPY %5 - %gp_64 = COPY %6 - JALR64Pseudo killed %9, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, 
implicit-def %sp, implicit-def %v0 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %10 = COPY %v0 + $a0_64 = COPY %5 + $gp_64 = COPY %6 + JALR64Pseudo killed %9, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %10 = COPY $v0 %11 = COPY %5.sub_32 %12 = ADDu %10, killed %11 %13 = LUi64 target-flags(mips-got-hi16) @v @@ -165,71 +165,71 @@ %15 = LD killed %14, target-flags(mips-got-lo16) @v :: (load 8 from got) %16 = LW killed %15, 0 :: (dereferenceable load 4 from @v) %0 = ADDu killed %12, killed %16 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %17 = LUi64 target-flags(mips-call-hi16) &__tls_get_addr %18 = DADDu killed %17, %6 %19 = LD killed %18, target-flags(mips-call-lo16) &__tls_get_addr :: (load 8 from call-entry &__tls_get_addr) %20 = DADDiu %6, target-flags(mips-tlsldm) @__tls_guard - %a0_64 = COPY %20 - %gp_64 = COPY %6 - JALR64Pseudo killed %19, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0_64 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %21 = COPY %v0_64 + $a0_64 = COPY %20 + $gp_64 = COPY %6 + JALR64Pseudo killed %19, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0_64 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %21 = COPY $v0_64 %22 = DADDiu %21, target-flags(mips-dtprel-hi) @__tls_guard %23 = LBu killed %22, target-flags(mips-dtprel-lo) @__tls_guard :: (dereferenceable load 1 from @__tls_guard) - BEQ killed %23, %zero, %bb.2.init.i.i, implicit-def dead %at - B %bb.1.entry._ZTW1k.exit_crit_edge, implicit-def dead %at + BEQ killed %23, $zero, %bb.2.init.i.i, implicit-def dead $at + B %bb.1.entry._ZTW1k.exit_crit_edge, implicit-def dead $at bb.1.entry._ZTW1k.exit_crit_edge: successors: %bb.3._ZTW1k.exit(0x80000000) - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %39 = LUi64 target-flags(mips-call-hi16) &__tls_get_addr %40 = DADDu killed %39, %6 %41 = LD killed %40, target-flags(mips-call-lo16) &__tls_get_addr :: (load 8 from call-entry &__tls_get_addr) %42 = DADDiu %6, target-flags(mips-tlsgd) @k - %a0_64 = COPY %42 - %gp_64 = COPY %6 - JALR64Pseudo killed %41, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0_64 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %43 = COPY %v0_64 + $a0_64 = COPY %42 + $gp_64 = COPY %6 + JALR64Pseudo killed %41, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0_64 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %43 = COPY $v0_64 %1 = LW %43, 0 :: (dereferenceable load 4 from @k) - B %bb.3._ZTW1k.exit, implicit-def dead %at + B %bb.3._ZTW1k.exit, implicit-def dead $at bb.2.init.i.i: successors: %bb.3._ZTW1k.exit(0x80000000) - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %24 = LUi64 target-flags(mips-call-hi16) &__tls_get_addr %25 = DADDu killed %24, %6 %26 = LD %25, target-flags(mips-call-lo16) &__tls_get_addr :: (load 8 from call-entry &__tls_get_addr) %27 = DADDiu %6, target-flags(mips-tlsldm) @__tls_guard - %a0_64 = COPY %27 - %gp_64 = COPY %6 - JALR64Pseudo killed %26, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, 
implicit-def %sp, implicit-def %v0_64 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %28 = COPY %v0_64 + $a0_64 = COPY %27 + $gp_64 = COPY %6 + JALR64Pseudo killed %26, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0_64 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %28 = COPY $v0_64 %29 = DADDiu %28, target-flags(mips-dtprel-hi) @__tls_guard - %30 = ADDiu %zero, 1 + %30 = ADDiu $zero, 1 SB killed %30, killed %29, target-flags(mips-dtprel-lo) @__tls_guard :: (store 1 into @__tls_guard) - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %31 = LUi64 target-flags(mips-call-hi16) @_Z1gi %32 = DADDu killed %31, %6 - %33 = DADDiu %zero_64, 3 + %33 = DADDiu $zero_64, 3 %34 = LD killed %32, target-flags(mips-call-lo16) @_Z1gi :: (load 8 from call-entry @_Z1gi) - %a0_64 = COPY %33 - %gp_64 = COPY %6 - JALR64Pseudo killed %34, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %35 = COPY %v0 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + $a0_64 = COPY %33 + $gp_64 = COPY %6 + JALR64Pseudo killed %34, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %35 = COPY $v0 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %36 = LD %25, target-flags(mips-call-lo16) &__tls_get_addr :: (load 8 from call-entry &__tls_get_addr) %37 = DADDiu %6, target-flags(mips-tlsgd) @k - %a0_64 = COPY %37 - %gp_64 = COPY %6 - JALR64Pseudo killed %36, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0_64 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %38 = COPY %v0_64 + $a0_64 = COPY %37 + $gp_64 = COPY %6 + JALR64Pseudo killed %36, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0_64 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %38 = COPY $v0_64 SW %35, %38, 0 :: (store 4 into @k) %2 = COPY %35 @@ -241,35 +241,35 @@ %44 = LUi64 target-flags(mips-got-hi16) @_ZTH1j %45 = DADDu killed %44, %6 %46 = LD killed %45, target-flags(mips-got-lo16) @_ZTH1j :: (load 8 from got) - BEQ64 killed %46, %zero_64, %bb.5._ZTW1j.exit, implicit-def dead %at - B %bb.4, implicit-def dead %at + BEQ64 killed %46, $zero_64, %bb.5._ZTW1j.exit, implicit-def dead $at + B %bb.4, implicit-def dead $at bb.4 (%ir-block.2): successors: %bb.5._ZTW1j.exit(0x80000000) - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %47 = LUi64 target-flags(mips-call-hi16) @_ZTH1j %48 = DADDu killed %47, %6 %49 = LD killed %48, target-flags(mips-call-lo16) @_ZTH1j :: (load 8 from call-entry @_ZTH1j) - %gp_64 = COPY %6 - JALR64Pseudo killed %49, csr_n64, implicit-def dead %ra, implicit %gp_64, implicit-def %sp - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp + $gp_64 = COPY %6 + JALR64Pseudo killed %49, csr_n64, implicit-def dead $ra, implicit $gp_64, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp bb.5._ZTW1j.exit: - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %50 = LUi64 target-flags(mips-call-hi16) &__tls_get_addr %51 = DADDu killed %50, %6 %52 = LD killed %51, 
target-flags(mips-call-lo16) &__tls_get_addr :: (load 8 from call-entry &__tls_get_addr) %53 = DADDiu %6, target-flags(mips-tlsgd) @j - %a0_64 = COPY %53 - %gp_64 = COPY %6 - JALR64Pseudo killed %52, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0_64 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %54 = COPY %v0_64 + $a0_64 = COPY %53 + $gp_64 = COPY %6 + JALR64Pseudo killed %52, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0_64 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %54 = COPY $v0_64 %55 = LW %54, 0 :: (dereferenceable load 4 from @j) %56 = ADDu %4, killed %55 - %v0 = COPY %56 - RetRA implicit %v0 + $v0 = COPY %56 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir =================================================================== --- test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir +++ test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir @@ -45,9 +45,9 @@ - { id: 11, class: gpr32, preferred-register: '' } - { id: 12, class: gpr32, preferred-register: '' } liveins: - - { reg: '%a0', virtual-reg: '%0' } - - { reg: '%t9', virtual-reg: '' } - - { reg: '%v0', virtual-reg: '' } + - { reg: '$a0', virtual-reg: '%0' } + - { reg: '$t9', virtual-reg: '' } + - { reg: '$v0', virtual-reg: '' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -70,17 +70,17 @@ constants: body: | bb.0.entry: - liveins: %a0, %t9, %v0 + liveins: $a0, $t9, $v0 - %1 = ADDu %v0, %t9 - %0 = COPY %a0 - ADJCALLSTACKDOWN 16, 0, implicit-def dead %sp, implicit %sp + %1 = ADDu $v0, $t9 + %0 = COPY $a0 + ADJCALLSTACKDOWN 16, 0, implicit-def dead $sp, implicit $sp %2 = LW %1, target-flags(mips-got-call) @_Z1gi :: (load 4 from call-entry @_Z1gi) - %a0 = COPY %0 - %gp = COPY %1 - JALRPseudo killed %2, csr_o32_fpxx, implicit-def dead %ra, implicit %a0, implicit %gp, implicit-def %sp, implicit-def %v0 - ADJCALLSTACKUP 16, 0, implicit-def dead %sp, implicit %sp - %3 = COPY %v0 + $a0 = COPY %0 + $gp = COPY %1 + JALRPseudo killed %2, csr_o32_fpxx, implicit-def dead $ra, implicit $a0, implicit $gp, implicit-def $sp, implicit-def $v0 + ADJCALLSTACKUP 16, 0, implicit-def dead $sp, implicit $sp + %3 = COPY $v0 %4 = ADDu %3, %0 %5 = LW %1, target-flags(mips-got) @v :: (load 4 from got) %6 = LW killed %5, 0 :: (dereferenceable load 4 from @v) @@ -88,8 +88,8 @@ %8 = LW %1, target-flags(mips-got) @j :: (load 4 from got) %9 = LW killed %8, 0 :: (dereferenceable load 4 from @j) %10 = ADDu killed %7, killed %9 - %v0 = COPY %10 - RetRA implicit %v0 + $v0 = COPY %10 + RetRA implicit $v0 ... 
Index: test/CodeGen/Mips/mirparser/target-flags-pic.mir =================================================================== --- test/CodeGen/Mips/mirparser/target-flags-pic.mir +++ test/CodeGen/Mips/mirparser/target-flags-pic.mir @@ -46,8 +46,8 @@ - { id: 12, class: gpr64, preferred-register: '' } - { id: 13, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%0' } - - { reg: '%t9_64', virtual-reg: '' } + - { reg: '$a0_64', virtual-reg: '%0' } + - { reg: '$t9_64', virtual-reg: '' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -70,19 +70,19 @@ constants: body: | bb.0.entry: - liveins: %a0_64, %t9_64 + liveins: $a0_64, $t9_64 %12 = LUi64 target-flags(mips-gpoff-hi) @_Z2k1i - %13 = DADDu %12, %t9_64 + %13 = DADDu %12, $t9_64 %1 = DADDiu %13, target-flags(mips-gpoff-lo) @_Z2k1i - %0 = COPY %a0_64 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + %0 = COPY $a0_64 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %2 = LD %1, target-flags(mips-got-call) @_Z1gi :: (load 8 from call-entry @_Z1gi) - %a0_64 = COPY %0 - %gp_64 = COPY %1 - JALR64Pseudo killed %2, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %gp_64, implicit-def %sp, implicit-def %v0 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %3 = COPY %v0 + $a0_64 = COPY %0 + $gp_64 = COPY %1 + JALR64Pseudo killed %2, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %3 = COPY $v0 %4 = COPY %0.sub_32 %5 = ADDu %3, killed %4 %6 = LD %1, target-flags(mips-got-disp) @v :: (load 8 from got) @@ -91,8 +91,8 @@ %9 = LD %1, target-flags(mips-got-disp) @j :: (load 8 from got) %10 = LW killed %9, 0 :: (dereferenceable load 4 from @j) %11 = ADDu killed %8, killed %10 - %v0 = COPY %11 - RetRA implicit %v0 + $v0 = COPY %11 + RetRA implicit $v0 ... 
Index: test/CodeGen/Mips/mirparser/target-flags-static-tls.mir =================================================================== --- test/CodeGen/Mips/mirparser/target-flags-static-tls.mir +++ test/CodeGen/Mips/mirparser/target-flags-static-tls.mir @@ -110,8 +110,8 @@ - { id: 50, class: gpr64, preferred-register: '' } - { id: 51, class: gpr64, preferred-register: '' } liveins: - - { reg: '%a0_64', virtual-reg: '%5' } - - { reg: '%t9_64', virtual-reg: '' } + - { reg: '$a0_64', virtual-reg: '%5' } + - { reg: '$t9_64', virtual-reg: '' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -135,18 +135,18 @@ body: | bb.0.entry: successors: %bb.1.entry._ZTW1k.exit_crit_edge(0x7fe00000), %bb.2.init.i.i(0x00200000) - liveins: %a0_64, %t9_64 + liveins: $a0_64, $t9_64 %50 = LUi64 target-flags(mips-gpoff-hi) @_Z2k1i - %51 = DADDu %50, %t9_64 + %51 = DADDu %50, $t9_64 %43 = DADDiu %51, target-flags(mips-gpoff-lo) @_Z2k1i - %5 = COPY %a0_64 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp + %5 = COPY $a0_64 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp %6 = COPY %5.sub_32 - %a0_64 = COPY %5 - JAL @_Z1gi, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit-def %sp, implicit-def %v0 - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %7 = COPY %v0 + $a0_64 = COPY %5 + JAL @_Z1gi, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit-def $sp, implicit-def $v0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %7 = COPY $v0 %8 = ADDu %7, killed %6 %9 = LUi64 target-flags(mips-highest) @v %10 = DADDiu killed %9, target-flags(mips-higher) @v @@ -157,46 +157,46 @@ %0 = ADDu killed %8, killed %14 %15 = LUi64 target-flags(mips-tprel-hi) @__tls_guard %16 = DADDiu killed %15, target-flags(mips-tprel-lo) @__tls_guard - %17 = RDHWR64 %hwr29 - %v1_64 = COPY %17 - %18 = COPY %v1_64 + %17 = RDHWR64 $hwr29 + $v1_64 = COPY %17 + %18 = COPY $v1_64 %19 = DADDu %18, killed %16 %20 = LBu killed %19, 0 :: (dereferenceable load 1 from @__tls_guard) - BEQ killed %20, %zero, %bb.2.init.i.i, implicit-def dead %at - J %bb.1.entry._ZTW1k.exit_crit_edge, implicit-def dead %at + BEQ killed %20, $zero, %bb.2.init.i.i, implicit-def dead $at + J %bb.1.entry._ZTW1k.exit_crit_edge, implicit-def dead $at bb.1.entry._ZTW1k.exit_crit_edge: successors: %bb.3._ZTW1k.exit(0x80000000) %32 = LUi64 target-flags(mips-tprel-hi) @k %33 = DADDiu killed %32, target-flags(mips-tprel-lo) @k - %34 = RDHWR64 %hwr29 - %v1_64 = COPY %34 - %35 = COPY %v1_64 + %34 = RDHWR64 $hwr29 + $v1_64 = COPY %34 + %35 = COPY $v1_64 %36 = DADDu %35, killed %33 %1 = LW killed %36, 0 :: (dereferenceable load 4 from @k) - J %bb.3._ZTW1k.exit, implicit-def dead %at + J %bb.3._ZTW1k.exit, implicit-def dead $at bb.2.init.i.i: successors: %bb.3._ZTW1k.exit(0x80000000) %21 = LUi64 target-flags(mips-tprel-hi) @__tls_guard %22 = DADDiu killed %21, target-flags(mips-tprel-lo) @__tls_guard - %23 = RDHWR64 %hwr29 - %v1_64 = COPY %23 - %24 = COPY %v1_64 + %23 = RDHWR64 $hwr29 + $v1_64 = COPY %23 + %24 = COPY $v1_64 %25 = DADDu %24, killed %22 - %26 = ADDiu %zero, 1 + %26 = ADDiu $zero, 1 SB killed %26, killed %25, 0 :: (store 1 into @__tls_guard) %27 = LUi64 target-flags(mips-tprel-hi) @k %28 = DADDiu killed %27, target-flags(mips-tprel-lo) @k %29 = DADDu %24, killed %28 - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp - %30 = DADDiu %zero_64, 3 - %a0_64 = COPY %30 - JAL @_Z1gi, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit-def %sp, implicit-def %v0 - ADJCALLSTACKUP 0, 0, implicit-def 
dead %sp, implicit %sp - %31 = COPY %v0 + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + %30 = DADDiu $zero_64, 3 + $a0_64 = COPY %30 + JAL @_Z1gi, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit-def $sp, implicit-def $v0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + %31 = COPY $v0 SW %31, killed %29, 0 :: (store 4 into @k) %2 = COPY %31 @@ -211,26 +211,26 @@ %40 = DADDiu killed %39, target-flags(mips-abs-hi) @_ZTH1j %41 = DSLL killed %40, 16 %42 = DADDiu killed %41, target-flags(mips-abs-lo) @_ZTH1j - BEQ64 killed %42, %zero_64, %bb.5._ZTW1j.exit, implicit-def dead %at - J %bb.4, implicit-def dead %at + BEQ64 killed %42, $zero_64, %bb.5._ZTW1j.exit, implicit-def dead $at + J %bb.4, implicit-def dead $at bb.4 (%ir-block.2): successors: %bb.5._ZTW1j.exit(0x80000000) - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp - JAL @_ZTH1j, csr_n64, implicit-def dead %ra, implicit-def %sp - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + JAL @_ZTH1j, csr_n64, implicit-def dead $ra, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp bb.5._ZTW1j.exit: - %44 = RDHWR64 %hwr29 - %v1_64 = COPY %44 + %44 = RDHWR64 $hwr29 + $v1_64 = COPY %44 %45 = LD %43, target-flags(mips-gottprel) @j :: (load 8) - %46 = COPY %v1_64 + %46 = COPY $v1_64 %47 = DADDu %46, killed %45 %48 = LW killed %47, 0 :: (dereferenceable load 4 from @j) %49 = ADDu %4, killed %48 - %v0 = COPY %49 - RetRA implicit %v0 + $v0 = COPY %49 + RetRA implicit $v0 ... Index: test/CodeGen/Mips/msa/emergency-spill.mir =================================================================== --- test/CodeGen/Mips/msa/emergency-spill.mir +++ test/CodeGen/Mips/msa/emergency-spill.mir @@ -77,11 +77,11 @@ tracksRegLiveness: true registers: liveins: - - { reg: '%a0_64', virtual-reg: '' } - - { reg: '%a1_64', virtual-reg: '' } - - { reg: '%a2_64', virtual-reg: '' } - - { reg: '%a3_64', virtual-reg: '' } - - { reg: '%t0_64', virtual-reg: '' } + - { reg: '$a0_64', virtual-reg: '' } + - { reg: '$a1_64', virtual-reg: '' } + - { reg: '$a2_64', virtual-reg: '' } + - { reg: '$a3_64', virtual-reg: '' } + - { reg: '$t0_64', virtual-reg: '' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -122,91 +122,91 @@ constants: body: | bb.0.entry: - liveins: %a0_64, %a1_64, %a2_64, %a3_64, %t0_64 + liveins: $a0_64, $a1_64, $a2_64, $a3_64, $t0_64 - SD killed %a0_64, %stack.1.a, 0 :: (store 8 into %ir.1, align 16) - SD killed %a1_64, %stack.1.a, 8 :: (store 8 into %ir.2) - %w0 = LD_B %stack.1.a, 0 :: (dereferenceable load 16 from %ir.a) - SD killed %a2_64, %stack.2.b, 0 :: (store 8 into %ir.4, align 16) - SD killed %a3_64, %stack.2.b, 8 :: (store 8 into %ir.5) - %w1 = LD_B %stack.2.b, 0 :: (dereferenceable load 16 from %ir.b) - ST_B killed %w0, %stack.3.a.addr, 0 :: (store 16 into %ir.a.addr) - ST_B killed %w1, %stack.4.b.addr, 0 :: (store 16 into %ir.b.addr) - SW %t0, %stack.5.c.addr, 0, implicit killed %t0_64 :: (store 4 into %ir.c.addr) - %at_64 = LEA_ADDiu64 %stack.8, 0 - SD killed %at_64, %stack.6.g, 0 :: (store 8 into %ir.g) - %a1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - ADJCALLSTACKDOWN 0, 0, implicit-def dead %sp, implicit %sp - %a0_64 = LEA_ADDiu64 %stack.4.b.addr, 0 - JAL @h, csr_n64, implicit-def dead %ra, implicit %a0_64, implicit %a1_64, implicit-def %sp - ADJCALLSTACKUP 0, 0, implicit-def dead %sp, implicit %sp - %at_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %v0_64 
= LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %v1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %a0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %a1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %a2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %a3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t4_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t5_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t6_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t7_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s4_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s5_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s6_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %s7_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t8_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %t9_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %ra_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) - %w0 = LD_B %stack.3.a.addr, 0 :: (dereferenceable load 16 from %ir.a.addr) - SD %at_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %v0_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %v1_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %a0_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %a1_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %a2_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %a3_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t0_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t1_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t2_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t3_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t4_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t5_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t6_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t7_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s0_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s1_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s2_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s3_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s4_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s5_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s6_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %s7_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t8_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %t9_64, %stack.7.d, 0 :: (store 8 into %ir.d) - SD %ra_64, %stack.7.d, 0 :: (store 8 into %ir.d) - %at_64 = LD %stack.7.d, 0 :: (dereferenceable load 8 from %ir.d) - %v0 = LB %at_64, 0 :: (load 1 from %ir.arrayidx) - %w1 = FILL_B killed %v0 - %w0 = ADDV_B killed %w0, killed %w1 - %at = LB killed %at_64, 1 :: (load 1 from %ir.arrayidx3) - %w1 = FILL_B killed %at - %w0 = ADDV_B killed %w0, killed %w1 - %w1 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load 16 from %ir.b.addr) - %w0 = ADDV_B killed %w1, killed %w0 - ST_B killed %w0, %stack.4.b.addr, 0 :: (store 16 into 
%ir.b.addr) - %w0 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load 16 from %ir.b.addr) - ST_B killed %w0, %stack.0.retval, 0 :: (store 16 into %ir.retval) - %v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load 8 from %ir.20, align 16) - %v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load 8 from %ir.20 + 8, align 16) - RetRA implicit %v0_64, implicit %v1_64 + SD killed $a0_64, %stack.1.a, 0 :: (store 8 into %ir.1, align 16) + SD killed $a1_64, %stack.1.a, 8 :: (store 8 into %ir.2) + $w0 = LD_B %stack.1.a, 0 :: (dereferenceable load 16 from %ir.a) + SD killed $a2_64, %stack.2.b, 0 :: (store 8 into %ir.4, align 16) + SD killed $a3_64, %stack.2.b, 8 :: (store 8 into %ir.5) + $w1 = LD_B %stack.2.b, 0 :: (dereferenceable load 16 from %ir.b) + ST_B killed $w0, %stack.3.a.addr, 0 :: (store 16 into %ir.a.addr) + ST_B killed $w1, %stack.4.b.addr, 0 :: (store 16 into %ir.b.addr) + SW $t0, %stack.5.c.addr, 0, implicit killed $t0_64 :: (store 4 into %ir.c.addr) + $at_64 = LEA_ADDiu64 %stack.8, 0 + SD killed $at_64, %stack.6.g, 0 :: (store 8 into %ir.g) + $a1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + $a0_64 = LEA_ADDiu64 %stack.4.b.addr, 0 + JAL @h, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $a1_64, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + $at_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $v0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $v1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $a0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $a1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $a2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $a3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t4_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t5_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t6_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t7_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s4_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s5_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s6_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $s7_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t8_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $t9_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $ra_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g) + $w0 = LD_B %stack.3.a.addr, 0 :: (dereferenceable load 16 from %ir.a.addr) + SD $at_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $v0_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $v1_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $a0_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $a1_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $a2_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $a3_64, %stack.7.d, 0 :: (store 8 
into %ir.d) + SD $t0_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t1_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t2_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t3_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t4_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t5_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t6_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t7_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s0_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s1_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s2_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s3_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s4_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s5_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s6_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $s7_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t8_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $t9_64, %stack.7.d, 0 :: (store 8 into %ir.d) + SD $ra_64, %stack.7.d, 0 :: (store 8 into %ir.d) + $at_64 = LD %stack.7.d, 0 :: (dereferenceable load 8 from %ir.d) + $v0 = LB $at_64, 0 :: (load 1 from %ir.arrayidx) + $w1 = FILL_B killed $v0 + $w0 = ADDV_B killed $w0, killed $w1 + $at = LB killed $at_64, 1 :: (load 1 from %ir.arrayidx3) + $w1 = FILL_B killed $at + $w0 = ADDV_B killed $w0, killed $w1 + $w1 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load 16 from %ir.b.addr) + $w0 = ADDV_B killed $w1, killed $w0 + ST_B killed $w0, %stack.4.b.addr, 0 :: (store 16 into %ir.b.addr) + $w0 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load 16 from %ir.b.addr) + ST_B killed $w0, %stack.0.retval, 0 :: (store 16 into %ir.retval) + $v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load 8 from %ir.20, align 16) + $v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load 8 from %ir.20 + 8, align 16) + RetRA implicit $v0_64, implicit $v1_64 ... 
Index: test/CodeGen/Mips/sll-micromips-r6-encoding.mir =================================================================== --- test/CodeGen/Mips/sll-micromips-r6-encoding.mir +++ test/CodeGen/Mips/sll-micromips-r6-encoding.mir @@ -17,7 +17,7 @@ tracksRegLiveness: false registers: liveins: - - { reg: '%a0', virtual-reg: '' } + - { reg: '$a0', virtual-reg: '' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false Index: test/CodeGen/PowerPC/aantidep-def-ec.mir =================================================================== --- test/CodeGen/PowerPC/aantidep-def-ec.mir +++ test/CodeGen/PowerPC/aantidep-def-ec.mir @@ -46,8 +46,8 @@ exposesReturnsTwice: false tracksRegLiveness: true liveins: - - { reg: '%x3' } - - { reg: '%x4' } + - { reg: '$x3' } + - { reg: '$x4' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -63,36 +63,36 @@ hasVAStart: false hasMustTailInVarArgFunc: false fixedStack: - - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%x30' } - - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%x29' } + - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$x30' } + - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$x29' } body: | bb.0.entry: - liveins: %x3, %x4, %x29, %x30, %x29, %x30 + liveins: $x3, $x4, $x29, $x30, $x29, $x30 - %x0 = MFLR8 implicit %lr8 - STD %x0, 16, %x1 - %x1 = STDU %x1, -144, %x1 - STD killed %x29, 120, %x1 :: (store 8 into %fixed-stack.1) - STD killed %x30, 128, %x1 :: (store 8 into %fixed-stack.0, align 16) - %x30 = OR8 %x4, %x4 - %x3 = LD 0, killed %x3 :: (load 8 from %ir.p1) - %x29 = ADDI8 killed %x3, -48 - %x3 = ADDIStocHA %x2, @tasklist_lock - %x3 = LDtocL @tasklist_lock, killed %x3, implicit %x2 :: (load 8 from got) - BL8_NOP @__raw_read_unlock, csr_svr464_altivec, implicit-def %lr8, implicit %rm, implicit %x3, implicit %x2, implicit-def %r1, implicit-def dead %x3 - %r3 = LI 0 - STW killed %r3, 0, killed %x30 :: (volatile store 4 into %ir.p2) + $x0 = MFLR8 implicit $lr8 + STD $x0, 16, $x1 + $x1 = STDU $x1, -144, $x1 + STD killed $x29, 120, $x1 :: (store 8 into %fixed-stack.1) + STD killed $x30, 128, $x1 :: (store 8 into %fixed-stack.0, align 16) + $x30 = OR8 $x4, $x4 + $x3 = LD 0, killed $x3 :: (load 8 from %ir.p1) + $x29 = ADDI8 killed $x3, -48 + $x3 = ADDIStocHA $x2, @tasklist_lock + $x3 = LDtocL @tasklist_lock, killed $x3, implicit $x2 :: (load 8 from got) + BL8_NOP @__raw_read_unlock, csr_svr464_altivec, implicit-def $lr8, implicit $rm, implicit $x3, implicit $x2, implicit-def $r1, implicit-def dead $x3 + $r3 = LI 0 + STW killed $r3, 0, killed $x30 :: (volatile store 4 into %ir.p2) INLINEASM &"#compiler barrier", 25 - INLINEASM &"\0Alwsync \0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0Async \0A", 25, 131083, def early-clobber %r3, 851977, killed %x29, 12, implicit-def dead early-clobber %cr0 + INLINEASM &"\0Alwsync \0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0Async \0A", 25, 131083, def early-clobber $r3, 851977, killed $x29, 12, implicit-def dead early-clobber $cr0 ; CHECK-LABEL: @mm_update_next_owner ; CHECK-NOT: lwarx 29, 0, 29 ; CHECK-NOT: stwcx. 
29, 0, 29 - %cr0 = CMPLWI killed %r3, 0 - %x30 = LD 128, %x1 :: (load 8 from %fixed-stack.0, align 16) - %x29 = LD 120, %x1 :: (load 8 from %fixed-stack.1) - %x1 = ADDI8 %x1, 144 - %x0 = LD 16, %x1 - MTLR8 %x0, implicit-def %lr8 - BLR8 implicit %lr8, implicit %rm + $cr0 = CMPLWI killed $r3, 0 + $x30 = LD 128, $x1 :: (load 8 from %fixed-stack.0, align 16) + $x29 = LD 120, $x1 :: (load 8 from %fixed-stack.1) + $x1 = ADDI8 $x1, 144 + $x0 = LD 16, $x1 + MTLR8 $x0, implicit-def $lr8 + BLR8 implicit $lr8, implicit $rm ... Index: test/CodeGen/PowerPC/addegluecrash.ll =================================================================== --- test/CodeGen/PowerPC/addegluecrash.ll +++ test/CodeGen/PowerPC/addegluecrash.ll @@ -23,7 +23,7 @@ ; CHECK-NEXT: cmpld 7, 4, 5 ; CHECK-NEXT: mfocrf 10, 1 ; CHECK-NEXT: rlwinm 10, 10, 29, 31, 31 -; CHECK-NEXT: # implicit-def: %x4 +; CHECK-NEXT: # implicit-def: $x4 ; CHECK-NEXT: mr 4, 10 ; CHECK-NEXT: clrldi 4, 4, 32 ; CHECK-NEXT: std 4, 0(3) Index: test/CodeGen/PowerPC/addisdtprelha-nonr3.mir =================================================================== --- test/CodeGen/PowerPC/addisdtprelha-nonr3.mir +++ test/CodeGen/PowerPC/addisdtprelha-nonr3.mir @@ -42,36 +42,36 @@ hasVAStart: false hasMustTailInVarArgFunc: false fixedStack: - - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%x30' } + - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$x30' } - { id: 1, offset: -8, size: 8, alignment: 8, isImmutable: true, isAliased: false } body: | bb.0.entry: - liveins: %x30, %x30 + liveins: $x30, $x30 - %x0 = MFLR8 implicit %lr8 - STD %x31, -8, %x1 - STD killed %x0, 16, %x1 - %x1 = STDU %x1, -64, %x1 - %x3 = ADDIStlsldHA %x2, @x - %x31 = OR8 %x1, %x1 - %x3 = ADDItlsldL killed %x3, @x - STD killed %x30, 48, %x31 :: (store 8 into %fixed-stack.0, align 16) - %x3 = GETtlsldADDR killed %x3, @x, implicit-def dead %x0, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def %lr8, implicit-def %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7 - %x12 = ADDIStlsgdHA %x2, @y - %x30 = OR8 killed %x3, %x3 - %x3 = ADDItlsgdL killed %x12, @y - %x3 = GETtlsADDR killed %x3, @y, implicit-def dead %x0, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def %lr8, implicit-def %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7 - %x4 = ADDISdtprelHA killed %x30, @x + $x0 = MFLR8 implicit $lr8 + STD $x31, -8, $x1 + STD killed $x0, 16, $x1 + $x1 = STDU $x1, -64, $x1 + $x3 = ADDIStlsldHA $x2, @x + $x31 = OR8 $x1, $x1 + $x3 = ADDItlsldL killed $x3, @x + STD killed $x30, 48, $x31 :: (store 8 into %fixed-stack.0, align 16) + $x3 = GETtlsldADDR killed $x3, @x, implicit-def dead $x0, implicit-def dead $x4, implicit-def dead $x5, implicit-def dead $x6, implicit-def dead $x7, implicit-def dead $x8, implicit-def dead $x9, implicit-def dead $x10, implicit-def dead $x11, implicit-def dead $x12, implicit-def $lr8, implicit-def $ctr8, implicit-def dead $cr0, implicit-def dead $cr1, implicit-def dead $cr5, implicit-def dead $cr6, implicit-def dead 
$cr7 + $x12 = ADDIStlsgdHA $x2, @y + $x30 = OR8 killed $x3, $x3 + $x3 = ADDItlsgdL killed $x12, @y + $x3 = GETtlsADDR killed $x3, @y, implicit-def dead $x0, implicit-def dead $x4, implicit-def dead $x5, implicit-def dead $x6, implicit-def dead $x7, implicit-def dead $x8, implicit-def dead $x9, implicit-def dead $x10, implicit-def dead $x11, implicit-def dead $x12, implicit-def $lr8, implicit-def $ctr8, implicit-def dead $cr0, implicit-def dead $cr1, implicit-def dead $cr5, implicit-def dead $cr6, implicit-def dead $cr7 + $x4 = ADDISdtprelHA killed $x30, @x ; CHECK: addis 4, 30, x@dtprel@ha - %x5 = LI8 1 - %r6 = LI 20 - %x30 = LD 48, %x31 :: (load 8 from %fixed-stack.0, align 16) - STB8 killed %x5, target-flags(ppc-dtprel-lo) @x, killed %x4 :: (store 1 into @x) - STW killed %r6, 0, killed %x3 :: (store 4 into @y) - %x1 = ADDI8 %x1, 64 - %x0 = LD 16, %x1 - %x31 = LD -8, %x1 - MTLR8 killed %x0, implicit-def %lr8 - BLR8 implicit %lr8, implicit %rm + $x5 = LI8 1 + $r6 = LI 20 + $x30 = LD 48, $x31 :: (load 8 from %fixed-stack.0, align 16) + STB8 killed $x5, target-flags(ppc-dtprel-lo) @x, killed $x4 :: (store 1 into @x) + STW killed $r6, 0, killed $x3 :: (store 4 into @y) + $x1 = ADDI8 $x1, 64 + $x0 = LD 16, $x1 + $x31 = LD -8, $x1 + MTLR8 killed $x0, implicit-def $lr8 + BLR8 implicit $lr8, implicit $rm ... Index: test/CodeGen/PowerPC/aggressive-anti-dep-breaker-subreg.ll =================================================================== --- test/CodeGen/PowerPC/aggressive-anti-dep-breaker-subreg.ll +++ test/CodeGen/PowerPC/aggressive-anti-dep-breaker-subreg.ll @@ -10,7 +10,7 @@ lnext: %elementArray = load i32*, i32** %elementArrayPtr, align 8 ; CHECK: lwz [[LDREG:[0-9]+]], 124(1) # 4-byte Folded Reload -; CHECK: # implicit-def: %x[[TEMPREG:[0-9]+]] +; CHECK: # implicit-def: $x[[TEMPREG:[0-9]+]] %element = load i32, i32* %elementArray, align 4 ; CHECK: mr [[TEMPREG]], [[LDREG]] ; CHECK: clrldi 4, [[TEMPREG]], 32 Index: test/CodeGen/PowerPC/byval-agg-info.ll =================================================================== --- test/CodeGen/PowerPC/byval-agg-info.ll +++ test/CodeGen/PowerPC/byval-agg-info.ll @@ -13,5 +13,5 @@ ; Make sure that the MMO on the store has no offset from the byval ; variable itself (we used to have mem:ST8[%v+64]). -; CHECK: STD killed renamable %x5, 176, %x1; mem:ST8[%v](align=16) +; CHECK: STD killed renamable $x5, 176, $x1; mem:ST8[%v](align=16) Index: test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir =================================================================== --- test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir +++ test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir @@ -94,8 +94,8 @@ - { id: 4, class: gprc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -118,18 +118,18 @@ constants: body: | bb.0.entry: - liveins: %x0, %x4 + liveins: $x0, $x4 - %1:g8rc = COPY %x4 - %0:g8rc = COPY %x0 + %1:g8rc = COPY $x4 + %0:g8rc = COPY $x0 %2:gprc = LI 44 %3:gprc = COPY %1.sub_32 - %4:gprc = ADD4 killed %r0, killed %2 + %4:gprc = ADD4 killed $r0, killed %2 ; CHECK: li 3, 44 ; CHECK: add 3, 0, 3 %5:g8rc = EXTSW_32_64 killed %4 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
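All of the test updates in this patch follow one mechanical rule, visible throughout the hunks above: a named machine register such as %x3, %r0, %cr0 or %lr8 gains the new $ prefix, while numbered virtual registers (%0, %1, ...) and the other MIR sigils spelled with % (%ir.*, %stack.*, %fixed-stack.*) are left alone. As a sketch only, assuming a plain stdin-to-stdout line filter and not the script that was actually used to regenerate these tests, the rule can be written as:

#include <iostream>
#include <regex>
#include <string>

// Hypothetical rewrite rule (not the tool used for this patch): give
// named machine registers the '$' prefix, leave numbered virtual
// registers and the other '%' sigils of MIR untouched.
static std::string reprefixPhysRegs(const std::string &Line) {
  // A named register starts with a letter; '%0'-style virtual registers
  // start with a digit and fail the [A-Za-z] class, and '%ir.',
  // '%stack.', '%fixed-stack.', '%subreg.' and '%bb.' are excluded
  // explicitly by the negative lookahead.
  static const std::regex NamedReg(
      "%(?!ir\\.|stack\\.|fixed-stack\\.|subreg\\.|bb\\.)"
      "([A-Za-z][A-Za-z0-9_]*)");
  return std::regex_replace(Line, NamedReg, "$$$1"); // "$$" emits '$'
}

int main() {
  // Filter a .mir test from stdin to stdout.
  std::string Line;
  while (std::getline(std::cin, Line))
    std::cout << reprefixPhysRegs(Line) << '\n';
}

Fed the aantidep-def-ec.mir body above, this turns '%x30 = OR8 %x4, %x4' into '$x30 = OR8 $x4, $x4' while leaving the %0/%1 virtual registers of the convert-rr-to-ri tests untouched; named virtual registers, which these tests do not use, would need extra handling.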
--- @@ -148,8 +148,8 @@ - { id: 4, class: gprc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -172,18 +172,18 @@ constants: body: | bb.0.entry: - liveins: %x0, %x4 + liveins: $x0, $x4 - %1:g8rc = COPY %x4 - %0:g8rc = COPY %x0 + %1:g8rc = COPY $x4 + %0:g8rc = COPY $x0 %2:gprc = COPY %0.sub_32 %3:gprc = LI 44 - %4:gprc = ADD4 killed %3, killed %r0 + %4:gprc = ADD4 killed %3, killed $r0 ; CHECK: li 3, 44 ; CHECK: add 3, 3, 0 %5:g8rc = EXTSW_32_64 killed %4 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -202,8 +202,8 @@ - { id: 4, class: gprc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -226,17 +226,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1:g8rc = COPY %x4 - %0:g8rc = COPY %x3 + %1:g8rc = COPY $x4 + %0:g8rc = COPY $x3 %2:gprc = COPY %0.sub_32 - %r0 = LI 44 - %4:gprc = ADD4 killed %r0, killed %2 + $r0 = LI 44 + %4:gprc = ADD4 killed $r0, killed %2 ; CHECK: addi 3, 3, 44 %5:g8rc = EXTSW_32_64 killed %4 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -255,8 +255,8 @@ - { id: 4, class: gprc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -279,17 +279,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1:g8rc = COPY %x4 - %0:g8rc = COPY %x3 + %1:g8rc = COPY $x4 + %0:g8rc = COPY $x3 %2:gprc = COPY %0.sub_32 - %r0 = LI 44 - %4:gprc = ADD4 killed %2, killed %r0 + $r0 = LI 44 + %4:gprc = ADD4 killed %2, killed $r0 ; CHECK: addi 3, 3, 44 %5:g8rc = EXTSW_32_64 killed %4 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -305,8 +305,8 @@ - { id: 1, class: g8rc, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x0', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x0', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -329,15 +329,15 @@ constants: body: | bb.0.entry: - liveins: %x0, %x4 + liveins: $x0, $x4 - %1:g8rc = COPY %x4 + %1:g8rc = COPY $x4 %0:g8rc_and_g8rc_nox0 = LI8 44 - %2:g8rc = LDX %0, %x0 :: (load 8 from %ir.1, !tbaa !3) + %2:g8rc = LDX %0, $x0 :: (load 8 from %ir.1, !tbaa !3) ; CHECK: li 3, 44 ; CHECK: ldx 3, 3, 0 - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -353,8 +353,8 @@ - { id: 1, class: g8rc, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -377,14 +377,14 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1:g8rc = LI8 44 %0:g8rc_and_g8rc_nox0 = LI8 44 - %2:g8rc = LDX %zero8, %1 :: (load 8 from %ir.1, !tbaa !3) + %2:g8rc = LDX $zero8, %1 :: (load 8 from %ir.1, !tbaa !3) ; CHECK: ld 3, 44(0) - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -400,8 +400,8 @@ - { id: 1, class: g8rc, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -424,13 +424,13 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %x0 = LI8 44 - %0:g8rc_and_g8rc_nox0 = COPY %x3 - %2:g8rc = LDX %0, %x0 :: (load 8 from %ir.1, !tbaa !3) + $x0 = LI8 44 + %0:g8rc_and_g8rc_nox0 = COPY $x3 + %2:g8rc = LDX %0, $x0 :: (load 8 from %ir.1, !tbaa !3) ; CHECK: ld 3, 44(3) - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... Index: test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir =================================================================== --- test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir +++ test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir @@ -226,7 +226,7 @@ - { id: 3, class: g8rc, preferred-register: '' } - { id: 4, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -249,17 +249,17 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = COPY %0.sub_32 %3 = IMPLICIT_DEF %2 = LI 170 %4 = RLWNM killed %1, %2, 20, 27 ; CHECK: RLWINM killed %1, 10, 20, 27 ; CHECK-LATE: rlwinm 3, 3, 10, 20, 27 - %x3 = EXTSW_32_64 %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -276,7 +276,7 @@ - { id: 1, class: g8rc, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -299,15 +299,15 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI8 234 - %1 = COPY %x3 + %1 = COPY $x3 %2 = RLWNM8 %1, %0, 20, 27 ; CHECK: RLWINM8 %1, 10, 20, 27 ; CHECK-LATE: rlwinm 3, 3, 10, 20, 27 - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -331,8 +331,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -355,23 +355,23 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -22 - %4 = RLWNMo %2, %3, 24, 31, implicit-def %cr0 - ; CHECK: RLWINMo %2, 10, 24, 31, implicit-def %cr0 + %4 = RLWNMo %2, %3, 24, 31, implicit-def $cr0 + ; CHECK: RLWINMo %2, 10, 24, 31, implicit-def $cr0 ; CHECK-LATE: li 3, -22 ; CHECK-LATE: rlwinm. 5, 4, 10, 24, 31 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -394,8 +394,8 @@ - { id: 7, class: crrc, preferred-register: '' } - { id: 8, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -418,19 +418,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI8 -18 - %3 = RLWNM8o %1, %2, 20, 27, implicit-def %cr0 - ; CHECK: RLWINM8o %1, 14, 20, 27, implicit-def %cr0 + %3 = RLWNM8o %1, %2, 20, 27, implicit-def $cr0 + ; CHECK: RLWINM8o %1, 14, 20, 27, implicit-def $cr0 ; CHECK-LATE: rlwinm. 3, 4, 14, 20, 27 - %7 = COPY killed %cr0 + %7 = COPY killed $cr0 %6 = RLDICL killed %3, 0, 32 %8 = ISEL8 %1, %6, %7.sub_eq - %x3 = COPY %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -453,8 +453,8 @@ - { id: 7, class: g8rc, preferred-register: '' } - { id: 8, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -477,17 +477,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %5 = LI 210 %8 = SLW killed %2, killed %5 ; CHECK: RLWINM killed %2, 18, 0, 13 ; CHECK-LATE: slwi 3, 4, 18 - %x3 = EXTSW_32_64 %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -511,8 +511,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -535,22 +535,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 35 %3 = COPY %0.sub_32 - %4 = SLWo %3, %2, implicit-def %cr0 - ; CHECK: ANDIo %3, 0, implicit-def %cr0 + %4 = SLWo %3, %2, implicit-def $cr0 + ; CHECK: ANDIo %3, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -573,8 +573,8 @@ - { id: 7, class: g8rc, preferred-register: '' } - { id: 8, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -597,17 +597,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 48 %5 = COPY %0.sub_32 %8 = SRW killed %5, killed %2 ; CHECK: LI 0 ; CHECK-LATE: li 3, 0 - %x3 = EXTSW_32_64 %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -631,8 +631,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -655,22 +655,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI -7 %3 = COPY %0.sub_32 - %4 = SRWo %3, %2, implicit-def %cr0 - ; CHECK: ANDIo %3, 0, implicit-def %cr0 + %4 = SRWo %3, %2, implicit-def $cr0 + ; CHECK: ANDIo %3, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -690,8 +690,8 @@ - { id: 4, class: gprc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -714,19 +714,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 48 %3 = COPY %0.sub_32 - %4 = SRAW killed %3, killed %2, implicit-def dead %carry + %4 = SRAW killed %3, killed %2, implicit-def dead $carry ; CHECK: LI 48 - ; CHECK: SRAW killed %3, killed %2, implicit-def dead %carry + ; CHECK: SRAW killed %3, killed %2, implicit-def dead $carry ; CHECK-LATE: sraw 3, 3, 4 %5 = EXTSW_32_64 killed %4 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -748,8 +748,8 @@ - { id: 6, class: gprc, preferred-register: '' } - { id: 7, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -772,20 +772,20 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 80 %3 = COPY %0.sub_32 - %4 = SRAWo killed %3, %2, implicit-def dead %carry, implicit-def %cr0 - ; CHECK: SRAWo killed %3, %2, implicit-def dead %carry, implicit-def %cr0 + %4 = SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: sraw. 3, 3, 4 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %4, %5.sub_eq %7 = EXTSW_32_64 killed %6 - %x3 = COPY %7 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %7 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -804,8 +804,8 @@ - { id: 3, class: gprc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -828,17 +828,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI 140 %4 = RLDCL %0, killed %3, 0 ; CHECK: RLDICL %0, 12, 0 ; CHECK-LATE: rotldi 3, 3, 12 - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -859,8 +859,8 @@ - { id: 5, class: crrc, preferred-register: '' } - { id: 6, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -883,19 +883,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI -37 - %4 = RLDCLo %0, killed %3, 0, implicit-def %cr0 - ; CHECK: RLDICLo %0, 27, 0, implicit-def %cr0 + %4 = RLDCLo %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICLo %0, 27, 0, implicit-def $cr0 ; CHECK-LATE: rldicl. 5, 3, 27, 0 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq - %x3 = COPY %6 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %6 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -914,8 +914,8 @@ - { id: 3, class: gprc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -938,17 +938,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI 300 %4 = RLDCR %0, killed %3, 0 ; CHECK: RLDICR %0, 44, 0 ; CHECK-LATE: rldicr 3, 3, 44, 0 - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -969,8 +969,8 @@ - { id: 5, class: crrc, preferred-register: '' } - { id: 6, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -993,19 +993,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI -18 - %4 = RLDCRo %0, killed %3, 0, implicit-def %cr0 - ; CHECK: RLDICRo %0, 46, 0, implicit-def %cr0 + %4 = RLDCRo %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICRo %0, 46, 0, implicit-def $cr0 ; CHECK-LATE: rldicr. 5, 3, 46, 0 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq - %x3 = COPY %6 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %6 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1023,8 +1023,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1047,16 +1047,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI -13 %3 = SLD %0, killed %2 ; CHECK: LI8 0 ; CHECK-LATE: li 3, 0 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -1076,8 +1076,8 @@ - { id: 4, class: crrc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1100,18 +1100,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 88 - %3 = SLDo %0, killed %2, implicit-def %cr0 - ; CHECK: ANDIo8 %0, 0, implicit-def %cr0 + %3 = SLDo %0, killed %2, implicit-def $cr0 + ; CHECK: ANDIo8 %0, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1129,8 +1129,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1153,16 +1153,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 400 %3 = SRD %0, killed %2 ; CHECK: RLDICL %0, 48, 16 ; CHECK-LATE: rldicl 3, 3, 48, 16 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1182,8 +1182,8 @@ - { id: 4, class: crrc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1206,18 +1206,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 64 - %3 = SRDo %0, killed %2, implicit-def %cr0 - ; CHECK: ANDIo8 %0, 0, implicit-def %cr0 + %3 = SRDo %0, killed %2, implicit-def $cr0 + ; CHECK: ANDIo8 %0, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1235,8 +1235,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1259,16 +1259,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI -44 - %3 = SRAD %0, killed %2, implicit-def dead %carry - ; CHECK: SRAD %0, killed %2, implicit-def dead %carry + %3 = SRAD %0, killed %2, implicit-def dead $carry + ; CHECK: SRAD %0, killed %2, implicit-def dead $carry ; CHECK-LATE: srad 3, 3, 4 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -1288,8 +1288,8 @@ - { id: 4, class: crrc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1312,18 +1312,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 68 - %3 = SRADo %0, killed %2, implicit-def dead %carry, implicit-def %cr0 - ; CHECK: SRADo %0, killed %2, implicit-def dead %carry, implicit-def %cr0 + %3 = SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: srad. 3, 3, 5 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL8 %1, %3, %4.sub_eq - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- Index: test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir =================================================================== --- test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir +++ test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir @@ -1009,8 +1009,8 @@ - { id: 5, class: gprc, preferred-register: '' } - { id: 6, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1033,10 +1033,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 33 %3 = COPY %0.sub_32 %4 = ADD4 killed %3, %2 @@ -1046,8 +1046,8 @@ ; CHECK-LATE: addi 3, 3, 33 ; CHECK-LATE: addi 3, 3, 33 %6 = EXTSW_32_64 killed %5 - %x3 = COPY %6 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %6 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1065,8 +1065,8 @@ - { id: 2, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1089,18 +1089,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI8 33 - %0 = COPY %x3 + %0 = COPY $x3 %2 = ADD8 %0, %1 %3 = ADD8 killed %1, killed %2 ; CHECK: ADDI8 %0, 33 ; CHECK: ADDI8 killed %2, 33 ; CHECK-LATE: addi 3, 3, 33 ; CHECK-LATE: addi 3, 3, 33 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
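One property worth calling out in the convert-rr-to-ri hunks above: after this change the sigil alone distinguishes the two register kinds, so a line like '%1 = COPY $x4' is self-describing, where previously both kinds were spelled with % and the reader had to check whether a digit or a letter followed. A minimal classifier, sketched here as an assumption and not as LLVM's actual tokenizer, needs only the first two characters of a token:

#include <cctype>
#include <string>

enum class RegKind { Virtual, NamedPhysical, OtherSigil };

// Classify a register token under the convention these tests adopt:
// '%<digit>' is a virtual register, '$<letter>' a named physical
// register, and anything else ('%ir.*', '%stack.*', ...) some other
// MIR '%' sigil.
static RegKind classifyToken(const std::string &Tok) {
  if (Tok.size() >= 2 && Tok[0] == '%' &&
      std::isdigit(static_cast<unsigned char>(Tok[1])))
    return RegKind::Virtual;        // %0, %13, %24, ...
  if (Tok.size() >= 2 && Tok[0] == '$' &&
      std::isalpha(static_cast<unsigned char>(Tok[1])))
    return RegKind::NamedPhysical;  // $x3, $cr0, $zero8, $f1, $v2
  return RegKind::OtherSigil;
}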
--- @@ -1123,10 +1123,10 @@ - { id: 7, class: g8rc, preferred-register: '' } - { id: 8, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } - - { reg: '%x6', virtual-reg: '%3' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } + - { reg: '$x6', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1149,22 +1149,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5, %x6 + liveins: $x3, $x4, $x5, $x6 - %3 = COPY %x6 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %3 = COPY $x6 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %4 = COPY %0.sub_32 %5 = LI 55 - %6 = ADDC %5, %4, implicit-def %carry - ; CHECK: ADDIC %4, 55, implicit-def %carry + %6 = ADDC %5, %4, implicit-def $carry + ; CHECK: ADDIC %4, 55, implicit-def $carry ; CHECK-LATE: addic 3, 3, 55 - %7 = ADDE8 %3, %1, implicit-def dead %carry, implicit %carry + %7 = ADDE8 %3, %1, implicit-def dead $carry, implicit $carry %8 = EXTSW_32_64 %6 - %x3 = COPY %8 - %x4 = COPY %7 - BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4 + $x3 = COPY %8 + $x4 = COPY %7 + BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4 ... --- @@ -1184,10 +1184,10 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } - - { reg: '%x6', virtual-reg: '%3' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } + - { reg: '$x6', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1210,19 +1210,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5, %x6 + liveins: $x3, $x4, $x5, $x6 - %3 = COPY %x6 - %2 = COPY %x5 - %1 = COPY %x4 + %3 = COPY $x6 + %2 = COPY $x5 + %1 = COPY $x4 %0 = LI8 777 - %4 = ADDC8 %2, %0, implicit-def %carry - ; CHECK: ADDIC8 %2, 777, implicit-def %carry + %4 = ADDC8 %2, %0, implicit-def $carry + ; CHECK: ADDIC8 %2, 777, implicit-def $carry ; CHECK-LATE: addic 3, 5, 777 - %5 = ADDE8 %3, %1, implicit-def dead %carry, implicit %carry - %x3 = COPY %4 - %x4 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4 + %5 = ADDE8 %3, %1, implicit-def dead $carry, implicit $carry + $x3 = COPY %4 + $x4 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4 ... --- @@ -1245,8 +1245,8 @@ - { id: 7, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 8, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1269,21 +1269,21 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI 433 - %0 = COPY %x3 + %0 = COPY $x3 %2 = COPY %0.sub_32 - %3 = ADDCo %1, %2, implicit-def %cr0, implicit-def %carry - ; CHECK: ADDICo %2, 433, implicit-def %cr0, implicit-def %carry + %3 = ADDCo %1, %2, implicit-def $cr0, implicit-def $carry + ; CHECK: ADDICo %2, 433, implicit-def $cr0, implicit-def $carry ; CHECK-LATE: addic. 
3, 3, 433 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = COPY %4.sub_eq %6 = LI8 0 %7 = LI8 -1 %8 = ISEL8 %7, %6, %5 - %x3 = COPY %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1301,7 +1301,7 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1324,16 +1324,16 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = LI 77 %2 = ADDI killed %1, 44 %3 = EXTSW_32_64 killed %2 ; CHECK: LI 121 ; CHECK-LATE: li 3, 121 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1351,7 +1351,7 @@ - { id: 2, class: g8rc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1374,16 +1374,16 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = LI8 333 %2 = ADDI8 killed %1, 44 ; CHECK: LI8 377 ; CHECK-LATE: li 3, 377 %3 = EXTSW killed %2 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1404,8 +1404,8 @@ - { id: 5, class: gprc, preferred-register: '' } - { id: 6, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1428,19 +1428,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI 78 - %0 = COPY %x3 + %0 = COPY $x3 %2 = COPY %0.sub_32 - %3 = ANDo %1, %2, implicit-def %cr0 - ; CHECK: ANDIo %2, 78, implicit-def %cr0 + %3 = ANDo %1, %2, implicit-def $cr0 + ; CHECK: ANDIo %2, 78, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 78 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL %2, %1, %4.sub_eq %6 = EXTSW_32_64 killed %5 - %x3 = COPY %6 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %6 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1459,8 +1459,8 @@ - { id: 3, class: crrc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1483,17 +1483,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI8 321 - %0 = COPY %x3 - %2 = AND8o %1, %0, implicit-def %cr0 - ; CHECK: ANDIo8 %0, 321, implicit-def %cr0 + %0 = COPY $x3 + %2 = AND8o %1, %0, implicit-def $cr0 + ; CHECK: ANDIo8 %0, 321, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 321 - %3 = COPY killed %cr0 + %3 = COPY killed $cr0 %4 = ISEL8 %1, %0, %3.sub_eq - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -1513,8 +1513,8 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1537,17 +1537,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI8 65533 - %0 = COPY %x3 + %0 = COPY $x3 %2 = CMPD %0, %1 ; CHECK: CMPDI %0, -3 ; CHECK-LATE: cmpdi 3, -3 - %4 = ISEL8 %zero8, %0, %2.sub_gt + %4 = ISEL8 $zero8, %0, %2.sub_gt %5 = ADD8 killed %4, %1 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1567,8 +1567,8 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1591,16 +1591,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 89 %2 = CMPDI %0, 87 - %4 = ISEL8 %zero8, %0, %2.sub_gt + %4 = ISEL8 $zero8, %0, %2.sub_gt ; CHECK: LI8 0 %5 = ADD8 killed %4, %1 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1620,8 +1620,8 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1644,16 +1644,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 87 %2 = CMPDI %0, 87 - %4 = ISEL8 %zero8, %0, %2.sub_gt + %4 = ISEL8 $zero8, %0, %2.sub_gt ; CHECK: COPY %0 %5 = ADD8 killed %4, %1 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1673,8 +1673,8 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1697,17 +1697,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI8 99 - %0 = COPY %x3 + %0 = COPY $x3 %2 = CMPLD %0, %1 ; CHECK: CMPLDI %0, 99 ; CHECK-LATE: cmpldi 3, 99 - %4 = ISEL8 %zero8, %0, %2.sub_gt + %4 = ISEL8 $zero8, %0, %2.sub_gt %5 = ADD8 killed %4, %1 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -1727,8 +1727,8 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1751,16 +1751,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 65534 %2 = CMPLDI %0, 65535 - %4 = ISEL8 %zero8, %0, %2.sub_gt + %4 = ISEL8 $zero8, %0, %2.sub_gt ; CHECK: COPY %0 %5 = ADD8 killed %4, %1 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1783,8 +1783,8 @@ - { id: 7, class: gprc, preferred-register: '' } - { id: 8, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1807,19 +1807,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI -1 %3 = COPY %0.sub_32 %4 = CMPW %3, %2 ; CHECK: CMPWI %3, -1 - %6 = ISEL %zero, %3, %4.sub_gt + %6 = ISEL $zero, %3, %4.sub_gt %7 = ADD4 killed %6, %2 %8 = EXTSW_32_64 killed %7 - %x3 = COPY %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1842,8 +1842,8 @@ - { id: 7, class: gprc, preferred-register: '' } - { id: 8, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1866,19 +1866,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -3 %4 = CMPWI %3, 87 - %6 = ISEL %zero, %3, %4.sub_gt + %6 = ISEL $zero, %3, %4.sub_gt ; CHECK: COPY %3 %7 = ADD4 killed %6, killed %2 %8 = EXTSW_32_64 killed %7 - %x3 = COPY %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -1903,8 +1903,8 @@ - { id: 9, class: g8rc, preferred-register: '' } - { id: 10, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1927,22 +1927,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 32767 %3 = COPY %0.sub_32 %4 = CMPLW %3, %2 ; CHECK: CMPLWI %3, 32767 ; CHECK-LATE: cmplwi 3, 32767 - %6 = ISEL %zero, %3, %4.sub_gt + %6 = ISEL $zero, %3, %4.sub_gt %7 = ADD4 killed %6, %2 %9 = IMPLICIT_DEF %8 = INSERT_SUBREG %9, killed %7, 1 %10 = RLDICL killed %8, 0, 32 - %x3 = COPY %10 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %10 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -1967,8 +1967,8 @@ - { id: 9, class: g8rc, preferred-register: '' } - { id: 10, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1991,21 +1991,21 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -3 %4 = CMPLWI %3, 87 - %6 = ISEL %zero, %3, %4.sub_gt + %6 = ISEL $zero, %3, %4.sub_gt ; CHECK: LI 0 %7 = ADD4 killed %6, killed %2 %9 = IMPLICIT_DEF %8 = INSERT_SUBREG %9, killed %7, 1 %10 = RLDICL killed %8, 0, 32 - %x3 = COPY %10 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %10 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2037,8 +2037,8 @@ - { id: 16, class: g8rc, preferred-register: '' } - { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2061,10 +2061,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2082,8 +2082,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = RLWINM8 killed %14, 0, 24, 31 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2114,8 +2114,8 @@ - { id: 15, class: g8rc, preferred-register: '' } - { id: 16, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2138,9 +2138,9 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 45 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 @@ -2161,8 +2161,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = RLWINM8 killed %14, 0, 24, 31 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2194,8 +2194,8 @@ - { id: 16, class: g8rc, preferred-register: '' } - { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2218,10 +2218,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2239,8 +2239,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = RLWINM8 killed %14, 0, 16, 31 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -2271,8 +2271,8 @@ - { id: 15, class: g8rc, preferred-register: '' } - { id: 16, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2295,10 +2295,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2316,8 +2316,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = RLWINM8 killed %14, 0, 16, 31 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2349,8 +2349,8 @@ - { id: 16, class: g8rc, preferred-register: '' } - { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2373,10 +2373,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2394,8 +2394,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = EXTSH8 killed %14 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2426,8 +2426,8 @@ - { id: 15, class: g8rc, preferred-register: '' } - { id: 16, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2450,10 +2450,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2473,8 +2473,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = EXTSH8 killed %14 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2507,8 +2507,8 @@ - { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 18, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2531,10 +2531,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2554,8 +2554,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = RLDICL killed %14, 0, 32 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -2586,8 +2586,8 @@ - { id: 15, class: g8rc, preferred-register: '' } - { id: 16, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2610,9 +2610,9 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 1000 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 @@ -2633,8 +2633,8 @@ %15 = IMPLICIT_DEF %14 = INSERT_SUBREG %15, killed %13, 1 %16 = RLDICL killed %14, 0, 32 - %x3 = COPY %16 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %16 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2662,8 +2662,8 @@ - { id: 12, class: g8rc, preferred-register: '' } - { id: 13, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2686,9 +2686,9 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 444 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 @@ -2706,8 +2706,8 @@ ; CHECK: LWA 444, killed %11 ; CHECK-LATE: lwa 3, 444(4) %13 = ADD8 killed %12, killed %7 - %x3 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2737,8 +2737,8 @@ - { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 15, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2761,10 +2761,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2781,8 +2781,8 @@ ; CHECK: LDU 200, %0 ; CHECK-LATE: ldu 4, 200(3) %13 = ADD8 killed %12, killed %7 - %x3 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -2810,8 +2810,8 @@ - { id: 12, class: g8rc, preferred-register: '' } - { id: 13, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2834,10 +2834,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2854,8 +2854,8 @@ ; CHECK: LD 280, %0 ; CHECK-LATE: ld 12, 280(3) %13 = ADD8 killed %12, killed %7 - %x3 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -2885,8 +2885,8 @@ - { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 15, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2909,10 +2909,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -2928,9 +2928,9 @@ %12,%15 = LFDUX %0, killed %11 :: (load 8 from %ir.arrayidx3, !tbaa !12) ; CHECK: LFDU 16, %0 ; CHECK-LATE: lfdu 1, 16(3) - %13 = FADD killed %7, killed %12, implicit %rm - %f1 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %f1 + %13 = FADD killed %7, killed %12, implicit $rm + $f1 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $f1 ... --- @@ -2958,8 +2958,8 @@ - { id: 12, class: f8rc, preferred-register: '' } - { id: 13, class: f8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -2982,9 +2982,9 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 -20 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 @@ -3001,9 +3001,9 @@ %12 = LFDX %0, killed %11 :: (load 8 from %ir.arrayidx3, !tbaa !12) ; CHECK: LFD -20, killed %11 ; CHECK-LATE: lfd 1, -20(4) - %13 = FADD killed %7, killed %12, implicit %rm - %f1 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %f1 + %13 = FADD killed %7, killed %12, implicit $rm + $f1 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $f1 ... 
--- @@ -3042,8 +3042,8 @@ - { id: 23, class: g8rc, preferred-register: '' } - { id: 24, class: vrrc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3081,41 +3081,41 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI8 72 %3, %4 = LFSUX %0, killed %2 :: (load 4 from %ir.arrayidx, !tbaa !14) ; CHECK: LFSU 72, %0 ; CHECK-LATE: lfsu 0, 72(3) - %5 = FCTIWUZ killed %3, implicit %rm + %5 = FCTIWUZ killed %3, implicit $rm %6 = ADDI8 %stack.4, 0 - STFIWX killed %5, %zero8, killed %6 + STFIWX killed %5, $zero8, killed %6 %7 = LWZ 0, %stack.4 :: (load 4 from %stack.4) %8 = LFS 4, %4 :: (load 4 from %ir.3, !tbaa !14) - %10 = FCTIWUZ %8, implicit %rm + %10 = FCTIWUZ %8, implicit $rm %11 = ADDI8 %stack.1, 0 - STFIWX killed %10, %zero8, killed %11 + STFIWX killed %10, $zero8, killed %11 %12 = LWZ 0, %stack.1 :: (load 4 from %stack.1) %13 = LFS 8, %4 :: (load 4 from %ir.5, !tbaa !14) - %15 = FCTIWUZ %13, implicit %rm + %15 = FCTIWUZ %13, implicit $rm %16 = ADDI8 %stack.2, 0 - STFIWX killed %15, %zero8, killed %16 + STFIWX killed %15, $zero8, killed %16 %17 = LWZ 0, %stack.2 :: (load 4 from %stack.2) %18 = LFS 12, %4 :: (load 4 from %ir.7, !tbaa !14) - %20 = FCTIWUZ %18, implicit %rm + %20 = FCTIWUZ %18, implicit $rm %21 = ADDI8 %stack.3, 0 - STFIWX killed %20, %zero8, killed %21 + STFIWX killed %20, $zero8, killed %21 %22 = LWZ 0, %stack.3 :: (load 4 from %stack.3) STW killed %7, 0, %stack.0 :: (store 4 into %stack.0, align 16) STW killed %22, 12, %stack.0 :: (store 4 into %stack.0 + 12) STW killed %17, 8, %stack.0 :: (store 4 into %stack.0 + 8, align 8) STW killed %12, 4, %stack.0 :: (store 4 into %stack.0 + 4) %23 = ADDI8 %stack.0, 0 - %24 = LVX %zero8, killed %23 :: (load 16 from %stack.0) - %v2 = COPY %24 - BLR8 implicit %lr8, implicit %rm, implicit %v2 + %24 = LVX $zero8, killed %23 :: (load 16 from %stack.0) + $v2 = COPY %24 + BLR8 implicit $lr8, implicit $rm, implicit $v2 ... --- @@ -3143,8 +3143,8 @@ - { id: 12, class: f4rc, preferred-register: '' } - { id: 13, class: f4rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3167,10 +3167,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -3186,9 +3186,9 @@ %12 = LFSX %0, killed %11 :: (load 4 from %ir.arrayidx3, !tbaa !14) ; CHECK: LFS -88, %0 ; CHECK-LATE: lfs 1, -88(3) - %13 = FADDS killed %7, killed %12, implicit %rm - %f1 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %f1 + %13 = FADDS killed %7, killed %12, implicit $rm + $f1 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $f1 ... 
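The renaming is uniform across register classes, not just GPRs: the hunks above move floating-point ($f1), vector ($v2) and special registers ($zero8, $rm, $carry) in exactly the same way. A few spot checks for the hypothetical reprefixPhysRegs() filter sketched earlier (only declared here, so the snippet assumes that definition is linked in):

#include <cassert>
#include <string>

// Hypothetical filter sketched earlier in this patch.
std::string reprefixPhysRegs(const std::string &Line);

int main() {
  // FPR result copy, as in the LFDX/LFSX hunks above.
  assert(reprefixPhysRegs("%f1 = COPY %13") == "$f1 = COPY %13");
  // Vector load through the zero register, as in the LVX hunk.
  assert(reprefixPhysRegs("%24 = LVX %zero8, killed %23")
             == "%24 = LVX $zero8, killed %23");
  // Rounding-mode use on FADD.
  assert(reprefixPhysRegs("%13 = FADD killed %7, killed %12, implicit %rm")
             == "%13 = FADD killed %7, killed %12, implicit $rm");
  return 0;
}

The expected strings match the + lines of the hunks above (memory operands elided).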
--- @@ -3216,8 +3216,8 @@ - { id: 12, class: vsfrc, preferred-register: '' } - { id: 13, class: vsfrc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3240,28 +3240,28 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF %4 = INSERT_SUBREG %5, killed %3, 1 %6 = LI8 100 - %7 = LXSDX %0, killed %6, implicit %rm :: (load 8 from %ir.arrayidx, !tbaa !12) + %7 = LXSDX %0, killed %6, implicit $rm :: (load 8 from %ir.arrayidx, !tbaa !12) ; CHECK: LXSD 100, %0 ; CHECK-LATE: lxsd 0, 100(3) %8 = ADDI %2, 2 %10 = IMPLICIT_DEF %9 = INSERT_SUBREG %10, killed %8, 1 %11 = LI8 -120 - %12 = LXSDX %0, killed %11, implicit %rm :: (load 8 from %ir.arrayidx3, !tbaa !12) + %12 = LXSDX %0, killed %11, implicit $rm :: (load 8 from %ir.arrayidx3, !tbaa !12) ; CHECK: LXSD -120, %0 ; CHECK-LATE: lxsd 1, -120(3) - %13 = XSADDDP killed %7, killed %12, implicit %rm - %f1 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %f1 + %13 = XSADDDP killed %7, killed %12, implicit $rm + $f1 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $f1 ... --- @@ -3289,8 +3289,8 @@ - { id: 12, class: vssrc, preferred-register: '' } - { id: 13, class: vssrc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3313,10 +3313,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -3333,8 +3333,8 @@ ; CHECK: LXSSP -92, %0 ; CHECK-LATE: lxssp 1, -92(3) %13 = XSADDSP killed %7, killed %12 - %f1 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %f1 + $f1 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $f1 ... --- @@ -3362,8 +3362,8 @@ - { id: 12, class: vrrc, preferred-register: '' } - { id: 13, class: vrrc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3386,10 +3386,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = ADDI %2, 1 %5 = IMPLICIT_DEF @@ -3406,8 +3406,8 @@ ; CHECK: LXV -16, %0 ; CHECK-LATE: lxv 35, -16(3) %13 = VADDUWM killed %12, killed %7 - %v2 = COPY %13 - BLR8 implicit %lr8, implicit %rm, implicit %v2 + $v2 = COPY %13 + BLR8 implicit $lr8, implicit $rm, implicit $v2 ... 
--- @@ -3425,8 +3425,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3449,16 +3449,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI 99 %3 = COPY %1.sub_32 %2 = OR %0, %3 ; CHECK: ORI %3, 99 ; CHECK-LATE: ori 3, 4, 99 - %x3 = EXTSW_32_64 %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3475,8 +3475,8 @@ - { id: 1, class: g8rc, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3499,15 +3499,15 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 777 %2 = OR8 %1, %0 ; CHECK: ORI8 %1, 777 ; CHECK-LATE: ori 3, 4, 777 - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3523,7 +3523,7 @@ - { id: 0, class: gprc, preferred-register: '' } - { id: 1, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3546,14 +3546,14 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI 777 %1 = ORI %0, 88 ; CHECK: LI 857 ; CHECK-LATE: li 3, 857 - %x3 = EXTSW_32_64 %1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3569,7 +3569,7 @@ - { id: 0, class: g8rc, preferred-register: '' } - { id: 1, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3592,14 +3592,14 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI8 8721 %1 = ORI8 %0, 99 ; CHECK: LI8 8819 ; CHECK-LATE: li 3, 8819 - %x3 = COPY %1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3618,8 +3618,8 @@ - { id: 3, class: gprc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3642,17 +3642,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI 14 %4 = RLDCL %0, killed %3, 0 ; CHECK: RLDICL %0, 14, 0 ; CHECK-LATE: rotldi 3, 3, 14 - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -3673,8 +3673,8 @@ - { id: 5, class: crrc, preferred-register: '' } - { id: 6, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3697,19 +3697,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI 37 - %4 = RLDCLo %0, killed %3, 0, implicit-def %cr0 - ; CHECK: RLDICLo %0, 37, 0, implicit-def %cr0 + %4 = RLDCLo %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICLo %0, 37, 0, implicit-def $cr0 ; CHECK-LATE: rldicl. 5, 3, 37, 0 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq - %x3 = COPY %6 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %6 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3728,8 +3728,8 @@ - { id: 3, class: gprc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3752,17 +3752,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI 0 %4 = RLDCR %0, killed %3, 0 ; CHECK: RLDICR %0, 0, 0 ; CHECK-LATE: rldicr 3, 3, 0, 0 - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3783,8 +3783,8 @@ - { id: 5, class: crrc, preferred-register: '' } - { id: 6, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3807,19 +3807,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI 18 - %4 = RLDCRo %0, killed %3, 0, implicit-def %cr0 - ; CHECK: RLDICRo %0, 18, 0, implicit-def %cr0 + %4 = RLDCRo %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICRo %0, 18, 0, implicit-def $cr0 ; CHECK-LATE: rldicr. 5, 3, 18, 0 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq - %x3 = COPY %6 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %6 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3835,7 +3835,7 @@ - { id: 0, class: g8rc, preferred-register: '' } - { id: 1, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3858,14 +3858,14 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI8 -1 %1 = RLDICL %0, 53, 49 ; CHECK: LI8 32767 ; CHECK-LATE: li 3, 32767 - %x3 = COPY %1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -3884,8 +3884,8 @@ - { id: 3, class: crrc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3908,18 +3908,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 -1 - %2 = RLDICLo %0, 53, 48, implicit-def %cr0 + %2 = RLDICLo %0, 53, 48, implicit-def $cr0 ; CHECK: ANDIo8 %0, 65535 ; CHECK-LATE: li 3, -1 ; CHECK-LATE: andi. 3, 3, 65535 - %3 = COPY killed %cr0 + %3 = COPY killed $cr0 %4 = ISEL8 %1, %2, %3.sub_eq - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3938,8 +3938,8 @@ - { id: 3, class: crrc, preferred-register: '' } - { id: 4, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -3962,17 +3962,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 200 - %2 = RLDICLo %0, 61, 3, implicit-def %cr0 + %2 = RLDICLo %0, 61, 3, implicit-def $cr0 ; CHECK-NOT: ANDI ; CHECK-LATE-NOT: andi. - %3 = COPY killed %cr0 + %3 = COPY killed $cr0 %4 = ISEL8 %1, %2, %3.sub_eq - %x3 = COPY %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -3991,7 +3991,7 @@ - { id: 3, class: g8rc, preferred-register: '' } - { id: 4, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4014,17 +4014,17 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = COPY %0.sub_32 %3 = IMPLICIT_DEF %2 = LI 17 %4 = RLWINM killed %2, 4, 20, 27 ; CHECK: LI 272 ; CHECK-LATE: li 3, 272 - %x3 = EXTSW_32_64 %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4043,7 +4043,7 @@ - { id: 3, class: g8rc, preferred-register: '' } - { id: 4, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4066,17 +4066,17 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = COPY %0.sub_32 %3 = IMPLICIT_DEF %2 = LI 2 %4 = RLWINM killed %2, 31, 0, 31 ; CHECK: LI 1 ; CHECK-LATE: li 3, 1 - %x3 = EXTSW_32_64 %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -4095,7 +4095,7 @@ - { id: 3, class: g8rc, preferred-register: '' } - { id: 4, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4118,17 +4118,17 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %0 = COPY %x3 + %0 = COPY $x3 %1 = COPY %0.sub_32 %3 = IMPLICIT_DEF %2 = LI 1 %4 = RLWINM killed %2, 31, 0, 31 ; CHECK: RLWINM killed %2, 31, 0, 31 ; CHECK-LATE: rotlwi 3, 3, 31 - %x3 = EXTSW_32_64 %4 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %4 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4144,7 +4144,7 @@ - { id: 0, class: g8rc, preferred-register: '' } - { id: 1, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4167,14 +4167,14 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI8 234 %1 = RLWINM8 %0, 4, 20, 27 ; CHECK: LI8 3744 ; CHECK-LATE: li 3, 3744 - %x3 = COPY %1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4198,8 +4198,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4222,23 +4222,23 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -22 - %4 = RLWINMo %3, 0, 24, 31, implicit-def %cr0 + %4 = RLWINMo %3, 0, 24, 31, implicit-def $cr0 ; CHECK: ANDIo %3, 234 ; CHECK-LATE: li 3, -22 ; CHECK-LATE: andi. 5, 3, 234 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4262,8 +4262,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4286,22 +4286,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -22 - %4 = RLWINMo %3, 5, 24, 31, implicit-def %cr0 + %4 = RLWINMo %3, 5, 24, 31, implicit-def $cr0 ; CHECK-NOT: ANDI ; CHECK-LATE-NOT: andi. - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -4324,8 +4324,8 @@ - { id: 7, class: crrc, preferred-register: '' } - { id: 8, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4348,20 +4348,20 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI8 -18 - %3 = RLWINM8o %2, 4, 20, 27, implicit-def %cr0 + %3 = RLWINM8o %2, 4, 20, 27, implicit-def $cr0 ; CHECK: ANDIo8 %2, 3808 ; CHECK-LATE: li 3, -18 ; CHECK-LATE: andi. 3, 3, 3808 - %7 = COPY killed %cr0 + %7 = COPY killed $cr0 %6 = RLDICL killed %3, 0, 32 %8 = ISEL8 %1, %6, %7.sub_eq - %x3 = COPY %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4379,8 +4379,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4403,16 +4403,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 13 %3 = SLD %0, killed %2 ; CHECK: RLDICR %0, 13, 50 ; CHECK-LATE: sldi 3, 3, 13 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4432,8 +4432,8 @@ - { id: 4, class: crrc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4456,18 +4456,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 17 - %3 = SLDo %0, killed %2, implicit-def %cr0 - ; CHECK: RLDICRo %0, 17, 46, implicit-def %cr0 + %3 = SLDo %0, killed %2, implicit-def $cr0 + ; CHECK: RLDICRo %0, 17, 46, implicit-def $cr0 ; CHECK-LATE: rldicr. 5, 3, 17, 46 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4485,8 +4485,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4509,16 +4509,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 4 %3 = SRD %0, killed %2 ; CHECK: RLDICL %0, 60, 4 ; CHECK-LATE: rldicl 3, 3, 60, 4 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -4538,8 +4538,8 @@ - { id: 4, class: crrc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4562,18 +4562,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 17 - %3 = SRDo %0, killed %2, implicit-def %cr0 - ; CHECK: RLDICLo %0, 47, 17, implicit-def %cr0 + %3 = SRDo %0, killed %2, implicit-def $cr0 + ; CHECK: RLDICLo %0, 47, 17, implicit-def $cr0 ; CHECK-LATE: rldicl. 5, 3, 47, 17 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4596,8 +4596,8 @@ - { id: 7, class: g8rc, preferred-register: '' } - { id: 8, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4620,17 +4620,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = COPY %1.sub_32 %5 = LI 21 %8 = SLW killed %2, killed %5 ; CHECK: RLWINM killed %2, 21, 0, 10 ; CHECK-LATE: slwi 3, 4, 21 - %x3 = EXTSW_32_64 %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4654,8 +4654,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4678,22 +4678,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 11 %3 = COPY %0.sub_32 - %4 = SLWo %3, %2, implicit-def %cr0 - ; CHECK: RLWINMo %3, 11, 0, 20, implicit-def %cr0 + %4 = SLWo %3, %2, implicit-def $cr0 + ; CHECK: RLWINMo %3, 11, 0, 20, implicit-def $cr0 ; CHECK-LATE: rlwinm. 5, 3, 11, 0, 20 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -4716,8 +4716,8 @@ - { id: 7, class: g8rc, preferred-register: '' } - { id: 8, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4740,17 +4740,17 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 8 %5 = COPY %0.sub_32 %8 = SRW killed %5, killed %2 ; CHECK: RLWINM killed %5, 24, 8, 31 ; CHECK-LATE: srwi 3, 3, 8 - %x3 = EXTSW_32_64 %8 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %8 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4774,8 +4774,8 @@ - { id: 8, class: g8rc, preferred-register: '' } - { id: 9, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4798,22 +4798,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 7 %3 = COPY %0.sub_32 - %4 = SRWo %3, %2, implicit-def %cr0 + %4 = SRWo %3, %2, implicit-def $cr0 ; CHECK: RLWINMo %3, 25, 7, 31 ; CHECK-LATE: rlwinm. 5, 3, 25, 7, 31 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq %8 = IMPLICIT_DEF %7 = INSERT_SUBREG %8, killed %6, 1 %9 = RLDICL killed %7, 0, 32 - %x3 = COPY %9 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %9 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4833,8 +4833,8 @@ - { id: 4, class: gprc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4857,18 +4857,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 15 %3 = COPY %0.sub_32 - %4 = SRAW killed %3, killed %2, implicit-def dead %carry - ; CHECK: SRAWI killed %3, 15, implicit-def dead %carry + %4 = SRAW killed %3, killed %2, implicit-def dead $carry + ; CHECK: SRAWI killed %3, 15, implicit-def dead $carry ; CHECK-LATE: srawi 3, 3, 15 %5 = EXTSW_32_64 killed %4 - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -4890,8 +4890,8 @@ - { id: 6, class: gprc, preferred-register: '' } - { id: 7, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4914,20 +4914,20 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 8 %3 = COPY %0.sub_32 - %4 = SRAWo killed %3, %2, implicit-def dead %carry, implicit-def %cr0 - ; CHECK: SRAWIo killed %3, 8, implicit-def dead %carry, implicit-def %cr0 + %4 = SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRAWIo killed %3, 8, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: srawi. 3, 3, 8 - %5 = COPY killed %cr0 + %5 = COPY killed $cr0 %6 = ISEL %2, %4, %5.sub_eq %7 = EXTSW_32_64 killed %6 - %x3 = COPY %7 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %7 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4945,8 +4945,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -4969,16 +4969,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 44 - %3 = SRAD %0, killed %2, implicit-def dead %carry - ; CHECK: SRADI %0, 44, implicit-def dead %carry + %3 = SRAD %0, killed %2, implicit-def dead $carry + ; CHECK: SRADI %0, 44, implicit-def dead $carry ; CHECK-LATE: sradi 3, 3, 44 - %x3 = COPY %3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -4998,8 +4998,8 @@ - { id: 4, class: crrc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5022,18 +5022,18 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 - %0 = COPY %x3 + %1 = COPY $x4 + %0 = COPY $x3 %2 = LI 61 - %3 = SRADo %0, killed %2, implicit-def dead %carry, implicit-def %cr0 - ; CHECK: SRADIo %0, 61, implicit-def dead %carry, implicit-def %cr0 + %3 = SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRADIo %0, 61, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: sradi. 3, 3, 61 - %4 = COPY killed %cr0 + %4 = COPY killed $cr0 %5 = ISEL8 %1, %3, %4.sub_eq - %x3 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -5062,9 +5062,9 @@ - { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5087,11 +5087,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %3 = COPY %1.sub_32 %4 = COPY %2.sub_32 %5 = ADDI %4, 1 @@ -5108,7 +5108,7 @@ %14 = STBUX %3, %0, killed %12 :: (store 1 into %ir.arrayidx3, !tbaa !3) ; CHECK: STBU %3, 777, %0 ; CHECK-LATE: 4, 777(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5135,9 +5135,9 @@ - { id: 11, class: g8rc, preferred-register: '' } - { id: 12, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5160,10 +5160,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 + %2 = COPY $x5 + %1 = COPY $x4 %0 = LI8 975 %3 = COPY %1.sub_32 %4 = COPY %2.sub_32 @@ -5181,7 +5181,7 @@ STBX %3, %0, killed %12 :: (store 1 into %ir.arrayidx3, !tbaa !3) ; CHECK: STB %3, 975, killed %12 ; CHECK-LATE: stb 4, 975(5) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5210,9 +5210,9 @@ - { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5235,11 +5235,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %3 = COPY %1.sub_32 %4 = COPY %2.sub_32 %5 = ADDI %4, 1 @@ -5256,7 +5256,7 @@ %14 = STHUX %3, %0, killed %12 :: (store 2 into %ir.arrayidx3, !tbaa !6) ; CHECK: STHU %3, -761, %0 ; CHECK-LATE: sthu 4, -761(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... 
--- @@ -5283,9 +5283,9 @@ - { id: 11, class: g8rc, preferred-register: '' } - { id: 12, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5308,11 +5308,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %3 = COPY %1.sub_32 %4 = COPY %2.sub_32 %5 = ADDI %4, 1 @@ -5329,7 +5329,7 @@ STHX %3, %0, killed %12 :: (store 1 into %ir.arrayidx3, !tbaa !3) ; CHECK: STH %3, -900, %0 ; CHECK-LATE: sth 4, -900(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5358,9 +5358,9 @@ - { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5383,11 +5383,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %3 = COPY %1.sub_32 %4 = COPY %2.sub_32 %5 = ADDI %4, 1 @@ -5404,7 +5404,7 @@ %14 = STWUX %3, %0, killed %12 :: (store 4 into %ir.arrayidx3, !tbaa !8) ; CHECK: STWU %3, 0, %0 ; CHECK-LATE: stwu 4, 0(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5431,9 +5431,9 @@ - { id: 11, class: g8rc, preferred-register: '' } - { id: 12, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5456,11 +5456,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %3 = COPY %1.sub_32 %4 = COPY %2.sub_32 %5 = ADDI %4, 1 @@ -5477,7 +5477,7 @@ STWX %3, %0, killed %12 :: (store 4 into %ir.arrayidx3, !tbaa !8) ; CHECK: STW %3, 99, %0 ; CHECK-LATE: stw 4, 99(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... 
--- @@ -5505,9 +5505,9 @@ - { id: 12, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5530,11 +5530,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $x4 + %0 = COPY $x3 %3 = COPY %2.sub_32 %4 = ADDI %3, 1 %6 = IMPLICIT_DEF @@ -5550,7 +5550,7 @@ %13 = STDUX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !10) ; CHECK: STDU %1, -8, %0 ; CHECK-LATE: stdu 4, -8(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5576,9 +5576,9 @@ - { id: 10, class: g8rc, preferred-register: '' } - { id: 11, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5601,10 +5601,10 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %2 = COPY %x5 - %1 = COPY %x4 + %2 = COPY $x5 + %1 = COPY $x4 %0 = LI8 1000 %3 = COPY %2.sub_32 %4 = ADDI %3, 1 @@ -5621,7 +5621,7 @@ STDX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !10) ; CHECK: STD %1, 1000, killed %11 ; CHECK-LATE: 4, 1000(6) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5647,9 +5647,9 @@ - { id: 10, class: g8rc, preferred-register: '' } - { id: 11, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%f1', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$f1', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5672,11 +5672,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %f1, %x5 + liveins: $x3, $f1, $x5 - %2 = COPY %x5 - %1 = COPY %f1 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $f1 + %0 = COPY $x3 %3 = COPY %2.sub_32 %4 = ADDI %3, 1 %6 = IMPLICIT_DEF @@ -5692,7 +5692,7 @@ STFSX %1, %0, killed %11 :: (store 4 into %ir.arrayidx3, !tbaa !14) ; CHECK: STFS %1, -401, %0 ; CHECK-LATE: stfs 1, -401(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... 
--- @@ -5720,9 +5720,9 @@ - { id: 12, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%f1', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$f1', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5745,11 +5745,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %f1, %x5 + liveins: $x3, $f1, $x5 - %2 = COPY %x5 - %1 = COPY %f1 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $f1 + %0 = COPY $x3 %3 = COPY %2.sub_32 %4 = ADDI %3, 1 %6 = IMPLICIT_DEF @@ -5765,7 +5765,7 @@ %13 = STFSUX %1, %0, killed %11 :: (store 4 into %ir.arrayidx3, !tbaa !14) ; CHECK: STFSU %1, 987, %0 ; CHECK-LATE: stfsu 1, 987(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5791,9 +5791,9 @@ - { id: 10, class: g8rc, preferred-register: '' } - { id: 11, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%f1', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$f1', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5816,11 +5816,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %f1, %x5 + liveins: $x3, $f1, $x5 - %2 = COPY %x5 - %1 = COPY %f1 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $f1 + %0 = COPY $x3 %3 = COPY %2.sub_32 %4 = ADDI %3, 1 %6 = IMPLICIT_DEF @@ -5836,7 +5836,7 @@ STFDX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !12) ; CHECK: STFD %1, -873, %0 ; CHECK-LATE: stfd 1, -873(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5864,9 +5864,9 @@ - { id: 12, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%f1', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$f1', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5889,11 +5889,11 @@ constants: body: | bb.0.entry: - liveins: %x3, %f1, %x5 + liveins: $x3, $f1, $x5 - %2 = COPY %x5 - %1 = COPY %f1 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $f1 + %0 = COPY $x3 %3 = COPY %2.sub_32 %4 = ADDI %3, 1 %6 = IMPLICIT_DEF @@ -5909,7 +5909,7 @@ %13 = STFDUX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !12) ; CHECK: STFDU %1, 6477, %0 ; CHECK-LATE: stfdu 1, 6477(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... 
--- @@ -5927,9 +5927,9 @@ - { id: 2, class: g8rc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%f1', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$f1', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -5952,16 +5952,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %f1, %x5 + liveins: $x3, $f1, $x5 - %2 = COPY %x5 - %1 = COPY %f1 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $f1 + %0 = COPY $x3 %3 = LI8 444 STXSSPX %1, %0, killed %3 :: (store 4 into %ir.arrayidx, !tbaa !14) ; CHECK: STXSSP %1, 444, %0 ; CHECK-LATE: stxssp 1, 444(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -5979,9 +5979,9 @@ - { id: 2, class: g8rc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%f1', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$f1', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6004,16 +6004,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %f1, %x5 + liveins: $x3, $f1, $x5 - %2 = COPY %x5 - %1 = COPY %f1 - %0 = COPY %x3 + %2 = COPY $x5 + %1 = COPY $f1 + %0 = COPY $x3 %3 = LI8 4 - STXSDX %1, %0, killed %3, implicit %rm :: (store 8 into %ir.arrayidx, !tbaa !12) + STXSDX %1, %0, killed %3, implicit $rm :: (store 8 into %ir.arrayidx, !tbaa !12) ; CHECK: STXSD %1, 4, %0 ; CHECK-LATE: stxsd 1, 4(3) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... --- @@ -6031,9 +6031,9 @@ - { id: 2, class: g8rc, preferred-register: '' } - { id: 3, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%v2', virtual-reg: '%1' } - - { reg: '%x7', virtual-reg: '%2' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$v2', virtual-reg: '%1' } + - { reg: '$x7', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6056,16 +6056,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %v2, %x7 + liveins: $x3, $v2, $x7 - %2 = COPY %x7 - %1 = COPY %v2 + %2 = COPY $x7 + %1 = COPY $v2 %0 = LI8 16 %3 = RLDICR %2, 4, 59 STXVX %1, %0, killed %3 :: (store 16 into %ir.arrayidx, !tbaa !3) ; CHECK: STXV %1, 16, killed %3 ; CHECK-LATE: stxv 34, 16(4) - BLR8 implicit %lr8, implicit %rm + BLR8 implicit $lr8, implicit $rm ... 
--- @@ -6088,10 +6088,10 @@ - { id: 7, class: gprc, preferred-register: '' } - { id: 8, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } - - { reg: '%x6', virtual-reg: '%3' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } + - { reg: '$x6', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6114,22 +6114,22 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5, %x6 + liveins: $x3, $x4, $x5, $x6 - %3 = COPY %x6 - %2 = COPY %x5 - %1 = COPY %x4 + %3 = COPY $x6 + %2 = COPY $x5 + %1 = COPY $x4 %6 = COPY %3.sub_32 %7 = COPY %2.sub_32 %8 = COPY %1.sub_32 %0 = LI 55 - %4 = SUBFC %7, %0, implicit-def %carry + %4 = SUBFC %7, %0, implicit-def $carry ; CHECK: SUBFIC %7, 55 ; CHECK-LATE: subfic 3, 5, 55 - %5 = SUBFE %6, %8, implicit-def dead %carry, implicit %carry - %x3 = EXTSW_32_64 %4 - %x4 = EXTSW_32_64 %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4 + %5 = SUBFE %6, %8, implicit-def dead $carry, implicit $carry + $x3 = EXTSW_32_64 %4 + $x4 = EXTSW_32_64 %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4 ... --- @@ -6149,10 +6149,10 @@ - { id: 4, class: g8rc, preferred-register: '' } - { id: 5, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } - - { reg: '%x5', virtual-reg: '%2' } - - { reg: '%x6', virtual-reg: '%3' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } + - { reg: '$x5', virtual-reg: '%2' } + - { reg: '$x6', virtual-reg: '%3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6175,19 +6175,19 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4, %x5, %x6 + liveins: $x3, $x4, $x5, $x6 - %3 = COPY %x6 - %2 = COPY %x5 - %1 = COPY %x4 + %3 = COPY $x6 + %2 = COPY $x5 + %1 = COPY $x4 %0 = LI8 7635 - %4 = SUBFC8 %2, %0, implicit-def %carry + %4 = SUBFC8 %2, %0, implicit-def $carry ; CHECK: SUBFIC8 %2, 7635 ; CHECK-LATE: subfic 3, 5, 7635 - %5 = SUBFE8 %3, %1, implicit-def dead %carry, implicit %carry - %x3 = COPY %4 - %x4 = COPY %5 - BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4 + %5 = SUBFE8 %3, %1, implicit-def dead $carry, implicit $carry + $x3 = COPY %4 + $x4 = COPY %5 + BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4 ... --- @@ -6205,8 +6205,8 @@ - { id: 2, class: gprc, preferred-register: '' } - { id: 3, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6229,16 +6229,16 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 %1 = LI 10101 - %0 = COPY %x3 + %0 = COPY $x3 %3 = COPY %0.sub_32 %2 = XOR %1, %3 ; CHECK: XORI %3, 10101 ; CHECK-LATE: 3, 3, 10101 - %x3 = EXTSW_32_64 %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... 
--- @@ -6255,8 +6255,8 @@ - { id: 1, class: g8rc, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } - - { reg: '%x4', virtual-reg: '%1' } + - { reg: '$x3', virtual-reg: '%0' } + - { reg: '$x4', virtual-reg: '%1' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6279,15 +6279,15 @@ constants: body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %1 = COPY %x4 + %1 = COPY $x4 %0 = LI8 5535 %2 = XOR8 %1, %0 ; CHECK: XORI8 %1, 5535 ; CHECK-LATE: xori 3, 4, 5535 - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -6303,7 +6303,7 @@ - { id: 0, class: gprc, preferred-register: '' } - { id: 1, class: gprc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6326,14 +6326,14 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI 871 %1 = XORI %0, 17 ; CHECK: LI 886 ; CHECK-LATE: li 3, 886 - %x3 = EXTSW_32_64 %1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = EXTSW_32_64 %1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... --- @@ -6349,7 +6349,7 @@ - { id: 0, class: g8rc, preferred-register: '' } - { id: 1, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x3', virtual-reg: '%0' } + - { reg: '$x3', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -6372,13 +6372,13 @@ constants: body: | bb.0.entry: - liveins: %x3 + liveins: $x3 %0 = LI8 453 %1 = XORI8 %0, 17 ; CHECK: LI8 468 ; CHECK-LATE: li 3, 468 - %x3 = COPY %1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... Index: test/CodeGen/PowerPC/debuginfo-split-int.ll =================================================================== --- test/CodeGen/PowerPC/debuginfo-split-int.ll +++ test/CodeGen/PowerPC/debuginfo-split-int.ll @@ -26,10 +26,10 @@ ; CHECK: [[DL:![0-9]+]] = !DILocalVariable(name: "result" ; ; High 32 bits in R3, low 32 bits in R4 -; CHECK: %0:gprc = COPY %r3 -; CHECK: DBG_VALUE debug-use %0, debug-use %noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 0, 32) -; CHECK: %1:gprc = COPY %r4 -; CHECK: DBG_VALUE debug-use %1, debug-use %noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 32, 32) +; CHECK: %0:gprc = COPY $r3 +; CHECK: DBG_VALUE debug-use %0, debug-use $noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 0, 32) +; CHECK: %1:gprc = COPY $r4 +; CHECK: DBG_VALUE debug-use %1, debug-use $noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 32, 32) define void @bar() local_unnamed_addr #0 !dbg !6 { %1 = alloca i64, align 8 %2 = tail call i64 @foo() Index: test/CodeGen/PowerPC/debuginfo-stackarg.ll =================================================================== --- test/CodeGen/PowerPC/debuginfo-stackarg.ll +++ test/CodeGen/PowerPC/debuginfo-stackarg.ll @@ -33,7 +33,7 @@ ; We expect to find a DBG_VALUE refering to the metadata id for bar5, using the lowest ; of the two fixed stack offsets found earlier. 
; CHECK-LABEL: body: -; CHECK: DBG_VALUE %r1, 0, !17, !DIExpression(DW_OP_plus_uconst, 56) +; CHECK: DBG_VALUE $r1, 0, !17, !DIExpression(DW_OP_plus_uconst, 56) entry: tail call void @llvm.dbg.value(metadata i64 %bar1, metadata !13, metadata !DIExpression()), !dbg !18 tail call void @llvm.dbg.value(metadata i64 %bar2, metadata !14, metadata !DIExpression()), !dbg !19 Index: test/CodeGen/PowerPC/expand-isel-1.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-1.mir +++ test/CodeGen/PowerPC/expand-isel-1.mir @@ -22,8 +22,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x0' } - - { reg: '%x3' } + - { reg: '$x0' } + - { reg: '$x3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -40,18 +40,18 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x0, %x3 + liveins: $x0, $x3 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r0 = ISEL %zero, %r0, %cr0gt + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r0 = ISEL $zero, $r0, $cr0gt ; CHECK-LABEL: testExpandISEL - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK-NEXT: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r0 = ADDI %zero, 0 + ; CHECK: $r0 = ADDI $zero, 0 - %x3 = EXTSW_32_64 %r0 + $x3 = EXTSW_32_64 $r0 ... Index: test/CodeGen/PowerPC/expand-isel-10.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-10.mir +++ test/CodeGen/PowerPC/expand-isel-10.mir @@ -23,7 +23,7 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x3' } + - { reg: '$x3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -40,15 +40,15 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x3 + liveins: $x3 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r3 = ISEL %r3, %r3, %cr0gt - %x3 = EXTSW_32_64 %r3 - ; CHECK: %r5 = ADDI %r3, 1 - ; CHECK: %cr0 = CMPWI %r3, 0 - ; CHECK-NOT: %r3 = ISEL %r3, %r3, %cr0gt - ; CHECK: %x3 = EXTSW_32_64 %r3 + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r3 = ISEL $r3, $r3, $cr0gt + $x3 = EXTSW_32_64 $r3 + ; CHECK: $r5 = ADDI $r3, 1 + ; CHECK: $cr0 = CMPWI $r3, 0 + ; CHECK-NOT: $r3 = ISEL $r3, $r3, $cr0gt + ; CHECK: $x3 = EXTSW_32_64 $r3 ... Index: test/CodeGen/PowerPC/expand-isel-2.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-2.mir +++ test/CodeGen/PowerPC/expand-isel-2.mir @@ -22,9 +22,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x0' } - - { reg: '%x3' } - - { reg: '%x4' } + - { reg: '$x0' } + - { reg: '$x3' } + - { reg: '$x4' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,17 +41,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x0, %x3, %x4 + liveins: $x0, $x3, $x4 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r3 = ISEL %zero, %r4, %cr0gt - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r3 = ISEL $zero, $r4, $cr0gt + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: %[[FALSE:bb.[0-9]+]] - ; CHECK: %r3 = ORI %r4, 0 + ; CHECK: $r3 = ORI $r4, 0 ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r3 = ADDI %zero, 0 + ; CHECK: $r3 = ADDI $zero, 0 - %x3 = EXTSW_32_64 %r3 + $x3 = EXTSW_32_64 $r3 ... 
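Note on the ppc-expand-isel tests: with -ppc-gen-isel=false the pass rewrites each
ISEL into a branch diamond, and the CHECK lines in expand-isel-1 and expand-isel-2
above spell out its shape. A minimal sketch condensed from those CHECK lines, in
the post-patch MIR syntax; the block numbers here are illustrative, not taken from
the tests:

    ; before ppc-expand-isel
    $r3 = ISEL $zero, $r4, $cr0gt

    ; after ppc-expand-isel
    bb.0:
      BC $cr0gt, %bb.2        ; jump to the true block when cr0gt is set
    bb.1:                     ; false block: copy the second ISEL operand
      $r3 = ORI $r4, 0
      B %bb.3
    bb.2:                     ; true block: copy the first ISEL operand
      $r3 = ADDI $zero, 0
    bb.3:

When the destination is already identical to one of the source operands, as in
expand-isel-1, the corresponding copy block is omitted and only the other arm is
emitted; expand-isel-10 above shows that an ISEL whose destination matches both
operands is simply deleted.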
Index: test/CodeGen/PowerPC/expand-isel-3.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-3.mir +++ test/CodeGen/PowerPC/expand-isel-3.mir @@ -22,9 +22,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x0' } - - { reg: '%x3' } - - { reg: '%x4' } + - { reg: '$x0' } + - { reg: '$x3' } + - { reg: '$x4' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,18 +41,18 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x0, %x3, %x4 + liveins: $x0, $x3, $x4 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r3 = ISEL %r4, %r0, %cr0gt - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r3 = ISEL $r4, $r0, $cr0gt + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: %[[FALSE:bb.[0-9]+]] - ; CHECK: %r3 = ORI %r0, 0 + ; CHECK: $r3 = ORI $r0, 0 ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r3 = ADDI %r4, 0 + ; CHECK: $r3 = ADDI $r4, 0 - %x3 = EXTSW_32_64 %r3 + $x3 = EXTSW_32_64 $r3 ... Index: test/CodeGen/PowerPC/expand-isel-4.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-4.mir +++ test/CodeGen/PowerPC/expand-isel-4.mir @@ -1,5 +1,5 @@ # This file tests the scenario: ISEL R0, ZERO, RX, CR (X != 0) -# It also tests redundant liveins (%x7) and killed registers. +# It also tests redundant liveins ($x7) and killed registers. # RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s --- | @@ -23,9 +23,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x0' } - - { reg: '%x3' } - - { reg: '%x7' } + - { reg: '$x0' } + - { reg: '$x3' } + - { reg: '$x7' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -42,18 +42,18 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x0, %x3, %x7 + liveins: $x0, $x3, $x7 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r0 = ISEL killed %zero, killed %r5, killed %cr0gt, implicit killed %cr0 - ; CHECK: BC killed %cr0gt, %[[TRUE:bb.[0-9]+]] + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r0 = ISEL killed $zero, killed $r5, killed $cr0gt, implicit killed $cr0 + ; CHECK: BC killed $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: %[[FALSE:bb.[0-9]+]] - ; CHECK: %r0 = ORI killed %r5, 0 + ; CHECK: $r0 = ORI killed $r5, 0 ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r0 = ADDI killed %zero, 0 + ; CHECK: $r0 = ADDI killed $zero, 0 - %x0 = EXTSW_32_64 killed %r0 + $x0 = EXTSW_32_64 killed $r0 ... Index: test/CodeGen/PowerPC/expand-isel-5.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-5.mir +++ test/CodeGen/PowerPC/expand-isel-5.mir @@ -22,8 +22,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x0' } - - { reg: '%x3' } + - { reg: '$x0' } + - { reg: '$x3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -40,15 +40,15 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x0, %x3 + liveins: $x0, $x3 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r0 = ISEL %r5, %r0, %cr0gt - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r0 = ISEL $r5, $r0, $cr0gt + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r0 = ADDI %r5, 0 - %x3 = EXTSW_32_64 %r0 + ; CHECK: $r0 = ADDI $r5, 0 + $x3 = EXTSW_32_64 $r0 ... 
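A further detail pinned down by expand-isel-4 above: the expansion preserves kill
flags from the original ISEL. Condensed from its CHECK lines (the %bb.TRUE label
is illustrative; the test binds it with a [[TRUE:bb.[0-9]+]] FileCheck variable):

    BC killed $cr0gt, %bb.TRUE    ; kill of the condition moves onto the branch
    $r0 = ORI killed $r5, 0       ; false block keeps the kill of its source
    $r0 = ADDI killed $zero, 0    ; true block likewise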
Index: test/CodeGen/PowerPC/expand-isel-6.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-6.mir +++ test/CodeGen/PowerPC/expand-isel-6.mir @@ -23,8 +23,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x0' } - - { reg: '%x3' } + - { reg: '$x0' } + - { reg: '$x3' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,17 +41,17 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x0, %x3 + liveins: $x0, $x3 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r3 = ISEL %zero, %r0, %cr0gt - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r3 = ISEL $zero, $r0, $cr0gt + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: %[[FALSE:bb.[0-9]+]] - ; CHECK: %r3 = ORI %r0, 0 + ; CHECK: $r3 = ORI $r0, 0 ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r3 = ADDI %zero, 0 + ; CHECK: $r3 = ADDI $zero, 0 ... Index: test/CodeGen/PowerPC/expand-isel-7.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-7.mir +++ test/CodeGen/PowerPC/expand-isel-7.mir @@ -22,9 +22,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x3' } - - { reg: '%x4' } - - { reg: '%x5' } + - { reg: '$x3' } + - { reg: '$x4' } + - { reg: '$x5' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,18 +41,18 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %r4 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r5 = ISEL %r3, %r4, %cr0gt - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + $r4 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r5 = ISEL $r3, $r4, $cr0gt + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: %[[FALSE:bb.[0-9]+]] - ; CHECK: %r5 = ORI %r4, 0 + ; CHECK: $r5 = ORI $r4, 0 ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r5 = ADDI %r3, 0 + ; CHECK: $r5 = ADDI $r3, 0 - %x5 = EXTSW_32_64 %r5 + $x5 = EXTSW_32_64 $r5 ... Index: test/CodeGen/PowerPC/expand-isel-8.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-8.mir +++ test/CodeGen/PowerPC/expand-isel-8.mir @@ -22,9 +22,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x3' } - - { reg: '%x4' } - - { reg: '%x5' } + - { reg: '$x3' } + - { reg: '$x4' } + - { reg: '$x5' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,25 +41,25 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x3, %x4, %x5 + liveins: $x3, $x4, $x5 - %r4 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r5 = ISEL %r3, %r4, %cr0gt - %r3 = ISEL %r4, %r5, %cr0gt - %r4 = ISEL %r3, %r5, %cr0gt - ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]] + $r4 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r5 = ISEL $r3, $r4, $cr0gt + $r3 = ISEL $r4, $r5, $cr0gt + $r4 = ISEL $r3, $r5, $cr0gt + ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]] ; CHECK: %[[FALSE:bb.[0-9]+]] - ; CHECK: %r5 = ORI %r4, 0 - ; CHECK: %r3 = ORI %r5, 0 - ; CHECK: %r4 = ORI %r5, 0 + ; CHECK: $r5 = ORI $r4, 0 + ; CHECK: $r3 = ORI $r5, 0 + ; CHECK: $r4 = ORI $r5, 0 ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]] ; CHECK: [[TRUE]] - ; CHECK: %r5 = ADDI %r3, 0 - ; CHECK: %r3 = ADDI %r4, 0 - ; CHECK: %r4 = ADDI %r3, 0 + ; CHECK: $r5 = ADDI $r3, 0 + ; CHECK: $r3 = ADDI $r4, 0 + ; CHECK: $r4 = ADDI $r3, 0 - %x5 = EXTSW_32_64 %r5 - %x3 = EXTSW_32_64 %r3 + $x5 = EXTSW_32_64 $r5 + $x3 = EXTSW_32_64 $r3 ... 
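expand-isel-8 above covers the case of several ISELs guarded by the same condition
bit: they share a single diamond instead of each getting its own, with the ORI
copies grouped in the false block and the matching ADDI copies, in the same
destination order, in the true block. Condensed from its CHECK lines, with
illustrative labels:

    BC $cr0gt, %bb.TRUE
    ; false block
    $r5 = ORI $r4, 0
    $r3 = ORI $r5, 0
    $r4 = ORI $r5, 0
    B %bb.SUCC
    ; true block
    $r5 = ADDI $r3, 0
    $r3 = ADDI $r4, 0
    $r4 = ADDI $r3, 0

expand-isel-9, next, checks the remaining degenerate form: an ISEL whose two
source operands are the same register folds to a plain copy, emitted as
$r3 = OR $r4, $r4.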
Index: test/CodeGen/PowerPC/expand-isel-9.mir =================================================================== --- test/CodeGen/PowerPC/expand-isel-9.mir +++ test/CodeGen/PowerPC/expand-isel-9.mir @@ -23,8 +23,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%x3' } - - { reg: '%x4' } + - { reg: '$x3' } + - { reg: '$x4' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -41,14 +41,14 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x3, %x4 + liveins: $x3, $x4 - %r5 = ADDI %r3, 1 - %cr0 = CMPWI %r3, 0 - %r3 = ISEL %r4, %r4, %cr0gt + $r5 = ADDI $r3, 1 + $cr0 = CMPWI $r3, 0 + $r3 = ISEL $r4, $r4, $cr0gt ; Test fold ISEL to a copy - ; CHECK: %r3 = OR %r4, %r4 + ; CHECK: $r3 = OR $r4, $r4 - %x3 = EXTSW_32_64 %r3 + $x3 = EXTSW_32_64 $r3 ... Index: test/CodeGen/PowerPC/fp64-to-int16.ll =================================================================== --- test/CodeGen/PowerPC/fp64-to-int16.ll +++ test/CodeGen/PowerPC/fp64-to-int16.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: xori 3, 3, 65534 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: srwi 3, 3, 5 -; CHECK-NEXT: # implicit-def: %x4 +; CHECK-NEXT: # implicit-def: $x4 ; CHECK-NEXT: mr 4, 3 ; CHECK-NEXT: mr 3, 4 ; CHECK-NEXT: blr Index: test/CodeGen/PowerPC/livephysregs.mir =================================================================== --- test/CodeGen/PowerPC/livephysregs.mir +++ test/CodeGen/PowerPC/livephysregs.mir @@ -4,49 +4,49 @@ # recalculated list if okay and contains all the non-saved and saved CSRs. # CHECK-LABEL: name: func # CHECK: bb.3: -# CHECK-NEXT: liveins: %x30, %x29, %x3, %x6 -# CHECK: %x4 = RLDICR killed %x6, 16, 47 -# CHECK: %x3 = OR8 killed %x4, killed %x3 -# CHECK: BLR8 implicit %lr8, implicit %rm, implicit %x3 +# CHECK-NEXT: liveins: $x30, $x29, $x3, $x6 +# CHECK: $x4 = RLDICR killed $x6, 16, 47 +# CHECK: $x3 = OR8 killed $x4, killed $x3 +# CHECK: BLR8 implicit $lr8, implicit $rm, implicit $x3 --- name: func tracksRegLiveness: true fixedStack: - - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%x30' } - - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%x29' } + - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$x30' } + - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$x29' } - { id: 2, offset: -8, size: 8, alignment: 8, isImmutable: true, isAliased: false } body: | bb.0: - liveins: %x3, %x5, %x29, %x30 + liveins: $x3, $x5, $x29, $x30 - %x6 = RLWINM8 %x3, 16, 16, 31 - %x3 = RLDICL killed %x3, 0, 48 - BC undef %cr5lt, %bb.3 + $x6 = RLWINM8 $x3, 16, 16, 31 + $x3 = RLDICL killed $x3, 0, 48 + BC undef $cr5lt, %bb.3 bb.1: - liveins: %x3, %x6, %x29, %x30 + liveins: $x3, $x6, $x29, $x30 - %x4 = RLDICR killed %x6, 16, 47 - %x3 = OR8 killed %x4, killed %x3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x4 = RLDICR killed $x6, 16, 47 + $x3 = OR8 killed $x4, killed $x3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 bb.3: - liveins: %x3, %x5, %x6, %x29, %x30 + liveins: $x3, $x5, $x6, $x29, $x30 - dead %x5 = ADD8 %x5, %x6 - BC undef %cr5lt, %bb.1 + dead $x5 = ADD8 $x5, $x6 + BC undef $cr5lt, %bb.1 bb.6: - liveins: %x3, %x6, %x29, %x30 - STD killed %x29, -24, %x1 :: (store 8 into %fixed-stack.1) - STD killed %x30, -16, %x1 :: (store 8 into %fixed-stack.0, align 16) - NOP implicit-def dead %x29 - NOP implicit-def dead %x30 + liveins: $x3, $x6, $x29, $x30 + STD killed $x29, -24, $x1 :: (store 8 into %fixed-stack.1) + STD killed 
$x30, -16, $x1 :: (store 8 into %fixed-stack.0, align 16) + NOP implicit-def dead $x29 + NOP implicit-def dead $x30 - %x30 = LD -16, %x1 :: (load 8 from %fixed-stack.0, align 16) - %x29 = LD -24, %x1 :: (load 8 from %fixed-stack.1) + $x30 = LD -16, $x1 :: (load 8 from %fixed-stack.0, align 16) + $x29 = LD -24, $x1 :: (load 8 from %fixed-stack.1) - %x4 = RLDICR killed %x6, 16, 47 - %x3 = OR8 killed %x4, killed %x3 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x4 = RLDICR killed $x6, 16, 47 + $x3 = OR8 killed $x4, killed $x3 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ... Index: test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir =================================================================== --- test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir +++ test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir @@ -68,9 +68,9 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - liveins: %x2 + liveins: $x2 - %0 = ADDIStocHA %x2, @b + %0 = ADDIStocHA $x2, @b %1 = LD target-flags(ppc-toc-lo) @b, killed %0 :: (load 8 from @b) %2 = LWZ 0, %1 :: (load 4 from %ir.0) %3 = LI 0 @@ -83,7 +83,7 @@ STW %4, 0, %1 :: (store 4 into %ir.0) %10 = EXTSW_32_64 %8 STW %8, 0, %1 :: (store 4 into %ir.0) - %x3 = COPY %10 - BLR8 implicit %x3, implicit %lr8, implicit %rm + $x3 = COPY %10 + BLR8 implicit $x3, implicit $lr8, implicit $rm ... Index: test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll =================================================================== --- test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll +++ test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll @@ -7,12 +7,12 @@ %2 = zext i32 %1 to i64 %3 = shl i64 %2, 48 %4 = ashr exact i64 %3, 48 -; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead %cr0 +; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead $cr0 ; CHECK: CMPLDI ; CHECK: BCC -; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def %cr0 -; CHECK: COPY %cr0 +; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def $cr0 +; CHECK: COPY $cr0 ; CHECK: BCC %5 = icmp eq i64 %4, 0 br i1 %5, label %foo, label %bar @@ -26,8 +26,8 @@ ; CHECK-LABEL: fn2 define signext i32 @fn2(i64 %a, i64 %b) { -; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def %cr0 -; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed %cr +; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def $cr0 +; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed $cr ; CHECK: BCC 12, killed [[CREG]] %1 = or i64 %b, %a %2 = icmp sgt i64 %1, -1 @@ -42,8 +42,8 @@ ; CHECK-LABEL: fn3 define signext i32 @fn3(i32 %a) { -; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def %cr0 -; CHECK: [[CREG:[^, ]+]]:crrc = COPY %cr0 +; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def $cr0 +; CHECK: [[CREG:[^, ]+]]:crrc = COPY $cr0 ; CHECK: BCC 76, killed [[CREG]] %1 = and i32 %a, 10 %2 = icmp ne i32 %1, 0 Index: test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir =================================================================== --- test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir +++ test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir @@ -63,10 +63,10 @@ - { id: 26, class: g8rc_and_g8rc_nox0 } - { id: 27, class: g8rc_and_g8rc_nox0 } liveins: - - { reg: '%x3', virtual-reg: '%6' } - - { reg: '%x4', virtual-reg: '%7' } - - { reg: '%x5', virtual-reg: '%8' } - - { reg: '%x6', virtual-reg: '%9' } + - { reg: '$x3', virtual-reg: '%6' } + - { reg: '$x4', virtual-reg: '%7' } + - { reg: '$x5', virtual-reg: '%8' } + - { reg: '$x6', virtual-reg: '%9' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -84,12 +84,12 @@ body: | bb.0.top: successors: %bb.1.loop - liveins: %x3, %x4, %x5, %x6 + liveins: $x3, 
$x4, $x5, $x6 - %9 = COPY %x6 - %8 = COPY %x5 - %7 = COPY %x4 - %6 = COPY %x3 + %9 = COPY $x6 + %8 = COPY $x5 + %7 = COPY $x4 + %6 = COPY $x3 %14 = COPY %9 %13 = COPY %8 %12 = COPY %7 @@ -104,8 +104,8 @@ %1 = PHI %12, %bb.0.top, %5, %bb.3.loop %2 = PHI %13, %bb.0.top, %4, %bb.3.loop %3 = PHI %14, %bb.0.top, %5, %bb.3.loop - %15 = SUBFC8 %3, %1, implicit-def %carry - %16 = SUBFE8 %2, %0, implicit-def dead %carry, implicit %carry + %15 = SUBFC8 %3, %1, implicit-def $carry + %16 = SUBFE8 %2, %0, implicit-def dead $carry, implicit $carry %17 = ADDI8 %16, -1 %18 = ADDI8 %15, -1 %19 = ANDC8 killed %17, %16 @@ -114,8 +114,8 @@ %24 = CNTLZD killed %20 %25 = CMPLDI %15, 0 BCC 76, %25, %bb.2.loop - ; CHECK: SUBFC8o %3, %1, implicit-def %carry, implicit-def %cr0 - ; CHECK: COPY killed %cr0 + ; CHECK: SUBFC8o %3, %1, implicit-def $carry, implicit-def $cr0 + ; CHECK: COPY killed $cr0 ; CHECK: BCC bb.4: Index: test/CodeGen/PowerPC/quadint-return.ll =================================================================== --- test/CodeGen/PowerPC/quadint-return.ll +++ test/CodeGen/PowerPC/quadint-return.ll @@ -14,6 +14,6 @@ ; CHECK: ********** Function: foo ; CHECK: ********** FAST REGISTER ALLOCATION ********** -; CHECK: %x3 = COPY %{{[0-9]+}} -; CHECK-NEXT: %x4 = COPY %{{[0-9]+}} +; CHECK: $x3 = COPY %{{[0-9]+}} +; CHECK-NEXT: $x4 = COPY %{{[0-9]+}} ; CHECK-NEXT: BLR Index: test/CodeGen/PowerPC/scavenging.mir =================================================================== --- test/CodeGen/PowerPC/scavenging.mir +++ test/CodeGen/PowerPC/scavenging.mir @@ -5,12 +5,12 @@ tracksRegLiveness: true body: | bb.0: - ; CHECK: [[REG0:%r[0-9]+]] = LI 42 + ; CHECK: [[REG0:\$r[0-9]+]] = LI 42 ; CHECK-NEXT: NOP implicit killed [[REG0]] %0 : gprc = LI 42 NOP implicit %0 - ; CHECK: [[REG1:%r[0-9]+]] = LI 42 + ; CHECK: [[REG1:\$r[0-9]+]] = LI 42 ; CHECK-NEXT: NOP ; CHECK-NEXT: NOP implicit [[REG1]] ; CHECK-NEXT: NOP @@ -21,50 +21,50 @@ NOP NOP implicit %1 - ; CHECK: [[REG2:%r[0-9]+]] = LI 42 + ; CHECK: [[REG2:\$r[0-9]+]] = LI 42 ; CHECK-NEXT: NOP implicit [[REG2]] %2 : gprc = LI 42 NOP implicit %2 - %x0 = IMPLICIT_DEF - %x1 = IMPLICIT_DEF - %x2 = IMPLICIT_DEF - %x3 = IMPLICIT_DEF - %x4 = IMPLICIT_DEF - %x27 = IMPLICIT_DEF - %x28 = IMPLICIT_DEF - %x29 = IMPLICIT_DEF - %x30 = IMPLICIT_DEF + $x0 = IMPLICIT_DEF + $x1 = IMPLICIT_DEF + $x2 = IMPLICIT_DEF + $x3 = IMPLICIT_DEF + $x4 = IMPLICIT_DEF + $x27 = IMPLICIT_DEF + $x28 = IMPLICIT_DEF + $x29 = IMPLICIT_DEF + $x30 = IMPLICIT_DEF - ; CHECK-NOT: %x0 = LI 42 - ; CHECK-NOT: %x1 = LI 42 - ; CHECK-NOT: %x2 = LI 42 - ; CHECK-NOT: %x3 = LI 42 - ; CHECK-NOT: %x4 = LI 42 - ; CHECK-NOT: %x5 = LI 42 - ; CHECK-NOT: %x27 = LI 42 - ; CHECK-NOT: %x28 = LI 42 - ; CHECK-NOT: %x29 = LI 42 - ; CHECK-NOT: %x30 = LI 42 - ; CHECK: [[REG3:%r[0-9]+]] = LI 42 - ; CHECK-NEXT: %x5 = IMPLICIT_DEF + ; CHECK-NOT: $x0 = LI 42 + ; CHECK-NOT: $x1 = LI 42 + ; CHECK-NOT: $x2 = LI 42 + ; CHECK-NOT: $x3 = LI 42 + ; CHECK-NOT: $x4 = LI 42 + ; CHECK-NOT: $x5 = LI 42 + ; CHECK-NOT: $x27 = LI 42 + ; CHECK-NOT: $x28 = LI 42 + ; CHECK-NOT: $x29 = LI 42 + ; CHECK-NOT: $x30 = LI 42 + ; CHECK: [[REG3:\$r[0-9]+]] = LI 42 + ; CHECK-NEXT: $x5 = IMPLICIT_DEF ; CHECK-NEXT: NOP implicit killed [[REG2]] ; CHECK-NEXT: NOP implicit killed [[REG3]] %3 : gprc = LI 42 - %x5 = IMPLICIT_DEF + $x5 = IMPLICIT_DEF NOP implicit %2 NOP implicit %3 - NOP implicit %x0 - NOP implicit %x1 - NOP implicit %x2 - NOP implicit %x3 - NOP implicit %x4 - NOP implicit %x5 - NOP implicit %x27 - NOP implicit %x28 - NOP implicit %x29 - NOP implicit %x30 + 
NOP implicit $x0 + NOP implicit $x1 + NOP implicit $x2 + NOP implicit $x3 + NOP implicit $x4 + NOP implicit $x5 + NOP implicit $x27 + NOP implicit $x28 + NOP implicit $x29 + NOP implicit $x30 ... --- # CHECK-LABEL: name: scav0 @@ -76,76 +76,76 @@ - { id: 0, type: variable-sized, offset: -32, alignment: 1 } body: | bb.0: - %x0 = IMPLICIT_DEF - %x1 = IMPLICIT_DEF - %x2 = IMPLICIT_DEF - %x3 = IMPLICIT_DEF - %x4 = IMPLICIT_DEF - %x5 = IMPLICIT_DEF - %x6 = IMPLICIT_DEF - %x7 = IMPLICIT_DEF - %x8 = IMPLICIT_DEF - %x9 = IMPLICIT_DEF - %x10 = IMPLICIT_DEF - %x11 = IMPLICIT_DEF - %x12 = IMPLICIT_DEF - %x13 = IMPLICIT_DEF - %x14 = IMPLICIT_DEF - %x15 = IMPLICIT_DEF - %x16 = IMPLICIT_DEF - %x17 = IMPLICIT_DEF - %x18 = IMPLICIT_DEF - %x19 = IMPLICIT_DEF - %x20 = IMPLICIT_DEF - %x21 = IMPLICIT_DEF - %x22 = IMPLICIT_DEF - %x23 = IMPLICIT_DEF - %x24 = IMPLICIT_DEF - %x25 = IMPLICIT_DEF - %x26 = IMPLICIT_DEF - %x27 = IMPLICIT_DEF - %x28 = IMPLICIT_DEF - %x29 = IMPLICIT_DEF - %x30 = IMPLICIT_DEF + $x0 = IMPLICIT_DEF + $x1 = IMPLICIT_DEF + $x2 = IMPLICIT_DEF + $x3 = IMPLICIT_DEF + $x4 = IMPLICIT_DEF + $x5 = IMPLICIT_DEF + $x6 = IMPLICIT_DEF + $x7 = IMPLICIT_DEF + $x8 = IMPLICIT_DEF + $x9 = IMPLICIT_DEF + $x10 = IMPLICIT_DEF + $x11 = IMPLICIT_DEF + $x12 = IMPLICIT_DEF + $x13 = IMPLICIT_DEF + $x14 = IMPLICIT_DEF + $x15 = IMPLICIT_DEF + $x16 = IMPLICIT_DEF + $x17 = IMPLICIT_DEF + $x18 = IMPLICIT_DEF + $x19 = IMPLICIT_DEF + $x20 = IMPLICIT_DEF + $x21 = IMPLICIT_DEF + $x22 = IMPLICIT_DEF + $x23 = IMPLICIT_DEF + $x24 = IMPLICIT_DEF + $x25 = IMPLICIT_DEF + $x26 = IMPLICIT_DEF + $x27 = IMPLICIT_DEF + $x28 = IMPLICIT_DEF + $x29 = IMPLICIT_DEF + $x30 = IMPLICIT_DEF - ; CHECK: STD killed [[SPILLEDREG:%x[0-9]+]] + ; CHECK: STD killed [[SPILLEDREG:\$x[0-9]+]] ; CHECK: [[SPILLEDREG]] = LI8 42 ; CHECK: NOP implicit killed [[SPILLEDREG]] ; CHECK: [[SPILLEDREG]] = LD %0 : g8rc = LI8 42 NOP implicit %0 - NOP implicit %x0 - NOP implicit %x1 - NOP implicit %x2 - NOP implicit %x3 - NOP implicit %x4 - NOP implicit %x5 - NOP implicit %x6 - NOP implicit %x7 - NOP implicit %x8 - NOP implicit %x9 - NOP implicit %x10 - NOP implicit %x11 - NOP implicit %x12 - NOP implicit %x13 - NOP implicit %x14 - NOP implicit %x15 - NOP implicit %x16 - NOP implicit %x17 - NOP implicit %x18 - NOP implicit %x19 - NOP implicit %x20 - NOP implicit %x21 - NOP implicit %x22 - NOP implicit %x23 - NOP implicit %x24 - NOP implicit %x25 - NOP implicit %x26 - NOP implicit %x27 - NOP implicit %x28 - NOP implicit %x29 - NOP implicit %x30 + NOP implicit $x0 + NOP implicit $x1 + NOP implicit $x2 + NOP implicit $x3 + NOP implicit $x4 + NOP implicit $x5 + NOP implicit $x6 + NOP implicit $x7 + NOP implicit $x8 + NOP implicit $x9 + NOP implicit $x10 + NOP implicit $x11 + NOP implicit $x12 + NOP implicit $x13 + NOP implicit $x14 + NOP implicit $x15 + NOP implicit $x16 + NOP implicit $x17 + NOP implicit $x18 + NOP implicit $x19 + NOP implicit $x20 + NOP implicit $x21 + NOP implicit $x22 + NOP implicit $x23 + NOP implicit $x24 + NOP implicit $x25 + NOP implicit $x26 + NOP implicit $x27 + NOP implicit $x28 + NOP implicit $x29 + NOP implicit $x30 ... 
--- # Check for bug where we would refuse to spill before the first instruction in a @@ -153,7 +153,7 @@ # CHECK-LABEL: name: spill_at_begin # CHECK: bb.0: # CHECK: liveins: -# CHECK: STD killed [[REG:%x[0-9]+]]{{.*}}(store 8 into %stack.{{[0-9]+}}) +# CHECK: STD killed [[REG:\$x[0-9]+]]{{.*}}(store 8 into %stack.{{[0-9]+}}) # CHECK: [[REG]] = LIS8 0 # CHECK: [[REG]] = ORI8 killed [[REG]], 48 # CHECK: NOP implicit killed [[REG]] @@ -166,41 +166,41 @@ - { id: 0, type: variable-sized, offset: -32, alignment: 1 } body: | bb.0: - liveins: %x0, %x1, %x2, %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11, %x12, %x13, %x14, %x15, %x16, %x17, %x18, %x19, %x20, %x21, %x22, %x23, %x24, %x25, %x26, %x27, %x28, %x29, %x30, %x31 + liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31 %0 : g8rc = LIS8 0 %1 : g8rc = ORI8 %0, 48 NOP implicit %1 - NOP implicit %x0 - NOP implicit %x1 - NOP implicit %x2 - NOP implicit %x3 - NOP implicit %x4 - NOP implicit %x5 - NOP implicit %x6 - NOP implicit %x7 - NOP implicit %x8 - NOP implicit %x9 - NOP implicit %x10 - NOP implicit %x11 - NOP implicit %x12 - NOP implicit %x13 - NOP implicit %x14 - NOP implicit %x15 - NOP implicit %x16 - NOP implicit %x17 - NOP implicit %x18 - NOP implicit %x19 - NOP implicit %x20 - NOP implicit %x21 - NOP implicit %x22 - NOP implicit %x23 - NOP implicit %x24 - NOP implicit %x25 - NOP implicit %x26 - NOP implicit %x27 - NOP implicit %x28 - NOP implicit %x29 - NOP implicit %x30 - NOP implicit %x31 + NOP implicit $x0 + NOP implicit $x1 + NOP implicit $x2 + NOP implicit $x3 + NOP implicit $x4 + NOP implicit $x5 + NOP implicit $x6 + NOP implicit $x7 + NOP implicit $x8 + NOP implicit $x9 + NOP implicit $x10 + NOP implicit $x11 + NOP implicit $x12 + NOP implicit $x13 + NOP implicit $x14 + NOP implicit $x15 + NOP implicit $x16 + NOP implicit $x17 + NOP implicit $x18 + NOP implicit $x19 + NOP implicit $x20 + NOP implicit $x21 + NOP implicit $x22 + NOP implicit $x23 + NOP implicit $x24 + NOP implicit $x25 + NOP implicit $x26 + NOP implicit $x27 + NOP implicit $x28 + NOP implicit $x29 + NOP implicit $x30 + NOP implicit $x31 ... 
Index: test/CodeGen/PowerPC/tls_get_addr_fence1.mir =================================================================== --- test/CodeGen/PowerPC/tls_get_addr_fence1.mir +++ test/CodeGen/PowerPC/tls_get_addr_fence1.mir @@ -27,7 +27,7 @@ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x2' } + - { reg: '$x2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -50,17 +50,17 @@ constants: body: | bb.0.entry: - liveins: %x2 - %0 = ADDIStlsgdHA %x2, @tls_var - %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7 + liveins: $x2 + %0 = ADDIStlsgdHA $x2, @tls_var + %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead $x0, implicit-def dead $x3, implicit-def dead $x4, implicit-def dead $x5, implicit-def dead $x6, implicit-def dead $x7, implicit-def dead $x8, implicit-def dead $x9, implicit-def dead $x10, implicit-def dead $x11, implicit-def dead $x12, implicit-def dead $lr8, implicit-def dead $ctr8, implicit-def dead $cr0, implicit-def dead $cr1, implicit-def dead $cr5, implicit-def dead $cr6, implicit-def dead $cr7 %2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var) - %x3 = COPY %2 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ; CHECK-LABEL: bb.0.entry - ; CHECK: %[[reg1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStlsgdHA %x2, @tls_var + ; CHECK: %[[reg1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStlsgdHA $x2, @tls_var ; CHECK: ADJCALLSTACKDOWN 0, 0 - ; CHECK: %x3 = ADDItlsgdL %[[reg1]], @tls_var - ; CHECK: %x3 = GETtlsADDR %x3, @tls_var + ; CHECK: $x3 = ADDItlsgdL %[[reg1]], @tls_var + ; CHECK: $x3 = GETtlsADDR $x3, @tls_var ; CHECK: ADJCALLSTACKUP 0, 0 ; CHECK: BLR8 ... 
Index: test/CodeGen/PowerPC/tls_get_addr_fence2.mir =================================================================== --- test/CodeGen/PowerPC/tls_get_addr_fence2.mir +++ test/CodeGen/PowerPC/tls_get_addr_fence2.mir @@ -27,7 +27,7 @@ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' } - { id: 2, class: g8rc, preferred-register: '' } liveins: - - { reg: '%x2' } + - { reg: '$x2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -50,14 +50,14 @@ constants: body: | bb.0.entry: - liveins: %x2 - ADJCALLSTACKDOWN 32, 0, implicit-def %r1, implicit %r1 - %0 = ADDIStlsgdHA %x2, @tls_var - %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7 + liveins: $x2 + ADJCALLSTACKDOWN 32, 0, implicit-def $r1, implicit $r1 + %0 = ADDIStlsgdHA $x2, @tls_var + %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead $x0, implicit-def dead $x3, implicit-def dead $x4, implicit-def dead $x5, implicit-def dead $x6, implicit-def dead $x7, implicit-def dead $x8, implicit-def dead $x9, implicit-def dead $x10, implicit-def dead $x11, implicit-def dead $x12, implicit-def dead $lr8, implicit-def dead $ctr8, implicit-def dead $cr0, implicit-def dead $cr1, implicit-def dead $cr5, implicit-def dead $cr6, implicit-def dead $cr7 %2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var) - %x3 = COPY %2 - ADJCALLSTACKUP 32, 0, implicit-def %r1, implicit %r1 - BLR8 implicit %lr8, implicit %rm, implicit %x3 + $x3 = COPY %2 + ADJCALLSTACKUP 32, 0, implicit-def $r1, implicit $r1 + BLR8 implicit $lr8, implicit $rm, implicit $x3 ; CHECK-LABEL: bb.0.entry ; CHECK-NOT: ADJCALLSTACKDOWN 0, 0 ; CHECK-NOT: ADJCALLSTACKUP 0, 0 Index: test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir =================================================================== --- test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir +++ test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir @@ -149,52 +149,52 @@ %11 = VGBM 0 %43 = LHIMux 0 %44 = LARL %const.0 - %45 = VL64 %44, 0, %noreg :: (load 8 from constant-pool) + %45 = VL64 %44, 0, $noreg :: (load 8 from constant-pool) bb.1: ADJCALLSTACKDOWN 0, 0 %12 = LZDR - %f0d = COPY %12 - CallBRASL &fmod, killed %f0d, undef %f2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def %f0d + $f0d = COPY %12 + CallBRASL &fmod, killed $f0d, undef $f2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $f0d ADJCALLSTACKUP 0, 0 - KILL killed %f0d + KILL killed $f0d bb.2: - %17 = VLGVH %11, %noreg, 0 + %17 = VLGVH %11, $noreg, 0 %19 = LHR %17.subreg_l32 undef %20.subreg_l64 = LGHI 0 %20 = DSGFR %20, %19 - %22 = VLGVH %11, %noreg, 3 + %22 = VLGVH %11, $noreg, 3 %24 = LHR %22.subreg_l32 undef %25.subreg_l64 = LGHI 0 %25 = DSGFR %25, %24 - %31 = VLGVH %11, %noreg, 1 + %31 = VLGVH %11, $noreg, 1 %33 = LHR %31.subreg_l32 undef %34.subreg_l64 = LGHI 0 %34 = DSGFR %34, %33 - %37 = VLGVH %11, %noreg, 2 + %37 = VLGVH %11, $noreg, 2 %39 = LHR %37.subreg_l32 undef %40.subreg_l64 = LGHI 0 %40 = DSGFR %40, %39 - CHIMux %43, 0, implicit-def %cc - BRC 14, 6, %bb.2, implicit killed %cc + CHIMux %43, 0, implicit-def $cc + BRC 14, 6, %bb.2, 
implicit killed $cc J %bb.3 bb.3: - WFCDB undef %46, %45, implicit-def %cc - %48 = IPM implicit killed %cc - %48 = AFIMux %48, 268435456, implicit-def dead %cc + WFCDB undef %46, %45, implicit-def $cc + %48 = IPM implicit killed $cc + %48 = AFIMux %48, 268435456, implicit-def dead $cc %6 = RISBMux undef %6, %48, 31, 159, 35 - WFCDB undef %50, %45, implicit-def %cc - BRC 15, 6, %bb.1, implicit killed %cc + WFCDB undef %50, %45, implicit-def $cc + BRC 15, 6, %bb.1, implicit killed $cc J %bb.4 bb.4: %36 = VLVGP %25.subreg_l64, %25.subreg_l64 - %36 = VLVGH %36, %20.subreg_l32, %noreg, 0 - %36 = VLVGH %36, %34.subreg_l32, %noreg, 1 - dead %36 = VLVGH %36, %40.subreg_l32, %noreg, 2 - %4 = LG undef %42, 0, %noreg :: (load 8 from `i64* undef`) + %36 = VLVGH %36, %20.subreg_l32, $noreg, 0 + %36 = VLVGH %36, %34.subreg_l32, $noreg, 1 + dead %36 = VLVGH %36, %40.subreg_l32, $noreg, 2 + %4 = LG undef %42, 0, $noreg :: (load 8 from `i64* undef`) undef %57.subreg_h64 = LLILL 0 undef %66.subreg_h64 = LLILL 0 undef %79.subreg_h64 = LLILL 0 @@ -204,61 +204,61 @@ bb.5: bb.6: - %51 = VLGVH undef %7, %noreg, 0 + %51 = VLGVH undef %7, $noreg, 0 %53 = LLHRMux %51.subreg_l32 - %54 = VLGVH undef %1, %noreg, 0 + %54 = VLGVH undef %1, $noreg, 0 %57.subreg_l32 = LLHRMux %54.subreg_l32 %58 = COPY %57 %58 = DLR %58, %53 - %60 = VLGVH undef %7, %noreg, 3 + %60 = VLGVH undef %7, $noreg, 3 %62 = LLHRMux %60.subreg_l32 - %63 = VLGVH undef %1, %noreg, 3 + %63 = VLGVH undef %1, $noreg, 3 %66.subreg_l32 = LLHRMux %63.subreg_l32 %67 = COPY %66 %67 = DLR %67, %62 - %73 = VLGVH undef %7, %noreg, 1 + %73 = VLGVH undef %7, $noreg, 1 %75 = LLHRMux %73.subreg_l32 - %76 = VLGVH undef %1, %noreg, 1 + %76 = VLGVH undef %1, $noreg, 1 %79.subreg_l32 = LLHRMux %76.subreg_l32 %80 = COPY %79 %80 = DLR %80, %75 - %83 = VLGVH undef %7, %noreg, 2 + %83 = VLGVH undef %7, $noreg, 2 %85 = LLHRMux %83.subreg_l32 - %86 = VLGVH undef %1, %noreg, 2 + %86 = VLGVH undef %1, $noreg, 2 %89.subreg_l32 = LLHRMux %86.subreg_l32 %90 = COPY %89 %90 = DLR %90, %85 - CHIMux %92, 0, implicit-def %cc - BRC 14, 6, %bb.7, implicit killed %cc + CHIMux %92, 0, implicit-def $cc + BRC 14, 6, %bb.7, implicit killed $cc J %bb.6 bb.7: - CGHI undef %93, 0, implicit-def %cc - %96 = IPM implicit killed %cc - CGHI undef %97, 0, implicit-def %cc - BRC 14, 6, %bb.6, implicit killed %cc + CGHI undef %93, 0, implicit-def $cc + %96 = IPM implicit killed $cc + CGHI undef %97, 0, implicit-def $cc + BRC 14, 6, %bb.6, implicit killed $cc bb.8: - CHIMux %6, 0, implicit-def %cc + CHIMux %6, 0, implicit-def $cc %10 = LLILL 41639 - dead %10 = LOCGR %10, %4, 14, 6, implicit killed %cc - CHIMux %92, 0, implicit-def %cc - BRC 14, 6, %bb.5, implicit killed %cc + dead %10 = LOCGR %10, %4, 14, 6, implicit killed $cc + CHIMux %92, 0, implicit-def $cc + BRC 14, 6, %bb.5, implicit killed $cc J %bb.9 bb.9: %82 = VLVGP %67.subreg_h64, %67.subreg_h64 - %82 = VLVGH %82, %58.subreg_hl32, %noreg, 0 - %82 = VLVGH %82, %80.subreg_hl32, %noreg, 1 - dead %82 = VLVGH %82, %90.subreg_hl32, %noreg, 2 - %96 = AFIMux %96, 1879048192, implicit-def dead %cc - %96 = SRL %96, %noreg, 31 - dead %11 = VLVGF %11, %96, %noreg, 1 + %82 = VLVGH %82, %58.subreg_hl32, $noreg, 0 + %82 = VLVGH %82, %80.subreg_hl32, $noreg, 1 + dead %82 = VLVGH %82, %90.subreg_hl32, $noreg, 2 + %96 = AFIMux %96, 1879048192, implicit-def dead $cc + %96 = SRL %96, $noreg, 31 + dead %11 = VLVGF %11, %96, $noreg, 1 %100 = LHIMux 0 bb.10: - CHIMux %100, 0, implicit-def %cc - BRC 14, 6, %bb.10, implicit killed %cc + CHIMux %100, 0, implicit-def $cc 
+ BRC 14, 6, %bb.10, implicit killed $cc J %bb.11 bb.11: Index: test/CodeGen/SystemZ/clear-liverange-spillreg.mir =================================================================== --- test/CodeGen/SystemZ/clear-liverange-spillreg.mir +++ test/CodeGen/SystemZ/clear-liverange-spillreg.mir @@ -162,15 +162,15 @@ bb.0: successors: %bb.2(0x00000001), %bb.1(0x7fffffff) - CHIMux undef %20, 3, implicit-def %cc - BRC 14, 8, %bb.2, implicit killed %cc + CHIMux undef %20, 3, implicit-def $cc + BRC 14, 8, %bb.2, implicit killed $cc J %bb.1 bb.1: successors: %bb.2(0x00000001), %bb.3(0x7fffffff) - CHIMux undef %21, 0, implicit-def %cc - BRC 14, 6, %bb.3, implicit killed %cc + CHIMux undef %21, 0, implicit-def $cc + BRC 14, 6, %bb.3, implicit killed $cc J %bb.2 bb.2: @@ -178,15 +178,15 @@ bb.3: successors: %bb.6(0x00000001), %bb.4(0x7fffffff) - CHIMux undef %23, 2, implicit-def %cc - BRC 14, 8, %bb.6, implicit killed %cc + CHIMux undef %23, 2, implicit-def $cc + BRC 14, 8, %bb.6, implicit killed $cc J %bb.4 bb.4: successors: %bb.5(0x00000001), %bb.7(0x7fffffff) - CHIMux undef %24, 1, implicit-def %cc - BRC 14, 6, %bb.7, implicit killed %cc + CHIMux undef %24, 1, implicit-def $cc + BRC 14, 6, %bb.7, implicit killed $cc J %bb.5 bb.5: @@ -196,48 +196,48 @@ bb.7: successors: %bb.47(0x00000001), %bb.8(0x7fffffff) - CHIMux undef %25, 1, implicit-def %cc - BRC 14, 8, %bb.47, implicit killed %cc + CHIMux undef %25, 1, implicit-def $cc + BRC 14, 8, %bb.47, implicit killed $cc J %bb.8 bb.8: successors: %bb.46(0x00000001), %bb.48(0x7fffffff) - CHIMux undef %26, 2, implicit-def %cc - BRC 14, 8, %bb.46, implicit killed %cc + CHIMux undef %26, 2, implicit-def $cc + BRC 14, 8, %bb.46, implicit killed $cc J %bb.48 bb.9: successors: %bb.36(0x00000001), %bb.10(0x7fffffff) - CHIMux undef %31, 1, implicit-def %cc - BRC 14, 8, %bb.36, implicit killed %cc + CHIMux undef %31, 1, implicit-def $cc + BRC 14, 8, %bb.36, implicit killed $cc J %bb.10 bb.10: successors: %bb.35(0x00000001), %bb.37(0x7fffffff) - CHIMux undef %32, 2, implicit-def %cc - BRC 14, 8, %bb.35, implicit killed %cc + CHIMux undef %32, 2, implicit-def $cc + BRC 14, 8, %bb.35, implicit killed $cc J %bb.37 bb.11: %4 = COPY %60 - %6 = SLLG %120, %noreg, 1 + %6 = SLLG %120, $noreg, 1 %7 = LA %6, 64, %41 - %6 = AGR %6, %42, implicit-def dead %cc - %45 = SRLK %120.subreg_l32, %noreg, 31 - %45 = AR %45, %120.subreg_l32, implicit-def dead %cc - %45 = NIFMux %45, 536870910, implicit-def dead %cc - %47 = SRK %120.subreg_l32, %45, implicit-def dead %cc - %47 = SLL %47, %noreg, 3 + %6 = AGR %6, %42, implicit-def dead $cc + %45 = SRLK %120.subreg_l32, $noreg, 31 + %45 = AR %45, %120.subreg_l32, implicit-def dead $cc + %45 = NIFMux %45, 536870910, implicit-def dead $cc + %47 = SRK %120.subreg_l32, %45, implicit-def dead $cc + %47 = SLL %47, $noreg, 3 %81 = LGFR %47 bb.12: successors: %bb.56, %bb.13 - CHIMux %38, 0, implicit-def %cc - BRC 14, 8, %bb.13, implicit killed %cc + CHIMux %38, 0, implicit-def $cc + BRC 14, 8, %bb.13, implicit killed $cc bb.56: J %bb.16 @@ -247,24 +247,24 @@ ADJCALLSTACKDOWN 0, 0 %49 = LGFR %120.subreg_l32 - %r2d = COPY %49 - CallBRASL @Get_Direct_Cost8x8, killed %r2d, undef %r3d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def %r2d + $r2d = COPY %49 + CallBRASL @Get_Direct_Cost8x8, killed $r2d, undef $r3d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d ADJCALLSTACKUP 0, 0 - %51 = COPY killed %r2d + %51 = COPY killed $r2d MVHHI %7, 0, 0 :: (store 2) - %12 = ARK %51.subreg_l32, %125, 
implicit-def dead %cc - CFIMux %51.subreg_l32, 2147483647, implicit-def %cc - %12 = LOCRMux %12, %126, 14, 8, implicit killed %cc - CFIMux %125, 2147483647, implicit-def %cc - %12 = LOCRMux %12, %126, 14, 8, implicit killed %cc - CHIMux undef %56, 0, implicit-def %cc - BRC 14, 6, %bb.15, implicit killed %cc + %12 = ARK %51.subreg_l32, %125, implicit-def dead $cc + CFIMux %51.subreg_l32, 2147483647, implicit-def $cc + %12 = LOCRMux %12, %126, 14, 8, implicit killed $cc + CFIMux %125, 2147483647, implicit-def $cc + %12 = LOCRMux %12, %126, 14, 8, implicit killed $cc + CHIMux undef %56, 0, implicit-def $cc + BRC 14, 6, %bb.15, implicit killed $cc J %bb.14 bb.14: - %124 = AHIMux %124, 1, implicit-def dead %cc + %124 = AHIMux %124, 1, implicit-def dead $cc ADJCALLSTACKDOWN 0, 0 - CallBRASL @store_coding_state, undef %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc + CallBRASL @store_coding_state, undef $r2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc ADJCALLSTACKUP 0, 0 %125 = COPY %12 J %bb.16 @@ -274,8 +274,8 @@ bb.16: successors: %bb.12(0x7c000000), %bb.17(0x04000000) - CLGFI undef %59, 4, implicit-def %cc - BRC 14, 4, %bb.12, implicit killed %cc + CLGFI undef %59, 4, implicit-def $cc + BRC 14, 4, %bb.12, implicit killed $cc J %bb.17 bb.17: @@ -283,44 +283,44 @@ MVHI %0, 332, 2 :: (store 4) %60 = COPY %126 - %60 = AR %60, %4, implicit-def dead %cc - %18 = LHMux %6, 0, %noreg :: (load 2) - CHIMux %38, 0, implicit-def %cc - BRC 14, 6, %bb.19, implicit killed %cc + %60 = AR %60, %4, implicit-def dead $cc + %18 = LHMux %6, 0, $noreg :: (load 2) + CHIMux %38, 0, implicit-def $cc + BRC 14, 6, %bb.19, implicit killed $cc J %bb.18 bb.18: - %62 = SLLG %81, %noreg, 1 + %62 = SLLG %81, $noreg, 1 %64 = LA %62, 0, %63 - %65 = LG undef %66, 0, %noreg :: (load 8) - %67 = LGF undef %68, 0, %noreg :: (load 4) + %65 = LG undef %66, 0, $noreg :: (load 8) + %67 = LGF undef %68, 0, $noreg :: (load 4) MVC undef %69, 0, 2, %64, 0 :: (store 2), (load 2) %70 = COPY %81 - %70 = OILL64 %70, 3, implicit-def dead %cc - %71 = LA %70, 2, %noreg - %72 = SLLG %71, %noreg, 1 + %70 = OILL64 %70, 3, implicit-def dead $cc + %71 = LA %70, 2, $noreg + %72 = SLLG %71, $noreg, 1 %73 = LHMux %72, 0, %63 :: (load 2) %74 = LA %70, 2, %67 - %75 = SLLG %74, %noreg, 1 - %76 = LG %65, 0, %noreg :: (load 8) + %75 = SLLG %74, $noreg, 1 + %76 = LG %65, 0, $noreg :: (load 8) STHMux %73, %76, 0, %75 :: (store 2) - %77 = LG undef %78, 0, %noreg :: (load 8) + %77 = LG undef %78, 0, $noreg :: (load 8) %79 = LHRL @rec_mbY8x8 :: (load 2) - STHMux %79, %77, 0, %noreg :: (store 2) + STHMux %79, %77, 0, $noreg :: (store 2) %80 = LHMux %72, 0, %63 :: (load 2) STHMux %80, %77, 0, %75 :: (store 2) - %81 = OILL64 %81, 7, implicit-def dead %cc - %82 = SLLG %81, %noreg, 1 + %81 = OILL64 %81, 7, implicit-def dead $cc + %82 = SLLG %81, $noreg, 1 %83 = LHMux %82, 0, %63 :: (load 2) - STHMux %83, %77, 0, %noreg :: (store 2) + STHMux %83, %77, 0, $noreg :: (store 2) %84 = LA %62, 64, %63 MVC undef %85, 0, 2, %84, 0 :: (store 2), (load 2) - %86 = SLLG %70, %noreg, 1 + %86 = SLLG %70, $noreg, 1 %87 = LHMux %86, 64, %63 :: (load 2) - %88 = SLLG %67, %noreg, 3 + %88 = SLLG %67, $noreg, 3 %89 = LG %65, 16, %88 :: (load 8) %90 = LA %70, 0, %67 - %91 = SLLG %90, %noreg, 1 + %91 = SLLG %90, $noreg, 1 STHMux %87, %89, 0, %91 :: (store 2) %92 = LA %72, 64, %63 MVC undef %93, 0, 2, %92, 0 :: (store 2), (load 2) @@ -332,39 +332,39 @@ bb.19: successors: %bb.20(0x04000000), %bb.11(0x7c000000) - %98 = LGH %7, 0, %noreg :: (load 2) - %99 = LGH 
undef %100, 0, %noreg :: (load 2) + %98 = LGH %7, 0, $noreg :: (load 2) + %99 = LGH undef %100, 0, $noreg :: (load 2) ADJCALLSTACKDOWN 0, 0 %101 = LGFR %120.subreg_l32 %102 = LGFR %18 - %r2d = COPY %101 - %r3d = COPY %102 - %r4d = LGHI 0 - %r5d = COPY %98 - %r6d = COPY %99 - CallBRASL @SetRefAndMotionVectors, killed %r2d, killed %r3d, killed %r4d, killed %r5d, killed %r6d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc + $r2d = COPY %101 + $r3d = COPY %102 + $r4d = LGHI 0 + $r5d = COPY %98 + $r6d = COPY %99 + CallBRASL @SetRefAndMotionVectors, killed $r2d, killed $r3d, killed $r4d, killed $r5d, killed $r6d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc ADJCALLSTACKUP 0, 0 ADJCALLSTACKDOWN 0, 0 - CallBRASL @reset_coding_state, undef %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc + CallBRASL @reset_coding_state, undef $r2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc ADJCALLSTACKUP 0, 0 - %120 = LA %120, 1, %noreg - CGHI %120, 4, implicit-def %cc - BRC 14, 6, %bb.11, implicit killed %cc + %120 = LA %120, 1, $noreg + CGHI %120, 4, implicit-def $cc + BRC 14, 6, %bb.11, implicit killed $cc J %bb.20 bb.20: successors: %bb.22(0x00000001), %bb.21(0x7fffffff) MVHI undef %105, 0, 0 :: (store 4) - CHIMux undef %106, 3, implicit-def %cc - BRC 14, 8, %bb.22, implicit killed %cc + CHIMux undef %106, 3, implicit-def $cc + BRC 14, 8, %bb.22, implicit killed $cc J %bb.21 bb.21: successors: %bb.22(0x00000001), %bb.23(0x7fffffff) - CHIMux undef %107, 0, implicit-def %cc - BRC 14, 6, %bb.23, implicit killed %cc + CHIMux undef %107, 0, implicit-def $cc + BRC 14, 6, %bb.23, implicit killed $cc J %bb.22 bb.22: @@ -373,21 +373,21 @@ successors: %bb.26(0x00000001), %bb.24(0x7fffffff) ADJCALLSTACKDOWN 0, 0 - CallBRASL @Get_Direct_CostMB, undef %f0d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def dead %r2d + CallBRASL @Get_Direct_CostMB, undef $f0d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d ADJCALLSTACKUP 0, 0 ADJCALLSTACKDOWN 0, 0 - %r2d = LGHI 0 - CallBRASL @SetModesAndRefframeForBlocks, killed %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc + $r2d = LGHI 0 + CallBRASL @SetModesAndRefframeForBlocks, killed $r2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc ADJCALLSTACKUP 0, 0 - CHIMux undef %111, 13, implicit-def %cc - BRC 14, 8, %bb.26, implicit killed %cc + CHIMux undef %111, 13, implicit-def $cc + BRC 14, 8, %bb.26, implicit killed $cc J %bb.24 bb.24: successors: %bb.25(0x00000001), %bb.27(0x7fffffff) - CHIMux undef %112, 8, implicit-def %cc - BRC 14, 6, %bb.27, implicit killed %cc + CHIMux undef %112, 8, implicit-def $cc + BRC 14, 6, %bb.27, implicit killed $cc J %bb.25 bb.25: @@ -397,8 +397,8 @@ bb.27: successors: %bb.28, %bb.29 - CHIMux undef %114, 0, implicit-def %cc - BRC 14, 6, %bb.29, implicit killed %cc + CHIMux undef %114, 0, implicit-def $cc + BRC 14, 6, %bb.29, implicit killed $cc bb.28: %130 = CDFBR %60 @@ -410,16 +410,16 @@ bb.30: successors: %bb.33(0x00000001), %bb.31(0x7fffffff) - VST64 %130, undef %117, 0, %noreg :: (store 8) - CHIMux undef %118, 2, implicit-def %cc - BRC 14, 8, %bb.33, implicit killed %cc + VST64 %130, undef %117, 0, $noreg :: (store 8) + CHIMux undef %118, 2, implicit-def $cc + BRC 14, 8, %bb.33, implicit killed $cc J %bb.31 bb.31: successors: %bb.32(0x00000001), %bb.34(0x7fffffff) - CHIMux undef %119, 1, implicit-def %cc - BRC 14, 6, %bb.34, implicit killed %cc + CHIMux undef %119, 1, implicit-def $cc + BRC 14, 6, 
%bb.34, implicit killed $cc J %bb.32 bb.32: @@ -436,15 +436,15 @@ bb.37: successors: %bb.40(0x00000001), %bb.38(0x7fffffff) - CHIMux undef %33, 1, implicit-def %cc - BRC 14, 8, %bb.40, implicit killed %cc + CHIMux undef %33, 1, implicit-def $cc + BRC 14, 8, %bb.40, implicit killed $cc J %bb.38 bb.38: successors: %bb.39(0x00000001), %bb.41(0x7fffffff) - CHIMux undef %34, 2, implicit-def %cc - BRC 14, 6, %bb.41, implicit killed %cc + CHIMux undef %34, 2, implicit-def $cc + BRC 14, 6, %bb.41, implicit killed $cc J %bb.39 bb.39: @@ -454,15 +454,15 @@ bb.41: successors: %bb.44(0x00000001), %bb.42(0x7fffffff) - CHIMux undef %35, 1, implicit-def %cc - BRC 14, 8, %bb.44, implicit killed %cc + CHIMux undef %35, 1, implicit-def $cc + BRC 14, 8, %bb.44, implicit killed $cc J %bb.42 bb.42: successors: %bb.43(0x00000001), %bb.45(0x7fffffff) - CHIMux undef %36, 2, implicit-def %cc - BRC 14, 6, %bb.45, implicit killed %cc + CHIMux undef %36, 2, implicit-def $cc + BRC 14, 6, %bb.45, implicit killed $cc J %bb.43 bb.43: @@ -470,7 +470,7 @@ bb.44: bb.45: - %0 = LG undef %22, 0, %noreg :: (load 8) + %0 = LG undef %22, 0, $noreg :: (load 8) %38 = LHIMux 0 STRL %38, @bi_pred_me :: (store 4) %120 = LGHI 0 @@ -490,15 +490,15 @@ bb.48: successors: %bb.51(0x00000001), %bb.49(0x7fffffff) - CHIMux undef %27, 1, implicit-def %cc - BRC 14, 8, %bb.51, implicit killed %cc + CHIMux undef %27, 1, implicit-def $cc + BRC 14, 8, %bb.51, implicit killed $cc J %bb.49 bb.49: successors: %bb.50(0x00000001), %bb.52(0x7fffffff) - CHIMux undef %28, 2, implicit-def %cc - BRC 14, 6, %bb.52, implicit killed %cc + CHIMux undef %28, 2, implicit-def $cc + BRC 14, 6, %bb.52, implicit killed $cc J %bb.50 bb.50: @@ -508,15 +508,15 @@ bb.52: successors: %bb.55(0x00000001), %bb.53(0x7fffffff) - CHIMux undef %29, 1, implicit-def %cc - BRC 14, 8, %bb.55, implicit killed %cc + CHIMux undef %29, 1, implicit-def $cc + BRC 14, 8, %bb.55, implicit killed $cc J %bb.53 bb.53: successors: %bb.54(0x00000001), %bb.9(0x7fffffff) - CHIMux undef %30, 2, implicit-def %cc - BRC 14, 6, %bb.9, implicit killed %cc + CHIMux undef %30, 2, implicit-def $cc + BRC 14, 6, %bb.9, implicit killed $cc J %bb.54 bb.54: Index: test/CodeGen/SystemZ/cond-move-04.mir =================================================================== --- test/CodeGen/SystemZ/cond-move-04.mir +++ test/CodeGen/SystemZ/cond-move-04.mir @@ -61,14 +61,14 @@ %5 = LHIMux 10 bb.1 (%ir-block.2): - CHIMux %3, 0, implicit-def %cc - %0 = LOCRMux undef %0, %5, 14, 6, implicit %cc - %0 = LOCRMux %0, %2, 14, 6, implicit killed %cc + CHIMux %3, 0, implicit-def $cc + %0 = LOCRMux undef %0, %5, 14, 6, implicit $cc + %0 = LOCRMux %0, %2, 14, 6, implicit killed $cc ADJCALLSTACKDOWN 0, 0 %7 = LGFR %0 - %r3d = LGHI 0 - %r4d = COPY %7 - CallBRASL @foo, undef %r2d, killed %r3d, killed %r4d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def dead %r2d + $r3d = LGHI 0 + $r4d = COPY %7 + CallBRASL @foo, undef $r2d, killed $r3d, killed $r4d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d ADJCALLSTACKUP 0, 0 J %bb.1 Index: test/CodeGen/SystemZ/cond-move-05.mir =================================================================== --- test/CodeGen/SystemZ/cond-move-05.mir +++ test/CodeGen/SystemZ/cond-move-05.mir @@ -67,10 +67,10 @@ undef %3.subreg_l64:gr128bit = LGHI 1 %3.subreg_h64:gr128bit = LLILL 0 %3:gr128bit = DLGR %3, %0 - CLFIMux %3.subreg_hl32, 3631842929, implicit-def %cc - %6:grx32bit = LOCRMux undef %6, %3.subreg_hl32, 14, 4, implicit killed %cc - CHIMux 
%6, 0, implicit-def %cc - BRC 14, 8, %bb.2.for.inc591.1.i.i, implicit killed %cc + CLFIMux %3.subreg_hl32, 3631842929, implicit-def $cc + %6:grx32bit = LOCRMux undef %6, %3.subreg_hl32, 14, 4, implicit killed $cc + CHIMux %6, 0, implicit-def $cc + BRC 14, 8, %bb.2.for.inc591.1.i.i, implicit killed $cc J %bb.1.cleanup584.i.i bb.1.cleanup584.i.i: Index: test/CodeGen/SystemZ/fp-cmp-07.mir =================================================================== --- test/CodeGen/SystemZ/fp-cmp-07.mir +++ test/CodeGen/SystemZ/fp-cmp-07.mir @@ -24,21 +24,21 @@ name: f15 tracksRegLiveness: true liveins: - - { reg: '%f0s', virtual-reg: '' } - - { reg: '%r2d', virtual-reg: '' } + - { reg: '$f0s', virtual-reg: '' } + - { reg: '$r2d', virtual-reg: '' } body: | bb.0.entry: - liveins: %f0s, %r2d + liveins: $f0s, $r2d - LTEBRCompare %f0s, %f0s, implicit-def %cc - %f2s = LER %f0s - INLINEASM &"blah $0", 1, 9, %f2s - CondReturn 15, 4, implicit %f0s, implicit %cc + LTEBRCompare $f0s, $f0s, implicit-def $cc + $f2s = LER $f0s + INLINEASM &"blah $0", 1, 9, $f2s + CondReturn 15, 4, implicit $f0s, implicit $cc bb.1.store: - liveins: %f0s, %r2d + liveins: $f0s, $r2d - STE %f0s, killed %r2d, 0, %noreg :: (store 4 into %ir.dest) - Return implicit %f0s + STE $f0s, killed $r2d, 0, $noreg :: (store 4 into %ir.dest) + Return implicit $f0s ... Index: test/CodeGen/SystemZ/fp-conv-17.mir =================================================================== --- test/CodeGen/SystemZ/fp-conv-17.mir +++ test/CodeGen/SystemZ/fp-conv-17.mir @@ -121,82 +121,82 @@ - { id: 34, class: fp64bit } - { id: 35, class: fp64bit } liveins: - - { reg: '%r2d', virtual-reg: '%0' } - - { reg: '%r3d', virtual-reg: '%1' } + - { reg: '$r2d', virtual-reg: '%0' } + - { reg: '$r3d', virtual-reg: '%1' } body: | bb.0 (%ir-block.0): - liveins: %r2d, %r3d + liveins: $r2d, $r3d - %1 = COPY %r3d - %0 = COPY %r2d - %2 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %3 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %4 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %5 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %6 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %7 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %8 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %9 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %10 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %11 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %12 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %13 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %14 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %15 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %16 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %17 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - %18 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2) - STE %2, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %3, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %4, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %5, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %6, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %7, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %8, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %9, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %10, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %11, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %12, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE 
%13, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %14, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %15, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %16, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %17, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) - STE %18, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2) + %1 = COPY $r3d + %0 = COPY $r2d + %2 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %3 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %4 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %5 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %6 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %7 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %8 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %9 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %10 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %11 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %12 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %13 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %14 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %15 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %16 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %17 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + %18 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2) + STE %2, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %3, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %4, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %5, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %6, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %7, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %8, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %9, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %10, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %11, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %12, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %13, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %14, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %15, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %16, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %17, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) + STE %18, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2) %19 = LDEBR %2 - STD %19, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %19, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %20 = LDEBR %3 - STD %20, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %20, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %21 = LDEBR %4 - STD %21, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %21, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %22 = LDEBR %5 - STD %22, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %22, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %23 = LDEBR %6 - STD %23, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %23, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %24 = LDEBR %7 - STD %24, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %24, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %25 = LDEBR %8 - STD %25, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %25, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %26 = LDEBR %9 - STD %26, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %26, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %27 = LDEBR %10 - STD 
%27, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %27, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %28 = LDEBR %11 - STD %28, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %28, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %29 = LDEBR %12 - STD %29, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %29, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %30 = LDEBR %13 - STD %30, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %30, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %31 = LDEBR %14 - STD %31, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %31, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %32 = LDEBR %15 - STD %32, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %32, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %33 = LDEBR %16 - STD %33, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %33, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %34 = LDEBR %17 - STD %34, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %34, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) %35 = LDEBR %18 - STD %35, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1) + STD %35, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1) Return ... Index: test/CodeGen/SystemZ/load-and-test.mir =================================================================== --- test/CodeGen/SystemZ/load-and-test.mir +++ test/CodeGen/SystemZ/load-and-test.mir @@ -13,18 +13,18 @@ tracksRegLiveness: true body: | bb.0 (): - liveins: %r1d - renamable %r0l = L %r1d, 0, %noreg - CLFIMux killed renamable %r0l, 0, implicit-def %cc - BRC 14, 10, %bb.2, implicit %cc + liveins: $r1d + renamable $r0l = L $r1d, 0, $noreg + CLFIMux killed renamable $r0l, 0, implicit-def $cc + BRC 14, 10, %bb.2, implicit $cc bb.1 (): - liveins: %r0l - ST killed renamable %r0l, %r15d, 164, %noreg + liveins: $r0l + ST killed renamable $r0l, $r15d, 164, $noreg bb.2 (): - liveins: %r0l - ST killed renamable %r0l, %r15d, 164, %noreg + liveins: $r0l + ST killed renamable $r0l, $r15d, 164, $noreg Return ... @@ -36,17 +36,17 @@ tracksRegLiveness: true body: | bb.0 (): - liveins: %r1d - renamable %r0l = L %r1d, 0, %noreg - CLFIMux killed renamable %r0l, 0, implicit-def %cc - BRC 14, 8, %bb.2, implicit %cc + liveins: $r1d + renamable $r0l = L $r1d, 0, $noreg + CLFIMux killed renamable $r0l, 0, implicit-def $cc + BRC 14, 8, %bb.2, implicit $cc bb.1 (): - liveins: %r0l - ST killed renamable %r0l, %r15d, 164, %noreg + liveins: $r0l + ST killed renamable $r0l, $r15d, 164, $noreg bb.2 (): - liveins: %r0l - ST killed renamable %r0l, %r15d, 164, %noreg + liveins: $r0l + ST killed renamable $r0l, $r15d, 164, $noreg Return ... Index: test/CodeGen/SystemZ/lower-copy-undef-src.mir =================================================================== --- test/CodeGen/SystemZ/lower-copy-undef-src.mir +++ test/CodeGen/SystemZ/lower-copy-undef-src.mir @@ -5,10 +5,10 @@ # dropped. 
--- # CHECK-LABEL: name: undef_copy -# CHECK: %r13d = KILL undef %r0d, implicit killed %r12q, implicit-def %r12q +# CHECK: $r13d = KILL undef $r0d, implicit killed $r12q, implicit-def $r12q name: undef_copy tracksRegLiveness: true body: | bb.0: - liveins: %r12q - %r13d = COPY undef %r0d, implicit killed %r12q, implicit-def %r12q + liveins: $r12q + $r13d = COPY undef $r0d, implicit killed $r12q, implicit-def $r12q Index: test/CodeGen/SystemZ/pr32505.ll =================================================================== --- test/CodeGen/SystemZ/pr32505.ll +++ test/CodeGen/SystemZ/pr32505.ll @@ -10,8 +10,8 @@ ; CHECK-NEXT: lbh %r1, 0(%r2) ; CHECK-NEXT: ldgr %f0, %r1 ; CHECK-NEXT: ldgr %f2, %r0 -; CHECK-NEXT: # kill: def %f0s killed %f0s killed %f0d -; CHECK-NEXT: # kill: def %f2s killed %f2s killed %f2d +; CHECK-NEXT: # kill: def $f0s killed $f0s killed $f0d +; CHECK-NEXT: # kill: def $f2s killed $f2s killed $f2d ; CHECK-NEXT: br %r14 %L17 = load <2 x i8>, <2 x i8>* %a %Se21 = sext <2 x i8> %L17 to <2 x i32> Index: test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir =================================================================== --- test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir +++ test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir @@ -17,18 +17,18 @@ - { id: 0, class: gr128bit } - { id: 1, class: gr64bit } - { id: 2, class: addr64bit } -# CHECK: %r0q = L128 -# CHECK-NEXT: %r0l = COPY renamable %r1l +# CHECK: $r0q = L128 +# CHECK-NEXT: $r0l = COPY renamable $r1l # Although R0L partially redefines R0Q, it must not mark R0Q as kill # because R1D is still live through that instruction. -# CHECK-NOT: implicit killed %r0q -# CHECK-NEXT: %r2d = COPY renamable %r1d +# CHECK-NOT: implicit killed $r0q +# CHECK-NEXT: $r2d = COPY renamable $r1d # CHECK-NEXT: LARL body: | bb.0: %0.subreg_hl32 = COPY %0.subreg_l32 %1 = COPY %0.subreg_l64 %2 = LARL @g_167 - STC %1.subreg_l32, %2, 8, %noreg + STC %1.subreg_l32, %2, 8, $noreg ... Index: test/CodeGen/SystemZ/store_nonbytesized_vecs.ll =================================================================== --- test/CodeGen/SystemZ/store_nonbytesized_vecs.ll +++ test/CodeGen/SystemZ/store_nonbytesized_vecs.ll @@ -80,7 +80,7 @@ ; CHECK-NEXT: vlgvf %r1, %v24, 0 ; CHECK-NEXT: stc %r1, 30(%r2) ; CHECK-NEXT: llgtr %r0, %r1 -; CHECK-NEXT: # kill: def %r1l killed %r1l killed %r1d def %r1d +; CHECK-NEXT: # kill: def $r1l killed $r1l killed $r1d def $r1d ; CHECK-NEXT: srl %r1, 8 ; CHECK-NEXT: sth %r1, 28(%r2) ; CHECK-NEXT: vlgvf %r1, %v24, 1 Index: test/CodeGen/Thumb/machine-cse-physreg.mir =================================================================== --- test/CodeGen/Thumb/machine-cse-physreg.mir +++ test/CodeGen/Thumb/machine-cse-physreg.mir @@ -3,9 +3,9 @@ # This is a contrived example made to expose a bug in # MachineCSE, see PR32538. -# MachineCSE must not remove this def of %cpsr: +# MachineCSE must not remove this def of $cpsr: # CHECK-LABEL: bb.1: -# CHECK: , %cpsr = tLSLri +# CHECK: , $cpsr = tLSLri ... 
--- @@ -16,20 +16,20 @@ - { id: 2, class: tgpr } - { id: 3, class: tgpr } liveins: - - { reg: '%r0', virtual-reg: '%0' } + - { reg: '$r0', virtual-reg: '%0' } body: | bb.0: - liveins: %r0 - %0 = COPY %r0 - %1, %cpsr = tLSLri %0, 2, 14, %noreg - tCMPi8 %0, 5, 14, %noreg, implicit-def %cpsr - tBcc %bb.8, 8, %cpsr + liveins: $r0 + %0 = COPY $r0 + %1, $cpsr = tLSLri %0, 2, 14, $noreg + tCMPi8 %0, 5, 14, $noreg, implicit-def $cpsr + tBcc %bb.8, 8, $cpsr bb.1: - %2, %cpsr = tLSLri %0, 2, 14, %noreg + %2, $cpsr = tLSLri %0, 2, 14, $noreg bb.8: - liveins: %cpsr - %3 = COPY %cpsr - tSTRi killed %3, %0, 0, 14, %noreg + liveins: $cpsr + %3 = COPY $cpsr + tSTRi killed %3, %0, 0, 14, $noreg ... Index: test/CodeGen/Thumb/tbb-reuse.mir =================================================================== --- test/CodeGen/Thumb/tbb-reuse.mir +++ test/CodeGen/Thumb/tbb-reuse.mir @@ -63,21 +63,21 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%r0' } -calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13', - '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4', - '%r5', '%r6', '%r7', '%r8', '%r9', '%r10', '%r11', - '%s16', '%s17', '%s18', '%s19', '%s20', '%s21', - '%s22', '%s23', '%s24', '%s25', '%s26', '%s27', - '%s28', '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', - '%d10_d12', '%d11_d13', '%d12_d14', '%d13_d15', - '%q4_q5', '%q5_q6', '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', - '%r6_r7', '%r8_r9', '%r10_r11', '%d8_d9_d10', '%d9_d10_d11', - '%d10_d11_d12', '%d11_d12_d13', '%d12_d13_d14', - '%d13_d14_d15', '%d8_d10_d12', '%d9_d11_d13', '%d10_d12_d14', - '%d11_d13_d15', '%d8_d10_d12_d14', '%d9_d11_d13_d15', - '%d9_d10', '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', - '%d11_d12_d13_d14' ] + - { reg: '$r0' } +calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13', + '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4', + '$r5', '$r6', '$r7', '$r8', '$r9', '$r10', '$r11', + '$s16', '$s17', '$s18', '$s19', '$s20', '$s21', + '$s22', '$s23', '$s24', '$s25', '$s26', '$s27', + '$s28', '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11', + '$d10_d12', '$d11_d13', '$d12_d14', '$d13_d15', + '$q4_q5', '$q5_q6', '$q6_q7', '$q4_q5_q6_q7', '$r4_r5', + '$r6_r7', '$r8_r9', '$r10_r11', '$d8_d9_d10', '$d9_d10_d11', + '$d10_d11_d12', '$d11_d12_d13', '$d12_d13_d14', + '$d13_d14_d15', '$d8_d10_d12', '$d9_d11_d13', '$d10_d12_d14', + '$d11_d13_d15', '$d8_d10_d12_d14', '$d9_d11_d13_d15', + '$d9_d10', '$d11_d12', '$d13_d14', '$d9_d10_d11_d12', + '$d11_d12_d13_d14' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -93,8 +93,8 @@ hasVAStart: false hasMustTailInVarArgFunc: false stack: - - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr', callee-saved-restored: false } - - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' } + - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr', callee-saved-restored: false } + - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '$r7' } jumpTable: kind: inline entries: @@ -106,46 +106,46 @@ body: | bb.0.entry: successors: %bb.2.default(0x19999998), %bb.1.entry(0x66666668) - liveins: %r0, %r7, %lr + liveins: $r0, $r7, $lr - frame-setup tPUSH 14, %noreg, killed %r7, killed %lr, implicit-def %sp, implicit %sp + frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp frame-setup CFI_INSTRUCTION def_cfa_offset 8 - frame-setup CFI_INSTRUCTION offset %lr, -4 - frame-setup 
CFI_INSTRUCTION offset %r7, -8 - %r1, dead %cpsr = tSUBi3 %r0, 1, 14, %noreg - tCMPi8 %r1, 3, 14, %noreg, implicit-def %cpsr - tBcc %bb.2.default, 8, killed %cpsr + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + $r1, dead $cpsr = tSUBi3 $r0, 1, 14, $noreg + tCMPi8 $r1, 3, 14, $noreg, implicit-def $cpsr + tBcc %bb.2.default, 8, killed $cpsr bb.1.entry: successors: %bb.3.lab1(0x20000000), %bb.4.lab2(0x20000000), %bb.5.lab3(0x20000000), %bb.6.lab4(0x20000000) - liveins: %r0, %r1 + liveins: $r0, $r1 - %r1, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg - %r2 = tLEApcrelJT %jump-table.0, 14, %noreg - %r2 = tLDRr killed %r1, killed %r2, 14, %noreg :: (load 4 from jump-table) - %r1, dead %cpsr = tLSLri %r2, 2, 14, %noreg - tBR_JTr killed %r2, %jump-table.0 + $r1, dead $cpsr = tLSLri killed $r1, 2, 14, $noreg + $r2 = tLEApcrelJT %jump-table.0, 14, $noreg + $r2 = tLDRr killed $r1, killed $r2, 14, $noreg :: (load 4 from jump-table) + $r1, dead $cpsr = tLSLri $r2, 2, 14, $noreg + tBR_JTr killed $r2, %jump-table.0 bb.2.default: - tBL 14, %noreg, @exit0, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp + tBL 14, $noreg, @exit0, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp bb.3.lab1: - liveins: %r0,%r1 + liveins: $r0,$r1 - tBL 14, %noreg, @exit1, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp - tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp + tBL 14, $noreg, @exit1, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp + tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp bb.4.lab2: - tBL 14, %noreg, @exit2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp + tBL 14, $noreg, @exit2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp bb.5.lab3: - tBL 14, %noreg, @exit3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp + tBL 14, $noreg, @exit3, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp bb.6.lab4: - tBL 14, %noreg, @exit4, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp - tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp + tBL 14, $noreg, @exit4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp ... 
Index: test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir =================================================================== --- test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir +++ test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir @@ -4,25 +4,25 @@ body: | bb.0: successors: %bb.2, %bb.1 - liveins: %d0, %r0, %r1 + liveins: $d0, $r0, $r1 - t2CMPri killed %r1, 0, 14, %noreg, implicit-def %cpsr - t2Bcc %bb.2, 0, killed %cpsr + t2CMPri killed $r1, 0, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.2, 0, killed $cpsr bb.1: - liveins: %d0, %r0 + liveins: $d0, $r0 - %d16 = VDUP32d killed %r0, 14, %noreg + $d16 = VDUP32d killed $r0, 14, $noreg ; Verify that the neon instructions haven't been conditionalized: ; CHECK-LABEL: NeonVdupMul ; CHECK: vdup.32 ; CHECK: vmul.i32 - %d0 = VMULv2i32 killed %d16, killed %d0, 14, %noreg + $d0 = VMULv2i32 killed $d16, killed $d0, 14, $noreg bb.2: - liveins: %d0 + liveins: $d0 - tBX_RET 14, %noreg, implicit %d0 + tBX_RET 14, $noreg, implicit $d0 ... --- @@ -30,25 +30,25 @@ body: | bb.0.entry: successors: %bb.1, %bb.2 - liveins: %r0, %r1 + liveins: $r0, $r1 - t2CMPri killed %r1, 0, 14, %noreg, implicit-def %cpsr - t2Bcc %bb.2, 1, killed %cpsr + t2CMPri killed $r1, 0, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.2, 1, killed $cpsr bb.1: - %d0 = VMOVv2i32 0, 14, %noreg - tBX_RET 14, %noreg, implicit %d0 + $d0 = VMOVv2i32 0, 14, $noreg + tBX_RET 14, $noreg, implicit $d0 bb.2: - liveins: %r0 + liveins: $r0 - %d0 = VLDRD killed %r0, 0, 14, %noreg + $d0 = VLDRD killed $r0, 0, 14, $noreg ; Verify that the neon instruction VMOVv2i32 hasn't been conditionalized, ; but the VLDR instruction that is available both in the VFP and Advanced ; SIMD extensions has. ; CHECK-LABEL: NeonVmovVfpLdr ; CHECK-DAG: vmov.i32 d0, #0x0 ; CHECK-DAG: vldr{{ne|eq}} d0, [r0] - tBX_RET 14, %noreg, implicit %d0 + tBX_RET 14, $noreg, implicit $d0 ... 
Index: test/CodeGen/Thumb2/t2sizereduction.mir
===================================================================
--- test/CodeGen/Thumb2/t2sizereduction.mir
+++ test/CodeGen/Thumb2/t2sizereduction.mir
@@ -36,48 +36,48 @@
 name: test
 tracksRegLiveness: true
 liveins:
-  - { reg: '%r0', virtual-reg: '' }
-  - { reg: '%r1', virtual-reg: '' }
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
 body: |
   ; CHECK-LABEL: name: test
   ; CHECK: bb.0.entry:
   ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK: liveins: %r0, %r1
-  ; CHECK: %r2 = tMOVr %r0, 14, %noreg
-  ; CHECK: %r0, dead %cpsr = tMOVi8 1, 14, %noreg
-  ; CHECK: tCMPi8 %r1, 1, 14, %noreg, implicit-def %cpsr
-  ; CHECK: t2Bcc %bb.2, 11, killed %cpsr
+  ; CHECK: liveins: $r0, $r1
+  ; CHECK: $r2 = tMOVr $r0, 14, $noreg
+  ; CHECK: $r0, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK: tCMPi8 $r1, 1, 14, $noreg, implicit-def $cpsr
+  ; CHECK: t2Bcc %bb.2, 11, killed $cpsr
   ; CHECK: bb.1.for.body:
   ; CHECK: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-  ; CHECK: liveins: %r0, %r1, %r2
-  ; CHECK: %r0, dead %cpsr = tMUL %r2, killed %r0, 14, %noreg
-  ; CHECK: %r2, dead %cpsr = tADDi8 killed %r2, 1, 14, %noreg
-  ; CHECK: %r1, %cpsr = tSUBi8 killed %r1, 1, 14, %noreg
-  ; CHECK: t2Bcc %bb.1, 1, killed %cpsr
+  ; CHECK: liveins: $r0, $r1, $r2
+  ; CHECK: $r0, dead $cpsr = tMUL $r2, killed $r0, 14, $noreg
+  ; CHECK: $r2, dead $cpsr = tADDi8 killed $r2, 1, 14, $noreg
+  ; CHECK: $r1, $cpsr = tSUBi8 killed $r1, 1, 14, $noreg
+  ; CHECK: t2Bcc %bb.1, 1, killed $cpsr
   ; CHECK: bb.2.for.cond.cleanup:
-  ; CHECK: liveins: %r0
-  ; CHECK: tBX_RET 14, %noreg, implicit %r0
+  ; CHECK: liveins: $r0
+  ; CHECK: tBX_RET 14, $noreg, implicit $r0
   bb.0.entry:
     successors: %bb.1.for.body, %bb.2.for.cond.cleanup
-    liveins: %r0, %r1
+    liveins: $r0, $r1
 
-    %r2 = tMOVr %r0, 14, _
-    %r0 = t2MOVi 1, 14, _, _
-    t2CMPri %r1, 1, 14, _, implicit-def %cpsr
-    t2Bcc %bb.2.for.cond.cleanup, 11, killed %cpsr
+    $r2 = tMOVr $r0, 14, _
+    $r0 = t2MOVi 1, 14, _, _
+    t2CMPri $r1, 1, 14, _, implicit-def $cpsr
+    t2Bcc %bb.2.for.cond.cleanup, 11, killed $cpsr
 
   bb.1.for.body:
     successors: %bb.2.for.cond.cleanup, %bb.1.for.body
-    liveins: %r0, %r1, %r2
+    liveins: $r0, $r1, $r2
 
-    %r0 = t2MUL %r2, killed %r0, 14, _
-    %r2 = t2ADDri killed %r2, 1, 14, _, _
-    %r1 = t2SUBri killed %r1, 1, 14, _, def %cpsr
-    t2Bcc %bb.1.for.body, 1, killed %cpsr
+    $r0 = t2MUL $r2, killed $r0, 14, _
+    $r2 = t2ADDri killed $r2, 1, 14, _, _
+    $r1 = t2SUBri killed $r1, 1, 14, _, def $cpsr
+    t2Bcc %bb.1.for.body, 1, killed $cpsr
 
   bb.2.for.cond.cleanup:
-    liveins: %r0
+    liveins: $r0
 
-    tBX_RET 14, _, implicit %r0
+    tBX_RET 14, _, implicit $r0
 ...
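The t2sizereduction diff above exercises Thumb2 size reduction: 32-bit t2MOVi/t2ADDri/t2SUBri become the 16-bit tMOVi8/tADDi8/tSUBi8 encodings, whose mandatory CPSR update shows up as the new `dead $cpsr` defs in the expected output. The sketch below is a rough model of the constraints involved, written for this note under stated assumptions; it is not the pass's actual predicate.

LOW_REGS = {f"r{i}" for i in range(8)}  # Thumb1 encodings only reach r0-r7

def can_use_t16_addsub_imm(dst, src, imm, flags_clobber_ok):
    """Whether a 32-bit t2ADDri/t2SUBri could plausibly shrink to the
    16-bit tADDi8/tSUBi8 form, as the hunks above expect: low registers,
    destination tied to source, an 8-bit immediate, and the flag update
    must be acceptable because the narrow encoding always sets CPSR
    (outside IT blocks). A sketch of the constraints only."""
    return (dst in LOW_REGS and src in LOW_REGS
            and dst == src            # tADDi8/tSUBi8 are two-address
            and 0 <= imm <= 255       # imm8 encoding
            and flags_clobber_ok)     # narrow form defines CPSR

# "$r2, dead $cpsr = tADDi8 killed $r2, 1, ..." is fine: the wide form's
# flag result was dead, so the mandatory CPSR def is harmless.
assert can_use_t16_addsub_imm("r2", "r2", 1, flags_clobber_ok=True)
assert not can_use_t16_addsub_imm("r8", "r8", 1, flags_clobber_ok=True)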
Index: test/CodeGen/Thumb2/tbb-removeadd.mir
===================================================================
--- test/CodeGen/Thumb2/tbb-removeadd.mir
+++ test/CodeGen/Thumb2/tbb-removeadd.mir
@@ -44,8 +44,8 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%r0' }
-  - { reg: '%r1' }
+  - { reg: '$r0' }
+  - { reg: '$r1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -75,49 +75,49 @@
 body: |
   bb.0.entry:
     successors: %bb.6.sw.epilog(0x0ccccccb), %bb.1.entry(0x73333335)
-    liveins: %r0, %r1
+    liveins: $r0, $r1
 
-    tCMPi8 %r0, 4, 14, %noreg, implicit-def %cpsr
-    t2Bcc %bb.6.sw.epilog, 8, killed %cpsr
+    tCMPi8 $r0, 4, 14, $noreg, implicit-def $cpsr
+    t2Bcc %bb.6.sw.epilog, 8, killed $cpsr
 
   bb.1.entry:
     successors: %bb.2.sw.bb(0x1c71c71c), %bb.3.sw.bb1(0x1c71c71c), %bb.5.sw.epilog.sink.split(0x1c71c71c), %bb.6.sw.epilog(0x0e38e38e), %bb.4.sw.bb3(0x1c71c71c)
-    liveins: %r0, %r1
+    liveins: $r0, $r1
 
-    %r2 = t2LEApcrelJT %jump-table.0, 14, %noreg
-    %r3 = t2ADDrs killed %r2, %r0, 18, 14, %noreg, %noreg
-    %r2, dead %cpsr = tMOVi8 1, 14, %noreg
-    t2BR_JT killed %r3, killed %r0, %jump-table.0
+    $r2 = t2LEApcrelJT %jump-table.0, 14, $noreg
+    $r3 = t2ADDrs killed $r2, $r0, 18, 14, $noreg, $noreg
+    $r2, dead $cpsr = tMOVi8 1, 14, $noreg
+    t2BR_JT killed $r3, killed $r0, %jump-table.0
 
   bb.2.sw.bb:
     successors: %bb.5.sw.epilog.sink.split(0x80000000)
-    liveins: %r1
+    liveins: $r1
 
-    %r2, dead %cpsr = tMOVi8 0, 14, %noreg
-    t2B %bb.5.sw.epilog.sink.split, 14, %noreg
+    $r2, dead $cpsr = tMOVi8 0, 14, $noreg
+    t2B %bb.5.sw.epilog.sink.split, 14, $noreg
 
   bb.3.sw.bb1:
     successors: %bb.5.sw.epilog.sink.split(0x80000000)
-    liveins: %r1
+    liveins: $r1
 
-    %r0, dead %cpsr = tMOVi8 0, 14, %noreg
-    %r2, dead %cpsr = tMOVi8 1, 14, %noreg
-    tSTRi killed %r0, %r1, 0, 14, %noreg :: (store 4 into %ir.p)
-    t2B %bb.5.sw.epilog.sink.split, 14, %noreg
+    $r0, dead $cpsr = tMOVi8 0, 14, $noreg
+    $r2, dead $cpsr = tMOVi8 1, 14, $noreg
+    tSTRi killed $r0, $r1, 0, 14, $noreg :: (store 4 into %ir.p)
+    t2B %bb.5.sw.epilog.sink.split, 14, $noreg
 
   bb.4.sw.bb3:
     successors: %bb.5.sw.epilog.sink.split(0x80000000)
-    liveins: %r1
+    liveins: $r1
 
-    %r2, dead %cpsr = tMOVi8 2, 14, %noreg
+    $r2, dead $cpsr = tMOVi8 2, 14, $noreg
 
   bb.5.sw.epilog.sink.split:
    successors: %bb.6.sw.epilog(0x80000000)
-    liveins: %r1, %r2
+    liveins: $r1, $r2
 
-    tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.p)
+    tSTRi killed $r2, killed $r1, 0, 14, $noreg :: (store 4 into %ir.p)
 
   bb.6.sw.epilog:
-    tBX_RET 14, %noreg
+    tBX_RET 14, $noreg
 ...
Index: test/CodeGen/X86/2006-11-17-IllegalMove.ll
===================================================================
--- test/CodeGen/X86/2006-11-17-IllegalMove.ll
+++ test/CodeGen/X86/2006-11-17-IllegalMove.ll
@@ -11,7 +11,7 @@
 ; CHECK-NEXT: # %bb.1: # %cond_next129
 ; CHECK-NEXT: movb 0, %al
 ; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %al
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: cmpq %rax, %rax
Index: test/CodeGen/X86/2010-05-28-Crash.ll
===================================================================
--- test/CodeGen/X86/2010-05-28-Crash.ll
+++ test/CodeGen/X86/2010-05-28-Crash.ll
@@ -45,7 +45,7 @@
 !18 = !DIFile(filename: "f.c", directory: "/tmp")
 !19 = !{}
-;CHECK: DEBUG_VALUE: bar:x <- %e
+;CHECK: DEBUG_VALUE: bar:x <- $e
 ;CHECK: Ltmp
 ;CHECK: DEBUG_VALUE: foo:y <- 1{{$}}
 !20 = !{i32 1, !"Debug Info Version", i32 3}
Index: test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
===================================================================
--- test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
+++ test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
@@ -11,7 +11,7 @@
 ; Function Attrs: noinline nounwind optsize readnone ssp
 define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) #0 align 2 !dbg !4 {
 entry:
-  ; CHECK: DEBUG_VALUE: baz:this <- %rdi{{$}}
+  ; CHECK: DEBUG_VALUE: baz:this <- $rdi{{$}}
   tail call void @llvm.dbg.value(metadata %struct.foo* %this, i64 0, metadata !13, metadata !16), !dbg !17
   tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !18, metadata !16), !dbg !17
   %0 = mul nsw i32 %x, 7, !dbg !19
Index: test/CodeGen/X86/3addr-or.ll
===================================================================
--- test/CodeGen/X86/3addr-or.ll
+++ test/CodeGen/X86/3addr-or.ll
@@ -5,7 +5,7 @@
 define i32 @test1(i32 %x) nounwind ssp {
 ; CHECK-LABEL: test1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: shll $5, %edi
 ; CHECK-NEXT: leal 3(%rdi), %eax
 ; CHECK-NEXT: retq
@@ -20,7 +20,7 @@
 define i64 @test2(i8 %A, i8 %B) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: shll $4, %edi
 ; CHECK-NEXT: andl $48, %edi
 ; CHECK-NEXT: movzbl %sil, %eax
@@ -55,8 +55,8 @@
 define i32 @test4(i32 %a, i32 %b) nounwind readnone ssp {
 ; CHECK-LABEL: test4:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: andl $6, %edi
 ; CHECK-NEXT: andl $16, %esi
 ; CHECK-NEXT: leal (%rsi,%rdi), %eax
Index: test/CodeGen/X86/GlobalISel/add-scalar.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -28,8 +28,8 @@
 define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
 ; X64-LABEL: test_add_i32:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: leal (%rsi,%rdi), %eax
 ; X64-NEXT: retq
 ;
@@ -45,10 +45,10 @@
 define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
 ; X64-LABEL: test_add_i16:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: leal (%rsi,%rdi), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: test_add_i16:
Index: test/CodeGen/X86/GlobalISel/ext-x86-64.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -6,7 +6,7 @@
 define i64 @test_zext_i1(i8 %a) {
 ; X64-LABEL: test_zext_i1:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: andq $1, %rdi
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: retq
Index: test/CodeGen/X86/GlobalISel/ext.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/ext.ll
+++ test/CodeGen/X86/GlobalISel/ext.ll
@@ -13,7 +13,7 @@
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: andb $1, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 %val = trunc i32 %a to i1
 %r = zext i1 %val to i8
@@ -31,7 +31,7 @@
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: andw $1, %ax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 %val = trunc i32 %a to i1
 %r = zext i1 %val to i16
Index: test/CodeGen/X86/GlobalISel/gep.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/gep.ll
+++ test/CodeGen/X86/GlobalISel/gep.ll
@@ -13,7 +13,7 @@
 ;
 ; X64-LABEL: test_gep_i8:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: movsbq %sil, %rax
 ; X64-NEXT: leaq (%rdi,%rax,4), %rax
 ; X64-NEXT: retq
@@ -47,7 +47,7 @@
 ;
 ; X64-LABEL: test_gep_i16:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: movswq %si, %rax
 ; X64-NEXT: leaq (%rdi,%rax,4), %rax
 ; X64-NEXT: retq
Index: test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -31,22 +31,22 @@
 ; X32: G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
 ; X32: G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
 ; X32: G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
- ; X32: %al = COPY [[LOAD]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[LOAD]](s8)
+ ; X32: RET 0, implicit $al
 ; X64-LABEL: name: test_i8_args_8
 ; X64: bb.1.entry:
- ; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
 ; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
 ; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
 ; X64: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
- ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY %ecx
+ ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
 ; X64: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
- ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY %r8d
+ ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
 ; X64: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[COPY4]](s32)
- ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY %r9d
+ ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
 ; X64: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
 ; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
 ; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
@@ -58,8 +58,8 @@
 ; X64: G_STORE [[TRUNC]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
 ; X64: G_STORE [[LOAD]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
 ; X64: G_STORE [[LOAD1]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
- ; X64: %al = COPY [[TRUNC]](s8)
- ; X64: RET 0, implicit %al
+ ; X64: $al = COPY [[TRUNC]](s8)
+ ; X64: RET 0, implicit $al
 i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
 entry:
 store i8 %arg1, i8* @a1_8bit
@@ -97,17 +97,17 @@
 ; X32: G_STORE [[LOAD]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
 ; X32: G_STORE [[LOAD6]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
 ; X32: G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
- ; X32: %eax = COPY [[LOAD]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[LOAD]](s32)
+ ; X32: RET 0, implicit $eax
 ; X64-LABEL: name: test_i32_args_8
 ; X64: bb.1.entry:
- ; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
- ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY %ecx
- ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY %r8d
- ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY %r9d
+ ; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
+ ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
+ ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
+ ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
 ; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
 ; X64: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
 ; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -118,8 +118,8 @@
 ; X64: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
 ; X64: G_STORE [[LOAD]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
 ; X64: G_STORE [[LOAD1]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
- ; X64: %eax = COPY [[COPY]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[COPY]](s32)
+ ; X64: RET 0, implicit $eax
 i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
 entry:
 store i32 %arg1, i32* @a1_32bit
@@ -182,18 +182,18 @@
 ; X32: G_STORE [[MV6]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit, align 4)
 ; X32: G_STORE [[MV7]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit, align 4)
 ; X32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
- ; X32: %eax = COPY [[UV]](s32)
- ; X32: %edx = COPY [[UV1]](s32)
- ; X32: RET 0, implicit %eax, implicit %edx
+ ; X32: $eax = COPY [[UV]](s32)
+ ; X32: $edx = COPY [[UV1]](s32)
+ ; X32: RET 0, implicit $eax, implicit $edx
 ; X64-LABEL: name: test_i64_args_8
 ; X64: bb.1.entry:
- ; X64: liveins: %rcx, %rdi, %rdx, %rsi, %r8, %r9
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
- ; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
- ; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY %rcx
- ; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY %r8
- ; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY %r9
+ ; X64: liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+ ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+ ; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
+ ; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY $rcx
+ ; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY $r8
+ ; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY $r9
 ; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
 ; X64: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
 ; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -204,8 +204,8 @@
 ; X64: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @a1_64bit)
 ; X64: G_STORE [[LOAD]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit)
 ; X64: G_STORE [[LOAD1]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit)
- ; X64: %rax = COPY [[COPY]](s64)
- ; X64: RET 0, implicit %rax
+ ; X64: $rax = COPY [[COPY]](s64)
+ ; X64: RET 0, implicit $rax
 i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
 ; ... a bunch more that we don't track ...
 entry:
@@ -222,15 +222,15 @@
 ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
 ; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: %fp0 = COPY [[LOAD1]](s32)
- ; X32: RET 0, implicit %fp0
+ ; X32: $fp0 = COPY [[LOAD1]](s32)
+ ; X32: RET 0, implicit $fp0
 ; X64-LABEL: name: test_float_args
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](s32)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](s32)
+ ; X64: RET 0, implicit $xmm0
 ret float %arg2
 }
@@ -241,57 +241,57 @@
 ; X32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
 ; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 0)
- ; X32: %fp0 = COPY [[LOAD1]](s64)
- ; X32: RET 0, implicit %fp0
+ ; X32: $fp0 = COPY [[LOAD1]](s64)
+ ; X32: RET 0, implicit $fp0
 ; X64-LABEL: name: test_double_args
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](s64)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](s64)
+ ; X64: RET 0, implicit $xmm0
 ret double %arg2
 }
 
 define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; X32-LABEL: name: test_v4i32_args
 ; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X32: %xmm0 = COPY [[COPY1]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0
+ ; X32: liveins: $xmm0, $xmm1
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X32: $xmm0 = COPY [[COPY1]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0
 ; X64-LABEL: name: test_v4i32_args
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0
 ret <4 x i32> %arg2
 }
 
 define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
 ; X32-LABEL: name: test_v8i32_args
 ; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X32: liveins: $xmm0, $xmm1
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
 ; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
 ; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0, implicit %xmm1
+ ; X32: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0, implicit $xmm1
 ; X64-LABEL: name: test_v8i32_args
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
 ; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
 ; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0, implicit %xmm1
+ ; X64: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0, implicit $xmm1
 ret <8 x i32> %arg1
 }
@@ -307,19 +307,19 @@
 }
 
 define i32 * @test_memop_i32(i32 * %p1) {
-;X64 liveins: %rdi
+;X64 liveins: $rdi
 ; X32-LABEL: name: test_memop_i32
 ; X32: bb.1 (%ir-block.0):
 ; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: %eax = COPY [[LOAD]](p0)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[LOAD]](p0)
+ ; X32: RET 0, implicit $eax
 ; X64-LABEL: name: test_memop_i32
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: %rax = COPY [[COPY]](p0)
- ; X64: RET 0, implicit %rax
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: $rax = COPY [[COPY]](p0)
+ ; X64: RET 0, implicit $rax
 ret i32 * %p1;
 }
@@ -327,15 +327,15 @@
 define void @test_trivial_call() {
 ; X32-LABEL: name: test_trivial_call
 ; X32: bb.1 (%ir-block.0):
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: CALLpcrel32 @trivial_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_trivial_call
 ; X64: bb.1 (%ir-block.0):
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 call void @trivial_callee()
 ret void
@@ -349,28 +349,28 @@
 ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
 ; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
 ; X32: G_STORE [[LOAD1]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_simple_arg
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %edi, %esi
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[COPY1]](s32)
- ; X64: %esi = COPY [[COPY]](s32)
- ; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: liveins: $edi, $esi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[COPY1]](s32)
+ ; X64: $esi = COPY [[COPY]](s32)
+ ; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 call void @simple_arg_callee(i32 %in1, i32 %in0)
 ret void
@@ -382,63 +382,63 @@
 ; X32: bb.1 (%ir-block.0):
 ; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
 ; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP2]](p0) :: (store 4 into stack + 8, align 0)
- ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
 ; X32: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY3]], [[C3]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP3]](p0) :: (store 4 into stack + 12, align 0)
- ; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
 ; X32: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY4]], [[C4]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP4]](p0) :: (store 4 into stack + 16, align 0)
- ; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
 ; X32: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY5]], [[C5]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP5]](p0) :: (store 4 into stack + 20, align 0)
- ; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
 ; X32: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY6]], [[C6]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP6]](p0) :: (store 4 into stack + 24, align 0)
- ; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
 ; X32: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY7]], [[C7]](s32)
 ; X32: G_STORE [[LOAD]](s32), [[GEP7]](p0) :: (store 4 into stack + 28, align 0)
- ; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 32, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_simple_arg8_call
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %edi
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[COPY]](s32)
- ; X64: %esi = COPY [[COPY]](s32)
- ; X64: %edx = COPY [[COPY]](s32)
- ; X64: %ecx = COPY [[COPY]](s32)
- ; X64: %r8d = COPY [[COPY]](s32)
- ; X64: %r9d = COPY [[COPY]](s32)
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsp
+ ; X64: liveins: $edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[COPY]](s32)
+ ; X64: $esi = COPY [[COPY]](s32)
+ ; X64: $edx = COPY [[COPY]](s32)
+ ; X64: $ecx = COPY [[COPY]](s32)
+ ; X64: $r8d = COPY [[COPY]](s32)
+ ; X64: $r9d = COPY [[COPY]](s32)
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsp
 ; X64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; X64: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
 ; X64: G_STORE [[COPY]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY %rsp
+ ; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY $rsp
 ; X64: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; X64: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C1]](s64)
 ; X64: G_STORE [[COPY]](s32), [[GEP1]](p0) :: (store 4 into stack + 8, align 0)
- ; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi, implicit %edx, implicit %ecx, implicit %r8d, implicit %r9d
- ; X64: ADJCALLSTACKUP64 16, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit $edx, implicit $ecx, implicit $r8d, implicit $r9d
+ ; X64: ADJCALLSTACKUP64 16, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0)
 ret void
@@ -449,28 +449,28 @@
 ; X32-LABEL: name: test_simple_return_callee
 ; X32: bb.1 (%ir-block.0):
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s32)
 ; X32: G_STORE [[C]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit %esp, implicit %ssp, implicit-def %eax
- ; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY %eax
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit $esp, implicit $ssp, implicit-def $eax
+ ; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY $eax
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
- ; X32: %eax = COPY [[ADD]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ADD]](s32)
+ ; X32: RET 0, implicit $eax
 ; X64-LABEL: name: test_simple_return_callee
 ; X64: bb.1 (%ir-block.0):
 ; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[C]](s32)
- ; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit-def %eax
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %eax
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[C]](s32)
+ ; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $eax
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $eax
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
- ; X64: %eax = COPY [[ADD]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[ADD]](s32)
+ ; X64: RET 0, implicit $eax
 %call = call i32 @simple_return_callee(i32 5)
 %r = add i32 %call, %call
 ret i32 %r
@@ -480,51 +480,51 @@
 define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
 ; X32-LABEL: name: test_split_return_callee
 ; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1, %xmm2
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY %xmm2
+ ; X32: liveins: $xmm0, $xmm1, $xmm2
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
 ; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0, align 0)
 ; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
 ; X32: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>)
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit %esp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
- ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X32: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+ ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
 ; X32: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]]
 ; X32: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV2]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV3]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0, implicit %xmm1
+ ; X32: $xmm0 = COPY [[UV2]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV3]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0, implicit $xmm1
 ; X64-LABEL: name: test_split_return_callee
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1, %xmm2, %xmm3
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY %xmm2
- ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY %xmm3
+ ; X64: liveins: $xmm0, $xmm1, $xmm2, $xmm3
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
+ ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm3
 ; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
 ; X64: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
- ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X64: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+ ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
 ; X64: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY4]](<4 x s32>), [[COPY5]](<4 x s32>)
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]]
 ; X64: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV2]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV3]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0, implicit %xmm1
+ ; X64: $xmm0 = COPY [[UV2]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV3]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0, implicit $xmm1
 %call = call <8 x i32> @split_return_callee(<8 x i32> %arg2)
 %r = add <8 x i32> %arg1, %call
 ret <8 x i32> %r
@@ -535,17 +535,17 @@
 ; X32: bb.1 (%ir-block.0):
 ; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD:%[0-9]+]]:gr32(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: CALL32r [[LOAD]](p0), csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_indirect_call
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY %rdi
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: CALL64r [[COPY]](p0), csr_64, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY $rdi
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: CALL64r [[COPY]](p0), csr_64, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 call void %func()
 ret void
@@ -559,51 +559,51 @@
 ; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
 ; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
 ; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load 1 from %ir.addr)
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
 ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
 ; X32: G_STORE [[ANYEXT]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
 ; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
 ; X32: G_STORE [[SEXT]](s32), [[GEP1]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
 ; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
 ; X32: G_STORE [[ZEXT]](s32), [[GEP2]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_abi_exts_call
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
 ; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.addr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[ANYEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[ANYEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[SEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[SEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[ZEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[ZEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 %val = load i8, i8* %addr
 call void @take_char(i8 %val)
@@ -622,31 +622,31 @@
 ; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
 ; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
 ; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.val_ptr)
- ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
 ; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
 ; X32: G_STORE [[LOAD3]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_variadic_call_1
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi, %rsi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; X64: liveins: $rdi, $rsi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
 ; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
 ; X64: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.val_ptr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %rdi = COPY [[LOAD]](p0)
- ; X64: %esi = COPY [[LOAD1]](s32)
- ; X64: %al = MOV8ri 0
- ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit %al
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $rdi = COPY [[LOAD]](p0)
+ ; X64: $esi = COPY [[LOAD1]](s32)
+ ; X64: $al = MOV8ri 0
+ ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $al
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 %addr = load i8*, i8** %addr_ptr
 %val = load i32, i32* %val_ptr
@@ -663,31 +663,31 @@
 ; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
 ; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
 ; X32: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.val_ptr, align 4)
- ; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
 ; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
 ; X32: G_STORE [[LOAD3]](s64), [[GEP1]](p0) :: (store 8 into stack + 4, align 0)
- ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 12, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
 ; X32: RET 0
 ; X64-LABEL: name: test_variadic_call_2
 ; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi, %rsi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; X64: liveins: $rdi, $rsi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
 ; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
 ; X64: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.val_ptr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %rdi = COPY [[LOAD]](p0)
- ; X64: %xmm0 = COPY [[LOAD1]](s64)
- ; X64: %al = MOV8ri 1
- ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %xmm0, implicit %al
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $rdi = COPY [[LOAD]](p0)
+ ; X64: $xmm0 = COPY [[LOAD1]](s64)
+ ; X64: $al = MOV8ri 1
+ ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $xmm0, implicit $al
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
 ; X64: RET 0
 %addr = load i8*, i8** %addr_ptr
 %val = load double, double* %val_ptr
Index: test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
+++ test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
@@ -33,18 +33,18 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
 
    ; ALL-LABEL: name: test_add_v16i8
    ; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
    ; ALL: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[DEF]], [[DEF1]]
-    ; ALL: %xmm0 = COPY [[ADD]](<16 x s8>)
+    ; ALL: $xmm0 = COPY [[ADD]](<16 x s8>)
    ; ALL: RET 0
    %0(<16 x s8>) = IMPLICIT_DEF
    %1(<16 x s8>) = IMPLICIT_DEF
    %2(<16 x s8>) = G_ADD %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
    RET 0
 ...
@@ -59,18 +59,18 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
 
    ; ALL-LABEL: name: test_add_v8i16
    ; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
    ; ALL: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[DEF]], [[DEF1]]
-    ; ALL: %xmm0 = COPY [[ADD]](<8 x s16>)
+    ; ALL: $xmm0 = COPY [[ADD]](<8 x s16>)
    ; ALL: RET 0
    %0(<8 x s16>) = IMPLICIT_DEF
    %1(<8 x s16>) = IMPLICIT_DEF
    %2(<8 x s16>) = G_ADD %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
    RET 0
 ...
@@ -85,18 +85,18 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
 
    ; ALL-LABEL: name: test_add_v4i32
    ; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
    ; ALL: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[DEF]], [[DEF1]]
-    ; ALL: %xmm0 = COPY [[ADD]](<4 x s32>)
+    ; ALL: $xmm0 = COPY [[ADD]](<4 x s32>)
    ; ALL: RET 0
    %0(<4 x s32>) = IMPLICIT_DEF
    %1(<4 x s32>) = IMPLICIT_DEF
    %2(<4 x s32>) = G_ADD %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
    RET 0
 ...
@@ -111,18 +111,18 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
 
    ; ALL-LABEL: name: test_add_v2i64
    ; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
    ; ALL: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[DEF]], [[DEF1]]
-    ; ALL: %xmm0 = COPY [[ADD]](<2 x s64>)
+    ; ALL: $xmm0 = COPY [[ADD]](<2 x s64>)
    ; ALL: RET 0
    %0(<2 x s64>) = IMPLICIT_DEF
    %1(<2 x s64>) = IMPLICIT_DEF
    %2(<2 x s64>) = G_ADD %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
    RET 0
 ...
Index: test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
+++ test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
@@ -36,7 +36,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
 
    ; CHECK-LABEL: name: test_add_v32i8
    ; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
@@ -51,15 +51,15 @@
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
-    ; SSE2: %ymm0 = COPY [[MV]](<32 x s8>)
-    ; AVX1: %ymm0 = COPY [[MV]](<32 x s8>)
+    ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
+    ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
-    ; AVX2: %ymm0 = COPY [[ADD]](<32 x s8>)
+    ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; ALL: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_ADD %0, %1
-    %ymm0 = COPY %2
+    $ymm0 = COPY %2
    RET 0
 ...
@@ -74,7 +74,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
 
    ; ALL-LABEL: name: test_add_v16i16
    ; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
@@ -89,15 +89,15 @@
    ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
-    ; SSE2: %ymm0 = COPY [[MV]](<16 x s16>)
-    ; AVX1: %ymm0 = COPY [[MV]](<16 x s16>)
+    ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
+    ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
-    ; AVX2: %ymm0 = COPY [[ADD]](<16 x s16>)
+    ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
    ; ALL: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_ADD %0, %1
-    %ymm0 = COPY %2
+    $ymm0 = COPY %2
    RET 0
 ...
@@ -112,7 +112,7 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_add_v8i32 ; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF @@ -122,20 +122,20 @@ ; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]] ; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]] ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>) - ; SSE2: %ymm0 = COPY [[MV]](<8 x s32>) + ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>) ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>) ; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>) ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]] ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]] ; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>) - ; AVX1: %ymm0 = COPY [[MV]](<8 x s32>) + ; AVX1: $ymm0 = COPY [[MV]](<8 x s32>) ; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]] - ; AVX2: %ymm0 = COPY [[ADD]](<8 x s32>) + ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>) ; ALL: RET 0 %0(<8 x s32>) = IMPLICIT_DEF %1(<8 x s32>) = IMPLICIT_DEF %2(<8 x s32>) = G_ADD %0, %1 - %ymm0 = COPY %2 + $ymm0 = COPY %2 RET 0 ... @@ -150,7 +150,7 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_add_v4i64 ; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF @@ -165,15 +165,15 @@ ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]] ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]] ; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>) - ; SSE2: %ymm0 = COPY [[MV]](<4 x s64>) - ; AVX1: %ymm0 = COPY [[MV]](<4 x s64>) + ; SSE2: $ymm0 = COPY [[MV]](<4 x s64>) + ; AVX1: $ymm0 = COPY [[MV]](<4 x s64>) ; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]] - ; AVX2: %ymm0 = COPY [[ADD]](<4 x s64>) + ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>) ; ALL: RET 0 %0(<4 x s64>) = IMPLICIT_DEF %1(<4 x s64>) = IMPLICIT_DEF %2(<4 x s64>) = G_ADD %0, %1 - %ymm0 = COPY %2 + $ymm0 = COPY %2 RET 0 ... 
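Note on the checks above: on subtargets where 256-bit vectors are not legal (SSE2, AVX1), the legalizer narrows the wide G_ADD by unmerging each operand into legal 128-bit halves, adding the halves, and merging the results back; only AVX2 keeps the single wide add. A minimal C++ sketch of that data flow (illustrative only, not LLVM's LegalizerHelper API):

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <cstdint>

    using V16i8 = std::array<uint8_t, 16>; // a legal 128-bit piece
    using V32i8 = std::array<uint8_t, 32>; // the illegal 256-bit type

    static V16i8 addV16i8(const V16i8 &A, const V16i8 &B) {
      V16i8 R;
      for (std::size_t I = 0; I != R.size(); ++I)
        R[I] = static_cast<uint8_t>(A[I] + B[I]); // lane-wise wrapping add
      return R;
    }

    V32i8 addV32i8(const V32i8 &A, const V32i8 &B) {
      V16i8 ALo, AHi, BLo, BHi; // G_UNMERGE_VALUES of each operand
      std::copy_n(A.begin(), 16, ALo.begin());
      std::copy_n(A.begin() + 16, 16, AHi.begin());
      std::copy_n(B.begin(), 16, BLo.begin());
      std::copy_n(B.begin() + 16, 16, BHi.begin());
      V16i8 RLo = addV16i8(ALo, BLo); // two legal-width G_ADDs
      V16i8 RHi = addV16i8(AHi, BHi);
      V32i8 R; // G_MERGE_VALUES rebuilds the wide result
      std::copy_n(RLo.begin(), 16, R.begin());
      std::copy_n(RHi.begin(), 16, R.begin() + 16);
      return R;
    }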
Index: test/CodeGen/X86/GlobalISel/legalize-add-v512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-add-v512.mir +++ test/CodeGen/X86/GlobalISel/legalize-add-v512.mir @@ -40,7 +40,7 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v64i8 ; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF @@ -52,20 +52,20 @@ ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]] ; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>) - ; AVX1: %zmm0 = COPY [[MV]](<64 x s8>) + ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>) ; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>) ; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>) ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]] ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]] ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>) - ; AVX512F: %zmm0 = COPY [[MV]](<64 x s8>) + ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]] - ; AVX512BW: %zmm0 = COPY [[ADD]](<64 x s8>) + ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>) ; ALL: RET 0 %0(<64 x s8>) = IMPLICIT_DEF %1(<64 x s8>) = IMPLICIT_DEF %2(<64 x s8>) = G_ADD %0, %1 - %zmm0 = COPY %2 + $zmm0 = COPY %2 RET 0 ... @@ -80,7 +80,7 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v32i16 ; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF @@ -92,20 +92,20 @@ ; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]] ; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>) - ; AVX1: %zmm0 = COPY [[MV]](<32 x s16>) + ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>) ; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>) ; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>) ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]] ; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]] ; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>) - ; AVX512F: %zmm0 = COPY [[MV]](<32 x s16>) + ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]] - ; AVX512BW: %zmm0 = COPY [[ADD]](<32 x s16>) + ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>) ; ALL: RET 0 %0(<32 x s16>) = IMPLICIT_DEF %1(<32 x s16>) = IMPLICIT_DEF %2(<32 x s16>) = G_ADD %0, %1 - %zmm0 = COPY %2 + $zmm0 = COPY %2 RET 0 ... 
@@ -120,7 +120,7 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v16i32 ; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF @@ -132,16 +132,16 @@ ; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]] ; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>) - ; AVX1: %zmm0 = COPY [[MV]](<16 x s32>) + ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>) ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]] - ; AVX512F: %zmm0 = COPY [[ADD]](<16 x s32>) + ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]] - ; AVX512BW: %zmm0 = COPY [[ADD]](<16 x s32>) + ; AVX512BW: $zmm0 = COPY [[ADD]](<16 x s32>) ; ALL: RET 0 %0(<16 x s32>) = IMPLICIT_DEF %1(<16 x s32>) = IMPLICIT_DEF %2(<16 x s32>) = G_ADD %0, %1 - %zmm0 = COPY %2 + $zmm0 = COPY %2 RET 0 ... @@ -156,7 +156,7 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v8i64 ; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF @@ -168,16 +168,16 @@ ; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]] ; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>) - ; AVX1: %zmm0 = COPY [[MV]](<8 x s64>) + ; AVX1: $zmm0 = COPY [[MV]](<8 x s64>) ; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]] - ; AVX512F: %zmm0 = COPY [[ADD]](<8 x s64>) + ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]] - ; AVX512BW: %zmm0 = COPY [[ADD]](<8 x s64>) + ; AVX512BW: $zmm0 = COPY [[ADD]](<8 x s64>) ; ALL: RET 0 %0(<8 x s64>) = IMPLICIT_DEF %1(<8 x s64>) = IMPLICIT_DEF %2(<8 x s64>) = G_ADD %0, %1 - %zmm0 = COPY %2 + $zmm0 = COPY %2 RET 0 ... 
@@ -200,13 +200,13 @@ # body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1, %ymm2, %ymm3 + liveins: $ymm0, $ymm1, $ymm2, $ymm3 ; ALL-LABEL: name: test_add_v64i8_2 - ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY %ymm0 - ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY %ymm1 - ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY %ymm2 - ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY %ymm3 + ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0 + ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1 + ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2 + ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3 ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>) ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>) ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>) @@ -217,29 +217,29 @@ ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]] ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) ; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>) - ; AVX1: %ymm0 = COPY [[MV]](<32 x s8>) - ; AVX1: %ymm1 = COPY [[MV1]](<32 x s8>) + ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>) + ; AVX1: $ymm1 = COPY [[MV1]](<32 x s8>) ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]] ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]] - ; AVX512F: %ymm0 = COPY [[ADD]](<32 x s8>) - ; AVX512F: %ymm1 = COPY [[ADD1]](<32 x s8>) + ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>) + ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>) ; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>) ; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[MV]], [[MV1]] ; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>) - ; AVX512BW: %ymm0 = COPY [[UV]](<32 x s8>) - ; AVX512BW: %ymm1 = COPY [[UV1]](<32 x s8>) - ; ALL: RET 0, implicit %ymm0, implicit %ymm1 - %2(<32 x s8>) = COPY %ymm0 - %3(<32 x s8>) = COPY %ymm1 - %4(<32 x s8>) = COPY %ymm2 - %5(<32 x s8>) = COPY %ymm3 + ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>) + ; AVX512BW: $ymm1 = COPY [[UV1]](<32 x s8>) + ; ALL: RET 0, implicit $ymm0, implicit $ymm1 + %2(<32 x s8>) = COPY $ymm0 + %3(<32 x s8>) = COPY $ymm1 + %4(<32 x s8>) = COPY $ymm2 + %5(<32 x s8>) = COPY $ymm3 %0(<64 x s8>) = G_MERGE_VALUES %2(<32 x s8>), %3(<32 x s8>) %1(<64 x s8>) = G_MERGE_VALUES %4(<32 x s8>), %5(<32 x s8>) %6(<64 x s8>) = G_ADD %0, %1 %7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>) - %ymm0 = COPY %7(<32 x s8>) - %ymm1 = COPY %8(<32 x s8>) - RET 0, implicit %ymm0, implicit %ymm1 + $ymm0 = COPY %7(<32 x s8>) + $ymm1 = COPY %8(<32 x s8>) + RET 0, implicit $ymm0, implicit $ymm1 ... 
Index: test/CodeGen/X86/GlobalISel/legalize-add.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-add.mir +++ test/CodeGen/X86/GlobalISel/legalize-add.mir @@ -18,7 +18,7 @@ - { id: 0, class: _, preferred-register: '' } - { id: 1, class: _, preferred-register: '' } - { id: 2, class: _, preferred-register: '' } -# CHECK: %0(s32) = COPY %edx +# CHECK: %0(s32) = COPY $edx # CHECK-NEXT: %3(s8) = G_TRUNC %0(s32) # CHECK-NEXT: %4(s8) = G_TRUNC %0(s32) # CHECK-NEXT: %5(s8) = G_ADD %3, %4 @@ -27,26 +27,26 @@ bb.1 (%ir-block.0): ; X64-LABEL: name: test_add_i1 - ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edx + ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edx ; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; X64: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]] ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s8) - ; X64: %eax = COPY [[ANYEXT]](s32) + ; X64: $eax = COPY [[ANYEXT]](s32) ; X64: RET 0 ; X32-LABEL: name: test_add_i1 - ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edx + ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edx ; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; X32: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; X32: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]] ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s8) - ; X32: %eax = COPY [[ANYEXT]](s32) + ; X32: $eax = COPY [[ANYEXT]](s32) ; X32: RET 0 - %0(s32) = COPY %edx + %0(s32) = COPY $edx %1(s1) = G_TRUNC %0(s32) %2(s1) = G_ADD %1, %1 %3:_(s32) = G_ANYEXT %2 - %eax = COPY %3 + $eax = COPY %3 RET 0 ... --- @@ -64,18 +64,18 @@ ; X64: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF ; X64: [[DEF1:%[0-9]+]]:_(s32) = IMPLICIT_DEF ; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[DEF]], [[DEF1]] - ; X64: %eax = COPY [[ADD]](s32) + ; X64: $eax = COPY [[ADD]](s32) ; X64: RET 0 ; X32-LABEL: name: test_add_i32 ; X32: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF ; X32: [[DEF1:%[0-9]+]]:_(s32) = IMPLICIT_DEF ; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[DEF]], [[DEF1]] - ; X32: %eax = COPY [[ADD]](s32) + ; X32: $eax = COPY [[ADD]](s32) ; X32: RET 0 %0(s32) = IMPLICIT_DEF %1(s32) = IMPLICIT_DEF %2(s32) = G_ADD %0, %1 - %eax = COPY %2 + $eax = COPY %2 RET 0 ... @@ -94,7 +94,7 @@ ; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF ; X64: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF ; X64: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[DEF]], [[DEF1]] - ; X64: %rax = COPY [[ADD]](s64) + ; X64: $rax = COPY [[ADD]](s64) ; X64: RET 0 ; X32-LABEL: name: test_add_i64 ; X32: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF @@ -106,12 +106,12 @@ ; X32: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV]], [[UV2]], [[TRUNC]] ; X32: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDE1]] ; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDE]](s32), [[UADDE2]](s32) - ; X32: %rax = COPY [[MV]](s64) + ; X32: $rax = COPY [[MV]](s64) ; X32: RET 0 %0(s64) = IMPLICIT_DEF %1(s64) = IMPLICIT_DEF %2(s64) = G_ADD %0, %1 - %rax = COPY %2 + $rax = COPY %2 RET 0 ... 
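The X32 checks for test_add_i64 show the interesting case: s64 is not a legal scalar type on a 32-bit target, so the G_ADD is expanded into a carry-linked pair of 32-bit adds (G_UADDE) whose results are recombined with G_MERGE_VALUES. A minimal C++ sketch of the same arithmetic (illustrative helper, not part of the patch):

    #include <cstdint>

    uint64_t add64On32(uint32_t ALo, uint32_t AHi,
                       uint32_t BLo, uint32_t BHi) {
      uint32_t Lo = ALo + BLo;         // low halves: plain unsigned add
      uint32_t Carry = Lo < ALo;       // unsigned wrap => carry-out (the s1)
      uint32_t Hi = AHi + BHi + Carry; // high halves consume the carry-in
      return (static_cast<uint64_t>(Hi) << 32) | Lo; // G_MERGE_VALUES
    }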
Index: test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir +++ test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir @@ -41,18 +41,18 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: test_and_i1 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[TRUNC1]] ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s8) - ; CHECK: %eax = COPY [[ANYEXT]](s32) + ; CHECK: $eax = COPY [[ANYEXT]](s32) ; CHECK: RET 0 - %0(s32) = COPY %edx + %0(s32) = COPY $edx %1(s1) = G_TRUNC %0(s32) %2(s1) = G_AND %1, %1 %3:_(s32) = G_ANYEXT %2 - %eax = COPY %3 + $eax = COPY %3 RET 0 ... --- @@ -72,12 +72,12 @@ ; CHECK-LABEL: name: test_and_i8 ; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF ; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[DEF]], [[DEF]] - ; CHECK: %al = COPY [[AND]](s8) - ; CHECK: RET 0, implicit %al + ; CHECK: $al = COPY [[AND]](s8) + ; CHECK: RET 0, implicit $al %0(s8) = IMPLICIT_DEF %1(s8) = G_AND %0, %0 - %al = COPY %1(s8) - RET 0, implicit %al + $al = COPY %1(s8) + RET 0, implicit $al ... --- @@ -97,12 +97,12 @@ ; CHECK-LABEL: name: test_and_i16 ; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF ; CHECK: [[AND:%[0-9]+]]:_(s16) = G_AND [[DEF]], [[DEF]] - ; CHECK: %ax = COPY [[AND]](s16) - ; CHECK: RET 0, implicit %ax + ; CHECK: $ax = COPY [[AND]](s16) + ; CHECK: RET 0, implicit $ax %0(s16) = IMPLICIT_DEF %1(s16) = G_AND %0, %0 - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... --- @@ -122,12 +122,12 @@ ; CHECK-LABEL: name: test_and_i32 ; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[DEF]] - ; CHECK: %eax = COPY [[AND]](s32) - ; CHECK: RET 0, implicit %eax + ; CHECK: $eax = COPY [[AND]](s32) + ; CHECK: RET 0, implicit $eax %0(s32) = IMPLICIT_DEF %1(s32) = G_AND %0, %0 - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -147,11 +147,11 @@ ; CHECK-LABEL: name: test_and_i64 ; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[DEF]], [[DEF]] - ; CHECK: %rax = COPY [[AND]](s64) - ; CHECK: RET 0, implicit %rax + ; CHECK: $rax = COPY [[AND]](s64) + ; CHECK: RET 0, implicit $rax %0(s64) = IMPLICIT_DEF %1(s64) = G_AND %0, %0 - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... 
Index: test/CodeGen/X86/GlobalISel/legalize-brcond.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-brcond.mir +++ test/CodeGen/X86/GlobalISel/legalize-brcond.mir @@ -30,17 +30,17 @@ # ALL-NEXT: G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+]] # ALL-NEXT: G_BR %[[FALSE:bb.[0-9]+]] # ALL: [[TRUE]].{{[a-zA-Z0-9.]+}}: -# ALL-NEXT: %eax = COPY %2(s32) -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %2(s32) +# ALL-NEXT: RET 0, implicit $eax # ALL: [[FALSE]].{{[a-zA-Z0-9.]+}}: -# ALL-NEXT: %eax = COPY %3(s32) -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %3(s32) +# ALL-NEXT: RET 0, implicit $eax body: | bb.1.entry: successors: %bb.2(0x40000000), %bb.3(0x40000000) - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %2(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 1 %1(s1) = G_TRUNC %0(s32) @@ -48,11 +48,11 @@ G_BR %bb.3 bb.2.if.then: - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax bb.3.if.else: - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/legalize-cmp.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-cmp.mir +++ test/CodeGen/X86/GlobalISel/legalize-cmp.mir @@ -45,21 +45,21 @@ - { id: 3, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_cmp_i8 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil - ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY %sil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil + ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY $sil ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s8), [[COPY1]] ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) - ; CHECK: %eax = COPY [[ZEXT]](s32) - ; CHECK: RET 0, implicit %eax - %0(s8) = COPY %dil - %1(s8) = COPY %sil + ; CHECK: $eax = COPY [[ZEXT]](s32) + ; CHECK: RET 0, implicit $eax + %0(s8) = COPY $dil + %1(s8) = COPY $sil %2(s1) = G_ICMP intpred(ult), %0(s8), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -74,21 +74,21 @@ - { id: 3, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_cmp_i16 - ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di - ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY %si + ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di + ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s16), [[COPY1]] ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) - ; CHECK: %eax = COPY [[ZEXT]](s32) - ; CHECK: RET 0, implicit %eax - %0(s16) = COPY %di - %1(s16) = COPY %si + ; CHECK: $eax = COPY [[ZEXT]](s32) + ; CHECK: RET 0, implicit $eax + %0(s16) = COPY $di + %1(s16) = COPY $si %2(s1) = G_ICMP intpred(ult), %0(s16), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -103,21 +103,21 @@ - { id: 3, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_cmp_i32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]] ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) - ; CHECK: %eax = COPY [[ZEXT]](s32) - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: $eax = COPY [[ZEXT]](s32) + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(ult), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -132,21 +132,21 @@ - { id: 3, class: _ } body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; CHECK-LABEL: name: test_cmp_i64 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]] ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) - ; CHECK: %eax = COPY [[ZEXT]](s32) - ; CHECK: RET 0, implicit %eax - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi + ; CHECK: $eax = COPY [[ZEXT]](s32) + ; CHECK: RET 0, implicit $eax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi %2(s1) = G_ICMP intpred(ult), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -161,20 +161,20 @@ - { id: 3, class: _ } body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; CHECK-LABEL: name: test_cmp_p0 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi - ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](p0), [[COPY1]] ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) - ; CHECK: %eax = COPY [[ZEXT]](s32) - ; CHECK: RET 0, implicit %eax - %0(p0) = COPY %rdi - %1(p0) = COPY %rsi + ; CHECK: $eax = COPY [[ZEXT]](s32) + ; CHECK: RET 0, implicit $eax + %0(p0) = COPY $rdi + %1(p0) = COPY $rsi %2(s1) = G_ICMP intpred(ult), %0(p0), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
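All of the test_cmp_* cases above share one shape: G_ICMP produces an s1, which cannot be returned in $eax directly, so it is zero-extended to s32 first. As plain C++ (an illustrative rendering of test_cmp_i32, not part of the patch):

    #include <cstdint>

    uint32_t cmpULT(uint32_t A, uint32_t B) {
      bool P = A < B;                  // G_ICMP intpred(ult), an s1
      return static_cast<uint32_t>(P); // G_ZEXT s1 -> s32, returned in $eax
    }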
Index: test/CodeGen/X86/GlobalISel/legalize-constant.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-constant.mir +++ test/CodeGen/X86/GlobalISel/legalize-constant.mir @@ -20,46 +20,46 @@ ; X32-LABEL: name: test_constant ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1 ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8) - ; X32: %eax = COPY [[ANYEXT]](s32) + ; X32: $eax = COPY [[ANYEXT]](s32) ; X32: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 8 - ; X32: %al = COPY [[C1]](s8) + ; X32: $al = COPY [[C1]](s8) ; X32: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16 - ; X32: %ax = COPY [[C2]](s16) + ; X32: $ax = COPY [[C2]](s16) ; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 - ; X32: %eax = COPY [[C3]](s32) + ; X32: $eax = COPY [[C3]](s32) ; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 64 ; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C4]](s32), [[C5]](s32) - ; X32: %rax = COPY [[MV]](s64) + ; X32: $rax = COPY [[MV]](s64) ; X32: RET 0 ; X64-LABEL: name: test_constant ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1 ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8) - ; X64: %eax = COPY [[ANYEXT]](s32) + ; X64: $eax = COPY [[ANYEXT]](s32) ; X64: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 8 - ; X64: %al = COPY [[C1]](s8) + ; X64: $al = COPY [[C1]](s8) ; X64: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16 - ; X64: %ax = COPY [[C2]](s16) + ; X64: $ax = COPY [[C2]](s16) ; X64: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 - ; X64: %eax = COPY [[C3]](s32) + ; X64: $eax = COPY [[C3]](s32) ; X64: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; X64: %rax = COPY [[C4]](s64) + ; X64: $rax = COPY [[C4]](s64) ; X64: RET 0 %0(s1) = G_CONSTANT i1 1 %5:_(s32) = G_ANYEXT %0 - %eax = COPY %5 + $eax = COPY %5 %1(s8) = G_CONSTANT i8 8 - %al = COPY %1 + $al = COPY %1 %2(s16) = G_CONSTANT i16 16 - %ax = COPY %2 + $ax = COPY %2 %3(s32) = G_CONSTANT i32 32 - %eax = COPY %3 + $eax = COPY %3 %4(s64) = G_CONSTANT i64 64 - %rax = COPY %4 + $rax = COPY %4 RET 0 ... @@ -73,17 +73,17 @@ ; X32-LABEL: name: test_fconstant ; X32: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00 - ; X32: %eax = COPY [[C]](s32) + ; X32: $eax = COPY [[C]](s32) ; X32: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 - ; X32: %rax = COPY [[C1]](s64) + ; X32: $rax = COPY [[C1]](s64) ; X64-LABEL: name: test_fconstant ; X64: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00 - ; X64: %eax = COPY [[C]](s32) + ; X64: $eax = COPY [[C]](s32) ; X64: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 - ; X64: %rax = COPY [[C1]](s64) + ; X64: $rax = COPY [[C1]](s64) %0(s32) = G_FCONSTANT float 1.0 - %eax = COPY %0 + $eax = COPY %0 %1(s64) = G_FCONSTANT double 2.0 - %rax = COPY %1 + $rax = COPY %1 ... 
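For test_constant, the only X32/X64 divergence is the s64 case: a 32-bit target cannot materialize an i64 constant with one G_CONSTANT, so the checks expect it split into low/high i32 constants joined by G_MERGE_VALUES. A sketch of that split (the helper name is illustrative):

    #include <cstdint>
    #include <utility>

    std::pair<uint32_t, uint32_t> splitConstant64(uint64_t C) {
      uint32_t Lo = static_cast<uint32_t>(C);       // e.g. 64 -> [[C4]] above
      uint32_t Hi = static_cast<uint32_t>(C >> 32); // e.g. 0  -> [[C5]] above
      return {Lo, Hi}; // merged back into one s64 with G_MERGE_VALUES
    }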
Index: test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir +++ test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir @@ -73,19 +73,19 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_sext_i1 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s8) ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s1) - ; CHECK: %rax = COPY [[SEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s8) = COPY %dil + ; CHECK: $rax = COPY [[SEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) %2(s64) = G_SEXT %1(s1) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... --- @@ -98,17 +98,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_sext_i8 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s8) - ; CHECK: %rax = COPY [[SEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s8) = COPY %dil + ; CHECK: $rax = COPY [[SEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s64) = G_SEXT %0(s8) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -121,17 +121,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_sext_i16 - ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16) - ; CHECK: %rax = COPY [[SEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s16) = COPY %di + ; CHECK: $rax = COPY [[SEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s16) = COPY $di %1(s64) = G_SEXT %0(s16) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -144,17 +144,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_sext_i32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s32) - ; CHECK: %rax = COPY [[SEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s32) = COPY %edi + ; CHECK: $rax = COPY [[SEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s32) = COPY $edi %1(s64) = G_SEXT %0(s32) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -168,20 +168,20 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_zext_i1 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8) ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]] - ; CHECK: %rax = COPY [[AND]](s64) - ; CHECK: RET 0, implicit %rax - %0(s8) = COPY %dil + ; CHECK: $rax = COPY [[AND]](s64) + ; CHECK: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) %2(s64) = G_ZEXT %1(s1) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... 
--- @@ -194,17 +194,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_zext_i8 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s8) - ; CHECK: %rax = COPY [[ZEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s8) = COPY %dil + ; CHECK: $rax = COPY [[ZEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s64) = G_ZEXT %0(s8) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -217,17 +217,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_zext_i16 - ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s16) - ; CHECK: %rax = COPY [[ZEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s16) = COPY %di + ; CHECK: $rax = COPY [[ZEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s16) = COPY $di %1(s64) = G_ZEXT %0(s16) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -240,17 +240,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_zext_i32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32) - ; CHECK: %rax = COPY [[ZEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s32) = COPY %edi + ; CHECK: $rax = COPY [[ZEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s32) = COPY $edi %1(s64) = G_ZEXT %0(s32) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -264,18 +264,18 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_anyext_i1 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8) - ; CHECK: %rax = COPY [[ANYEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s8) = COPY %dil + ; CHECK: $rax = COPY [[ANYEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) %2(s64) = G_ANYEXT %1(s1) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... --- @@ -288,17 +288,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_anyext_i8 - ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8) - ; CHECK: %rax = COPY [[ANYEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s8) = COPY %dil + ; CHECK: $rax = COPY [[ANYEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s64) = G_ANYEXT %0(s8) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... 
--- @@ -311,17 +311,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_anyext_i16 - ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s16) - ; CHECK: %rax = COPY [[ANYEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s16) = COPY %di + ; CHECK: $rax = COPY [[ANYEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s16) = COPY $di %1(s64) = G_ANYEXT %0(s16) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -334,17 +334,17 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_anyext_i32 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32) - ; CHECK: %rax = COPY [[ANYEXT]](s64) - ; CHECK: RET 0, implicit %rax - %0(s32) = COPY %edi + ; CHECK: $rax = COPY [[ANYEXT]](s64) + ; CHECK: RET 0, implicit $rax + %0(s32) = COPY $edi %1(s64) = G_ANYEXT %0(s32) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... Index: test/CodeGen/X86/GlobalISel/legalize-ext.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-ext.mir +++ test/CodeGen/X86/GlobalISel/legalize-ext.mir @@ -101,27 +101,27 @@ - { id: 2, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_zext_i1toi8 - ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1 ; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]] - ; X32: %al = COPY [[AND]](s8) - ; X32: RET 0, implicit %al + ; X32: $al = COPY [[AND]](s8) + ; X32: RET 0, implicit $al ; X64-LABEL: name: test_zext_i1toi8 - ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1 ; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]] - ; X64: %al = COPY [[AND]](s8) - ; X64: RET 0, implicit %al - %1:_(s32) = COPY %edi + ; X64: $al = COPY [[AND]](s8) + ; X64: RET 0, implicit $al + %1:_(s32) = COPY $edi %0:_(s1) = G_TRUNC %1(s32) %2:_(s8) = G_ZEXT %0(s1) - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... 
--- @@ -134,27 +134,27 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_zext_i1toi16 - ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X32: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1 ; X32: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) ; X32: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]] - ; X32: %ax = COPY [[AND]](s16) - ; X32: RET 0, implicit %ax + ; X32: $ax = COPY [[AND]](s16) + ; X32: RET 0, implicit $ax ; X64-LABEL: name: test_zext_i1toi16 - ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X64: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1 ; X64: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) ; X64: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]] - ; X64: %ax = COPY [[AND]](s16) - ; X64: RET 0, implicit %ax - %1:_(s32) = COPY %edi + ; X64: $ax = COPY [[AND]](s16) + ; X64: RET 0, implicit $ax + %1:_(s32) = COPY $edi %0:_(s1) = G_TRUNC %1(s32) %2:_(s16) = G_ZEXT %0(s1) - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -168,27 +168,27 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_zext_i1 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8) ; X32: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]] - ; X32: %eax = COPY [[AND]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[AND]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_zext_i1 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8) ; X64: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]] - ; X64: %eax = COPY [[AND]](s32) - ; X64: RET 0, implicit %eax - %0(s8) = COPY %dil + ; X64: $eax = COPY [[AND]](s32) + ; X64: RET 0, implicit $eax + %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) %2(s32) = G_ZEXT %1(s1) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -201,22 +201,22 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_zext_i8toi16 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[COPY]](s8) - ; X32: %ax = COPY [[ZEXT]](s16) - ; X32: RET 0, implicit %ax + ; X32: $ax = COPY [[ZEXT]](s16) + ; X32: RET 0, implicit $ax ; X64-LABEL: name: test_zext_i8toi16 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[COPY]](s8) - ; X64: %ax = COPY [[ZEXT]](s16) - ; X64: RET 0, implicit %ax - %0(s8) = COPY %dil + ; X64: $ax = COPY [[ZEXT]](s16) + ; X64: RET 0, implicit $ax + %0(s8) = COPY $dil %1(s16) = G_ZEXT %0(s8) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... 
--- @@ -229,22 +229,22 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_zext_i8 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s8) - ; X32: %eax = COPY [[ZEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[ZEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_zext_i8 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s8) - ; X64: %eax = COPY [[ZEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s8) = COPY %dil + ; X64: $eax = COPY [[ZEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s8) = COPY $dil %1(s32) = G_ZEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -257,22 +257,22 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_zext_i16 - ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16) - ; X32: %eax = COPY [[ZEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[ZEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_zext_i16 - ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16) - ; X64: %eax = COPY [[ZEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s16) = COPY %di + ; X64: $eax = COPY [[ZEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s16) = COPY $di %1(s32) = G_ZEXT %0(s16) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -285,20 +285,20 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_sext_i1toi8 ; X32: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF - ; X32: %al = COPY [[DEF]](s8) - ; X32: RET 0, implicit %al + ; X32: $al = COPY [[DEF]](s8) + ; X32: RET 0, implicit $al ; X64-LABEL: name: test_sext_i1toi8 ; X64: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF - ; X64: %al = COPY [[DEF]](s8) - ; X64: RET 0, implicit %al + ; X64: $al = COPY [[DEF]](s8) + ; X64: RET 0, implicit $al %0(s1) = G_IMPLICIT_DEF %1(s8) = G_SEXT %0(s1) - %al = COPY %1(s8) - RET 0, implicit %al + $al = COPY %1(s8) + RET 0, implicit $al ... --- @@ -311,20 +311,20 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_sext_i1toi16 ; X32: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF - ; X32: %ax = COPY [[DEF]](s16) - ; X32: RET 0, implicit %ax + ; X32: $ax = COPY [[DEF]](s16) + ; X32: RET 0, implicit $ax ; X64-LABEL: name: test_sext_i1toi16 ; X64: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF - ; X64: %ax = COPY [[DEF]](s16) - ; X64: RET 0, implicit %ax + ; X64: $ax = COPY [[DEF]](s16) + ; X64: RET 0, implicit $ax %0(s1) = G_IMPLICIT_DEF %1(s16) = G_SEXT %0(s1) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... 
--- @@ -338,20 +338,20 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_sext_i1 ; X32: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF - ; X32: %eax = COPY [[DEF]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[DEF]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_sext_i1 ; X64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF - ; X64: %eax = COPY [[DEF]](s32) - ; X64: RET 0, implicit %eax + ; X64: $eax = COPY [[DEF]](s32) + ; X64: RET 0, implicit $eax %0(s1) = G_IMPLICIT_DEF %2(s32) = G_SEXT %0(s1) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -364,22 +364,22 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_sext_i8toi16 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8) - ; X32: %ax = COPY [[SEXT]](s16) - ; X32: RET 0, implicit %ax + ; X32: $ax = COPY [[SEXT]](s16) + ; X32: RET 0, implicit $ax ; X64-LABEL: name: test_sext_i8toi16 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8) - ; X64: %ax = COPY [[SEXT]](s16) - ; X64: RET 0, implicit %ax - %0(s8) = COPY %dil + ; X64: $ax = COPY [[SEXT]](s16) + ; X64: RET 0, implicit $ax + %0(s8) = COPY $dil %1(s16) = G_SEXT %0(s8) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... --- @@ -392,22 +392,22 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_sext_i8 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8) - ; X32: %eax = COPY [[SEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[SEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_sext_i8 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8) - ; X64: %eax = COPY [[SEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s8) = COPY %dil + ; X64: $eax = COPY [[SEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s8) = COPY $dil %1(s32) = G_SEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -420,22 +420,22 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_sext_i16 - ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16) - ; X32: %eax = COPY [[SEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[SEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_sext_i16 - ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16) - ; X64: %eax = COPY [[SEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s16) = COPY %di + ; X64: $eax = COPY [[SEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s16) = COPY $di %1(s32) = G_SEXT %0(s16) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... 
--- @@ -449,23 +449,23 @@ - { id: 2, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_anyext_i1toi8 - ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) - ; X32: %al = COPY [[TRUNC]](s8) - ; X32: RET 0, implicit %al + ; X32: $al = COPY [[TRUNC]](s8) + ; X32: RET 0, implicit $al ; X64-LABEL: name: test_anyext_i1toi8 - ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) - ; X64: %al = COPY [[TRUNC]](s8) - ; X64: RET 0, implicit %al - %0(s32) = COPY %edi + ; X64: $al = COPY [[TRUNC]](s8) + ; X64: RET 0, implicit $al + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s8) = G_ANYEXT %1(s1) - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -479,23 +479,23 @@ - { id: 2, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_anyext_i1toi16 - ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X32: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) - ; X32: %ax = COPY [[TRUNC]](s16) - ; X32: RET 0, implicit %ax + ; X32: $ax = COPY [[TRUNC]](s16) + ; X32: RET 0, implicit $ax ; X64-LABEL: name: test_anyext_i1toi16 - ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi + ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; X64: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) - ; X64: %ax = COPY [[TRUNC]](s16) - ; X64: RET 0, implicit %ax - %0(s32) = COPY %edi + ; X64: $ax = COPY [[TRUNC]](s16) + ; X64: RET 0, implicit $ax + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s16) = G_ANYEXT %1(s1) - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -509,23 +509,23 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_anyext_i1 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8) - ; X32: %eax = COPY [[ANYEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[ANYEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_anyext_i1 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8) - ; X64: %eax = COPY [[ANYEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s8) = COPY %dil + ; X64: $eax = COPY [[ANYEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) %2(s32) = G_ANYEXT %1(s1) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... 
--- @@ -538,22 +538,22 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_anyext_i8toi16 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8) - ; X32: %ax = COPY [[ANYEXT]](s16) - ; X32: RET 0, implicit %ax + ; X32: $ax = COPY [[ANYEXT]](s16) + ; X32: RET 0, implicit $ax ; X64-LABEL: name: test_anyext_i8toi16 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8) - ; X64: %ax = COPY [[ANYEXT]](s16) - ; X64: RET 0, implicit %ax - %0(s8) = COPY %dil + ; X64: $ax = COPY [[ANYEXT]](s16) + ; X64: RET 0, implicit $ax + %0(s8) = COPY $dil %1(s16) = G_ANYEXT %0(s8) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... --- @@ -566,22 +566,22 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_anyext_i8 - ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8) - ; X32: %eax = COPY [[ANYEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[ANYEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_anyext_i8 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8) - ; X64: %eax = COPY [[ANYEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s8) = COPY %dil + ; X64: $eax = COPY [[ANYEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s8) = COPY $dil %1(s32) = G_ANYEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -594,21 +594,21 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; X32-LABEL: name: test_anyext_i16 - ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16) - ; X32: %eax = COPY [[ANYEXT]](s32) - ; X32: RET 0, implicit %eax + ; X32: $eax = COPY [[ANYEXT]](s32) + ; X32: RET 0, implicit $eax ; X64-LABEL: name: test_anyext_i16 - ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di + ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di ; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16) - ; X64: %eax = COPY [[ANYEXT]](s32) - ; X64: RET 0, implicit %eax - %0(s16) = COPY %di + ; X64: $eax = COPY [[ANYEXT]](s32) + ; X64: RET 0, implicit $eax + %0(s16) = COPY $di %1(s32) = G_ANYEXT %0(s16) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... 
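A recurring pattern in the ext tests above: zero-extending an s1 is checked as G_ANYEXT (or G_TRUNC to the destination width) followed by G_AND with the constant 1, because the boolean lives in the low bit of a register whose upper bits are unspecified. A C++ rendering (note that in C++ the integral conversion itself already zeroes the upper bits; the explicit mask mirrors the MIR sequence, where G_ANYEXT leaves them undefined):

    #include <cstdint>

    uint32_t zextI1(uint8_t Reg /* only bit 0 is meaningful */) {
      uint32_t Wide = Reg; // models G_ANYEXT s8 -> s32
      return Wide & 1u;    // G_AND with G_CONSTANT 1 clears the rest
    }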
Index: test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir +++ test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir @@ -28,19 +28,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fadd_float - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1 ; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FADD]](s32) - ; CHECK: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FADD]](s32) + ; CHECK: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FADD %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... --- @@ -58,18 +58,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fadd_double - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1 ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FADD]](s64) - ; CHECK: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FADD]](s64) + ; CHECK: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FADD %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir +++ test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir @@ -28,19 +28,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fdiv_float - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1 ; CHECK: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FDIV]](s32) - ; CHECK: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FDIV]](s32) + ; CHECK: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FDIV %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... --- @@ -58,18 +58,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fdiv_double - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1 ; CHECK: [[FDIV:%[0-9]+]]:_(s64) = G_FDIV [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FDIV]](s64) - ; CHECK: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FDIV]](s64) + ; CHECK: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FDIV %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... 
Index: test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir +++ test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir @@ -28,19 +28,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fmul_float - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1 ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FMUL]](s32) - ; CHECK: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FMUL]](s32) + ; CHECK: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FMUL %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... --- @@ -58,18 +58,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fmul_double - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1 ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FMUL]](s64) - ; CHECK: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FMUL]](s64) + ; CHECK: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FMUL %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir +++ test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir @@ -19,16 +19,16 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1.entry: - liveins: %xmm0 + liveins: $xmm0 ; ALL-LABEL: name: test - ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0 + ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0 ; ALL: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[COPY]](s32) - ; ALL: %xmm0 = COPY [[FPEXT]](s64) - ; ALL: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 + ; ALL: $xmm0 = COPY [[FPEXT]](s64) + ; ALL: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 %1(s64) = G_FPEXT %0(s32) - %xmm0 = COPY %1(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s64) + RET 0, implicit $xmm0 ... 
Index: test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir +++ test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir @@ -28,19 +28,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fsub_float - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1 ; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FSUB]](s32) - ; CHECK: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FSUB]](s32) + ; CHECK: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FSUB %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... --- @@ -58,18 +58,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; CHECK-LABEL: name: test_fsub_double - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0 - ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1 ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[COPY1]] - ; CHECK: %xmm0 = COPY [[FSUB]](s64) - ; CHECK: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; CHECK: $xmm0 = COPY [[FSUB]](s64) + ; CHECK: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FSUB %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir +++ test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir @@ -15,19 +15,19 @@ - { id: 0, class: _ } - { id: 1, class: _ } - { id: 2, class: _ } -# ALL: %0:_(<8 x s32>) = COPY %ymm0 -# ALL-NEXT: %1:_(<4 x s32>) = COPY %xmm1 +# ALL: %0:_(<8 x s32>) = COPY $ymm0 +# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1 # ALL-NEXT: %2:_(<8 x s32>) = G_INSERT %0, %1(<4 x s32>), 0 -# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>) -# ALL-NEXT: RET 0, implicit %ymm0 +# ALL-NEXT: $ymm0 = COPY %2(<8 x s32>) +# ALL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<8 x s32>) = COPY %ymm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<8 x s32>) = COPY $ymm0 + %1(<4 x s32>) = COPY $xmm1 %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0 - %ymm0 = COPY %2(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<8 x s32>) + RET 0, implicit $ymm0 ... 
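The G_INSERT tests (the vec256 case above and the vec512 cases that follow) all insert at bit offset 0: a narrow subvector overwrites the low lanes of a wider value while the high lanes pass through unchanged. Equivalent data movement, sketched with std::array (illustrative only):

    #include <algorithm>
    #include <array>
    #include <cstdint>

    using V4i32 = std::array<uint32_t, 4>;
    using V8i32 = std::array<uint32_t, 8>;

    V8i32 insertLow128(V8i32 Wide, const V4i32 &Sub) {
      std::copy(Sub.begin(), Sub.end(), Wide.begin()); // overwrite low half
      return Wide;                                     // high half unchanged
    }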
Index: test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
+++ test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
@@ -21,19 +21,19 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %ymm1
+    liveins: $zmm0, $ymm1

     ; ALL-LABEL: name: test_insert_128
-    ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY %zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+    ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $zmm0
+    ; ALL: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
     ; ALL: [[INSERT:%[0-9]+]]:_(<16 x s32>) = G_INSERT [[COPY]], [[COPY1]](<4 x s32>), 0
-    ; ALL: %zmm0 = COPY [[INSERT]](<16 x s32>)
-    ; ALL: RET 0, implicit %ymm0
-    %0(<16 x s32>) = COPY %zmm0
-    %1(<4 x s32>) = COPY %xmm1
+    ; ALL: $zmm0 = COPY [[INSERT]](<16 x s32>)
+    ; ALL: RET 0, implicit $ymm0
+    %0(<16 x s32>) = COPY $zmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
-    %zmm0 = COPY %2(<16 x s32>)
-    RET 0, implicit %ymm0
+    $zmm0 = COPY %2(<16 x s32>)
+    RET 0, implicit $ymm0

 ...
 ---
@@ -47,18 +47,18 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %ymm1
+    liveins: $zmm0, $ymm1

     ; ALL-LABEL: name: test_insert_256
-    ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY %zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY %ymm1
+    ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $zmm0
+    ; ALL: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $ymm1
     ; ALL: [[INSERT:%[0-9]+]]:_(<16 x s32>) = G_INSERT [[COPY]], [[COPY1]](<8 x s32>), 0
-    ; ALL: %zmm0 = COPY [[INSERT]](<16 x s32>)
-    ; ALL: RET 0, implicit %ymm0
-    %0(<16 x s32>) = COPY %zmm0
-    %1(<8 x s32>) = COPY %ymm1
+    ; ALL: $zmm0 = COPY [[INSERT]](<16 x s32>)
+    ; ALL: RET 0, implicit $ymm0
+    %0(<16 x s32>) = COPY $zmm0
+    %1(<8 x s32>) = COPY $ymm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
-    %zmm0 = COPY %2(<16 x s32>)
-    RET 0, implicit %ymm0
+    $zmm0 = COPY %2(<16 x s32>)
+    RET 0, implicit $ymm0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
+++ test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
@@ -30,7 +30,7 @@
   - { id: 10, class: _, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; X64-LABEL: name: test_memop_s8tos32
     ; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
@@ -88,7 +88,7 @@
 #
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; X64-LABEL: name: test_memop_s64
     ; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
Index: test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
+++ test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
@@ -33,7 +33,7 @@
   bb.1 (%ir-block.0):

     ; CHECK-LABEL: name: test_mul_i1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[MUL:%[0-9]+]]:_(s8) = G_MUL [[TRUNC]], [[TRUNC1]]
@@ -43,7 +43,7 @@
     ; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
     ; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
     ; CHECK: RET 0
-    %0(s32) = COPY %edx
+    %0(s32) = COPY $edx
     %1(s1) = G_TRUNC %0(s32)
     %2(s1) = G_MUL %1, %1
     %3:_(p0) = G_IMPLICIT_DEF
@@ -61,19 +61,19 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi

     ; CHECK-LABEL: name: test_mul_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY %si
+    ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si
     ; CHECK: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[COPY]], [[COPY1]]
-    ; CHECK: %ax = COPY [[MUL]](s16)
-    ; CHECK: RET 0, implicit %ax
-    %0(s16) = COPY %di
-    %1(s16) = COPY %si
+    ; CHECK: $ax = COPY [[MUL]](s16)
+    ; CHECK: RET 0, implicit $ax
+    %0(s16) = COPY $di
+    %1(s16) = COPY $si
     %2(s16) = G_MUL %0, %1
-    %ax = COPY %2(s16)
-    RET 0, implicit %ax
+    $ax = COPY %2(s16)
+    RET 0, implicit $ax

 ...
 ---
@@ -87,19 +87,19 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi

     ; CHECK-LABEL: name: test_mul_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
     ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
-    ; CHECK: %eax = COPY [[MUL]](s32)
-    ; CHECK: RET 0, implicit %eax
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
+    ; CHECK: $eax = COPY [[MUL]](s32)
+    ; CHECK: RET 0, implicit $eax
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
     %2(s32) = G_MUL %0, %1
-    %eax = COPY %2(s32)
-    RET 0, implicit %eax
+    $eax = COPY %2(s32)
+    RET 0, implicit $eax

 ...
 ---
@@ -113,18 +113,18 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi

     ; CHECK-LABEL: name: test_mul_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
     ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
-    ; CHECK: %rax = COPY [[MUL]](s64)
-    ; CHECK: RET 0, implicit %rax
-    %0(s64) = COPY %rdi
-    %1(s64) = COPY %rsi
+    ; CHECK: $rax = COPY [[MUL]](s64)
+    ; CHECK: RET 0, implicit $rax
+    %0(s64) = COPY $rdi
+    %1(s64) = COPY $rsi
     %2(s64) = G_MUL %0, %1
-    %rax = COPY %2(s64)
-    RET 0, implicit %rax
+    $rax = COPY %2(s64)
+    RET 0, implicit $rax

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
+++ test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
@@ -33,20 +33,20 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<8 x s16>) = COPY %xmm0
-# ALL-NEXT: %1:_(<8 x s16>) = COPY %xmm1
+# ALL: %0:_(<8 x s16>) = COPY $xmm0
+# ALL-NEXT: %1:_(<8 x s16>) = COPY $xmm1
 # ALL-NEXT: %2:_(<8 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<8 x s16>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<8 x s16>)
+# ALL-NEXT: RET 0, implicit $xmm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

-    %0(<8 x s16>) = COPY %xmm0
-    %1(<8 x s16>) = COPY %xmm1
+    %0(<8 x s16>) = COPY $xmm0
+    %1(<8 x s16>) = COPY $xmm1
     %2(<8 x s16>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<8 x s16>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<8 x s16>)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -63,20 +63,20 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<4 x s32>) = COPY %xmm0
-# ALL-NEXT: %1:_(<4 x s32>) = COPY %xmm1
+# ALL: %0:_(<4 x s32>) = COPY $xmm0
+# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1
 # ALL-NEXT: %2:_(<4 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<4 x s32>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<4 x s32>)
+# ALL-NEXT: RET 0, implicit $xmm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -93,19 +93,19 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<2 x s64>) = COPY %xmm0
-# ALL-NEXT: %1:_(<2 x s64>) = COPY %xmm1
+# ALL: %0:_(<2 x s64>) = COPY $xmm0
+# ALL-NEXT: %1:_(<2 x s64>) = COPY $xmm1
 # ALL-NEXT: %2:_(<2 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<2 x s64>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<2 x s64>)
+# ALL-NEXT: RET 0, implicit $xmm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

-    %0(<2 x s64>) = COPY %xmm0
-    %1(<2 x s64>) = COPY %xmm1
+    %0(<2 x s64>) = COPY $xmm0
+    %1(<2 x s64>) = COPY $xmm1
     %2(<2 x s64>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<2 x s64>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<2 x s64>)
+    RET 0, implicit $xmm0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
+++ test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
@@ -33,20 +33,20 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<16 x s16>) = COPY %ymm0
-# ALL-NEXT: %1:_(<16 x s16>) = COPY %ymm1
+# ALL: %0:_(<16 x s16>) = COPY $ymm0
+# ALL-NEXT: %1:_(<16 x s16>) = COPY $ymm1
 # ALL-NEXT: %2:_(<16 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<16 x s16>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<16 x s16>)
+# ALL-NEXT: RET 0, implicit $ymm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

-    %0(<16 x s16>) = COPY %ymm0
-    %1(<16 x s16>) = COPY %ymm1
+    %0(<16 x s16>) = COPY $ymm0
+    %1(<16 x s16>) = COPY $ymm1
     %2(<16 x s16>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<16 x s16>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<16 x s16>)
+    RET 0, implicit $ymm0

 ...
 ---
@@ -63,20 +63,20 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<8 x s32>) = COPY %ymm0
-# ALL-NEXT: %1:_(<8 x s32>) = COPY %ymm1
+# ALL: %0:_(<8 x s32>) = COPY $ymm0
+# ALL-NEXT: %1:_(<8 x s32>) = COPY $ymm1
 # ALL-NEXT: %2:_(<8 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<8 x s32>)
+# ALL-NEXT: RET 0, implicit $ymm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

-    %0(<8 x s32>) = COPY %ymm0
-    %1(<8 x s32>) = COPY %ymm1
+    %0(<8 x s32>) = COPY $ymm0
+    %1(<8 x s32>) = COPY $ymm1
     %2(<8 x s32>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<8 x s32>)
+    RET 0, implicit $ymm0

 ...
 ---
@@ -93,19 +93,19 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<4 x s64>) = COPY %ymm0
-# ALL-NEXT: %1:_(<4 x s64>) = COPY %ymm1
+# ALL: %0:_(<4 x s64>) = COPY $ymm0
+# ALL-NEXT: %1:_(<4 x s64>) = COPY $ymm1
 # ALL-NEXT: %2:_(<4 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<4 x s64>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<4 x s64>)
+# ALL-NEXT: RET 0, implicit $ymm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

-    %0(<4 x s64>) = COPY %ymm0
-    %1(<4 x s64>) = COPY %ymm1
+    %0(<4 x s64>) = COPY $ymm0
+    %1(<4 x s64>) = COPY $ymm1
     %2(<4 x s64>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<4 x s64>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<4 x s64>)
+    RET 0, implicit $ymm0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
+++ test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
@@ -35,20 +35,20 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<32 x s16>) = COPY %zmm0
-# ALL-NEXT: %1:_(<32 x s16>) = COPY %zmm1
+# ALL: %0:_(<32 x s16>) = COPY $zmm0
+# ALL-NEXT: %1:_(<32 x s16>) = COPY $zmm1
 # ALL-NEXT: %2:_(<32 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<32 x s16>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<32 x s16>)
+# ALL-NEXT: RET 0, implicit $zmm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

-    %0(<32 x s16>) = COPY %zmm0
-    %1(<32 x s16>) = COPY %zmm1
+    %0(<32 x s16>) = COPY $zmm0
+    %1(<32 x s16>) = COPY $zmm1
     %2(<32 x s16>) = G_MUL %0, %1
-    %zmm0 = COPY %2(<32 x s16>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<32 x s16>)
+    RET 0, implicit $zmm0

 ...
 ---
@@ -65,20 +65,20 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<16 x s32>) = COPY %zmm0
-# ALL-NEXT: %1:_(<16 x s32>) = COPY %zmm1
+# ALL: %0:_(<16 x s32>) = COPY $zmm0
+# ALL-NEXT: %1:_(<16 x s32>) = COPY $zmm1
 # ALL-NEXT: %2:_(<16 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<16 x s32>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<16 x s32>)
+# ALL-NEXT: RET 0, implicit $zmm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

-    %0(<16 x s32>) = COPY %zmm0
-    %1(<16 x s32>) = COPY %zmm1
+    %0(<16 x s32>) = COPY $zmm0
+    %1(<16 x s32>) = COPY $zmm1
     %2(<16 x s32>) = G_MUL %0, %1
-    %zmm0 = COPY %2(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<16 x s32>)
+    RET 0, implicit $zmm0

 ...
 ---
@@ -95,19 +95,19 @@
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# ALL: %0:_(<8 x s64>) = COPY %zmm0
-# ALL-NEXT: %1:_(<8 x s64>) = COPY %zmm1
+# ALL: %0:_(<8 x s64>) = COPY $zmm0
+# ALL-NEXT: %1:_(<8 x s64>) = COPY $zmm1
 # ALL-NEXT: %2:_(<8 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<8 x s64>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<8 x s64>)
+# ALL-NEXT: RET 0, implicit $zmm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

-    %0(<8 x s64>) = COPY %zmm0
-    %1(<8 x s64>) = COPY %zmm1
+    %0(<8 x s64>) = COPY $zmm0
+    %1(<8 x s64>) = COPY $zmm1
     %2(<8 x s64>) = G_MUL %0, %1
-    %zmm0 = COPY %2(<8 x s64>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<8 x s64>)
+    RET 0, implicit $zmm0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
+++ test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
@@ -41,7 +41,7 @@
   bb.1 (%ir-block.0):

     ; CHECK-LABEL: name: test_or_i1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[TRUNC]], [[TRUNC1]]
@@ -51,7 +51,7 @@
     ; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
     ; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
     ; CHECK: RET 0
-    %0(s32) = COPY %edx
+    %0(s32) = COPY $edx
     %1(s1) = G_TRUNC %0(s32)
     %2(s1) = G_OR %1, %1
     %3:_(p0) = G_IMPLICIT_DEF
@@ -75,12 +75,12 @@
     ; CHECK-LABEL: name: test_or_i8
     ; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
     ; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[DEF]], [[DEF]]
-    ; CHECK: %al = COPY [[OR]](s8)
-    ; CHECK: RET 0, implicit %al
+    ; CHECK: $al = COPY [[OR]](s8)
+    ; CHECK: RET 0, implicit $al
     %0(s8) = IMPLICIT_DEF
     %1(s8) = G_OR %0, %0
-    %al = COPY %1(s8)
-    RET 0, implicit %al
+    $al = COPY %1(s8)
+    RET 0, implicit $al

 ...
 ---
@@ -100,12 +100,12 @@
     ; CHECK-LABEL: name: test_or_i16
     ; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
     ; CHECK: [[OR:%[0-9]+]]:_(s16) = G_OR [[DEF]], [[DEF]]
-    ; CHECK: %ax = COPY [[OR]](s16)
-    ; CHECK: RET 0, implicit %ax
+    ; CHECK: $ax = COPY [[OR]](s16)
+    ; CHECK: RET 0, implicit $ax
     %0(s16) = IMPLICIT_DEF
     %1(s16) = G_OR %0, %0
-    %ax = COPY %1(s16)
-    RET 0, implicit %ax
+    $ax = COPY %1(s16)
+    RET 0, implicit $ax

 ...
 ---
@@ -125,12 +125,12 @@
     ; CHECK-LABEL: name: test_or_i32
     ; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
     ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[DEF]], [[DEF]]
-    ; CHECK: %eax = COPY [[OR]](s32)
-    ; CHECK: RET 0, implicit %eax
+    ; CHECK: $eax = COPY [[OR]](s32)
+    ; CHECK: RET 0, implicit $eax
     %0(s32) = IMPLICIT_DEF
     %1(s32) = G_OR %0, %0
-    %eax = COPY %1(s32)
-    RET 0, implicit %eax
+    $eax = COPY %1(s32)
+    RET 0, implicit $eax

 ...
 ---
@@ -150,11 +150,11 @@
     ; CHECK-LABEL: name: test_or_i64
     ; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
     ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[DEF]], [[DEF]]
-    ; CHECK: %rax = COPY [[OR]](s64)
-    ; CHECK: RET 0, implicit %rax
+    ; CHECK: $rax = COPY [[OR]](s64)
+    ; CHECK: RET 0, implicit $rax
     %0(s64) = IMPLICIT_DEF
     %1(s64) = G_OR %0, %0
-    %rax = COPY %1(s64)
-    RET 0, implicit %rax
+    $rax = COPY %1(s64)
+    RET 0, implicit $rax

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-phi.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-phi.mir
+++ test/CodeGen/X86/GlobalISel/legalize-phi.mir
@@ -144,10 +144,10 @@
     ; ALL-LABEL: name: test_i1
     ; ALL: bb.0.entry:
    ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-    ; ALL: liveins: %edi, %edx, %esi
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
-    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+    ; ALL: liveins: $edi, $edx, $esi
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
     ; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
@@ -158,16 +158,16 @@
     ; ALL: bb.2.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
     ; ALL: [[COPY3:%[0-9]+]]:_(s8) = COPY [[PHI]](s8)
-    ; ALL: %al = COPY [[COPY3]](s8)
-    ; ALL: RET 0, implicit %al
+    ; ALL: $al = COPY [[COPY3]](s8)
+    ; ALL: RET 0, implicit $al
   bb.1.entry:
     successors: %bb.3(0x40000000), %bb.2(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi

-    %0:_(s32) = COPY %edi
-    %3:_(s32) = COPY %esi
+    %0:_(s32) = COPY $edi
+    %3:_(s32) = COPY $esi
     %1:_(s1) = G_TRUNC %3(s32)
-    %4:_(s32) = COPY %edx
+    %4:_(s32) = COPY $edx
     %2:_(s1) = G_TRUNC %4(s32)
     %5:_(s32) = G_CONSTANT i32 0
     %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -180,8 +180,8 @@
   bb.3.cond.end:
     %7:_(s1) = G_PHI %2(s1), %bb.2, %1(s1), %bb.1
     %8:_(s8) = G_ANYEXT %7(s1)
-    %al = COPY %8(s8)
-    RET 0, implicit %al
+    $al = COPY %8(s8)
+    RET 0, implicit $al

 ...
 ---
@@ -210,11 +210,11 @@
     ; ALL-LABEL: name: test_i8
     ; ALL: bb.0.entry:
     ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-    ; ALL: liveins: %edi, %edx, %esi
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+    ; ALL: liveins: $edi, $edx, $esi
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
     ; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
-    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
     ; ALL: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
@@ -223,16 +223,16 @@
     ; ALL: successors: %bb.2(0x80000000)
     ; ALL: bb.2.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
-    ; ALL: %al = COPY [[PHI]](s8)
-    ; ALL: RET 0, implicit %al
+    ; ALL: $al = COPY [[PHI]](s8)
+    ; ALL: RET 0, implicit $al
   bb.1.entry:
     successors: %bb.3(0x40000000), %bb.2(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi

-    %0:_(s32) = COPY %edi
-    %3:_(s32) = COPY %esi
+    %0:_(s32) = COPY $edi
+    %3:_(s32) = COPY $esi
     %1:_(s8) = G_TRUNC %3(s32)
-    %4:_(s32) = COPY %edx
+    %4:_(s32) = COPY $edx
     %2:_(s8) = G_TRUNC %4(s32)
     %5:_(s32) = G_CONSTANT i32 0
     %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -244,8 +244,8 @@

   bb.3.cond.end:
     %7:_(s8) = G_PHI %2(s8), %bb.2, %1(s8), %bb.1
-    %al = COPY %7(s8)
-    RET 0, implicit %al
+    $al = COPY %7(s8)
+    RET 0, implicit $al

 ...
 ---
@@ -274,11 +274,11 @@
     ; ALL-LABEL: name: test_i16
     ; ALL: bb.0.entry:
     ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-    ; ALL: liveins: %edi, %edx, %esi
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+    ; ALL: liveins: $edi, $edx, $esi
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
     ; ALL: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
     ; ALL: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
@@ -287,16 +287,16 @@
     ; ALL: successors: %bb.2(0x80000000)
     ; ALL: bb.2.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[TRUNC]](s16), %bb.0
-    ; ALL: %ax = COPY [[PHI]](s16)
-    ; ALL: RET 0, implicit %ax
+    ; ALL: $ax = COPY [[PHI]](s16)
+    ; ALL: RET 0, implicit $ax
   bb.1.entry:
     successors: %bb.3(0x40000000), %bb.2(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi

-    %0:_(s32) = COPY %edi
-    %3:_(s32) = COPY %esi
+    %0:_(s32) = COPY $edi
+    %3:_(s32) = COPY $esi
     %1:_(s16) = G_TRUNC %3(s32)
-    %4:_(s32) = COPY %edx
+    %4:_(s32) = COPY $edx
     %2:_(s16) = G_TRUNC %4(s32)
     %5:_(s32) = G_CONSTANT i32 0
     %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -308,8 +308,8 @@

   bb.3.cond.end:
     %7:_(s16) = G_PHI %2(s16), %bb.2, %1(s16), %bb.1
-    %ax = COPY %7(s16)
-    RET 0, implicit %ax
+    $ax = COPY %7(s16)
+    RET 0, implicit $ax

 ...
 ---
@@ -336,10 +336,10 @@
     ; ALL-LABEL: name: test_i32
     ; ALL: bb.0.entry:
     ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    ; ALL: liveins: %edi, %edx, %esi
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
-    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+    ; ALL: liveins: $edi, $edx, $esi
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
     ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -351,15 +351,15 @@
     ; ALL: successors: %bb.3(0x80000000)
     ; ALL: bb.3.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
-    ; ALL: %eax = COPY [[PHI]](s32)
-    ; ALL: RET 0, implicit %eax
+    ; ALL: $eax = COPY [[PHI]](s32)
+    ; ALL: RET 0, implicit $eax
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi

-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
-    %2(s32) = COPY %edx
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
+    %2(s32) = COPY $edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -376,8 +376,8 @@

   bb.4.cond.end:
     %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
-    %eax = COPY %5(s32)
-    RET 0, implicit %eax
+    $eax = COPY %5(s32)
+    RET 0, implicit $eax

 ...
 ---
@@ -404,10 +404,10 @@
     ; ALL-LABEL: name: test_i64
     ; ALL: bb.0.entry:
     ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    ; ALL: liveins: %edi, %rdx, %rsi
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
-    ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
+    ; ALL: liveins: $edi, $rdx, $rsi
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+    ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
     ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -419,15 +419,15 @@
     ; ALL: successors: %bb.3(0x80000000)
     ; ALL: bb.3.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
-    ; ALL: %rax = COPY [[PHI]](s64)
-    ; ALL: RET 0, implicit %rax
+    ; ALL: $rax = COPY [[PHI]](s64)
+    ; ALL: RET 0, implicit $rax
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %rdx, %rsi
+    liveins: $edi, $rdx, $rsi

-    %0(s32) = COPY %edi
-    %1(s64) = COPY %rsi
-    %2(s64) = COPY %rdx
+    %0(s32) = COPY $edi
+    %1(s64) = COPY $rsi
+    %2(s64) = COPY $rdx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -444,8 +444,8 @@

   bb.4.cond.end:
     %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
-    %rax = COPY %5(s64)
-    RET 0, implicit %rax
+    $rax = COPY %5(s64)
+    RET 0, implicit $rax

 ...
 ---
@@ -475,10 +475,10 @@
     ; ALL-LABEL: name: test_float
     ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
     ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    ; ALL: liveins: %edi, %xmm0, %xmm1
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm0
-    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %xmm1
+    ; ALL: liveins: $edi, $xmm0, $xmm1
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm0
+    ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $xmm1
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
     ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -490,15 +490,15 @@
     ; ALL: successors: %bb.3(0x80000000)
     ; ALL: bb.3.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
-    ; ALL: %xmm0 = COPY [[PHI]](s32)
-    ; ALL: RET 0, implicit %xmm0
+    ; ALL: $xmm0 = COPY [[PHI]](s32)
+    ; ALL: RET 0, implicit $xmm0
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %xmm0, %xmm1
+    liveins: $edi, $xmm0, $xmm1

-    %0(s32) = COPY %edi
-    %1(s32) = COPY %xmm0
-    %2(s32) = COPY %xmm1
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $xmm0
+    %2(s32) = COPY $xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -515,8 +515,8 @@

   bb.4.cond.end:
     %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
-    %xmm0 = COPY %5(s32)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %5(s32)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -543,10 +543,10 @@
     ; ALL-LABEL: name: test_double
     ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
     ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    ; ALL: liveins: %edi, %xmm0, %xmm1
-    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm0
-    ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %xmm1
+    ; ALL: liveins: $edi, $xmm0, $xmm1
+    ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm0
+    ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $xmm1
     ; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
     ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -558,15 +558,15 @@
     ; ALL: successors: %bb.3(0x80000000)
     ; ALL: bb.3.cond.end:
     ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
-    ; ALL: %xmm0 = COPY [[PHI]](s64)
-    ; ALL: RET 0, implicit %xmm0
+    ; ALL: $xmm0 = COPY [[PHI]](s64)
+    ; ALL: RET 0, implicit $xmm0
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %xmm0, %xmm1
+    liveins: $edi, $xmm0, $xmm1

-    %0(s32) = COPY %edi
-    %1(s64) = COPY %xmm0
-    %2(s64) = COPY %xmm1
+    %0(s32) = COPY $edi
+    %1(s64) = COPY $xmm0
+    %2(s64) = COPY $xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -583,7 +583,7 @@

   bb.4.cond.end:
     %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
-    %xmm0 = COPY %5(s64)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %5(s64)
+    RET 0, implicit $xmm0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
+++ test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
@@ -33,7 +33,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; ALL-LABEL: name: test_sub_v16i8
     ; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
@@ -43,7 +43,7 @@
     %0(<16 x s8>) = IMPLICIT_DEF
     %1(<16 x s8>) = IMPLICIT_DEF
     %2(<16 x s8>) = G_SUB %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
     RET 0

 ...
@@ -58,7 +58,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; ALL-LABEL: name: test_sub_v8i16
     ; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
@@ -68,7 +68,7 @@
     %0(<8 x s16>) = IMPLICIT_DEF
     %1(<8 x s16>) = IMPLICIT_DEF
     %2(<8 x s16>) = G_SUB %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
     RET 0

 ...
@@ -83,7 +83,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; ALL-LABEL: name: test_sub_v4i32
     ; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
@@ -93,7 +93,7 @@
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = IMPLICIT_DEF
     %2(<4 x s32>) = G_SUB %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
     RET 0

 ...
@@ -108,7 +108,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; ALL-LABEL: name: test_sub_v2i64
     ; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
@@ -118,7 +118,7 @@
     %0(<2 x s64>) = IMPLICIT_DEF
     %1(<2 x s64>) = IMPLICIT_DEF
     %2(<2 x s64>) = G_SUB %0, %1
-    %xmm0 = COPY %2
+    $xmm0 = COPY %2
     RET 0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
+++ test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
@@ -34,7 +34,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

     ; ALL-LABEL: name: test_sub_v32i8
     ; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
@@ -44,7 +44,7 @@
     %0(<32 x s8>) = IMPLICIT_DEF
     %1(<32 x s8>) = IMPLICIT_DEF
     %2(<32 x s8>) = G_SUB %0, %1
-    %ymm0 = COPY %2
+    $ymm0 = COPY %2
     RET 0

 ...
@@ -59,7 +59,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

     ; ALL-LABEL: name: test_sub_v16i16
     ; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
@@ -69,7 +69,7 @@
     %0(<16 x s16>) = IMPLICIT_DEF
     %1(<16 x s16>) = IMPLICIT_DEF
     %2(<16 x s16>) = G_SUB %0, %1
-    %ymm0 = COPY %2
+    $ymm0 = COPY %2
     RET 0

 ...
@@ -84,7 +84,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

     ; ALL-LABEL: name: test_sub_v8i32
     ; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
@@ -94,7 +94,7 @@
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = IMPLICIT_DEF
     %2(<8 x s32>) = G_SUB %0, %1
-    %ymm0 = COPY %2
+    $ymm0 = COPY %2
     RET 0

 ...
@@ -109,7 +109,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1

     ; ALL-LABEL: name: test_sub_v4i64
     ; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
@@ -119,7 +119,7 @@
     %0(<4 x s64>) = IMPLICIT_DEF
     %1(<4 x s64>) = IMPLICIT_DEF
     %2(<4 x s64>) = G_SUB %0, %1
-    %ymm0 = COPY %2
+    $ymm0 = COPY %2
     RET 0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
+++ test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
@@ -34,7 +34,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

     ; ALL-LABEL: name: test_sub_v64i8
     ; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
@@ -44,7 +44,7 @@
     %0(<64 x s8>) = IMPLICIT_DEF
     %1(<64 x s8>) = IMPLICIT_DEF
     %2(<64 x s8>) = G_SUB %0, %1
-    %zmm0 = COPY %2
+    $zmm0 = COPY %2
     RET 0

 ...
@@ -59,7 +59,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

     ; ALL-LABEL: name: test_sub_v32i16
     ; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
@@ -69,7 +69,7 @@
     %0(<32 x s16>) = IMPLICIT_DEF
     %1(<32 x s16>) = IMPLICIT_DEF
     %2(<32 x s16>) = G_SUB %0, %1
-    %zmm0 = COPY %2
+    $zmm0 = COPY %2
     RET 0

 ...
@@ -84,7 +84,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

     ; ALL-LABEL: name: test_sub_v16i32
     ; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
@@ -94,7 +94,7 @@
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = IMPLICIT_DEF
     %2(<16 x s32>) = G_SUB %0, %1
-    %zmm0 = COPY %2
+    $zmm0 = COPY %2
     RET 0

 ...
@@ -109,7 +109,7 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1

     ; ALL-LABEL: name: test_sub_v8i64
     ; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
@@ -119,7 +119,7 @@
     %0(<8 x s64>) = IMPLICIT_DEF
     %1(<8 x s64>) = IMPLICIT_DEF
     %2(<8 x s64>) = G_SUB %0, %1
-    %zmm0 = COPY %2
+    $zmm0 = COPY %2
     RET 0

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-sub.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-sub.mir
+++ test/CodeGen/X86/GlobalISel/legalize-sub.mir
@@ -24,7 +24,7 @@
   bb.1 (%ir-block.0):

     ; CHECK-LABEL: name: test_sub_i1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[SUB:%[0-9]+]]:_(s8) = G_SUB [[TRUNC]], [[TRUNC1]]
@@ -34,7 +34,7 @@
     ; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
     ; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
     ; CHECK: RET 0
-    %0(s32) = COPY %edx
+    %0(s32) = COPY $edx
     %1(s1) = G_TRUNC %0(s32)
     %2(s1) = G_SUB %1, %1
     %3:_(p0) = G_IMPLICIT_DEF
@@ -53,19 +53,19 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi

     ; CHECK-LABEL: name: test_sub_i32
-    ; CHECK: liveins: %edi, %esi
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+    ; CHECK: liveins: $edi, $esi
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
     ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK: %eax = COPY [[SUB]](s32)
-    ; CHECK: RET 0, implicit %eax
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
+    ; CHECK: $eax = COPY [[SUB]](s32)
+    ; CHECK: RET 0, implicit $eax
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
     %2(s32) = G_SUB %0, %1
-    %eax = COPY %2(s32)
-    RET 0, implicit %eax
+    $eax = COPY %2(s32)
+    RET 0, implicit $eax

 ...
Index: test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
+++ test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
@@ -41,12 +41,12 @@
   bb.1 (%ir-block.0):

     ; CHECK-LABEL: name: test_xor_i1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
     ; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[TRUNC]], [[TRUNC1]]
     ; CHECK: RET 0
-    %0(s32) = COPY %edx
+    %0(s32) = COPY $edx
     %1(s1) = G_TRUNC %0(s32)
     %2(s1) = G_XOR %1, %1
     %3:_(p0) = G_IMPLICIT_DEF
@@ -70,12 +70,12 @@
     ; CHECK-LABEL: name: test_xor_i8
     ; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
     ; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[DEF]], [[DEF]]
-    ; CHECK: %al = COPY [[XOR]](s8)
-    ; CHECK: RET 0, implicit %al
+    ; CHECK: $al = COPY [[XOR]](s8)
+    ; CHECK: RET 0, implicit $al
     %0(s8) = IMPLICIT_DEF
     %1(s8) = G_XOR %0, %0
-    %al = COPY %1(s8)
-    RET 0, implicit %al
+    $al = COPY %1(s8)
+    RET 0, implicit $al

 ...
 ---
@@ -95,12 +95,12 @@
     ; CHECK-LABEL: name: test_xor_i16
     ; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
     ; CHECK: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[DEF]], [[DEF]]
-    ; CHECK: %ax = COPY [[XOR]](s16)
-    ; CHECK: RET 0, implicit %ax
+    ; CHECK: $ax = COPY [[XOR]](s16)
+    ; CHECK: RET 0, implicit $ax
     %0(s16) = IMPLICIT_DEF
     %1(s16) = G_XOR %0, %0
-    %ax = COPY %1(s16)
-    RET 0, implicit %ax
+    $ax = COPY %1(s16)
+    RET 0, implicit $ax

 ...
 ---
@@ -120,12 +120,12 @@
     ; CHECK-LABEL: name: test_xor_i32
     ; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
     ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[DEF]], [[DEF]]
-    ; CHECK: %eax = COPY [[XOR]](s32)
-    ; CHECK: RET 0, implicit %eax
+    ; CHECK: $eax = COPY [[XOR]](s32)
+    ; CHECK: RET 0, implicit $eax
     %0(s32) = IMPLICIT_DEF
     %1(s32) = G_XOR %0, %0
-    %eax = COPY %1(s32)
-    RET 0, implicit %eax
+    $eax = COPY %1(s32)
+    RET 0, implicit $eax

 ...
 ---
@@ -145,11 +145,11 @@
     ; CHECK-LABEL: name: test_xor_i64
     ; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
     ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[DEF]], [[DEF]]
-    ; CHECK: %rax = COPY [[XOR]](s64)
-    ; CHECK: RET 0, implicit %rax
+    ; CHECK: $rax = COPY [[XOR]](s64)
+    ; CHECK: RET 0, implicit $rax
     %0(s64) = IMPLICIT_DEF
     %1(s64) = G_XOR %0, %0
-    %rax = COPY %1(s64)
-    RET 0, implicit %rax
+    $rax = COPY %1(s64)
+    RET 0, implicit $rax

 ...
Index: test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
+++ test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
@@ -107,12 +107,12 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

-    %0(p0) = COPY %rdi
+    %0(p0) = COPY $rdi
     %1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1, align 1)
-    %ymm0 = COPY %1(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %1(<8 x s32>)
+    RET 0, implicit $ymm0

 ...
 ---
@@ -129,10 +129,10 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %ymm0
+    liveins: $rdi, $ymm0

-    %0(<8 x s32>) = COPY %ymm0
-    %1(p0) = COPY %rdi
+    %0(<8 x s32>) = COPY $ymm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1, align 1)
     RET 0

Index: test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
+++ test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
@@ -100,12 +100,12 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

-    %0(p0) = COPY %rdi
+    %0(p0) = COPY $rdi
     %1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 1)
-    %zmm0 = COPY %1(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %1(<16 x s32>)
+    RET 0, implicit $zmm0

 ...
 ---
@@ -122,10 +122,10 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %zmm0
+    liveins: $rdi, $zmm0

-    %0(<16 x s32>) = COPY %zmm0
-    %1(p0) = COPY %rdi
+    %0(<16 x s32>) = COPY $zmm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 1)
     RET 0

Index: test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -256,27 +256,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi

     ; FAST-LABEL: name: test_add_i8
-    ; FAST: liveins: %edi, %esi
-    ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
-    ; FAST: [[COPY1:%[0-9]+]]:gpr(s8) = COPY %sil
+    ; FAST: liveins: $edi, $esi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
+    ; FAST: [[COPY1:%[0-9]+]]:gpr(s8) = COPY $sil
     ; FAST: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[COPY1]]
-    ; FAST: %al = COPY [[ADD]](s8)
-    ; FAST: RET 0, implicit %al
+    ; FAST: $al = COPY [[ADD]](s8)
+    ; FAST: RET 0, implicit $al
     ; GREEDY-LABEL: name: test_add_i8
-    ; GREEDY: liveins: %edi, %esi
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
-    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s8) = COPY %sil
+    ; GREEDY: liveins: $edi, $esi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
+    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s8) = COPY $sil
     ; GREEDY: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[COPY1]]
-    ; GREEDY: %al = COPY [[ADD]](s8)
-    ; GREEDY: RET 0, implicit %al
-    %0(s8) = COPY %dil
-    %1(s8) = COPY %sil
+    ; GREEDY: $al = COPY [[ADD]](s8)
+    ; GREEDY: RET 0, implicit $al
+    %0(s8) = COPY $dil
+    %1(s8) = COPY $sil
     %2(s8) = G_ADD %0, %1
-    %al = COPY %2(s8)
-    RET 0, implicit %al
+    $al = COPY %2(s8)
+    RET 0, implicit $al

 ...
 ---
@@ -292,27 +292,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi

     ; FAST-LABEL: name: test_add_i16
-    ; FAST: liveins: %edi, %esi
-    ; FAST: [[COPY:%[0-9]+]]:gpr(s16) = COPY %di
-    ; FAST: [[COPY1:%[0-9]+]]:gpr(s16) = COPY %si
+    ; FAST: liveins: $edi, $esi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(s16) = COPY $di
+    ; FAST: [[COPY1:%[0-9]+]]:gpr(s16) = COPY $si
     ; FAST: [[ADD:%[0-9]+]]:gpr(s16) = G_ADD [[COPY]], [[COPY1]]
-    ; FAST: %ax = COPY [[ADD]](s16)
-    ; FAST: RET 0, implicit %ax
+    ; FAST: $ax = COPY [[ADD]](s16)
+    ; FAST: RET 0, implicit $ax
     ; GREEDY-LABEL: name: test_add_i16
-    ; GREEDY: liveins: %edi, %esi
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s16) = COPY %di
-    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s16) = COPY %si
+    ; GREEDY: liveins: $edi, $esi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s16) = COPY $di
+    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s16) = COPY $si
     ; GREEDY: [[ADD:%[0-9]+]]:gpr(s16) = G_ADD [[COPY]], [[COPY1]]
-    ; GREEDY: %ax = COPY [[ADD]](s16)
-    ; GREEDY: RET 0, implicit %ax
-    %0(s16) = COPY %di
-    %1(s16) = COPY %si
+    ; GREEDY: $ax = COPY [[ADD]](s16)
+    ; GREEDY: RET 0, implicit $ax
+    %0(s16) = COPY $di
+    %1(s16) = COPY $si
     %2(s16) = G_ADD %0, %1
-    %ax = COPY %2(s16)
-    RET 0, implicit %ax
+    $ax = COPY %2(s16)
+    RET 0, implicit $ax

 ...
 ---
@@ -328,27 +328,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi

     ; FAST-LABEL: name: test_add_i32
-    ; FAST: liveins: %edi, %esi
-    ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
-    ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+    ; FAST: liveins: $edi, $esi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+    ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
     ; FAST: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY1]]
-    ; FAST: %eax = COPY [[ADD]](s32)
-    ; FAST: RET 0, implicit %eax
+    ; FAST: $eax = COPY [[ADD]](s32)
+    ; FAST: RET 0, implicit $eax
     ; GREEDY-LABEL: name: test_add_i32
-    ; GREEDY: liveins: %edi, %esi
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
-    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+    ; GREEDY: liveins: $edi, $esi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
     ; GREEDY: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY1]]
-    ; GREEDY: %eax = COPY [[ADD]](s32)
-    ; GREEDY: RET 0, implicit %eax
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
+    ; GREEDY: $eax = COPY [[ADD]](s32)
+    ; GREEDY: RET 0, implicit $eax
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
     %2(s32) = G_ADD %0, %1
-    %eax = COPY %2(s32)
-    RET 0, implicit %eax
+    $eax = COPY %2(s32)
+    RET 0, implicit $eax

 ...
 ---
@@ -364,27 +364,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi

     ; FAST-LABEL: name: test_add_i64
-    ; FAST: liveins: %rdi, %rsi
-    ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
-    ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+    ; FAST: liveins: $rdi, $rsi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+    ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
     ; FAST: [[ADD:%[0-9]+]]:gpr(s64) = G_ADD [[COPY]], [[COPY1]]
-    ; FAST: %rax = COPY [[ADD]](s64)
-    ; FAST: RET 0, implicit %rax
+    ; FAST: $rax = COPY [[ADD]](s64)
+    ; FAST: RET 0, implicit $rax
     ; GREEDY-LABEL: name: test_add_i64
-    ; GREEDY: liveins: %rdi, %rsi
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
-    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+    ; GREEDY: liveins: $rdi, $rsi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
     ; GREEDY: [[ADD:%[0-9]+]]:gpr(s64) = G_ADD [[COPY]], [[COPY1]]
-    ; GREEDY: %rax = COPY [[ADD]](s64)
-    ; GREEDY: RET 0, implicit %rax
-    %0(s64) = COPY %rdi
-    %1(s64) = COPY %rsi
+    ; GREEDY: $rax = COPY [[ADD]](s64)
+    ; GREEDY: RET 0, implicit $rax
+    %0(s64) = COPY $rdi
+    %1(s64) = COPY $rsi
     %2(s64) = G_ADD %0, %1
-    %rax = COPY %2(s64)
-    RET 0, implicit %rax
+    $rax = COPY %2(s64)
+    RET 0, implicit $rax

 ...
 ---
@@ -449,27 +449,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; FAST-LABEL: name: test_add_float
-    ; FAST: liveins: %xmm0, %xmm1
-    ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
-    ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm1
+    ; FAST: liveins: $xmm0, $xmm1
+    ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+    ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
     ; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
-    ; FAST: %xmm0 = COPY [[FADD]](s32)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[FADD]](s32)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_add_float
-    ; GREEDY: liveins: %xmm0, %xmm1
-    ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
-    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm1
+    ; GREEDY: liveins: $xmm0, $xmm1
+    ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
     ; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
-    ; GREEDY: %xmm0 = COPY [[FADD]](s32)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(s32) = COPY %xmm0
-    %1(s32) = COPY %xmm1
+    ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(s32) = COPY $xmm0
+    %1(s32) = COPY $xmm1
     %2(s32) = G_FADD %0, %1
-    %xmm0 = COPY %2(s32)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(s32)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -485,27 +485,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; FAST-LABEL: name: test_add_double
-    ; FAST: liveins: %xmm0, %xmm1
-    ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
-    ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY %xmm1
+    ; FAST: liveins: $xmm0, $xmm1
+    ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+    ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
     ; FAST: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
-    ; FAST: %xmm0 = COPY [[FADD]](s64)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[FADD]](s64)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_add_double
-    ; GREEDY: liveins: %xmm0, %xmm1
-    ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
-    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY %xmm1
+    ; GREEDY: liveins: $xmm0, $xmm1
+    ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
     ; GREEDY: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
-    ; GREEDY: %xmm0 = COPY [[FADD]](s64)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(s64) = COPY %xmm0
-    %1(s64) = COPY %xmm1
+    ; GREEDY: $xmm0 = COPY [[FADD]](s64)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(s64) = COPY $xmm0
+    %1(s64) = COPY $xmm1
     %2(s64) = G_FADD %0, %1
-    %xmm0 = COPY %2(s64)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(s64)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -650,27 +650,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; FAST-LABEL: name: test_add_v4i32
-    ; FAST: liveins: %xmm0, %xmm1
-    ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
-    ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+    ; FAST: liveins: $xmm0, $xmm1
+    ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+    ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
     ; FAST: [[ADD:%[0-9]+]]:vecr(<4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; FAST: %xmm0 = COPY [[ADD]](<4 x s32>)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[ADD]](<4 x s32>)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_add_v4i32
-    ; GREEDY: liveins: %xmm0, %xmm1
-    ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
-    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+    ; GREEDY: liveins: $xmm0, $xmm1
+    ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
     ; GREEDY: [[ADD:%[0-9]+]]:vecr(<4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; GREEDY: %xmm0 = COPY [[ADD]](<4 x s32>)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    ; GREEDY: $xmm0 = COPY [[ADD]](<4 x s32>)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_ADD %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -686,27 +686,27 @@
   - { id: 2, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1

     ; FAST-LABEL: name: test_add_v4f32
-    ; FAST: liveins: %xmm0, %xmm1
-    ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
-    ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+    ; FAST: liveins: $xmm0, $xmm1
+    ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+    ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
     ; FAST: [[FADD:%[0-9]+]]:vecr(<4 x s32>) = G_FADD [[COPY]], [[COPY1]]
-    ; FAST: %xmm0 = COPY [[FADD]](<4 x s32>)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[FADD]](<4 x s32>)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_add_v4f32
-    ; GREEDY: liveins: %xmm0, %xmm1
-    ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
-    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+    ; GREEDY: liveins: $xmm0, $xmm1
+    ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+    ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
     ; GREEDY: [[FADD:%[0-9]+]]:vecr(<4 x s32>) = G_FADD [[COPY]], [[COPY1]]
-    ; GREEDY: %xmm0 = COPY [[FADD]](<4 x s32>)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    ; GREEDY: $xmm0 = COPY [[FADD]](<4 x s32>)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_FADD %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -720,22 +720,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_i8
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:gpr(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.p1)
-    ; FAST: %al = COPY [[LOAD]](s8)
-    ; FAST: RET 0, implicit %al
+    ; FAST: $al = COPY [[LOAD]](s8)
+    ; FAST: RET 0, implicit $al
     ; GREEDY-LABEL: name: test_load_i8
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:gpr(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.p1)
-    ; GREEDY: %al = COPY [[LOAD]](s8)
-    ; GREEDY: RET 0, implicit %al
-    %0(p0) = COPY %rdi
+    ; GREEDY: $al = COPY [[LOAD]](s8)
+    ; GREEDY: RET 0, implicit $al
+    %0(p0) = COPY $rdi
     %1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
-    %al = COPY %1(s8)
-    RET 0, implicit %al
+    $al = COPY %1(s8)
+    RET 0, implicit $al

 ...
 ---
@@ -749,22 +749,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_i16
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:gpr(s16) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.p1)
-    ; FAST: %ax = COPY [[LOAD]](s16)
-    ; FAST: RET 0, implicit %ax
+    ; FAST: $ax = COPY [[LOAD]](s16)
+    ; FAST: RET 0, implicit $ax
     ; GREEDY-LABEL: name: test_load_i16
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:gpr(s16) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.p1)
-    ; GREEDY: %ax = COPY [[LOAD]](s16)
-    ; GREEDY: RET 0, implicit %ax
-    %0(p0) = COPY %rdi
+    ; GREEDY: $ax = COPY [[LOAD]](s16)
+    ; GREEDY: RET 0, implicit $ax
+    %0(p0) = COPY $rdi
     %1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
-    %ax = COPY %1(s16)
-    RET 0, implicit %ax
+    $ax = COPY %1(s16)
+    RET 0, implicit $ax

 ...
 ---
@@ -778,22 +778,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_i32
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
-    ; FAST: %eax = COPY [[LOAD]](s32)
-    ; FAST: RET 0, implicit %eax
+    ; FAST: $eax = COPY [[LOAD]](s32)
+    ; FAST: RET 0, implicit $eax
     ; GREEDY-LABEL: name: test_load_i32
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
-    ; GREEDY: %eax = COPY [[LOAD]](s32)
-    ; GREEDY: RET 0, implicit %eax
-    %0(p0) = COPY %rdi
+    ; GREEDY: $eax = COPY [[LOAD]](s32)
+    ; GREEDY: RET 0, implicit $eax
+    %0(p0) = COPY $rdi
     %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
-    %eax = COPY %1(s32)
-    RET 0, implicit %eax
+    $eax = COPY %1(s32)
+    RET 0, implicit $eax

 ...
 ---
@@ -808,22 +808,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_i64
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
-    ; FAST: %rax = COPY [[LOAD]](s64)
-    ; FAST: RET 0, implicit %rax
+    ; FAST: $rax = COPY [[LOAD]](s64)
+    ; FAST: RET 0, implicit $rax
     ; GREEDY-LABEL: name: test_load_i64
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
-    ; GREEDY: %rax = COPY [[LOAD]](s64)
-    ; GREEDY: RET 0, implicit %rax
-    %0(p0) = COPY %rdi
+    ; GREEDY: $rax = COPY [[LOAD]](s64)
+    ; GREEDY: RET 0, implicit $rax
+    %0(p0) = COPY $rdi
     %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
-    %rax = COPY %1(s64)
-    RET 0, implicit %rax
+    $rax = COPY %1(s64)
+    RET 0, implicit $rax

 ...
 ---
@@ -837,22 +837,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_float
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
-    ; FAST: %xmm0 = COPY [[LOAD]](s32)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[LOAD]](s32)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_load_float
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
-    ; GREEDY: %xmm0 = COPY [[LOAD]](s32)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(p0) = COPY %rdi
+    ; GREEDY: $xmm0 = COPY [[LOAD]](s32)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(p0) = COPY $rdi
     %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
-    %xmm0 = COPY %1(s32)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %1(s32)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -866,22 +866,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_double
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
-    ; FAST: %xmm0 = COPY [[LOAD]](s64)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[LOAD]](s64)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_load_double
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
-    ; GREEDY: %xmm0 = COPY [[LOAD]](s64)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(p0) = COPY %rdi
+    ; GREEDY: $xmm0 = COPY [[LOAD]](s64)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(p0) = COPY $rdi
     %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
-    %xmm0 = COPY %1(s64)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %1(s64)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -895,22 +895,22 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi

     ; FAST-LABEL: name: test_load_v4i32
-    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; FAST: [[LOAD:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.p1, align 1)
-    ; FAST: %xmm0 = COPY [[LOAD]](<4 x s32>)
-    ; FAST: RET 0, implicit %xmm0
+    ; FAST: $xmm0 = COPY [[LOAD]](<4 x s32>)
+    ; FAST: RET 0, implicit $xmm0
     ; GREEDY-LABEL: name: test_load_v4i32
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
     ; GREEDY: [[LOAD:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.p1, align 1)
-    ; GREEDY: %xmm0 = COPY [[LOAD]](<4 x s32>)
-    ; GREEDY: RET 0, implicit %xmm0
-    %0(p0) = COPY %rdi
+    ; GREEDY: $xmm0 = COPY [[LOAD]](<4 x s32>)
+    ; GREEDY: RET 0, implicit $xmm0
+    %0(p0) = COPY $rdi
     %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
-    %xmm0 = COPY %1(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %1(<4 x s32>)
+    RET 0, implicit $xmm0

 ...
 ---
@@ -924,25 +924,25 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %rsi
+    liveins: $edi, $rsi

     ; FAST-LABEL: name: test_store_i32
-    ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
-    ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+    ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
     ; FAST: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
-    ; FAST: %rax = COPY [[COPY1]](p0)
-    ; FAST: RET 0, implicit %rax
+    ; FAST: $rax = COPY [[COPY1]](p0)
+    ; FAST: RET 0, implicit $rax
     ; GREEDY-LABEL: name: test_store_i32
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
-    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
     ; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
-    ; GREEDY: %rax = COPY [[COPY1]](p0)
-    ; GREEDY: RET 0, implicit %rax
-    %0(s32) = COPY %edi
-    %1(p0) = COPY %rsi
+    ; GREEDY: $rax = COPY [[COPY1]](p0)
+    ; GREEDY: RET 0, implicit $rax
+    %0(s32) = COPY $edi
+    %1(p0) = COPY $rsi
     G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
-    %rax = COPY %1(p0)
-    RET 0, implicit %rax
+    $rax = COPY %1(p0)
+    RET 0, implicit $rax

 ...
 ---
@@ -956,25 +956,25 @@
   - { id: 1, class: _ }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi

     ; FAST-LABEL: name: test_store_i64
-    ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
-    ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+    ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+    ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
     ; FAST: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
-    ; FAST: %rax = COPY [[COPY1]](p0)
-    ; FAST: RET 0, implicit %rax
+    ; FAST: $rax = COPY [[COPY1]](p0)
+    ; FAST: RET 0, implicit $rax
     ; GREEDY-LABEL: name: test_store_i64
-    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
-    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+    ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+    ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
     ; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
-    ; GREEDY: %rax = COPY [[COPY1]](p0)
-    ; GREEDY: RET 0, implicit %rax
-    %0(s64) = COPY %rdi
-    %1(p0) = COPY %rsi
+    ; GREEDY: $rax = COPY [[COPY1]](p0)
+    ; GREEDY: RET 0, implicit $rax
+    %0(s64) = COPY $rdi
+    %1(p0) = COPY $rsi
     G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
-    %rax = COPY %1(p0)
-    RET 0, implicit %rax
+    $rax = COPY %1(p0)
+    RET 0, implicit $rax

 ...
--- @@ -991,29 +991,29 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %rdi, %xmm0 + liveins: $rdi, $xmm0 ; FAST-LABEL: name: test_store_float - ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0 - ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi + ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0 + ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32) ; FAST: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1) - ; FAST: %rax = COPY [[COPY1]](p0) - ; FAST: RET 0, implicit %rax + ; FAST: $rax = COPY [[COPY1]](p0) + ; FAST: RET 0, implicit $rax ; GREEDY-LABEL: name: test_store_float - ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0 - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi + ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0 + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi ; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1) - ; GREEDY: %rax = COPY [[COPY1]](p0) - ; GREEDY: RET 0, implicit %rax - %0(s32) = COPY %xmm0 - %1(p0) = COPY %rdi + ; GREEDY: $rax = COPY [[COPY1]](p0) + ; GREEDY: RET 0, implicit $rax + %0(s32) = COPY $xmm0 + %1(p0) = COPY $rdi G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... --- @@ -1030,30 +1030,30 @@ - { id: 1, class: _ } body: | bb.1 (%ir-block.0): - liveins: %rdi, %xmm0 + liveins: $rdi, $xmm0 ; FAST-LABEL: name: test_store_double - ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0 - ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi + ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0 + ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi ; FAST: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) ; FAST: G_STORE [[COPY2]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1) - ; FAST: %rax = COPY [[COPY1]](p0) - ; FAST: RET 0, implicit %rax + ; FAST: $rax = COPY [[COPY1]](p0) + ; FAST: RET 0, implicit $rax ; GREEDY-LABEL: name: test_store_double - ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0 - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi + ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0 + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi ; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1) - ; GREEDY: %rax = COPY [[COPY1]](p0) - ; GREEDY: RET 0, implicit %rax - %0(s64) = COPY %xmm0 - %1(p0) = COPY %rdi + ; GREEDY: $rax = COPY [[COPY1]](p0) + ; GREEDY: RET 0, implicit $rax + %0(s64) = COPY $xmm0 + %1(p0) = COPY $rdi G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -1160,34 +1160,34 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; FAST-LABEL: name: test_icmp_eq_i8 - ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi + ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi ; FAST: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32) - ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi + ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi ; FAST: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32) ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]] ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; FAST: %al = COPY [[ANYEXT]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[ANYEXT]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_icmp_eq_i8 - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi ; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32) - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi ; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32) ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]] ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; GREEDY: %al = COPY [[ANYEXT]](s8) - ; GREEDY: RET 0, implicit %al - %2:_(s32) = COPY %edi + ; GREEDY: $al = COPY [[ANYEXT]](s8) + ; GREEDY: RET 0, implicit $al + %2:_(s32) = COPY $edi %0:_(s8) = G_TRUNC %2(s32) - %3:_(s32) = COPY %esi + %3:_(s32) = COPY $esi %1:_(s8) = G_TRUNC %3(s32) %4:_(s1) = G_ICMP intpred(eq), %0(s8), %1 %5:_(s8) = G_ANYEXT %4(s1) - %al = COPY %5(s8) - RET 0, implicit %al + $al = COPY %5(s8) + RET 0, implicit $al ... --- @@ -1201,34 +1201,34 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; FAST-LABEL: name: test_icmp_eq_i16 - ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi + ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi ; FAST: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32) - ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi + ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi ; FAST: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32) ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]] ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; FAST: %al = COPY [[ANYEXT]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[ANYEXT]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_icmp_eq_i16 - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi ; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32) - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi ; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32) ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]] ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; GREEDY: %al = COPY [[ANYEXT]](s8) - ; GREEDY: RET 0, implicit %al - %2:_(s32) = COPY %edi + ; GREEDY: $al = COPY [[ANYEXT]](s8) + ; GREEDY: RET 0, implicit $al + %2:_(s32) = COPY $edi %0:_(s16) = G_TRUNC %2(s32) - %3:_(s32) = COPY %esi + %3:_(s32) = COPY $esi %1:_(s16) = G_TRUNC %3(s32) %4:_(s1) = G_ICMP intpred(eq), %0(s16), %1 %5:_(s8) = G_ANYEXT %4(s1) - %al = COPY %5(s8) - RET 0, implicit %al + $al = COPY %5(s8) + RET 0, implicit $al ... 
--- @@ -1242,28 +1242,28 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; FAST-LABEL: name: test_icmp_eq_i32 - ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi - ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi + ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi + ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]] ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; FAST: %al = COPY [[ANYEXT]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[ANYEXT]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_icmp_eq_i32 - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]] ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; GREEDY: %al = COPY [[ANYEXT]](s8) - ; GREEDY: RET 0, implicit %al - %0:_(s32) = COPY %edi - %1:_(s32) = COPY %esi + ; GREEDY: $al = COPY [[ANYEXT]](s8) + ; GREEDY: RET 0, implicit $al + %0:_(s32) = COPY $edi + %1:_(s32) = COPY $esi %2:_(s1) = G_ICMP intpred(eq), %0(s32), %1 %3:_(s8) = G_ANYEXT %2(s1) - %al = COPY %3(s8) - RET 0, implicit %al + $al = COPY %3(s8) + RET 0, implicit $al ... --- @@ -1277,28 +1277,28 @@ - { id: 2, class: _ } body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; FAST-LABEL: name: test_icmp_eq_i64 - ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi - ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi + ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi + ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]] ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; FAST: %al = COPY [[ANYEXT]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[ANYEXT]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_icmp_eq_i64 - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]] ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1) - ; GREEDY: %al = COPY [[ANYEXT]](s8) - ; GREEDY: RET 0, implicit %al - %0:_(s64) = COPY %rdi - %1:_(s64) = COPY %rsi + ; GREEDY: $al = COPY [[ANYEXT]](s8) + ; GREEDY: RET 0, implicit $al + %0:_(s64) = COPY $rdi + %1:_(s64) = COPY $rsi %2:_(s1) = G_ICMP intpred(eq), %0(s64), %1 %3:_(s8) = G_ANYEXT %2(s1) - %al = COPY %3(s8) - RET 0, implicit %al + $al = COPY %3(s8) + RET 0, implicit $al ... --- @@ -1318,17 +1318,17 @@ ; FAST-LABEL: name: test_xor_i8 ; FAST: [[DEF:%[0-9]+]]:gpr(s8) = IMPLICIT_DEF ; FAST: [[XOR:%[0-9]+]]:gpr(s8) = G_XOR [[DEF]], [[DEF]] - ; FAST: %al = COPY [[XOR]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[XOR]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_xor_i8 ; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = IMPLICIT_DEF ; GREEDY: [[XOR:%[0-9]+]]:gpr(s8) = G_XOR [[DEF]], [[DEF]] - ; GREEDY: %al = COPY [[XOR]](s8) - ; GREEDY: RET 0, implicit %al + ; GREEDY: $al = COPY [[XOR]](s8) + ; GREEDY: RET 0, implicit $al %0(s8) = IMPLICIT_DEF %1(s8) = G_XOR %0, %0 - %al = COPY %1(s8) - RET 0, implicit %al + $al = COPY %1(s8) + RET 0, implicit $al ... 
--- @@ -1348,17 +1348,17 @@ ; FAST-LABEL: name: test_or_i16 ; FAST: [[DEF:%[0-9]+]]:gpr(s16) = IMPLICIT_DEF ; FAST: [[OR:%[0-9]+]]:gpr(s16) = G_OR [[DEF]], [[DEF]] - ; FAST: %ax = COPY [[OR]](s16) - ; FAST: RET 0, implicit %ax + ; FAST: $ax = COPY [[OR]](s16) + ; FAST: RET 0, implicit $ax ; GREEDY-LABEL: name: test_or_i16 ; GREEDY: [[DEF:%[0-9]+]]:gpr(s16) = IMPLICIT_DEF ; GREEDY: [[OR:%[0-9]+]]:gpr(s16) = G_OR [[DEF]], [[DEF]] - ; GREEDY: %ax = COPY [[OR]](s16) - ; GREEDY: RET 0, implicit %ax + ; GREEDY: $ax = COPY [[OR]](s16) + ; GREEDY: RET 0, implicit $ax %0(s16) = IMPLICIT_DEF %1(s16) = G_OR %0, %0 - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... --- @@ -1378,17 +1378,17 @@ ; FAST-LABEL: name: test_and_i32 ; FAST: [[DEF:%[0-9]+]]:gpr(s32) = IMPLICIT_DEF ; FAST: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[DEF]], [[DEF]] - ; FAST: %eax = COPY [[AND]](s32) - ; FAST: RET 0, implicit %eax + ; FAST: $eax = COPY [[AND]](s32) + ; FAST: RET 0, implicit $eax ; GREEDY-LABEL: name: test_and_i32 ; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = IMPLICIT_DEF ; GREEDY: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[DEF]], [[DEF]] - ; GREEDY: %eax = COPY [[AND]](s32) - ; GREEDY: RET 0, implicit %eax + ; GREEDY: $eax = COPY [[AND]](s32) + ; GREEDY: RET 0, implicit $eax %0(s32) = IMPLICIT_DEF %1(s32) = G_AND %0, %0 - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -1408,17 +1408,17 @@ ; FAST-LABEL: name: test_and_i64 ; FAST: [[DEF:%[0-9]+]]:gpr(s64) = IMPLICIT_DEF ; FAST: [[AND:%[0-9]+]]:gpr(s64) = G_AND [[DEF]], [[DEF]] - ; FAST: %rax = COPY [[AND]](s64) - ; FAST: RET 0, implicit %rax + ; FAST: $rax = COPY [[AND]](s64) + ; FAST: RET 0, implicit $rax ; GREEDY-LABEL: name: test_and_i64 ; GREEDY: [[DEF:%[0-9]+]]:gpr(s64) = IMPLICIT_DEF ; GREEDY: [[AND:%[0-9]+]]:gpr(s64) = G_AND [[DEF]], [[DEF]] - ; GREEDY: %rax = COPY [[AND]](s64) - ; GREEDY: RET 0, implicit %rax + ; GREEDY: $rax = COPY [[AND]](s64) + ; GREEDY: RET 0, implicit $rax %0(s64) = IMPLICIT_DEF %1(s64) = G_AND %0, %0 - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -1432,15 +1432,15 @@ bb.1.entry: ; FAST-LABEL: name: test_global_ptrv ; FAST: [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @g_int - ; FAST: %rax = COPY [[GV]](p0) - ; FAST: RET 0, implicit %rax + ; FAST: $rax = COPY [[GV]](p0) + ; FAST: RET 0, implicit $rax ; GREEDY-LABEL: name: test_global_ptrv ; GREEDY: [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @g_int - ; GREEDY: %rax = COPY [[GV]](p0) - ; GREEDY: RET 0, implicit %rax + ; GREEDY: $rax = COPY [[GV]](p0) + ; GREEDY: RET 0, implicit $rax %0(p0) = G_GLOBAL_VALUE @g_int - %rax = COPY %0(p0) - RET 0, implicit %rax + $rax = COPY %0(p0) + RET 0, implicit $rax ... --- @@ -1458,15 +1458,15 @@ bb.1 (%ir-block.0): ; FAST-LABEL: name: test_undef ; FAST: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF - ; FAST: %al = COPY [[DEF]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[DEF]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_undef ; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF - ; GREEDY: %al = COPY [[DEF]](s8) - ; GREEDY: RET 0, implicit %al + ; GREEDY: $al = COPY [[DEF]](s8) + ; GREEDY: RET 0, implicit $al %0(s8) = G_IMPLICIT_DEF - %al = COPY %0(s8) - RET 0, implicit %al + $al = COPY %0(s8) + RET 0, implicit $al ... 
--- @@ -1484,25 +1484,25 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; FAST-LABEL: name: test_undef2 - ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil + ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil ; FAST: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF ; FAST: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[DEF]] - ; FAST: %al = COPY [[ADD]](s8) - ; FAST: RET 0, implicit %al + ; FAST: $al = COPY [[ADD]](s8) + ; FAST: RET 0, implicit $al ; GREEDY-LABEL: name: test_undef2 - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil ; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF ; GREEDY: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[DEF]] - ; GREEDY: %al = COPY [[ADD]](s8) - ; GREEDY: RET 0, implicit %al - %0(s8) = COPY %dil + ; GREEDY: $al = COPY [[ADD]](s8) + ; GREEDY: RET 0, implicit $al + %0(s8) = COPY $dil %1(s8) = G_IMPLICIT_DEF %2(s8) = G_ADD %0, %1 - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -1520,15 +1520,15 @@ bb.1 (%ir-block.0): ; FAST-LABEL: name: test_undef3 ; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF - ; FAST: %xmm0 = COPY [[DEF]](s32) - ; FAST: RET 0, implicit %xmm0 + ; FAST: $xmm0 = COPY [[DEF]](s32) + ; FAST: RET 0, implicit $xmm0 ; GREEDY-LABEL: name: test_undef3 ; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF - ; GREEDY: %xmm0 = COPY [[DEF]](s32) - ; GREEDY: RET 0, implicit %xmm0 + ; GREEDY: $xmm0 = COPY [[DEF]](s32) + ; GREEDY: RET 0, implicit $xmm0 %0(s32) = G_IMPLICIT_DEF - %xmm0 = COPY %0(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %0(s32) + RET 0, implicit $xmm0 ... --- @@ -1546,27 +1546,27 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %xmm0 + liveins: $xmm0 ; FAST-LABEL: name: test_undef4 - ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0 + ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0 ; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32) ; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]] - ; FAST: %xmm0 = COPY [[FADD]](s32) - ; FAST: RET 0, implicit %xmm0 + ; FAST: $xmm0 = COPY [[FADD]](s32) + ; FAST: RET 0, implicit $xmm0 ; GREEDY-LABEL: name: test_undef4 - ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0 + ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0 ; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32) ; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]] - ; GREEDY: %xmm0 = COPY [[FADD]](s32) - ; GREEDY: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 + ; GREEDY: $xmm0 = COPY [[FADD]](s32) + ; GREEDY: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 %1(s32) = G_IMPLICIT_DEF %2(s32) = G_FADD %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... 
--- @@ -1586,10 +1586,10 @@ ; FAST-LABEL: name: test_i32 ; FAST: bb.0.entry: ; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; FAST: liveins: %edi, %edx, %esi - ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi - ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi - ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %edx + ; FAST: liveins: $edi, $edx, $esi + ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi + ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi + ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx ; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]] ; FAST: G_BRCOND [[ICMP]](s1), %bb.1 @@ -1601,15 +1601,15 @@ ; FAST: successors: %bb.3(0x80000000) ; FAST: bb.3.cond.end: ; FAST: [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2 - ; FAST: %eax = COPY [[PHI]](s32) - ; FAST: RET 0, implicit %eax + ; FAST: $eax = COPY [[PHI]](s32) + ; FAST: RET 0, implicit $eax ; GREEDY-LABEL: name: test_i32 ; GREEDY: bb.0.entry: ; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; GREEDY: liveins: %edi, %edx, %esi - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi - ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi - ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %edx + ; GREEDY: liveins: $edi, $edx, $esi + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi + ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi + ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx ; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]] ; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1 @@ -1621,15 +1621,15 @@ ; GREEDY: successors: %bb.3(0x80000000) ; GREEDY: bb.3.cond.end: ; GREEDY: [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2 - ; GREEDY: %eax = COPY [[PHI]](s32) - ; GREEDY: RET 0, implicit %eax + ; GREEDY: $eax = COPY [[PHI]](s32) + ; GREEDY: RET 0, implicit $eax bb.0.entry: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %edi, %edx, %esi + liveins: $edi, $edx, $esi - %0(s32) = COPY %edi - %1(s32) = COPY %esi - %2(s32) = COPY %edx + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s32) = COPY $edx %3(s32) = G_CONSTANT i32 0 %4(s1) = G_ICMP intpred(sgt), %0(s32), %3 G_BRCOND %4(s1), %bb.1 @@ -1645,8 +1645,8 @@ bb.3.cond.end: %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2 - %eax = COPY %5(s32) - RET 0, implicit %eax + $eax = COPY %5(s32) + RET 0, implicit $eax ... 
--- @@ -1666,10 +1666,10 @@ ; FAST-LABEL: name: test_float ; FAST: bb.0.entry: ; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; FAST: liveins: %edi, %xmm0, %xmm1 - ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi - ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm0 - ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY %xmm1 + ; FAST: liveins: $edi, $xmm0, $xmm1 + ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi + ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0 + ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1 ; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]] ; FAST: G_BRCOND [[ICMP]](s1), %bb.1 @@ -1681,15 +1681,15 @@ ; FAST: successors: %bb.3(0x80000000) ; FAST: bb.3.cond.end: ; FAST: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2 - ; FAST: %xmm0 = COPY [[PHI]](s32) - ; FAST: RET 0, implicit %xmm0 + ; FAST: $xmm0 = COPY [[PHI]](s32) + ; FAST: RET 0, implicit $xmm0 ; GREEDY-LABEL: name: test_float ; GREEDY: bb.0.entry: ; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; GREEDY: liveins: %edi, %xmm0, %xmm1 - ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi - ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm0 - ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY %xmm1 + ; GREEDY: liveins: $edi, $xmm0, $xmm1 + ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi + ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0 + ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1 ; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]] ; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1 @@ -1701,15 +1701,15 @@ ; GREEDY: successors: %bb.3(0x80000000) ; GREEDY: bb.3.cond.end: ; GREEDY: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2 - ; GREEDY: %xmm0 = COPY [[PHI]](s32) - ; GREEDY: RET 0, implicit %xmm0 + ; GREEDY: $xmm0 = COPY [[PHI]](s32) + ; GREEDY: RET 0, implicit $xmm0 bb.0.entry: successors: %bb.1(0x40000000), %bb.2(0x40000000) - liveins: %edi, %xmm0, %xmm1 + liveins: $edi, $xmm0, $xmm1 - %0(s32) = COPY %edi - %1(s32) = COPY %xmm0 - %2(s32) = COPY %xmm1 + %0(s32) = COPY $edi + %1(s32) = COPY $xmm0 + %2(s32) = COPY $xmm1 %3(s32) = G_CONSTANT i32 0 %4(s1) = G_ICMP intpred(sgt), %0(s32), %3 G_BRCOND %4(s1), %bb.1 @@ -1725,8 +1725,8 @@ bb.3.cond.end: %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2 - %xmm0 = COPY %5(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %5(s32) + RET 0, implicit $xmm0 ... --- @@ -1739,22 +1739,22 @@ - { id: 1, class: _, preferred-register: '' } body: | bb.1.entry: - liveins: %xmm0 + liveins: $xmm0 ; FAST-LABEL: name: test_fpext - ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0 + ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0 ; FAST: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32) - ; FAST: %xmm0 = COPY [[FPEXT]](s64) - ; FAST: RET 0, implicit %xmm0 + ; FAST: $xmm0 = COPY [[FPEXT]](s64) + ; FAST: RET 0, implicit $xmm0 ; GREEDY-LABEL: name: test_fpext - ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0 + ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0 ; GREEDY: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32) - ; GREEDY: %xmm0 = COPY [[FPEXT]](s64) - ; GREEDY: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 + ; GREEDY: $xmm0 = COPY [[FPEXT]](s64) + ; GREEDY: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 %1(s64) = G_FPEXT %0(s32) - %xmm0 = COPY %1(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s64) + RET 0, implicit $xmm0 ... 
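For context: the FAST and GREEDY check prefixes in the regbankselect tests above correspond to the register-bank-select pass's fast and greedy mapping modes. Their one visible divergence here is in test_store_float and test_store_double, where the fast mode inserts an extra vecr-to-gpr COPY before the G_STORE while the greedy mode stores the vecr value directly. A sketch of the IR these MIR bodies are presumably generated from (the source functions are not part of this patch; the body below is illustrative):

  define float* @test_store_float(float %val, float* %p1) {
    store float %val, float* %p1   ; becomes G_STORE %0(s32), %1(p0)
    ret float* %p1                 ; the pointer comes back in $rax
  }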
--- Index: test/CodeGen/X86/GlobalISel/select-GV.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-GV.mir +++ test/CodeGen/X86/GlobalISel/select-GV.mir @@ -41,27 +41,27 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } # X64: %0:gr64 = IMPLICIT_DEF -# X64-NEXT: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg -# X64-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`) +# X64-NEXT: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg +# X64-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) # X64-NEXT: RET 0 # # X64_DARWIN_PIC: %0:gr64 = IMPLICIT_DEF -# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg -# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`) +# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg +# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) # X64_DARWIN_PIC-NEXT: RET 0 # # X32: %0:gr32 = IMPLICIT_DEF -# X32-NEXT: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg -# X32-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`) +# X32-NEXT: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg +# X32-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) # X32-NEXT: RET 0 # # X32ABI: %0:low32_addr_access = IMPLICIT_DEF -# X32ABI-NEXT: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg -# X32ABI-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`) +# X32ABI-NEXT: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg +# X32ABI-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) # X32ABI-NEXT: RET 0 body: | bb.1.entry: - liveins: %rdi + liveins: $rdi %0(p0) = IMPLICIT_DEF %1(p0) = G_GLOBAL_VALUE @g_int @@ -85,30 +85,30 @@ registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } -# X64: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg -# X64-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int) -# X64-NEXT: %eax = COPY %0 -# X64-NEXT: RET 0, implicit %eax +# X64: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg +# X64-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) +# X64-NEXT: $eax = COPY %0 +# X64-NEXT: RET 0, implicit $eax # -# X64_DARWIN_PIC: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg -# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int) -# X64_DARWIN_PIC-NEXT: %eax = COPY %0 -# X64_DARWIN_PIC-NEXT: RET 0, implicit %eax +# X64_DARWIN_PIC: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg +# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) +# X64_DARWIN_PIC-NEXT: $eax = COPY %0 +# X64_DARWIN_PIC-NEXT: RET 0, implicit $eax # -# X32: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg -# X32-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int) -# X32-NEXT: %eax = COPY %0 -# X32-NEXT: RET 0, implicit %eax +# X32: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg +# X32-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) +# X32-NEXT: $eax = COPY %0 +# X32-NEXT: RET 0, implicit $eax # -# X32ABI: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg -# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int) -# X32ABI-NEXT: %eax = COPY %0 -# X32ABI-NEXT: RET 0, implicit %eax +# X32ABI: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg
+# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) +# X32ABI-NEXT: $eax = COPY %0 +# X32ABI-NEXT: RET 0, implicit $eax body: | bb.1.entry: %1(p0) = G_GLOBAL_VALUE @g_int %0(s32) = G_LOAD %1(p0) :: (load 4 from @g_int) - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/select-add-v128.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-add-v128.mir +++ test/CodeGen/X86/GlobalISel/select-add-v128.mir @@ -58,13 +58,13 @@ # AVX512BWVL: %2:vr128x = VPADDBZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<16 x s8>) = COPY %xmm0 - %1(<16 x s8>) = COPY %xmm1 + %0(<16 x s8>) = COPY $xmm0 + %1(<16 x s8>) = COPY $xmm1 %2(<16 x s8>) = G_ADD %0, %1 - %xmm0 = COPY %2(<16 x s8>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<16 x s8>) + RET 0, implicit $xmm0 ... --- @@ -100,13 +100,13 @@ # AVX512BWVL: %2:vr128x = VPADDWZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<8 x s16>) = COPY %xmm0 - %1(<8 x s16>) = COPY %xmm1 + %0(<8 x s16>) = COPY $xmm0 + %1(<8 x s16>) = COPY $xmm1 %2(<8 x s16>) = G_ADD %0, %1 - %xmm0 = COPY %2(<8 x s16>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<8 x s16>) + RET 0, implicit $xmm0 ... --- @@ -142,13 +142,13 @@ # AVX512BWVL: %2:vr128x = VPADDDZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<4 x s32>) = COPY $xmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<4 x s32>) = G_ADD %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<4 x s32>) + RET 0, implicit $xmm0 ... --- @@ -184,12 +184,12 @@ # AVX512BWVL: %2:vr128x = VPADDQZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<2 x s64>) = COPY %xmm0 - %1(<2 x s64>) = COPY %xmm1 + %0(<2 x s64>) = COPY $xmm0 + %1(<2 x s64>) = COPY $xmm1 %2(<2 x s64>) = G_ADD %0, %1 - %xmm0 = COPY %2(<2 x s64>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<2 x s64>) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/select-add-v256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-add-v256.mir +++ test/CodeGen/X86/GlobalISel/select-add-v256.mir @@ -54,13 +54,13 @@ # AVX512BWVL: %2:vr256x = VPADDBZ256rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<32 x s8>) = COPY %ymm0 - %1(<32 x s8>) = COPY %ymm1 + %0(<32 x s8>) = COPY $ymm0 + %1(<32 x s8>) = COPY $ymm1 %2(<32 x s8>) = G_ADD %0, %1 - %ymm0 = COPY %2(<32 x s8>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<32 x s8>) + RET 0, implicit $ymm0 ... --- @@ -94,13 +94,13 @@ # AVX512BWVL: %2:vr256x = VPADDWZ256rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<16 x s16>) = COPY %ymm0 - %1(<16 x s16>) = COPY %ymm1 + %0(<16 x s16>) = COPY $ymm0 + %1(<16 x s16>) = COPY $ymm1 %2(<16 x s16>) = G_ADD %0, %1 - %ymm0 = COPY %2(<16 x s16>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<16 x s16>) + RET 0, implicit $ymm0 ...
--- @@ -134,13 +134,13 @@ # AVX512BWVL: %2:vr256x = VPADDDZ256rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<8 x s32>) = COPY %ymm0 - %1(<8 x s32>) = COPY %ymm1 + %0(<8 x s32>) = COPY $ymm0 + %1(<8 x s32>) = COPY $ymm1 %2(<8 x s32>) = G_ADD %0, %1 - %ymm0 = COPY %2(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<8 x s32>) + RET 0, implicit $ymm0 ... --- @@ -174,12 +174,12 @@ # AVX512BWVL: %2:vr256x = VPADDQZ256rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<4 x s64>) = COPY %ymm0 - %1(<4 x s64>) = COPY %ymm1 + %0(<4 x s64>) = COPY $ymm0 + %1(<4 x s64>) = COPY $ymm1 %2(<4 x s64>) = G_ADD %0, %1 - %ymm0 = COPY %2(<4 x s64>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<4 x s64>) + RET 0, implicit $ymm0 ... Index: test/CodeGen/X86/GlobalISel/select-add-v512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-add-v512.mir +++ test/CodeGen/X86/GlobalISel/select-add-v512.mir @@ -36,19 +36,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v64i8 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1 ; ALL: [[VPADDBZrr:%[0-9]+]]:vr512 = VPADDBZrr [[COPY]], [[COPY1]] - ; ALL: %zmm0 = COPY [[VPADDBZrr]] - ; ALL: RET 0, implicit %zmm0 - %0(<64 x s8>) = COPY %zmm0 - %1(<64 x s8>) = COPY %zmm1 + ; ALL: $zmm0 = COPY [[VPADDBZrr]] + ; ALL: RET 0, implicit $zmm0 + %0(<64 x s8>) = COPY $zmm0 + %1(<64 x s8>) = COPY $zmm1 %2(<64 x s8>) = G_ADD %0, %1 - %zmm0 = COPY %2(<64 x s8>) - RET 0, implicit %zmm0 + $zmm0 = COPY %2(<64 x s8>) + RET 0, implicit $zmm0 ... --- @@ -62,19 +62,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v32i16 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1 ; ALL: [[VPADDWZrr:%[0-9]+]]:vr512 = VPADDWZrr [[COPY]], [[COPY1]] - ; ALL: %zmm0 = COPY [[VPADDWZrr]] - ; ALL: RET 0, implicit %zmm0 - %0(<32 x s16>) = COPY %zmm0 - %1(<32 x s16>) = COPY %zmm1 + ; ALL: $zmm0 = COPY [[VPADDWZrr]] + ; ALL: RET 0, implicit $zmm0 + %0(<32 x s16>) = COPY $zmm0 + %1(<32 x s16>) = COPY $zmm1 %2(<32 x s16>) = G_ADD %0, %1 - %zmm0 = COPY %2(<32 x s16>) - RET 0, implicit %zmm0 + $zmm0 = COPY %2(<32 x s16>) + RET 0, implicit $zmm0 ... --- @@ -88,19 +88,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v16i32 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1 ; ALL: [[VPADDDZrr:%[0-9]+]]:vr512 = VPADDDZrr [[COPY]], [[COPY1]] - ; ALL: %zmm0 = COPY [[VPADDDZrr]] - ; ALL: RET 0, implicit %zmm0 - %0(<16 x s32>) = COPY %zmm0 - %1(<16 x s32>) = COPY %zmm1 + ; ALL: $zmm0 = COPY [[VPADDDZrr]] + ; ALL: RET 0, implicit $zmm0 + %0(<16 x s32>) = COPY $zmm0 + %1(<16 x s32>) = COPY $zmm1 %2(<16 x s32>) = G_ADD %0, %1 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %zmm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $zmm0 ... 
--- @@ -114,18 +114,18 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_add_v8i64 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1 ; ALL: [[VPADDQZrr:%[0-9]+]]:vr512 = VPADDQZrr [[COPY]], [[COPY1]] - ; ALL: %zmm0 = COPY [[VPADDQZrr]] - ; ALL: RET 0, implicit %zmm0 - %0(<8 x s64>) = COPY %zmm0 - %1(<8 x s64>) = COPY %zmm1 + ; ALL: $zmm0 = COPY [[VPADDQZrr]] + ; ALL: RET 0, implicit $zmm0 + %0(<8 x s64>) = COPY $zmm0 + %1(<8 x s64>) = COPY $zmm1 %2(<8 x s64>) = G_ADD %0, %1 - %zmm0 = COPY %2(<8 x s64>) - RET 0, implicit %zmm0 + $zmm0 = COPY %2(<8 x s64>) + RET 0, implicit $zmm0 ... Index: test/CodeGen/X86/GlobalISel/select-add-x32.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-add-x32.mir +++ test/CodeGen/X86/GlobalISel/select-add-x32.mir @@ -30,14 +30,14 @@ ; X32: [[DEF1:%[0-9]+]]:gr32 = IMPLICIT_DEF ; X32: [[DEF2:%[0-9]+]]:gr32 = IMPLICIT_DEF ; X32: [[DEF3:%[0-9]+]]:gr32 = IMPLICIT_DEF - ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def %eflags - ; X32: [[COPY:%[0-9]+]]:gr32 = COPY %eflags - ; X32: %eflags = COPY [[COPY]] - ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def %eflags, implicit %eflags - ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY %eflags - ; X32: %eax = COPY [[ADD32rr]] - ; X32: %edx = COPY [[ADC32rr]] - ; X32: RET 0, implicit %eax, implicit %edx + ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def $eflags + ; X32: [[COPY:%[0-9]+]]:gr32 = COPY $eflags + ; X32: $eflags = COPY [[COPY]] + ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def $eflags, implicit $eflags + ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY $eflags + ; X32: $eax = COPY [[ADD32rr]] + ; X32: $edx = COPY [[ADC32rr]] + ; X32: RET 0, implicit $eax, implicit $edx %0(s32) = IMPLICIT_DEF %1(s32) = IMPLICIT_DEF %2(s32) = IMPLICIT_DEF @@ -46,8 +46,8 @@ %4(s1) = G_TRUNC %9(s8) %5(s32), %6(s1) = G_UADDE %0, %2, %4 %7(s32), %8(s1) = G_UADDE %1, %3, %6 - %eax = COPY %5(s32) - %edx = COPY %7(s32) - RET 0, implicit %eax, implicit %edx + $eax = COPY %5(s32) + $edx = COPY %7(s32) + RET 0, implicit $eax, implicit $edx ... Index: test/CodeGen/X86/GlobalISel/select-add.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-add.mir +++ test/CodeGen/X86/GlobalISel/select-add.mir @@ -44,17 +44,17 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr64 = COPY %rdi -# ALL-NEXT: %1:gr64 = COPY %rsi +# ALL: %0:gr64 = COPY $rdi +# ALL-NEXT: %1:gr64 = COPY $rsi # ALL-NEXT: %2:gr64 = ADD64rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi %2(s64) = G_ADD %0, %1 - %rax = COPY %2(s64) + $rax = COPY %2(s64) ... @@ -67,17 +67,17 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr32 = COPY %edi -# ALL-NEXT: %1:gr32 = COPY %esi +# ALL: %0:gr32 = COPY $edi +# ALL-NEXT: %1:gr32 = COPY $esi # ALL-NEXT: %2:gr32 = ADD32rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi - %0(s32) = COPY %edi - %1(s32) = COPY %esi + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s32) = G_ADD %0, %1 - %eax = COPY %2(s32) + $eax = COPY %2(s32) ... 
--- @@ -91,18 +91,18 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr16 = COPY %di -# ALL: %1:gr16 = COPY %si -# ALL: %2:gr16 = ADD16rr %0, %1, implicit-def %eflags +# ALL: %0:gr16 = COPY $di +# ALL: %1:gr16 = COPY $si +# ALL: %2:gr16 = ADD16rr %0, %1, implicit-def $eflags body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi - %0(s16) = COPY %di - %1(s16) = COPY %si + %0(s16) = COPY $di + %1(s16) = COPY $si %2(s16) = G_ADD %0, %1 - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -116,18 +116,18 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr8 = COPY %dil -# ALL: %1:gr8 = COPY %sil -# ALL: %2:gr8 = ADD8rr %0, %1, implicit-def %eflags +# ALL: %0:gr8 = COPY $dil +# ALL: %1:gr8 = COPY $sil +# ALL: %2:gr8 = ADD8rr %0, %1, implicit-def $eflags body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi - %0(s8) = COPY %dil - %1(s8) = COPY %sil + %0(s8) = COPY $dil + %1(s8) = COPY $sil %2(s8) = G_ADD %0, %1 - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -142,23 +142,23 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# NO_AVX512VL: %0:vr128 = COPY %xmm0 -# NO_AVX512VL: %1:vr128 = COPY %xmm1 +# NO_AVX512VL: %0:vr128 = COPY $xmm0 +# NO_AVX512VL: %1:vr128 = COPY $xmm1 # SSE-NEXT: %2:vr128 = PADDDrr %0, %1 # AVX-NEXT: %2:vr128 = VPADDDrr %0, %1 # AVX512F-NEXT: %2:vr128 = VPADDDrr %0, %1 -# AVX512VL: %0:vr128x = COPY %xmm0 -# AVX512VL: %1:vr128x = COPY %xmm1 +# AVX512VL: %0:vr128x = COPY $xmm0 +# AVX512VL: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: %2:vr128x = VPADDDZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<4 x s32>) = COPY $xmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<4 x s32>) = G_ADD %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<4 x s32>) + RET 0, implicit $xmm0 ... --- @@ -173,26 +173,26 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# SSE: %0:vr128 = COPY %xmm0 -# SSE-NEXT: %1:vr128 = COPY %xmm1 +# SSE: %0:vr128 = COPY $xmm0 +# SSE-NEXT: %1:vr128 = COPY $xmm1 # SSE-NEXT: %2:vr128 = ADDPSrr %0, %1 -# AVX: %0:vr128 = COPY %xmm0 -# AVX-NEXT: %1:vr128 = COPY %xmm1 +# AVX: %0:vr128 = COPY $xmm0 +# AVX-NEXT: %1:vr128 = COPY $xmm1 # AVX-NEXT: %2:vr128 = VADDPSrr %0, %1 -# AVX512F: %0:vr128 = COPY %xmm0 -# AVX512F-NEXT: 1:vr128 = COPY %xmm1 +# AVX512F: %0:vr128 = COPY $xmm0 +# AVX512F-NEXT: %1:vr128 = COPY $xmm1 # AVX512F-NEXT: %2:vr128 = VADDPSrr %0, %1 -# AVX512VL: %0:vr128x = COPY %xmm0 -# AVX512VL-NEXT: %1:vr128x = COPY %xmm1 +# AVX512VL: %0:vr128x = COPY $xmm0 +# AVX512VL-NEXT: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: %2:vr128x = VADDPSZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<4 x s32>) = COPY $xmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<4 x s32>) = G_FADD %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<4 x s32>) + RET 0, implicit $xmm0 ...
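The select-add tests above all start from the same generic G_ADD or G_FADD and check that instruction selection picks the ISA-appropriate opcode (ADD32rr, PADDDrr, VPADDDrr, VADDPSZ128rr, and so on) and register class (vr128 under SSE/AVX, vr128x under AVX512VL). A sketch of the presumed IR source for the v4i32 case, written here for illustration only:

  define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
    %ret = add <4 x i32> %arg1, %arg2   ; one G_ADD, many possible x86 opcodes
    ret <4 x i32> %ret
  }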
Index: test/CodeGen/X86/GlobalISel/select-and-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-and-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-and-scalar.mir @@ -38,19 +38,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; ALL-LABEL: name: test_and_i8 - ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil - ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil - ; ALL: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %al = COPY [[AND8rr]] - ; ALL: RET 0, implicit %al - %0(s8) = COPY %dil - %1(s8) = COPY %sil + ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil + ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil + ; ALL: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $al = COPY [[AND8rr]] + ; ALL: RET 0, implicit $al + %0(s8) = COPY $dil + %1(s8) = COPY $sil %2(s8) = G_AND %0, %1 - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -68,19 +68,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; ALL-LABEL: name: test_and_i16 - ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di - ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si - ; ALL: [[AND16rr:%[0-9]+]]:gr16 = AND16rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %ax = COPY [[AND16rr]] - ; ALL: RET 0, implicit %ax - %0(s16) = COPY %di - %1(s16) = COPY %si + ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di + ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si + ; ALL: [[AND16rr:%[0-9]+]]:gr16 = AND16rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $ax = COPY [[AND16rr]] + ; ALL: RET 0, implicit $ax + %0(s16) = COPY $di + %1(s16) = COPY $si %2(s16) = G_AND %0, %1 - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -98,19 +98,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; ALL-LABEL: name: test_and_i32 - ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; ALL: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %eax = COPY [[AND32rr]] - ; ALL: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $eax = COPY [[AND32rr]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s32) = G_AND %0, %1 - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -128,18 +128,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; ALL-LABEL: name: test_and_i64 - ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; ALL: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %rax = COPY [[AND64rr]] - ; ALL: RET 0, implicit %rax - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; ALL: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $rax = COPY [[AND64rr]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi %2(s64) = G_AND %0, %1 - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... 
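Note that every expected AND in select-and-scalar.mir carries implicit-def $eflags: x86 AND always writes the flags register, so the clobber now also uses the physical-register sigil. Presumed IR shape of these tests (illustrative, not taken from the patch):

  define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
    %ret = and i32 %arg1, %arg2   ; selects to AND32rr, implicit-def $eflags
    ret i32 %ret
  }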
Index: test/CodeGen/X86/GlobalISel/select-blsi.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-blsi.mir +++ test/CodeGen/X86/GlobalISel/select-blsi.mir @@ -21,17 +21,17 @@ # G_SUB and G_AND both use %0 so we should match this. body: | bb.1: - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_blsi32rr - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[BLSI32rr:%[0-9]+]]:gr32 = BLSI32rr [[COPY]], implicit-def %eflags - ; CHECK: %edi = COPY [[BLSI32rr]] - %0(s32) = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[BLSI32rr:%[0-9]+]]:gr32 = BLSI32rr [[COPY]], implicit-def $eflags + ; CHECK: $edi = COPY [[BLSI32rr]] + %0(s32) = COPY $edi %1(s32) = G_CONSTANT i32 0 %2(s32) = G_SUB %1, %0 %3(s32) = G_AND %2, %0 - %edi = COPY %3 + $edi = COPY %3 ... --- @@ -47,17 +47,17 @@ # G_SUB and G_AND use different operands so we shouldn't match this. body: | bb.1: - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_blsi32rr_nomatch - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags - ; CHECK: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def %eflags - ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def %eflags - ; CHECK: %edi = COPY [[AND32rr]] - %0(s32) = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags + ; CHECK: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def $eflags + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def $eflags + ; CHECK: $edi = COPY [[AND32rr]] + %0(s32) = COPY $edi %1(s32) = G_CONSTANT i32 0 %2(s32) = G_SUB %1, %1 %3(s32) = G_AND %2, %0 - %edi = COPY %3 + $edi = COPY %3 ... Index: test/CodeGen/X86/GlobalISel/select-blsr.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-blsr.mir +++ test/CodeGen/X86/GlobalISel/select-blsr.mir @@ -18,17 +18,17 @@ # G_ADD and G_AND both use %0 so we should match this. body: | bb.1: - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_blsr32rr - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[BLSR32rr:%[0-9]+]]:gr32 = BLSR32rr [[COPY]], implicit-def %eflags - ; CHECK: %edi = COPY [[BLSR32rr]] - %0(s32) = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[BLSR32rr:%[0-9]+]]:gr32 = BLSR32rr [[COPY]], implicit-def $eflags + ; CHECK: $edi = COPY [[BLSR32rr]] + %0(s32) = COPY $edi %1(s32) = G_CONSTANT i32 -1 %2(s32) = G_ADD %0, %1 %3(s32) = G_AND %2, %0 - %edi = COPY %3 + $edi = COPY %3 ... --- @@ -44,17 +44,17 @@ # G_ADD and G_AND use different operands so we shouldn't match this. 
body: | bb.1: - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: test_blsr32rr_nomatch - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 4294967295 - ; CHECK: [[DEC32r:%[0-9]+]]:gr32 = DEC32r [[MOV32ri]], implicit-def %eflags - ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[DEC32r]], [[COPY]], implicit-def %eflags - ; CHECK: %edi = COPY [[AND32rr]] - %0(s32) = COPY %edi + ; CHECK: [[DEC32r:%[0-9]+]]:gr32 = DEC32r [[MOV32ri]], implicit-def $eflags + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[DEC32r]], [[COPY]], implicit-def $eflags + ; CHECK: $edi = COPY [[AND32rr]] + %0(s32) = COPY $edi %1(s32) = G_CONSTANT i32 -1 %2(s32) = G_ADD %1, %1 %3(s32) = G_AND %2, %0 - %edi = COPY %3 + $edi = COPY %3 ... Index: test/CodeGen/X86/GlobalISel/select-brcond.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-brcond.mir +++ test/CodeGen/X86/GlobalISel/select-brcond.mir @@ -27,28 +27,28 @@ - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } - { id: 3, class: gpr, preferred-register: '' } -# X64: %0:gr32 = COPY %edi -# X32: %0:gr32_abcd = COPY %edi -# CHECK-NEXT: %2:gr32 = MOV32r0 implicit-def %eflags +# X64: %0:gr32 = COPY $edi +# X32: %0:gr32_abcd = COPY $edi +# CHECK-NEXT: %2:gr32 = MOV32r0 implicit-def $eflags # CHECK-NEXT: %3:gr32 = MOV32ri 1 # CHECK-NEXT: %1:gr8 = COPY %0.sub_8bit -# CHECK-NEXT: TEST8ri %1, 1, implicit-def %eflags -# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit %eflags +# CHECK-NEXT: TEST8ri %1, 1, implicit-def $eflags +# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit $eflags # CHECK-NEXT: JMP_1 %[[FALSE:bb.[0-9]+]] # CHECK: [[TRUE]].{{[a-zA-Z0-9]+}}: -# CHECK-NEXT: %eax = COPY %2 -# CHECK-NEXT: RET 0, implicit %eax +# CHECK-NEXT: $eax = COPY %2 +# CHECK-NEXT: RET 0, implicit $eax # CHECK: [[FALSE]].{{[a-zA-Z0-9]+}}: -# CHECK-NEXT: %eax = COPY %3 -# CHECK-NEXT: RET 0, implicit %eax +# CHECK-NEXT: $eax = COPY %3 +# CHECK-NEXT: RET 0, implicit $eax body: | bb.1.entry: successors: %bb.2(0x40000000), %bb.3(0x40000000) - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %2(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 1 %1(s1) = G_TRUNC %0(s32) @@ -56,11 +56,11 @@ G_BR %bb.3 bb.2.true: - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax bb.3.false: - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
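The comments in select-blsi.mir and select-blsr.mir above give the matching rule: the pattern fires only when the G_SUB (or G_ADD) and the G_AND share the operand %0. The BMI identities behind it are x & -x (BLSI, isolate lowest set bit) and x & (x - 1) (BLSR, reset lowest set bit). A hand-written IR sketch of both, for illustration:

  define i32 @blsi(i32 %x) {
    %neg = sub i32 0, %x
    %r = and i32 %neg, %x   ; x & -x selects to BLSI32rr
    ret i32 %r
  }

  define i32 @blsr(i32 %x) {
    %dec = add i32 %x, -1
    %r = and i32 %dec, %x   ; x & (x - 1) selects to BLSR32rr
    ret i32 %r
  }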
Index: test/CodeGen/X86/GlobalISel/select-cmp.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-cmp.mir +++ test/CodeGen/X86/GlobalISel/select-cmp.mir @@ -93,23 +93,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_eq_i8 - ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY %dil - ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY %sil - ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $dil + ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY $sil + ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s8) = COPY %dil - %1(s8) = COPY %sil + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s8) = COPY $dil + %1(s8) = COPY $sil %2(s1) = G_ICMP intpred(eq), %0(s8), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -124,23 +124,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_eq_i16 - ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY %di - ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY %si - ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY $di + ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY $si + ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s16) = COPY %di - %1(s16) = COPY %si + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s16) = COPY $di + %1(s16) = COPY $si %2(s1) = G_ICMP intpred(eq), %0(s16), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -155,23 +155,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; CHECK-LABEL: name: test_icmp_eq_i64 - ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi %2(s1) = G_ICMP intpred(eq), %0(s64), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -186,23 +186,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_eq_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(eq), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -217,23 +217,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_ne_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETNEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(ne), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -248,23 +248,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_ugt_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(ugt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -279,23 +279,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_uge_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(uge), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -310,23 +310,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_ult_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(ult), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -341,23 +341,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_ule_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(ule), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -372,23 +372,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_sgt_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(sgt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -403,23 +403,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_sge_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(sge), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... --- @@ -434,23 +434,23 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_slt_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(slt), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... 
--- @@ -465,22 +465,22 @@ - { id: 3, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; CHECK-LABEL: name: test_icmp_sle_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit %eflags + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit $eflags ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLEr]], %subreg.sub_8bit - ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; CHECK: %eax = COPY [[AND32ri8_]] - ; CHECK: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s1) = G_ICMP intpred(sle), %0(s32), %1 %3(s32) = G_ZEXT %2(s1) - %eax = COPY %3(s32) - RET 0, implicit %eax + $eax = COPY %3(s32) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/select-constant.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-constant.mir +++ test/CodeGen/X86/GlobalISel/select-constant.mir @@ -47,11 +47,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i8 ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 2 - ; CHECK: %al = COPY [[MOV8ri]] - ; CHECK: RET 0, implicit %al + ; CHECK: $al = COPY [[MOV8ri]] + ; CHECK: RET 0, implicit $al %0(s8) = G_CONSTANT i8 2 - %al = COPY %0(s8) - RET 0, implicit %al + $al = COPY %0(s8) + RET 0, implicit $al ... --- @@ -65,11 +65,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i16 ; CHECK: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 3 - ; CHECK: %ax = COPY [[MOV16ri]] - ; CHECK: RET 0, implicit %ax + ; CHECK: $ax = COPY [[MOV16ri]] + ; CHECK: RET 0, implicit $ax %0(s16) = G_CONSTANT i16 3 - %ax = COPY %0(s16) - RET 0, implicit %ax + $ax = COPY %0(s16) + RET 0, implicit $ax ... --- @@ -83,11 +83,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i32 ; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 4 - ; CHECK: %eax = COPY [[MOV32ri]] - ; CHECK: RET 0, implicit %eax + ; CHECK: $eax = COPY [[MOV32ri]] + ; CHECK: RET 0, implicit $eax %0(s32) = G_CONSTANT i32 4 - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... --- @@ -99,12 +99,12 @@ body: | bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i32_0 - ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags - ; CHECK: %eax = COPY [[MOV32r0_]] - ; CHECK: RET 0, implicit %eax + ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags + ; CHECK: $eax = COPY [[MOV32r0_]] + ; CHECK: RET 0, implicit $eax %0(s32) = G_CONSTANT i32 0 - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... --- @@ -118,11 +118,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i64 ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 68719476720 - ; CHECK: %rax = COPY [[MOV64ri]] - ; CHECK: RET 0, implicit %rax + ; CHECK: $rax = COPY [[MOV64ri]] + ; CHECK: RET 0, implicit $rax %0(s64) = G_CONSTANT i64 68719476720 - %rax = COPY %0(s64) - RET 0, implicit %rax + $rax = COPY %0(s64) + RET 0, implicit $rax ... 
--- @@ -137,11 +137,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i64_u32 ; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1879048192 - ; CHECK: %rax = COPY [[MOV64ri32_]] - ; CHECK: RET 0, implicit %rax + ; CHECK: $rax = COPY [[MOV64ri32_]] + ; CHECK: RET 0, implicit $rax %0(s64) = G_CONSTANT i64 1879048192 - %rax = COPY %0(s64) - RET 0, implicit %rax + $rax = COPY %0(s64) + RET 0, implicit $rax ... --- @@ -155,11 +155,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i64_i32 ; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 -1 - ; CHECK: %rax = COPY [[MOV64ri32_]] - ; CHECK: RET 0, implicit %rax + ; CHECK: $rax = COPY [[MOV64ri32_]] + ; CHECK: RET 0, implicit $rax %0(s64) = G_CONSTANT i64 -1 - %rax = COPY %0(s64) - RET 0, implicit %rax + $rax = COPY %0(s64) + RET 0, implicit $rax ... --- @@ -172,14 +172,14 @@ - { id: 1, class: gpr, preferred-register: '' } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; CHECK-LABEL: name: main - ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 0 - ; CHECK: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[MOV64ri32_]] :: (store 8 into %ir.data) + ; CHECK: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[MOV64ri32_]] :: (store 8 into %ir.data) ; CHECK: RET 0 - %0(p0) = COPY %rdi + %0(p0) = COPY $rdi %1(p0) = G_CONSTANT i64 0 G_STORE %1(p0), %0(p0) :: (store 8 into %ir.data) RET 0 Index: test/CodeGen/X86/GlobalISel/select-copy.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-copy.mir +++ test/CodeGen/X86/GlobalISel/select-copy.mir @@ -40,18 +40,18 @@ registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } -# ALL %0:gr8 = COPY %al +# ALL %0:gr8 = COPY $al # ALL-NEXT %1:gr32 = MOVZX32rr8 %0 -# ALL-NEXT %eax = COPY %1 -# ALL-NEXT RET 0, implicit %eax +# ALL-NEXT $eax = COPY %1 +# ALL-NEXT RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %eax + liveins: $eax - %0(s8) = COPY %al + %0(s8) = COPY $al %1(s32) = G_ZEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -66,18 +66,18 @@ registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } -# ALL: %0:gr8 = COPY %al +# ALL: %0:gr8 = COPY $al # ALL-NEXT: %1:gr32 = MOVZX32rr8 %0 -# ALL-NEXT: %eax = COPY %1 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %1 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %eax + liveins: $eax - %0(s8) = COPY %al + %0(s8) = COPY $al %1(s32) = G_ZEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -94,20 +94,20 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr16 = COPY %ax +# ALL %0:gr16 = COPY $ax # ALL-NEXT %1:gr8 = COPY %0.sub_8bit # ALL-NEXT %2:gr32 = MOVZX32rr8 %1 -# ALL-NEXT %eax = COPY %2 -# ALL-NEXT RET 0, implicit %eax +# ALL-NEXT $eax = COPY %2 +# ALL-NEXT RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %eax + liveins: $eax - %0(s16) = COPY %ax + %0(s16) = COPY $ax %1(s8) = G_TRUNC %0(s16) %2(s32) = G_ZEXT %1(s8) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... 
--- @@ -124,20 +124,20 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr32 = COPY %eax +# ALL %0:gr32 = COPY $eax # ALL-NEXT %1:gr16 = COPY %0.sub_16bit # ALL-NEXT %2:gr32 = MOVZX32rr16 %1 -# ALL-NEXT %eax = COPY %2 -# ALL-NEXT RET 0, implicit %eax +# ALL-NEXT $eax = COPY %2 +# ALL-NEXT RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %eax + liveins: $eax - %0(s32) = COPY %eax + %0(s32) = COPY $eax %1(s16) = G_TRUNC %0(s32) %2(s32) = G_ZEXT %1(s16) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -154,20 +154,20 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr32[[ABCD]] = COPY %edx +# ALL %0:gr32[[ABCD]] = COPY $edx # ALL-NEXT %1:gr8 = COPY %0.sub_8bit # ALL-NEXT %2:gr32 = MOVZX32rr8 %1 -# ALL-NEXT %eax = COPY %2 -# ALL-NEXT RET 0, implicit %eax +# ALL-NEXT $eax = COPY %2 +# ALL-NEXT RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %eax,%edx + liveins: $eax,$edx - %0(s32) = COPY %edx + %0(s32) = COPY $edx %1(s8) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s8) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -184,20 +184,20 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr32 = COPY %edx +# ALL %0:gr32 = COPY $edx # ALL-NEXT %1:gr16 = COPY %0.sub_16bit # ALL-NEXT %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_16bit -# ALL-NEXT %eax = COPY %2 -# ALL-NEXT RET 0, implicit %eax +# ALL-NEXT $eax = COPY %2 +# ALL-NEXT RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %eax,%edx + liveins: $eax,$edx - %0(s32) = COPY %edx + %0(s32) = COPY $edx %1(s16) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s16) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir +++ test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir @@ -34,19 +34,19 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: test_zext_i1 - ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil + ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_8bit - ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags - ; ALL: %rax = COPY [[AND64ri8_]] - ; ALL: RET 0, implicit %rax - %0(s8) = COPY %dil + ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags + ; ALL: $rax = COPY [[AND64ri8_]] + ; ALL: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) %2(s64) = G_ZEXT %1(s1) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... 
--- @@ -59,17 +59,17 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: test_sext_i8 - ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil + ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil ; ALL: [[MOVSX64rr8_:%[0-9]+]]:gr64 = MOVSX64rr8 [[COPY]] - ; ALL: %rax = COPY [[MOVSX64rr8_]] - ; ALL: RET 0, implicit %rax - %0(s8) = COPY %dil + ; ALL: $rax = COPY [[MOVSX64rr8_]] + ; ALL: RET 0, implicit $rax + %0(s8) = COPY $dil %1(s64) = G_SEXT %0(s8) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -82,17 +82,17 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: test_sext_i16 - ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di + ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di ; ALL: [[MOVSX64rr16_:%[0-9]+]]:gr64 = MOVSX64rr16 [[COPY]] - ; ALL: %rax = COPY [[MOVSX64rr16_]] - ; ALL: RET 0, implicit %rax - %0(s16) = COPY %di + ; ALL: $rax = COPY [[MOVSX64rr16_]] + ; ALL: RET 0, implicit $rax + %0(s16) = COPY $di %1(s64) = G_SEXT %0(s16) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... --- @@ -106,19 +106,19 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: anyext_s64_from_s1 - ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi + ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit - ; ALL: %rax = COPY [[SUBREG_TO_REG]] - ; ALL: RET 0, implicit %rax - %0(s64) = COPY %rdi + ; ALL: $rax = COPY [[SUBREG_TO_REG]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi %1(s1) = G_TRUNC %0(s64) %2(s64) = G_ANYEXT %1(s1) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... --- name: anyext_s64_from_s8 @@ -131,19 +131,19 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: anyext_s64_from_s8 - ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi + ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit - ; ALL: %rax = COPY [[SUBREG_TO_REG]] - ; ALL: RET 0, implicit %rax - %0(s64) = COPY %rdi + ; ALL: $rax = COPY [[SUBREG_TO_REG]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi %1(s8) = G_TRUNC %0(s64) %2(s64) = G_ANYEXT %1(s8) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... --- name: anyext_s64_from_s16 @@ -156,19 +156,19 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: anyext_s64_from_s16 - ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_16bit - ; ALL: %rax = COPY [[SUBREG_TO_REG]] - ; ALL: RET 0, implicit %rax - %0(s64) = COPY %rdi + ; ALL: $rax = COPY [[SUBREG_TO_REG]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi %1(s16) = G_TRUNC %0(s64) %2(s64) = G_ANYEXT %1(s16) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... 
--- name: anyext_s64_from_s32 @@ -181,17 +181,17 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: anyext_s64_from_s32 - ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_32bit - ; ALL: %rax = COPY [[SUBREG_TO_REG]] - ; ALL: RET 0, implicit %rax - %0(s64) = COPY %rdi + ; ALL: $rax = COPY [[SUBREG_TO_REG]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi %1(s32) = G_TRUNC %0(s64) %2(s64) = G_ANYEXT %1(s32) - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... Index: test/CodeGen/X86/GlobalISel/select-ext.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-ext.mir +++ test/CodeGen/X86/GlobalISel/select-ext.mir @@ -64,21 +64,21 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL_NEXT: %1:gr8 = COPY %0.sub_8bit -# ALL_NEXT: %2:gr8 = AND8ri %1, 1, implicit-def %eflags -# ALL_NEXT: %al = COPY %2 -# ALL_NEXT: RET 0, implicit %al +# ALL_NEXT: %2:gr8 = AND8ri %1, 1, implicit-def $eflags +# ALL_NEXT: $al = COPY %2 +# ALL_NEXT: RET 0, implicit $al body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s8) = G_ZEXT %1(s1) - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -102,22 +102,22 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL_NEXT: %1:gr8 = COPY %0.sub_8bit # ALL_NEXT: %3:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit -# ALL_NEXT: %2:gr16 = AND16ri8 %3, 1, implicit-def %eflags -# ALL_NEXT: %ax = COPY %2 -# ALL_NEXT: RET 0, implicit %ax +# ALL_NEXT: %2:gr16 = AND16ri8 %3, 1, implicit-def $eflags +# ALL_NEXT: $ax = COPY %2 +# ALL_NEXT: RET 0, implicit $ax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s16) = G_ZEXT %1(s1) - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -141,22 +141,22 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL_NEXT: %1:gr8 = COPY %0.sub_8bit # ALL_NEXT: %3:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit -# ALL_NEXT: %2:gr32 = AND32ri8 %3, 1, implicit-def %eflags -# ALL_NEXT: %eax = COPY %2 -# ALL_NEXT: RET 0, implicit %eax +# ALL_NEXT: %2:gr32 = AND32ri8 %3, 1, implicit-def $eflags +# ALL_NEXT: $eax = COPY %2 +# ALL_NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s32) = G_ZEXT %1(s1) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... 
--- @@ -171,18 +171,18 @@ registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# ALL: %0:gr8 = COPY %dil +# ALL: %0:gr8 = COPY $dil # ALL-NEXT: %1:gr32 = MOVZX32rr8 %0 -# ALL-NEXT: %eax = COPY %1 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %1 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s8) = COPY %dil + %0(s8) = COPY $dil %1(s32) = G_ZEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -197,18 +197,18 @@ registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# ALL: %0:gr16 = COPY %di +# ALL: %0:gr16 = COPY $di # ALL-NEXT: %1:gr32 = MOVZX32rr16 %0 -# ALL-NEXT: %eax = COPY %1 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %1 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s16) = COPY %di + %0(s16) = COPY $di %1(s32) = G_ZEXT %0(s16) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -223,18 +223,18 @@ registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# ALL: %0:gr8 = COPY %dil +# ALL: %0:gr8 = COPY $dil # ALL-NEXT: %1:gr32 = MOVSX32rr8 %0 -# ALL-NEXT: %eax = COPY %1 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %1 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s8) = COPY %dil + %0(s8) = COPY $dil %1(s32) = G_SEXT %0(s8) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -249,18 +249,18 @@ registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# ALL: %0:gr16 = COPY %di +# ALL: %0:gr16 = COPY $di # ALL-NEXT: %1:gr32 = MOVSX32rr16 %0 -# ALL-NEXT: %eax = COPY %1 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %1 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s16) = COPY %di + %0(s16) = COPY $di %1(s32) = G_SEXT %0(s16) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... --- @@ -282,20 +282,20 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL-NEXT: %1:gr8 = COPY %0.sub_8bit -# ALL-NEXT: %al = COPY %1 -# ALL-NEXT: RET 0, implicit %al +# ALL-NEXT: $al = COPY %1 +# ALL-NEXT: RET 0, implicit $al body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s8) = G_ANYEXT %1(s1) - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- name: test_anyext_i1toi16 @@ -316,21 +316,21 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL-NEXT: %1:gr8 = COPY %0.sub_8bit # ALL-NEXT: %2:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit -# ALL-NEXT: %ax = COPY %2 -# ALL-NEXT: RET 0, implicit %ax +# ALL-NEXT: $ax = COPY %2 +# ALL-NEXT: RET 0, implicit $ax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s16) = G_ANYEXT %1(s1) - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... 
--- name: test_anyext_i1toi32 @@ -351,21 +351,21 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL-NEXT: %1:gr8 = COPY %0.sub_8bit # ALL-NEXT: %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit -# ALL-NEXT: %eax = COPY %2 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %2 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s1) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- name: test_anyext_i8toi16 @@ -386,21 +386,21 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL-NEXT: %1:gr8 = COPY %0.sub_8bit # ALL-NEXT: %2:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit -# ALL-NEXT: %ax = COPY %2 -# ALL-NEXT: RET 0, implicit %ax +# ALL-NEXT: $ax = COPY %2 +# ALL-NEXT: RET 0, implicit $ax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s8) = G_TRUNC %0(s32) %2(s16) = G_ANYEXT %1(s8) - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- name: test_anyext_i8toi32 @@ -421,21 +421,21 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# X32: %0:gr32_abcd = COPY %edi -# X64: %0:gr32 = COPY %edi +# X32: %0:gr32_abcd = COPY $edi +# X64: %0:gr32 = COPY $edi # ALL-NEXT: %1:gr8 = COPY %0.sub_8bit # ALL-NEXT: %2:gr32 = MOVZX32rr8 %1 -# ALL-NEXT: %eax = COPY %2 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %2 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s8) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s8) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- name: test_anyext_i16toi32 @@ -451,18 +451,18 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr32 = COPY %edi +# ALL: %0:gr32 = COPY $edi # ALL-NEXT: %1:gr16 = COPY %0.sub_16bit # ALL-NEXT: %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_16bit -# ALL-NEXT: %eax = COPY %2 -# ALL-NEXT: RET 0, implicit %eax +# ALL-NEXT: $eax = COPY %2 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi - %0(s32) = COPY %edi + %0(s32) = COPY $edi %1(s16) = G_TRUNC %0(s32) %2(s32) = G_ANYEXT %1(s16) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... 
Index: test/CodeGen/X86/GlobalISel/select-extract-vec256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-extract-vec256.mir +++ test/CodeGen/X86/GlobalISel/select-extract-vec256.mir @@ -27,20 +27,20 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } -# AVX: %0:vr256 = COPY %ymm1 +# AVX: %0:vr256 = COPY $ymm1 # AVX-NEXT: %1:vr128 = COPY %0.sub_xmm -# AVX512VL: %0:vr256x = COPY %ymm1 +# AVX512VL: %0:vr256x = COPY $ymm1 # AVX512VL-NEXT: %1:vr128x = COPY %0.sub_xmm -# ALL-NEXT: %xmm0 = COPY %1 -# ALL-NEXT: RET 0, implicit %xmm0 +# ALL-NEXT: $xmm0 = COPY %1 +# ALL-NEXT: RET 0, implicit $xmm0 body: | bb.1 (%ir-block.0): - liveins: %ymm1 + liveins: $ymm1 - %0(<8 x s32>) = COPY %ymm1 + %0(<8 x s32>) = COPY $ymm1 %1(<4 x s32>) = G_EXTRACT %0(<8 x s32>), 0 - %xmm0 = COPY %1(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(<4 x s32>) + RET 0, implicit $xmm0 ... --- @@ -59,22 +59,22 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } -# AVX: %0:vr256 = COPY %ymm1 +# AVX: %0:vr256 = COPY $ymm1 # AVX-NEXT: %1:vr128 = VEXTRACTF128rr %0, 1 -# AVX-NEXT: %xmm0 = COPY %1 -# AVX-NEXT: RET 0, implicit %xmm0 +# AVX-NEXT: $xmm0 = COPY %1 +# AVX-NEXT: RET 0, implicit $xmm0 # -# AVX512VL: %0:vr256x = COPY %ymm1 +# AVX512VL: %0:vr256x = COPY $ymm1 # AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rr %0, 1 -# AVX512VL-NEXT: %xmm0 = COPY %1 -# AVX512VL-NEXT: RET 0, implicit %xmm0 +# AVX512VL-NEXT: $xmm0 = COPY %1 +# AVX512VL-NEXT: RET 0, implicit $xmm0 body: | bb.1 (%ir-block.0): - liveins: %ymm1 + liveins: $ymm1 - %0(<8 x s32>) = COPY %ymm1 + %0(<8 x s32>) = COPY $ymm1 %1(<4 x s32>) = G_EXTRACT %0(<8 x s32>), 128 - %xmm0 = COPY %1(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(<4 x s32>) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/select-extract-vec512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-extract-vec512.mir +++ test/CodeGen/X86/GlobalISel/select-extract-vec512.mir @@ -32,18 +32,18 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } -# ALL: %0:vr512 = COPY %zmm1 +# ALL: %0:vr512 = COPY $zmm1 # ALL-NEXT: %1:vr128x = COPY %0.sub_xmm -# ALL-NEXT: %xmm0 = COPY %1 -# ALL-NEXT: RET 0, implicit %xmm0 +# ALL-NEXT: $xmm0 = COPY %1 +# ALL-NEXT: RET 0, implicit $xmm0 body: | bb.1 (%ir-block.0): - liveins: %zmm1 + liveins: $zmm1 - %0(<16 x s32>) = COPY %zmm1 + %0(<16 x s32>) = COPY $zmm1 %1(<4 x s32>) = G_EXTRACT %0(<16 x s32>), 0 - %xmm0 = COPY %1(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(<4 x s32>) + RET 0, implicit $xmm0 ... --- @@ -58,18 +58,18 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } -# ALL: %0:vr512 = COPY %zmm1 +# ALL: %0:vr512 = COPY $zmm1 # ALL-NEXT: %1:vr128x = VEXTRACTF32x4Zrr %0, 1 -# ALL-NEXT: %xmm0 = COPY %1 -# ALL-NEXT: RET 0, implicit %xmm0 +# ALL-NEXT: $xmm0 = COPY %1 +# ALL-NEXT: RET 0, implicit $xmm0 body: | bb.1 (%ir-block.0): - liveins: %zmm1 + liveins: $zmm1 - %0(<16 x s32>) = COPY %zmm1 + %0(<16 x s32>) = COPY $zmm1 %1(<4 x s32>) = G_EXTRACT %0(<16 x s32>), 128 - %xmm0 = COPY %1(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(<4 x s32>) + RET 0, implicit $xmm0 ... 
--- @@ -84,18 +84,18 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } -# ALL: %0:vr512 = COPY %zmm1 +# ALL: %0:vr512 = COPY $zmm1 # ALL-NEXT: %1:vr256x = COPY %0.sub_ymm -# ALL-NEXT: %ymm0 = COPY %1 -# ALL-NEXT: RET 0, implicit %ymm0 +# ALL-NEXT: $ymm0 = COPY %1 +# ALL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %zmm1 + liveins: $zmm1 - %0(<16 x s32>) = COPY %zmm1 + %0(<16 x s32>) = COPY $zmm1 %1(<8 x s32>) = G_EXTRACT %0(<16 x s32>), 0 - %ymm0 = COPY %1(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %1(<8 x s32>) + RET 0, implicit $ymm0 ... --- @@ -110,17 +110,17 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } -# ALL: %0:vr512 = COPY %zmm1 +# ALL: %0:vr512 = COPY $zmm1 # ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrr %0, 1 -# ALL-NEXT: %ymm0 = COPY %1 -# ALL-NEXT: RET 0, implicit %ymm0 +# ALL-NEXT: $ymm0 = COPY %1 +# ALL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %zmm1 + liveins: $zmm1 - %0(<16 x s32>) = COPY %zmm1 + %0(<16 x s32>) = COPY $zmm1 %1(<8 x s32>) = G_EXTRACT %0(<16 x s32>), 256 - %ymm0 = COPY %1(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %1(<8 x s32>) + RET 0, implicit $ymm0 ... Index: test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir @@ -34,37 +34,37 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fadd_float - ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[ADDSSrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[ADDSSrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fadd_float - ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VADDSSrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VADDSSrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fadd_float - ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VADDSSZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VADDSSZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fadd_float - ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VADDSSZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VADDSSZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FADD %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... 
--- @@ -85,36 +85,36 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fadd_double - ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[ADDSDrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[ADDSDrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fadd_double - ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VADDSDrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VADDSDrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fadd_double - ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VADDSDZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VADDSDZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fadd_double - ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VADDSDZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VADDSDZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FADD %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... 
Index: test/CodeGen/X86/GlobalISel/select-fconstant.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-fconstant.mir +++ test/CodeGen/X86/GlobalISel/select-fconstant.mir @@ -29,29 +29,29 @@ body: | bb.1.entry: ; CHECK_NOPIC64-LABEL: name: test_float - ; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg - ; CHECK_NOPIC64: %xmm0 = COPY [[MOVSSrm]] - ; CHECK_NOPIC64: RET 0, implicit %xmm0 + ; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg + ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSSrm]] + ; CHECK_NOPIC64: RET 0, implicit $xmm0 ; CHECK_LARGE64-LABEL: name: test_float ; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0 - ; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 32) - ; CHECK_LARGE64: %xmm0 = COPY [[MOVSSrm]] - ; CHECK_LARGE64: RET 0, implicit %xmm0 + ; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 32) + ; CHECK_LARGE64: $xmm0 = COPY [[MOVSSrm]] + ; CHECK_LARGE64: RET 0, implicit $xmm0 ; CHECK_SMALL32-LABEL: name: test_float - ; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg - ; CHECK_SMALL32: %xmm0 = COPY [[MOVSSrm]] - ; CHECK_SMALL32: RET 0, implicit %xmm0 + ; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg + ; CHECK_SMALL32: $xmm0 = COPY [[MOVSSrm]] + ; CHECK_SMALL32: RET 0, implicit $xmm0 ; CHECK_LARGE32-LABEL: name: test_float - ; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg - ; CHECK_LARGE32: %xmm0 = COPY [[MOVSSrm]] - ; CHECK_LARGE32: RET 0, implicit %xmm0 + ; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg + ; CHECK_LARGE32: $xmm0 = COPY [[MOVSSrm]] + ; CHECK_LARGE32: RET 0, implicit $xmm0 ; CHECK_PIC64-LABEL: name: test_float - ; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg - ; CHECK_PIC64: %xmm0 = COPY [[MOVSSrm]] - ; CHECK_PIC64: RET 0, implicit %xmm0 + ; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg + ; CHECK_PIC64: $xmm0 = COPY [[MOVSSrm]] + ; CHECK_PIC64: RET 0, implicit $xmm0 %0(s32) = G_FCONSTANT float 5.500000e+00 - %xmm0 = COPY %0(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %0(s32) + RET 0, implicit $xmm0 ... 
--- @@ -70,28 +70,28 @@ body: | bb.1.entry: ; CHECK_NOPIC64-LABEL: name: test_double - ; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg - ; CHECK_NOPIC64: %xmm0 = COPY [[MOVSDrm]] - ; CHECK_NOPIC64: RET 0, implicit %xmm0 + ; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg + ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSDrm]] + ; CHECK_NOPIC64: RET 0, implicit $xmm0 ; CHECK_LARGE64-LABEL: name: test_double ; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0 - ; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 64) - ; CHECK_LARGE64: %xmm0 = COPY [[MOVSDrm]] - ; CHECK_LARGE64: RET 0, implicit %xmm0 + ; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 64) + ; CHECK_LARGE64: $xmm0 = COPY [[MOVSDrm]] + ; CHECK_LARGE64: RET 0, implicit $xmm0 ; CHECK_SMALL32-LABEL: name: test_double - ; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg - ; CHECK_SMALL32: %xmm0 = COPY [[MOVSDrm]] - ; CHECK_SMALL32: RET 0, implicit %xmm0 + ; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg + ; CHECK_SMALL32: $xmm0 = COPY [[MOVSDrm]] + ; CHECK_SMALL32: RET 0, implicit $xmm0 ; CHECK_LARGE32-LABEL: name: test_double - ; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg - ; CHECK_LARGE32: %xmm0 = COPY [[MOVSDrm]] - ; CHECK_LARGE32: RET 0, implicit %xmm0 + ; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg + ; CHECK_LARGE32: $xmm0 = COPY [[MOVSDrm]] + ; CHECK_LARGE32: RET 0, implicit $xmm0 ; CHECK_PIC64-LABEL: name: test_double - ; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg - ; CHECK_PIC64: %xmm0 = COPY [[MOVSDrm]] - ; CHECK_PIC64: RET 0, implicit %xmm0 + ; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg + ; CHECK_PIC64: $xmm0 = COPY [[MOVSDrm]] + ; CHECK_PIC64: RET 0, implicit $xmm0 %0(s64) = G_FCONSTANT double 5.500000e+00 - %xmm0 = COPY %0(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %0(s64) + RET 0, implicit $xmm0 ... 
Index: test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir @@ -34,37 +34,37 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fdiv_float - ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[DIVSSrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[DIVSSrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fdiv_float - ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VDIVSSrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VDIVSSrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fdiv_float - ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VDIVSSZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VDIVSSZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fdiv_float - ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VDIVSSZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VDIVSSZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FDIV %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... 
--- @@ -85,36 +85,36 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fdiv_double - ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[DIVSDrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[DIVSDrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fdiv_double - ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VDIVSDrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VDIVSDrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fdiv_double - ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VDIVSDZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VDIVSDZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fdiv_double - ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VDIVSDZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VDIVSDZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FDIV %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... 
Index: test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir @@ -34,37 +34,37 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fmul_float - ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[MULSSrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[MULSSrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fmul_float - ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VMULSSrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VMULSSrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fmul_float - ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VMULSSZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VMULSSZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fmul_float - ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VMULSSZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VMULSSZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FMUL %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... 
--- @@ -85,36 +85,36 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fmul_double - ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[MULSDrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[MULSDrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fmul_double - ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VMULSDrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VMULSDrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fmul_double - ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VMULSDZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VMULSDZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fmul_double - ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VMULSDZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VMULSDZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FMUL %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir @@ -23,16 +23,16 @@ constants: body: | bb.1.entry: - liveins: %xmm0 + liveins: $xmm0 ; ALL-LABEL: name: test - ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 + ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 ; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY]] - ; ALL: %xmm0 = COPY [[CVTSS2SDrr]] - ; ALL: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 + ; ALL: $xmm0 = COPY [[CVTSS2SDrr]] + ; ALL: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 %1(s64) = G_FPEXT %0(s32) - %xmm0 = COPY %1(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s64) + RET 0, implicit $xmm0 ... 
Index: test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir @@ -34,37 +34,37 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fsub_float - ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[SUBSSrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[SUBSSrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fsub_float - ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1 ; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VSUBSSrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VSUBSSrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fsub_float - ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VSUBSSZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VSUBSSZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fsub_float - ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1 ; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VSUBSSZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s32) = COPY %xmm0 - %1(s32) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VSUBSSZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s32) = COPY $xmm0 + %1(s32) = COPY $xmm1 %2(s32) = G_FSUB %0, %1 - %xmm0 = COPY %2(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s32) + RET 0, implicit $xmm0 ... 
--- @@ -85,36 +85,36 @@ # body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 ; SSE-LABEL: name: test_fsub_double - ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY]], [[COPY1]] - ; SSE: %xmm0 = COPY [[SUBSDrr]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: $xmm0 = COPY [[SUBSDrr]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_fsub_double - ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1 + ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1 ; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY]], [[COPY1]] - ; AVX: %xmm0 = COPY [[VSUBSDrr]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: $xmm0 = COPY [[VSUBSDrr]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_fsub_double - ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]] - ; AVX512F: %xmm0 = COPY [[VSUBSDZrr]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: $xmm0 = COPY [[VSUBSDZrr]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_fsub_double - ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1 + ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1 ; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]] - ; AVX512VL: %xmm0 = COPY [[VSUBSDZrr]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(s64) = COPY %xmm0 - %1(s64) = COPY %xmm1 + ; AVX512VL: $xmm0 = COPY [[VSUBSDZrr]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(s64) = COPY $xmm0 + %1(s64) = COPY $xmm1 %2(s64) = G_FSUB %0, %1 - %xmm0 = COPY %2(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(s64) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/select-gep.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-gep.mir +++ test/CodeGen/X86/GlobalISel/select-gep.mir @@ -19,18 +19,18 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; CHECK-LABEL: name: test_gep_i32 - ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64_nosp = MOV64ri32 20 - ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, %noreg - ; CHECK: %rax = COPY [[LEA64r]] - ; CHECK: RET 0, implicit %rax - %0(p0) = COPY %rdi + ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, $noreg + ; CHECK: $rax = COPY [[LEA64r]] + ; CHECK: RET 0, implicit $rax + %0(p0) = COPY $rdi %1(s64) = G_CONSTANT i64 20 %2(p0) = G_GEP %0, %1(s64) - %rax = COPY %2(p0) - RET 0, implicit %rax + $rax = COPY %2(p0) + RET 0, implicit $rax ... 
Index: test/CodeGen/X86/GlobalISel/select-inc.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-inc.mir +++ test/CodeGen/X86/GlobalISel/select-inc.mir @@ -21,16 +21,16 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr8 = COPY %al +# ALL: %0:gr8 = COPY $al # INC-NEXT: %2:gr8 = INC8r %0 # ADD-NEXT: %2:gr8 = ADD8ri %0, 1 body: | bb.1 (%ir-block.0): - liveins: %al + liveins: $al - %0(s8) = COPY %al + %0(s8) = COPY $al %1(s8) = G_CONSTANT i8 1 %2(s8) = G_ADD %0, %1 - %al = COPY %2(s8) + $al = COPY %2(s8) ... Index: test/CodeGen/X86/GlobalISel/select-insert-vec256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-insert-vec256.mir +++ test/CodeGen/X86/GlobalISel/select-insert-vec256.mir @@ -28,26 +28,26 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# AVX: %0:vr256 = COPY %ymm0 -# AVX-NEXT: %1:vr128 = COPY %xmm1 +# AVX: %0:vr256 = COPY $ymm0 +# AVX-NEXT: %1:vr128 = COPY $xmm1 # AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 0 -# AVX-NEXT: %ymm0 = COPY %2 -# AVX-NEXT: RET 0, implicit %ymm0 +# AVX-NEXT: $ymm0 = COPY %2 +# AVX-NEXT: RET 0, implicit $ymm0 # -# AVX512VL: %0:vr256x = COPY %ymm0 -# AVX512VL-NEXT: %1:vr128x = COPY %xmm1 +# AVX512VL: %0:vr256x = COPY $ymm0 +# AVX512VL-NEXT: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 0 -# AVX512VL-NEXT: %ymm0 = COPY %2 -# AVX512VL-NEXT: RET 0, implicit %ymm0 +# AVX512VL-NEXT: $ymm0 = COPY %2 +# AVX512VL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<8 x s32>) = COPY %ymm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<8 x s32>) = COPY $ymm0 + %1(<4 x s32>) = COPY $xmm1 %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0 - %ymm0 = COPY %2(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<8 x s32>) + RET 0, implicit $ymm0 ... --- @@ -60,24 +60,24 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# AVX: %1:vr128 = COPY %xmm1 +# AVX: %1:vr128 = COPY $xmm1 # AVX-NEXT: undef %2.sub_xmm:vr256 = COPY %1 -# AVX-NEXT: %ymm0 = COPY %2 -# AVX-NEXT: RET 0, implicit %ymm0 +# AVX-NEXT: $ymm0 = COPY %2 +# AVX-NEXT: RET 0, implicit $ymm0 # -# AVX512VL: %1:vr128x = COPY %xmm1 +# AVX512VL: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: undef %2.sub_xmm:vr256x = COPY %1 -# AVX512VL-NEXT: %ymm0 = COPY %2 -# AVX512VL-NEXT: RET 0, implicit %ymm0 +# AVX512VL-NEXT: $ymm0 = COPY %2 +# AVX512VL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 %0(<8 x s32>) = IMPLICIT_DEF - %1(<4 x s32>) = COPY %xmm1 + %1(<4 x s32>) = COPY $xmm1 %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0 - %ymm0 = COPY %2(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<8 x s32>) + RET 0, implicit $ymm0 ... 
--- @@ -90,26 +90,26 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# AVX: %0:vr256 = COPY %ymm0 -# AVX-NEXT: %1:vr128 = COPY %xmm1 +# AVX: %0:vr256 = COPY $ymm0 +# AVX-NEXT: %1:vr128 = COPY $xmm1 # AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1 -# AVX-NEXT: %ymm0 = COPY %2 -# AVX-NEXT: RET 0, implicit %ymm0 +# AVX-NEXT: $ymm0 = COPY %2 +# AVX-NEXT: RET 0, implicit $ymm0 # -# AVX512VL: %0:vr256x = COPY %ymm0 -# AVX512VL-NEXT: %1:vr128x = COPY %xmm1 +# AVX512VL: %0:vr256x = COPY $ymm0 +# AVX512VL-NEXT: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1 -# AVX512VL-NEXT: %ymm0 = COPY %2 -# AVX512VL-NEXT: RET 0, implicit %ymm0 +# AVX512VL-NEXT: $ymm0 = COPY %2 +# AVX512VL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 - %0(<8 x s32>) = COPY %ymm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<8 x s32>) = COPY $ymm0 + %1(<4 x s32>) = COPY $xmm1 %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128 - %ymm0 = COPY %2(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<8 x s32>) + RET 0, implicit $ymm0 ... --- name: test_insert_128_idx1_undef @@ -122,23 +122,23 @@ - { id: 1, class: vecr } - { id: 2, class: vecr } # AVX: %0:vr256 = IMPLICIT_DEF -# AVX-NEXT: %1:vr128 = COPY %xmm1 +# AVX-NEXT: %1:vr128 = COPY $xmm1 # AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1 -# AVX-NEXT: %ymm0 = COPY %2 -# AVX-NEXT: RET 0, implicit %ymm0 +# AVX-NEXT: $ymm0 = COPY %2 +# AVX-NEXT: RET 0, implicit $ymm0 # # AVX512VL: %0:vr256x = IMPLICIT_DEF -# AVX512VL-NEXT: %1:vr128x = COPY %xmm1 +# AVX512VL-NEXT: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1 -# AVX512VL-NEXT: %ymm0 = COPY %2 -# AVX512VL-NEXT: RET 0, implicit %ymm0 +# AVX512VL-NEXT: $ymm0 = COPY %2 +# AVX512VL-NEXT: RET 0, implicit $ymm0 body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 %0(<8 x s32>) = IMPLICIT_DEF - %1(<4 x s32>) = COPY %xmm1 + %1(<4 x s32>) = COPY $xmm1 %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128 - %ymm0 = COPY %2(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %2(<8 x s32>) + RET 0, implicit $ymm0 ... Index: test/CodeGen/X86/GlobalISel/select-insert-vec512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-insert-vec512.mir +++ test/CodeGen/X86/GlobalISel/select-insert-vec512.mir @@ -46,19 +46,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %ymm1 + liveins: $zmm0, $ymm1 ; ALL-LABEL: name: test_insert_128_idx0 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1 ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 0 - ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]] - ; ALL: RET 0, implicit %ymm0 - %0(<16 x s32>) = COPY %zmm0 - %1(<4 x s32>) = COPY %xmm1 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]] + ; ALL: RET 0, implicit $ymm0 + %0(<16 x s32>) = COPY $zmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... 
--- @@ -72,18 +72,18 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_insert_128_idx0_undef - ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY %xmm1 + ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1 ; ALL: undef %2.sub_xmm:vr512 = COPY [[COPY]] - ; ALL: %zmm0 = COPY %2 - ; ALL: RET 0, implicit %ymm0 + ; ALL: $zmm0 = COPY %2 + ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF - %1(<4 x s32>) = COPY %xmm1 + %1(<4 x s32>) = COPY $xmm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... --- @@ -97,19 +97,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_insert_128_idx1 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1 ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 1 - ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]] - ; ALL: RET 0, implicit %ymm0 - %0(<16 x s32>) = COPY %zmm0 - %1(<4 x s32>) = COPY %xmm1 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]] + ; ALL: RET 0, implicit $ymm0 + %0(<16 x s32>) = COPY $zmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... --- name: test_insert_128_idx1_undef @@ -122,19 +122,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_insert_128_idx1_undef ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF - ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY %xmm1 + ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1 ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[DEF]], [[COPY]], 1 - ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]] - ; ALL: RET 0, implicit %ymm0 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]] + ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF - %1(<4 x s32>) = COPY %xmm1 + %1(<4 x s32>) = COPY $xmm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... --- name: test_insert_256_idx0 @@ -147,19 +147,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %ymm1 + liveins: $zmm0, $ymm1 ; ALL-LABEL: name: test_insert_256_idx0 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1 ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 0 - ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]] - ; ALL: RET 0, implicit %ymm0 - %0(<16 x s32>) = COPY %zmm0 - %1(<8 x s32>) = COPY %ymm1 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: RET 0, implicit $ymm0 + %0(<16 x s32>) = COPY $zmm0 + %1(<8 x s32>) = COPY $ymm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... 
--- @@ -173,18 +173,18 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_insert_256_idx0_undef - ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY %ymm1 + ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1 ; ALL: undef %2.sub_ymm:vr512 = COPY [[COPY]] - ; ALL: %zmm0 = COPY %2 - ; ALL: RET 0, implicit %ymm0 + ; ALL: $zmm0 = COPY %2 + ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF - %1(<8 x s32>) = COPY %ymm1 + %1(<8 x s32>) = COPY $ymm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... --- @@ -198,19 +198,19 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_insert_256_idx1 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1 ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 1 - ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]] - ; ALL: RET 0, implicit %ymm0 - %0(<16 x s32>) = COPY %zmm0 - %1(<8 x s32>) = COPY %ymm1 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: RET 0, implicit $ymm0 + %0(<16 x s32>) = COPY $zmm0 + %1(<8 x s32>) = COPY $ymm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... --- name: test_insert_256_idx1_undef @@ -223,17 +223,17 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %ymm0, %ymm1 + liveins: $ymm0, $ymm1 ; ALL-LABEL: name: test_insert_256_idx1_undef ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF - ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY %ymm1 + ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1 ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[DEF]], [[COPY]], 1 - ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]] - ; ALL: RET 0, implicit %ymm0 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF - %1(<8 x s32>) = COPY %ymm1 + %1(<8 x s32>) = COPY $ymm1 %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256 - %zmm0 = COPY %2(<16 x s32>) - RET 0, implicit %ymm0 + $zmm0 = COPY %2(<16 x s32>) + RET 0, implicit $ymm0 ... Index: test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir +++ test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir @@ -9,8 +9,8 @@ define void @read_flags() { ret void } ; CHECK-LABEL: name: read_flags ; CHECK: bb.0: - ; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def %esp, implicit %esp - ; CHECK: %eax = COPY [[RDFLAGS32_]] + ; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def $esp, implicit $esp + ; CHECK: $eax = COPY [[RDFLAGS32_]] ... --- @@ -24,5 +24,5 @@ body: | bb.0: %0(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.x86.flags.read.u32) - %eax = COPY %0(s32) + $eax = COPY %0(s32) ... 
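A quick spot-check of the same rule against the RDFLAGS32 hunk just above: the FileCheck capture pattern [[RDFLAGS32_:%[0-9]+]] must come through untouched while both %esp operands flip to $esp. Again a hypothetical sketch, reusing the regex from the earlier note:

import re

PHYSREG = re.compile(r"%([a-z][a-z0-9]*)\b(?![.-])")

# '%[' never matches (a register name must start with a lowercase
# letter), so the capture pattern survives; both %esp operands flip.
old = "; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def %esp, implicit %esp"
new = "; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def $esp, implicit $esp"
assert PHYSREG.sub(r"$\1", old) == new
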
Index: test/CodeGen/X86/GlobalISel/select-leaf-constant.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-leaf-constant.mir +++ test/CodeGen/X86/GlobalISel/select-leaf-constant.mir @@ -34,11 +34,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i32_1 ; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1 - ; CHECK: %eax = COPY [[MOV32ri]] - ; CHECK: RET 0, implicit %eax + ; CHECK: $eax = COPY [[MOV32ri]] + ; CHECK: RET 0, implicit $eax %0(s32) = G_CONSTANT i32 1 - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... --- name: const_i32_1_optsize @@ -50,12 +50,12 @@ body: | bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i32_1_optsize - ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def %eflags - ; CHECK: %eax = COPY [[MOV32r1_]] - ; CHECK: RET 0, implicit %eax + ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def $eflags + ; CHECK: $eax = COPY [[MOV32r1_]] + ; CHECK: RET 0, implicit $eax %0(s32) = G_CONSTANT i32 1 - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... --- name: const_i32_1b @@ -68,11 +68,11 @@ bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i32_1b ; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1 - ; CHECK: %eax = COPY [[MOV32ri]] - ; CHECK: RET 0, implicit %eax + ; CHECK: $eax = COPY [[MOV32ri]] + ; CHECK: RET 0, implicit $eax %0(s32) = G_CONSTANT i32 1 - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... --- name: const_i32_1_optsizeb @@ -84,10 +84,10 @@ body: | bb.1 (%ir-block.0): ; CHECK-LABEL: name: const_i32_1_optsizeb - ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def %eflags - ; CHECK: %eax = COPY [[MOV32r1_]] - ; CHECK: RET 0, implicit %eax + ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def $eflags + ; CHECK: $eax = COPY [[MOV32r1_]] + ; CHECK: RET 0, implicit $eax %0(s32) = G_CONSTANT i32 1 - %eax = COPY %0(s32) - RET 0, implicit %eax + $eax = COPY %0(s32) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir +++ test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir @@ -57,15 +57,15 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_load_i8 - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0) - ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1) - ; ALL: %al = COPY [[MOV8rm]] - ; ALL: RET 0, implicit %al + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0) + ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1) + ; ALL: $al = COPY [[MOV8rm]] + ; ALL: RET 0, implicit $al %1(p0) = G_FRAME_INDEX %fixed-stack.0 %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0) %2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1) - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... 
--- @@ -82,15 +82,15 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_load_i16 - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0) - ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1) - ; ALL: %ax = COPY [[MOV16rm]] - ; ALL: RET 0, implicit %ax + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0) + ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1) + ; ALL: $ax = COPY [[MOV16rm]] + ; ALL: RET 0, implicit $ax %1(p0) = G_FRAME_INDEX %fixed-stack.0 %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0) %2(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1) - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -107,15 +107,15 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_load_i32 - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0) - ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; ALL: %eax = COPY [[MOV32rm1]] - ; ALL: RET 0, implicit %eax + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0) + ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; ALL: $eax = COPY [[MOV32rm1]] + ; ALL: RET 0, implicit $eax %1(p0) = G_FRAME_INDEX %fixed-stack.0 %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0) %2(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1) - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -134,18 +134,18 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_store_i8 - ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 1 from %fixed-stack.0, align 0) - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0) - ; ALL: MOV8mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV8rm]] :: (store 1 into %ir.p1) - ; ALL: %eax = COPY [[MOV32rm]] - ; ALL: RET 0, implicit %eax + ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0) + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0) + ; ALL: MOV8mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV8rm]] :: (store 1 into %ir.p1) + ; ALL: $eax = COPY [[MOV32rm]] + ; ALL: RET 0, implicit $eax %2(p0) = G_FRAME_INDEX %fixed-stack.1 %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0) %3(p0) = G_FRAME_INDEX %fixed-stack.0 %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0) G_STORE %0(s8), %1(p0) :: (store 1 into %ir.p1) - %eax = COPY %1(p0) - RET 0, implicit %eax + $eax = COPY %1(p0) + RET 0, implicit $eax ... 
--- @@ -164,18 +164,18 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_store_i16 - ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 2 from %fixed-stack.0, align 0) - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0) - ; ALL: MOV16mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV16rm]] :: (store 2 into %ir.p1) - ; ALL: %eax = COPY [[MOV32rm]] - ; ALL: RET 0, implicit %eax + ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0) + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0) + ; ALL: MOV16mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV16rm]] :: (store 2 into %ir.p1) + ; ALL: $eax = COPY [[MOV32rm]] + ; ALL: RET 0, implicit $eax %2(p0) = G_FRAME_INDEX %fixed-stack.1 %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0) %3(p0) = G_FRAME_INDEX %fixed-stack.0 %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0) G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p1) - %eax = COPY %1(p0) - RET 0, implicit %eax + $eax = COPY %1(p0) + RET 0, implicit $eax ... --- @@ -194,18 +194,18 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_store_i32 - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0) - ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0) - ; ALL: MOV32mr [[MOV32rm1]], 1, %noreg, 0, %noreg, [[MOV32rm]] :: (store 4 into %ir.p1) - ; ALL: %eax = COPY [[MOV32rm1]] - ; ALL: RET 0, implicit %eax + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0) + ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0) + ; ALL: MOV32mr [[MOV32rm1]], 1, $noreg, 0, $noreg, [[MOV32rm]] :: (store 4 into %ir.p1) + ; ALL: $eax = COPY [[MOV32rm1]] + ; ALL: RET 0, implicit $eax %2(p0) = G_FRAME_INDEX %fixed-stack.1 %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0) %3(p0) = G_FRAME_INDEX %fixed-stack.0 %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0) G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1) - %eax = COPY %1(p0) - RET 0, implicit %eax + $eax = COPY %1(p0) + RET 0, implicit $eax ... --- @@ -222,15 +222,15 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_load_ptr - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0) - ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr1) - ; ALL: %eax = COPY [[MOV32rm1]] - ; ALL: RET 0, implicit %eax + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0) + ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr1) + ; ALL: $eax = COPY [[MOV32rm1]] + ; ALL: RET 0, implicit $eax %1(p0) = G_FRAME_INDEX %fixed-stack.0 %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0) %2(p0) = G_LOAD %0(p0) :: (load 4 from %ir.ptr1) - %eax = COPY %2(p0) - RET 0, implicit %eax + $eax = COPY %2(p0) + RET 0, implicit $eax ... 
--- @@ -249,9 +249,9 @@ body: | bb.1 (%ir-block.0): ; ALL-LABEL: name: test_store_ptr - ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0) - ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0) - ; ALL: MOV32mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1) + ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0) + ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0) + ; ALL: MOV32mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1) ; ALL: RET 0 %2(p0) = G_FRAME_INDEX %fixed-stack.1 %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0) Index: test/CodeGen/X86/GlobalISel/select-memop-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-memop-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-memop-scalar.mir @@ -109,32 +109,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_i8 - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1) - ; SSE: %al = COPY [[MOV8rm]] - ; SSE: RET 0, implicit %al + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1) + ; SSE: $al = COPY [[MOV8rm]] + ; SSE: RET 0, implicit $al ; AVX-LABEL: name: test_load_i8 - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1) - ; AVX: %al = COPY [[MOV8rm]] - ; AVX: RET 0, implicit %al + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1) + ; AVX: $al = COPY [[MOV8rm]] + ; AVX: RET 0, implicit $al ; AVX512F-LABEL: name: test_load_i8 - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1) - ; AVX512F: %al = COPY [[MOV8rm]] - ; AVX512F: RET 0, implicit %al + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1) + ; AVX512F: $al = COPY [[MOV8rm]] + ; AVX512F: RET 0, implicit $al ; AVX512VL-LABEL: name: test_load_i8 - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1) - ; AVX512VL: %al = COPY [[MOV8rm]] - ; AVX512VL: RET 0, implicit %al - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1) + ; AVX512VL: $al = COPY [[MOV8rm]] + ; AVX512VL: RET 0, implicit $al + %0(p0) = COPY $rdi %1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1) - %al = COPY %1(s8) - RET 0, implicit %al + $al = COPY %1(s8) + RET 0, implicit $al ... 
--- @@ -147,32 +147,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_i16 - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1) - ; SSE: %ax = COPY [[MOV16rm]] - ; SSE: RET 0, implicit %ax + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1) + ; SSE: $ax = COPY [[MOV16rm]] + ; SSE: RET 0, implicit $ax ; AVX-LABEL: name: test_load_i16 - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1) - ; AVX: %ax = COPY [[MOV16rm]] - ; AVX: RET 0, implicit %ax + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1) + ; AVX: $ax = COPY [[MOV16rm]] + ; AVX: RET 0, implicit $ax ; AVX512F-LABEL: name: test_load_i16 - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1) - ; AVX512F: %ax = COPY [[MOV16rm]] - ; AVX512F: RET 0, implicit %ax + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1) + ; AVX512F: $ax = COPY [[MOV16rm]] + ; AVX512F: RET 0, implicit $ax ; AVX512VL-LABEL: name: test_load_i16 - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1) - ; AVX512VL: %ax = COPY [[MOV16rm]] - ; AVX512VL: RET 0, implicit %ax - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1) + ; AVX512VL: $ax = COPY [[MOV16rm]] + ; AVX512VL: RET 0, implicit $ax + %0(p0) = COPY $rdi %1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... 
--- @@ -185,32 +185,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_i32 - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; SSE: %eax = COPY [[MOV32rm]] - ; SSE: RET 0, implicit %eax + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; SSE: $eax = COPY [[MOV32rm]] + ; SSE: RET 0, implicit $eax ; AVX-LABEL: name: test_load_i32 - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX: %eax = COPY [[MOV32rm]] - ; AVX: RET 0, implicit %eax + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX: $eax = COPY [[MOV32rm]] + ; AVX: RET 0, implicit $eax ; AVX512F-LABEL: name: test_load_i32 - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX512F: %eax = COPY [[MOV32rm]] - ; AVX512F: RET 0, implicit %eax + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX512F: $eax = COPY [[MOV32rm]] + ; AVX512F: RET 0, implicit $eax ; AVX512VL-LABEL: name: test_load_i32 - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX512VL: %eax = COPY [[MOV32rm]] - ; AVX512VL: RET 0, implicit %eax - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX512VL: $eax = COPY [[MOV32rm]] + ; AVX512VL: RET 0, implicit $eax + %0(p0) = COPY $rdi %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... 
--- @@ -223,32 +223,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_i64 - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; SSE: %rax = COPY [[MOV64rm]] - ; SSE: RET 0, implicit %rax + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; SSE: $rax = COPY [[MOV64rm]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_load_i64 - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX: %rax = COPY [[MOV64rm]] - ; AVX: RET 0, implicit %rax + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX: $rax = COPY [[MOV64rm]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_load_i64 - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX512F: %rax = COPY [[MOV64rm]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX512F: $rax = COPY [[MOV64rm]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_load_i64 - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX512VL: %rax = COPY [[MOV64rm]] - ; AVX512VL: RET 0, implicit %rax - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX512VL: $rax = COPY [[MOV64rm]] + ; AVX512VL: RET 0, implicit $rax + %0(p0) = COPY $rdi %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1) - %rax = COPY %1(s64) - RET 0, implicit %rax + $rax = COPY %1(s64) + RET 0, implicit $rax ... 
--- @@ -261,32 +261,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_float - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; SSE: %xmm0 = COPY [[MOV32rm]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; SSE: $xmm0 = COPY [[MOV32rm]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_load_float - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX: %xmm0 = COPY [[MOV32rm]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX: $xmm0 = COPY [[MOV32rm]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_load_float - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX512F: %xmm0 = COPY [[MOV32rm]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX512F: $xmm0 = COPY [[MOV32rm]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_load_float - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX512VL: %xmm0 = COPY [[MOV32rm]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX512VL: $xmm0 = COPY [[MOV32rm]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(p0) = COPY $rdi %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1) - %xmm0 = COPY %1(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s32) + RET 0, implicit $xmm0 ... 
--- @@ -299,32 +299,32 @@ - { id: 1, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_float_vecreg - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; SSE: %xmm0 = COPY [[MOVSSrm]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; SSE: $xmm0 = COPY [[MOVSSrm]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_load_float_vecreg - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX: %xmm0 = COPY [[VMOVSSrm]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX: $xmm0 = COPY [[VMOVSSrm]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_load_float_vecreg - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX512F: %xmm0 = COPY [[VMOVSSZrm]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX512F: $xmm0 = COPY [[VMOVSSZrm]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_load_float_vecreg - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1) - ; AVX512VL: %xmm0 = COPY [[VMOVSSZrm]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1) + ; AVX512VL: $xmm0 = COPY [[VMOVSSZrm]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(p0) = COPY $rdi %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1) - %xmm0 = COPY %1(s32) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s32) + RET 0, implicit $xmm0 ... 
--- @@ -337,32 +337,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_double - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; SSE: %xmm0 = COPY [[MOV64rm]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; SSE: $xmm0 = COPY [[MOV64rm]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_load_double - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX: %xmm0 = COPY [[MOV64rm]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX: $xmm0 = COPY [[MOV64rm]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_load_double - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX512F: %xmm0 = COPY [[MOV64rm]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX512F: $xmm0 = COPY [[MOV64rm]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_load_double - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX512VL: %xmm0 = COPY [[MOV64rm]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX512VL: $xmm0 = COPY [[MOV64rm]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(p0) = COPY $rdi %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1) - %xmm0 = COPY %1(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s64) + RET 0, implicit $xmm0 ... 
--- @@ -375,32 +375,32 @@ - { id: 1, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_double_vecreg - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; SSE: %xmm0 = COPY [[MOVSDrm]] - ; SSE: RET 0, implicit %xmm0 + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; SSE: $xmm0 = COPY [[MOVSDrm]] + ; SSE: RET 0, implicit $xmm0 ; AVX-LABEL: name: test_load_double_vecreg - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX: %xmm0 = COPY [[VMOVSDrm]] - ; AVX: RET 0, implicit %xmm0 + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX: $xmm0 = COPY [[VMOVSDrm]] + ; AVX: RET 0, implicit $xmm0 ; AVX512F-LABEL: name: test_load_double_vecreg - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX512F: %xmm0 = COPY [[VMOVSDZrm]] - ; AVX512F: RET 0, implicit %xmm0 + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX512F: $xmm0 = COPY [[VMOVSDZrm]] + ; AVX512F: RET 0, implicit $xmm0 ; AVX512VL-LABEL: name: test_load_double_vecreg - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1) - ; AVX512VL: %xmm0 = COPY [[VMOVSDZrm]] - ; AVX512VL: RET 0, implicit %xmm0 - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1) + ; AVX512VL: $xmm0 = COPY [[VMOVSDZrm]] + ; AVX512VL: RET 0, implicit $xmm0 + %0(p0) = COPY $rdi %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1) - %xmm0 = COPY %1(s64) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(s64) + RET 0, implicit $xmm0 ... 
--- @@ -413,37 +413,37 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi, %rsi + liveins: $edi, $rsi ; SSE-LABEL: name: test_store_i32 - ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; SSE: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; SSE: %rax = COPY [[COPY1]] - ; SSE: RET 0, implicit %rax + ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; SSE: $rax = COPY [[COPY1]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_store_i32 - ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; AVX: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; AVX: %rax = COPY [[COPY1]] - ; AVX: RET 0, implicit %rax + ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; AVX: $rax = COPY [[COPY1]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_store_i32 - ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; AVX512F: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; AVX512F: %rax = COPY [[COPY1]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; AVX512F: $rax = COPY [[COPY1]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_store_i32 - ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; AVX512VL: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; AVX512VL: %rax = COPY [[COPY1]] - ; AVX512VL: RET 0, implicit %rax - %0(s32) = COPY %edi - %1(p0) = COPY %rsi + ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; AVX512VL: $rax = COPY [[COPY1]] + ; AVX512VL: RET 0, implicit $rax + %0(s32) = COPY $edi + %1(p0) = COPY $rsi G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -456,37 +456,37 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; SSE-LABEL: name: test_store_i64 - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; SSE: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; SSE: %rax = COPY [[COPY1]] - ; SSE: RET 0, implicit %rax + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; SSE: $rax = COPY [[COPY1]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_store_i64 - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; AVX: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; AVX: %rax = COPY [[COPY1]] - ; AVX: RET 0, implicit %rax + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; AVX: $rax = COPY [[COPY1]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_store_i64 - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; AVX512F: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; AVX512F: %rax = COPY [[COPY1]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; AVX512F: $rax = COPY [[COPY1]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_store_i64 - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; AVX512VL: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; AVX512VL: %rax = COPY [[COPY1]] - ; AVX512VL: RET 0, implicit %rax - %0(s64) = COPY %rdi - %1(p0) = COPY %rsi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; AVX512VL: $rax = COPY [[COPY1]] + ; AVX512VL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(p0) = COPY $rsi G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -500,42 +500,42 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi, %xmm0 + liveins: $rdi, $xmm0 ; SSE-LABEL: name: test_store_float - ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; SSE: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]] - ; SSE: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1) - ; SSE: %rax = COPY [[COPY1]] - ; SSE: RET 0, implicit %rax + ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1) + ; SSE: $rax = COPY [[COPY1]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_store_float - ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; AVX: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]] - ; AVX: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1) - ; AVX: %rax = COPY [[COPY1]] - ; AVX: RET 0, implicit %rax + ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1) + ; AVX: $rax = COPY [[COPY1]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_store_float - ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; AVX512F: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]] - ; AVX512F: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1) - ; AVX512F: %rax = COPY [[COPY1]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1) + ; AVX512F: $rax = COPY [[COPY1]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_store_float - ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; AVX512VL: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]] - ; AVX512VL: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1) - ; AVX512VL: %rax = COPY [[COPY1]] - ; AVX512VL: RET 0, implicit %rax - %0(s32) = COPY %xmm0 - %1(p0) = COPY %rdi + ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1) + ; AVX512VL: $rax = COPY [[COPY1]] + ; AVX512VL: RET 0, implicit $rax + %0(s32) = COPY $xmm0 + %1(p0) = COPY $rdi %2(s32) = COPY %0(s32) G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -548,37 +548,37 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi, %xmm0 + liveins: $rdi, $xmm0 ; SSE-LABEL: name: test_store_float_vec - ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: MOVSSmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; SSE: %rax = COPY [[COPY1]] - ; SSE: RET 0, implicit %rax + ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: MOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; SSE: $rax = COPY [[COPY1]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_store_float_vec - ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: VMOVSSmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; AVX: %rax = COPY [[COPY1]] - ; AVX: RET 0, implicit %rax + ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: VMOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; AVX: $rax = COPY [[COPY1]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_store_float_vec - ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: VMOVSSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; AVX512F: %rax = COPY [[COPY1]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; AVX512F: $rax = COPY [[COPY1]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_store_float_vec - ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: VMOVSSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1) - ; AVX512VL: %rax = COPY [[COPY1]] - ; AVX512VL: RET 0, implicit %rax - %0(s32) = COPY %xmm0 - %1(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1) + ; AVX512VL: $rax = COPY [[COPY1]] + ; AVX512VL: RET 0, implicit $rax + %0(s32) = COPY $xmm0 + %1(p0) = COPY $rdi G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -590,45 +590,45 @@ - { id: 0, class: vecr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# NO_AVX512X: %0:fr64 = COPY %xmm0 +# NO_AVX512X: %0:fr64 = COPY $xmm0 body: | bb.1 (%ir-block.0): - liveins: %rdi, %xmm0 + liveins: $rdi, $xmm0 ; SSE-LABEL: name: test_store_double - ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]] - ; SSE: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1) - ; SSE: %rax = COPY [[COPY1]] - ; SSE: RET 0, implicit %rax + ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1) + ; SSE: $rax = COPY [[COPY1]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_store_double - ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]] - ; AVX: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1) - ; AVX: %rax = COPY [[COPY1]] - ; AVX: RET 0, implicit %rax + ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1) + ; AVX: $rax = COPY [[COPY1]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_store_double - ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]] - ; AVX512F: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1) - ; AVX512F: %rax = COPY [[COPY1]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1) + ; AVX512F: $rax = COPY [[COPY1]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_store_double - ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]] - ; AVX512VL: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1) - ; AVX512VL: %rax = COPY [[COPY1]] - ; AVX512VL: RET 0, implicit %rax - %0(s64) = COPY %xmm0 - %1(p0) = COPY %rdi + ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1) + ; AVX512VL: $rax = COPY [[COPY1]] + ; AVX512VL: RET 0, implicit $rax + %0(s64) = COPY $xmm0 + %1(p0) = COPY $rdi %2(s64) = COPY %0(s64) G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -641,37 +641,37 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi, %xmm0 + liveins: $rdi, $xmm0 ; SSE-LABEL: name: test_store_double_vec - ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: MOVSDmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; SSE: %rax = COPY [[COPY1]] - ; SSE: RET 0, implicit %rax + ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: MOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; SSE: $rax = COPY [[COPY1]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_store_double_vec - ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0 - ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: VMOVSDmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; AVX: %rax = COPY [[COPY1]] - ; AVX: RET 0, implicit %rax + ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0 + ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: VMOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; AVX: $rax = COPY [[COPY1]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_store_double_vec - ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: VMOVSDZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; AVX512F: %rax = COPY [[COPY1]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; AVX512F: $rax = COPY [[COPY1]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_store_double_vec - ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0 - ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: VMOVSDZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1) - ; AVX512VL: %rax = COPY [[COPY1]] - ; AVX512VL: RET 0, implicit %rax - %0(s64) = COPY %xmm0 - %1(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0 + ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1) + ; AVX512VL: $rax = COPY [[COPY1]] + ; AVX512VL: RET 0, implicit $rax + %0(s64) = COPY $xmm0 + %1(p0) = COPY $rdi G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
--- @@ -685,32 +685,32 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; SSE-LABEL: name: test_load_ptr - ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1) - ; SSE: %rax = COPY [[MOV64rm]] - ; SSE: RET 0, implicit %rax + ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1) + ; SSE: $rax = COPY [[MOV64rm]] + ; SSE: RET 0, implicit $rax ; AVX-LABEL: name: test_load_ptr - ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1) - ; AVX: %rax = COPY [[MOV64rm]] - ; AVX: RET 0, implicit %rax + ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1) + ; AVX: $rax = COPY [[MOV64rm]] + ; AVX: RET 0, implicit $rax ; AVX512F-LABEL: name: test_load_ptr - ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1) - ; AVX512F: %rax = COPY [[MOV64rm]] - ; AVX512F: RET 0, implicit %rax + ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1) + ; AVX512F: $rax = COPY [[MOV64rm]] + ; AVX512F: RET 0, implicit $rax ; AVX512VL-LABEL: name: test_load_ptr - ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1) - ; AVX512VL: %rax = COPY [[MOV64rm]] - ; AVX512VL: RET 0, implicit %rax - %0(p0) = COPY %rdi + ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1) + ; AVX512VL: $rax = COPY [[MOV64rm]] + ; AVX512VL: RET 0, implicit $rax + %0(p0) = COPY $rdi %1(p0) = G_LOAD %0(p0) :: (load 8 from %ir.ptr1) - %rax = COPY %1(p0) - RET 0, implicit %rax + $rax = COPY %1(p0) + RET 0, implicit $rax ... 
 ---
@@ -724,30 +724,30 @@
   - { id: 1, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi
     ; SSE-LABEL: name: test_store_ptr
-    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-    ; SSE: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+    ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
     ; SSE: RET 0
     ; AVX-LABEL: name: test_store_ptr
-    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-    ; AVX: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+    ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
     ; AVX: RET 0
     ; AVX512F-LABEL: name: test_store_ptr
-    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-    ; AVX512F: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+    ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
     ; AVX512F: RET 0
     ; AVX512VL-LABEL: name: test_store_ptr
-    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-    ; AVX512VL: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+    ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
     ; AVX512VL: RET 0
-    %0(p0) = COPY %rdi
-    %1(p0) = COPY %rsi
+    %0(p0) = COPY $rdi
+    %1(p0) = COPY $rsi
     G_STORE %1(p0), %0(p0) :: (store 8 into %ir.ptr1)
     RET 0
@@ -765,44 +765,44 @@
   - { id: 4, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %esi, %rdi
+    liveins: $esi, $rdi
     ; SSE-LABEL: name: test_gep_folding
-    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-    ; SSE: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
-    ; SSE: %eax = COPY [[MOV32rm]]
-    ; SSE: RET 0, implicit %eax
+    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+    ; SSE: $eax = COPY [[MOV32rm]]
+    ; SSE: RET 0, implicit $eax
     ; AVX-LABEL: name: test_gep_folding
-    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-    ; AVX: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
-    ; AVX: %eax = COPY [[MOV32rm]]
-    ; AVX: RET 0, implicit %eax
+    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+    ; AVX: $eax = COPY [[MOV32rm]]
+    ; AVX: RET 0, implicit $eax
     ; AVX512F-LABEL: name: test_gep_folding
-    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-    ; AVX512F: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
-    ; AVX512F: %eax = COPY [[MOV32rm]]
-    ; AVX512F: RET 0, implicit %eax
+    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+    ; AVX512F: $eax = COPY [[MOV32rm]]
+    ; AVX512F: RET 0, implicit $eax
     ; AVX512VL-LABEL: name: test_gep_folding
-    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-    ; AVX512VL: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
-    ; AVX512VL: %eax = COPY [[MOV32rm]]
-    ; AVX512VL: RET 0, implicit %eax
-    %0(p0) = COPY %rdi
-    %1(s32) = COPY %esi
+    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+    ; AVX512VL: $eax = COPY [[MOV32rm]]
+    ; AVX512VL: RET 0, implicit $eax
+    %0(p0) = COPY $rdi
+    %1(s32) = COPY $esi
     %2(s64) = G_CONSTANT i64 20
     %3(p0) = G_GEP %0, %2(s64)
     G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
     %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
-    %eax = COPY %4(s32)
-    RET 0, implicit %eax
+    $eax = COPY %4(s32)
+    RET 0, implicit $eax
 ...
 ---
@@ -818,51 +818,51 @@
   - { id: 4, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %esi, %rdi
+    liveins: $esi, $rdi
     ; SSE-LABEL: name: test_gep_folding_largeGepIndex
-    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
     ; SSE: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
-    ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
-    ; SSE: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
-    ; SSE: %eax = COPY [[MOV32rm]]
-    ; SSE: RET 0, implicit %eax
+    ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+    ; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+    ; SSE: $eax = COPY [[MOV32rm]]
+    ; SSE: RET 0, implicit $eax
     ; AVX-LABEL: name: test_gep_folding_largeGepIndex
-    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
     ; AVX: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
-    ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
-    ; AVX: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
-    ; AVX: %eax = COPY [[MOV32rm]]
-    ; AVX: RET 0, implicit %eax
+    ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+    ; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+    ; AVX: $eax = COPY [[MOV32rm]]
+    ; AVX: RET 0, implicit $eax
     ; AVX512F-LABEL: name: test_gep_folding_largeGepIndex
-    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
     ; AVX512F: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
-    ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
-    ; AVX512F: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
-    ; AVX512F: %eax = COPY [[MOV32rm]]
-    ; AVX512F: RET 0, implicit %eax
+    ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+    ; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+    ; AVX512F: $eax = COPY [[MOV32rm]]
+    ; AVX512F: RET 0, implicit $eax
     ; AVX512VL-LABEL: name: test_gep_folding_largeGepIndex
-    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
     ; AVX512VL: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
-    ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
-    ; AVX512VL: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
-    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
-    ; AVX512VL: %eax = COPY [[MOV32rm]]
-    ; AVX512VL: RET 0, implicit %eax
-    %0(p0) = COPY %rdi
-    %1(s32) = COPY %esi
+    ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+    ; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+    ; AVX512VL: $eax = COPY [[MOV32rm]]
+    ; AVX512VL: RET 0, implicit $eax
+    %0(p0) = COPY $rdi
+    %1(s32) = COPY $esi
     %2(s64) = G_CONSTANT i64 228719476720
     %3(p0) = G_GEP %0, %2(s64)
     G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
     %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
-    %eax = COPY %4(s32)
-    RET 0, implicit %eax
+    $eax = COPY %4(s32)
+    RET 0, implicit $eax
 ...
Index: test/CodeGen/X86/GlobalISel/select-memop-v128.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-memop-v128.mir
+++ test/CodeGen/X86/GlobalISel/select-memop-v128.mir
@@ -34,20 +34,20 @@
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vecr }
-# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX: %1:vr128 = VMOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# ALL: %xmm0 = COPY %1
+# ALL: %0:gr64 = COPY $rdi
+# SSE: %1:vr128 = MOVUPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX: %1:vr128 = VMOVUPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# ALL: $xmm0 = COPY %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi
-    %0(p0) = COPY %rdi
+    %0(p0) = COPY $rdi
     %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
-    %xmm0 = COPY %1(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %1(<4 x s32>)
+    RET 0, implicit $xmm0
 ...
@@ -59,20 +59,20 @@
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vecr }
-# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX: %1:vr128 = VMOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# ALL: %xmm0 = COPY %1
+# ALL: %0:gr64 = COPY $rdi
+# SSE: %1:vr128 = MOVAPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX: %1:vr128 = VMOVAPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# ALL: $xmm0 = COPY %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi
-    %0(p0) = COPY %rdi
+    %0(p0) = COPY $rdi
     %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1)
-    %xmm0 = COPY %1(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %1(<4 x s32>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -84,23 +84,23 @@
 registers:
   - { id: 0, class: vecr }
   - { id: 1, class: gpr }
-# NO_AVX512F: %0:vr128 = COPY %xmm0
-# AVX512ALL: %0:vr128x = COPY %xmm0
-# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX: VMOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX512VL: VMOVAPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# ALL: %rax = COPY %1
+# NO_AVX512F: %0:vr128 = COPY $xmm0
+# AVX512ALL: %0:vr128x = COPY $xmm0
+# ALL: %1:gr64 = COPY $rdi
+# SSE: MOVAPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX: VMOVAPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX512VL: VMOVAPSZ128mr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# ALL: $rax = COPY %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %xmm0
+    liveins: $rdi, $xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(p0) = COPY %rdi
+    %0(<4 x s32>) = COPY $xmm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 16)
-    %rax = COPY %1(p0)
-    RET 0, implicit %rax
+    $rax = COPY %1(p0)
+    RET 0, implicit $rax
 ...
 ---
@@ -112,22 +112,22 @@
 registers:
   - { id: 0, class: vecr }
   - { id: 1, class: gpr }
-# NO_AVX512F: %0:vr128 = COPY %xmm0
-# AVX512ALL: %0:vr128x = COPY %xmm0
-# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX: VMOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512VL: VMOVUPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# ALL: %rax = COPY %1
+# NO_AVX512F: %0:vr128 = COPY $xmm0
+# AVX512ALL: %0:vr128x = COPY $xmm0
+# ALL: %1:gr64 = COPY $rdi
+# SSE: MOVUPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX: VMOVUPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512VL: VMOVUPSZ128mr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# ALL: $rax = COPY %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %xmm0
+    liveins: $rdi, $xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(p0) = COPY %rdi
+    %0(<4 x s32>) = COPY $xmm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 1)
-    %rax = COPY %1(p0)
-    RET 0, implicit %rax
+    $rax = COPY %1(p0)
+    RET 0, implicit $rax
 ...
Index: test/CodeGen/X86/GlobalISel/select-memop-v256.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-memop-v256.mir
+++ test/CodeGen/X86/GlobalISel/select-memop-v256.mir
@@ -42,28 +42,28 @@
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vecr }
-# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# NO_AVX512F-NEXT: %ymm0 = COPY %1
-# NO_AVX512F-NEXT: RET 0, implicit %ymm0
+# NO_AVX512F: %0:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# NO_AVX512F-NEXT: $ymm0 = COPY %1
+# NO_AVX512F-NEXT: RET 0, implicit $ymm0
 #
-# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# AVX512F-NEXT: %ymm0 = COPY %1
-# AVX512F-NEXT: RET 0, implicit %ymm0
+# AVX512F: %0:gr64 = COPY $rdi
+# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# AVX512F-NEXT: $ymm0 = COPY %1
+# AVX512F-NEXT: RET 0, implicit $ymm0
 #
-# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# AVX512VL-NEXT: %ymm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL: %0:gr64 = COPY $rdi
+# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# AVX512VL-NEXT: $ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $ymm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi
-    %0(p0) = COPY %rdi
+    %0(p0) = COPY $rdi
     %1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1, align 1)
-    %ymm0 = COPY %1(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %1(<8 x s32>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -75,28 +75,28 @@
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vecr }
-# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# NO_AVX512F-NEXT: %ymm0 = COPY %1
-# NO_AVX512F-NEXT: RET 0, implicit %ymm0
+# NO_AVX512F: %0:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# NO_AVX512F-NEXT: $ymm0 = COPY %1
+# NO_AVX512F-NEXT: RET 0, implicit $ymm0
 #
-# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# AVX512F-NEXT: %ymm0 = COPY %1
-# AVX512F-NEXT: RET 0, implicit %ymm0
+# AVX512F: %0:gr64 = COPY $rdi
+# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# AVX512F-NEXT: $ymm0 = COPY %1
+# AVX512F-NEXT: RET 0, implicit $ymm0
 #
-# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# AVX512VL-NEXT: %ymm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL: %0:gr64 = COPY $rdi
+# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# AVX512VL-NEXT: $ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $ymm0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi
-    %0(p0) = COPY %rdi
+    %0(p0) = COPY $rdi
     %1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1)
-    %ymm0 = COPY %1(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %1(<8 x s32>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -115,26 +115,26 @@
 registers:
   - { id: 0, class: vecr }
   - { id: 1, class: gpr }
-# NO_AVX512F: %0:vr256 = COPY %ymm0
-# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# NO_AVX512F: %0:vr256 = COPY $ymm0
+# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
 # NO_AVX512F-NEXT: RET 0
 #
-# AVX512F: %0:vr256x = COPY %ymm0
-# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512F: %0:vr256x = COPY $ymm0
+# AVX512F-NEXT: %1:gr64 = COPY $rdi
+# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
 # AVX512F-NEXT: RET 0
 #
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:gr64 = COPY $rdi
+# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
 # AVX512VL-NEXT: RET 0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %ymm0
+    liveins: $rdi, $ymm0
-    %0(<8 x s32>) = COPY %ymm0
-    %1(p0) = COPY %rdi
+    %0(<8 x s32>) = COPY $ymm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1, align 1)
     RET 0
@@ -155,26 +155,26 @@
 registers:
   - { id: 0, class: vecr }
   - { id: 1, class: gpr }
-# NO_AVX512F: %0:vr256 = COPY %ymm0
-# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# NO_AVX512F: %0:vr256 = COPY $ymm0
+# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
 # NO_AVX512F-NEXT: RET 0
 #
-# AVX512F: %0:vr256x = COPY %ymm0
-# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# AVX512F: %0:vr256x = COPY $ymm0
+# AVX512F-NEXT: %1:gr64 = COPY $rdi
+# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
 # AVX512F-NEXT: RET 0
 #
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:gr64 = COPY $rdi
+# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
 # AVX512VL-NEXT: RET 0
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %ymm0
+    liveins: $rdi, $ymm0
-    %0(<8 x s32>) = COPY %ymm0
-    %1(p0) = COPY %rdi
+    %0(<8 x s32>) = COPY $ymm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1)
     RET 0
Index: test/CodeGen/X86/GlobalISel/select-memop-v512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-memop-v512.mir
+++ test/CodeGen/X86/GlobalISel/select-memop-v512.mir
@@ -32,17 +32,17 @@
   - { id: 1, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi
     ; AVX512F-LABEL: name: test_load_v16i32_noalign
-    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 1)
-    ; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
-    ; AVX512F: RET 0, implicit %zmm0
-    %0(p0) = COPY %rdi
+    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 64 from %ir.p1, align 1)
+    ; AVX512F: $zmm0 = COPY [[VMOVUPSZrm]]
+    ; AVX512F: RET 0, implicit $zmm0
+    %0(p0) = COPY $rdi
     %1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 1)
-    %zmm0 = COPY %1(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %1(<16 x s32>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -55,17 +55,17 @@
   - { id: 1, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi
+    liveins: $rdi
     ; AVX512F-LABEL: name: test_load_v16i32_align
-    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 32)
-    ; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
-    ; AVX512F: RET 0, implicit %zmm0
-    %0(p0) = COPY %rdi
+    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 64 from %ir.p1, align 32)
+    ; AVX512F: $zmm0 = COPY [[VMOVUPSZrm]]
+    ; AVX512F: RET 0, implicit $zmm0
+    %0(p0) = COPY $rdi
     %1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 32)
-    %zmm0 = COPY %1(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %1(<16 x s32>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -78,15 +78,15 @@
   - { id: 1, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %zmm0
+    liveins: $rdi, $zmm0
     ; AVX512F-LABEL: name: test_store_v16i32_noalign
-    ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
+    ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: VMOVUPSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
     ; AVX512F: RET 0
-    %0(<16 x s32>) = COPY %zmm0
-    %1(p0) = COPY %rdi
+    %0(<16 x s32>) = COPY $zmm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 1)
     RET 0
@@ -101,15 +101,15 @@
   - { id: 1, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %zmm0
+    liveins: $rdi, $zmm0
     ; AVX512F-LABEL: name: test_store_v16i32_align
-    ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
-    ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
+    ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: VMOVUPSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
     ; AVX512F: RET 0
-    %0(<16 x s32>) = COPY %zmm0
-    %1(p0) = COPY %rdi
+    %0(<16 x s32>) = COPY $zmm0
+    %1(p0) = COPY $rdi
     G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 32)
     RET 0
Index: test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
+++ test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
@@ -24,18 +24,18 @@
     ; AVX: [[DEF:%[0-9]+]]:vr128 = IMPLICIT_DEF
     ; AVX: undef %2.sub_xmm:vr256 = COPY [[DEF]]
     ; AVX: [[VINSERTF128rr:%[0-9]+]]:vr256 = VINSERTF128rr %2, [[DEF]], 1
-    ; AVX: %ymm0 = COPY [[VINSERTF128rr]]
-    ; AVX: RET 0, implicit %ymm0
+    ; AVX: $ymm0 = COPY [[VINSERTF128rr]]
+    ; AVX: RET 0, implicit $ymm0
     ; AVX512VL-LABEL: name: test_merge
     ; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
     ; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
     ; AVX512VL: [[VINSERTF32x4Z256rr:%[0-9]+]]:vr256x = VINSERTF32x4Z256rr %2, [[DEF]], 1
-    ; AVX512VL: %ymm0 = COPY [[VINSERTF32x4Z256rr]]
-    ; AVX512VL: RET 0, implicit %ymm0
+    ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rr]]
+    ; AVX512VL: RET 0, implicit $ymm0
    %0(<4 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>)
-    %ymm0 = COPY %1(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %1(<8 x s32>)
+    RET 0, implicit $ymm0
 ...
Index: test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
+++ test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
@@ -27,12 +27,12 @@
     ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr %2, [[DEF]], 1
     ; ALL: [[VINSERTF32x4Zrr1:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr]], [[DEF]], 2
     ; ALL: [[VINSERTF32x4Zrr2:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr1]], [[DEF]], 3
-    ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr2]]
-    ; ALL: RET 0, implicit %zmm0
+    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr2]]
+    ; ALL: RET 0, implicit $zmm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
-    %zmm0 = COPY %1(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %1(<16 x s32>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -50,12 +50,12 @@
     ; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
     ; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
     ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr %2, [[DEF]], 1
-    ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
-    ; ALL: RET 0, implicit %zmm0
+    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+    ; ALL: RET 0, implicit $zmm0
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_MERGE_VALUES %0(<8 x s32>), %0(<8 x s32>)
-    %zmm0 = COPY %1(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %1(<16 x s32>)
+    RET 0, implicit $zmm0
 ...
Index: test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
+++ test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
@@ -29,19 +29,19 @@
   - { id: 2, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi
     ; ALL-LABEL: name: test_mul_i16
-    ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
-    ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
-    ; ALL: [[IMUL16rr:%[0-9]+]]:gr16 = IMUL16rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %ax = COPY [[IMUL16rr]]
-    ; ALL: RET 0, implicit %ax
-    %0(s16) = COPY %di
-    %1(s16) = COPY %si
+    ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+    ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+    ; ALL: [[IMUL16rr:%[0-9]+]]:gr16 = IMUL16rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $ax = COPY [[IMUL16rr]]
+    ; ALL: RET 0, implicit $ax
+    %0(s16) = COPY $di
+    %1(s16) = COPY $si
     %2(s16) = G_MUL %0, %1
-    %ax = COPY %2(s16)
-    RET 0, implicit %ax
+    $ax = COPY %2(s16)
+    RET 0, implicit $ax
 ...
 ---
@@ -55,19 +55,19 @@
   - { id: 2, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi
     ; ALL-LABEL: name: test_mul_i32
-    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-    ; ALL: [[IMUL32rr:%[0-9]+]]:gr32 = IMUL32rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %eax = COPY [[IMUL32rr]]
-    ; ALL: RET 0, implicit %eax
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; ALL: [[IMUL32rr:%[0-9]+]]:gr32 = IMUL32rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $eax = COPY [[IMUL32rr]]
+    ; ALL: RET 0, implicit $eax
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
     %2(s32) = G_MUL %0, %1
-    %eax = COPY %2(s32)
-    RET 0, implicit %eax
+    $eax = COPY %2(s32)
+    RET 0, implicit $eax
 ...
 ---
@@ -81,18 +81,18 @@
   - { id: 2, class: gpr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi
     ; ALL-LABEL: name: test_mul_i64
-    ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-    ; ALL: [[IMUL64rr:%[0-9]+]]:gr64 = IMUL64rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %rax = COPY [[IMUL64rr]]
-    ; ALL: RET 0, implicit %rax
-    %0(s64) = COPY %rdi
-    %1(s64) = COPY %rsi
+    ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+    ; ALL: [[IMUL64rr:%[0-9]+]]:gr64 = IMUL64rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $rax = COPY [[IMUL64rr]]
+    ; ALL: RET 0, implicit $rax
+    %0(s64) = COPY $rdi
+    %1(s64) = COPY $rsi
     %2(s64) = G_MUL %0, %1
-    %rax = COPY %2(s64)
-    RET 0, implicit %rax
+    $rax = COPY %2(s64)
+    RET 0, implicit $rax
 ...
Index: test/CodeGen/X86/GlobalISel/select-mul-vec.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-mul-vec.mir
+++ test/CodeGen/X86/GlobalISel/select-mul-vec.mir
@@ -100,19 +100,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v8i16
-    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
     ; CHECK: [[PMULLWrr:%[0-9]+]]:vr128 = PMULLWrr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[PMULLWrr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<8 x s16>) = COPY %xmm0
-    %1(<8 x s16>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[PMULLWrr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<8 x s16>) = COPY $xmm0
+    %1(<8 x s16>) = COPY $xmm1
     %2(<8 x s16>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<8 x s16>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<8 x s16>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -126,19 +126,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v8i16_avx
-    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
     ; CHECK: [[VPMULLWrr:%[0-9]+]]:vr128 = VPMULLWrr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[VPMULLWrr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<8 x s16>) = COPY %xmm0
-    %1(<8 x s16>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[VPMULLWrr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<8 x s16>) = COPY $xmm0
+    %1(<8 x s16>) = COPY $xmm1
     %2(<8 x s16>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<8 x s16>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<8 x s16>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -152,19 +152,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v8i16_avx512bwvl
-    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
     ; CHECK: [[VPMULLWZ128rr:%[0-9]+]]:vr128x = VPMULLWZ128rr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[VPMULLWZ128rr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<8 x s16>) = COPY %xmm0
-    %1(<8 x s16>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[VPMULLWZ128rr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<8 x s16>) = COPY $xmm0
+    %1(<8 x s16>) = COPY $xmm1
     %2(<8 x s16>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<8 x s16>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<8 x s16>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -178,19 +178,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v4i32
-    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
     ; CHECK: [[PMULLDrr:%[0-9]+]]:vr128 = PMULLDrr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[PMULLDrr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[PMULLDrr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -204,19 +204,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v4i32_avx
-    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
     ; CHECK: [[VPMULLDrr:%[0-9]+]]:vr128 = VPMULLDrr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[VPMULLDrr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[VPMULLDrr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -230,19 +230,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v4i32_avx512vl
-    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
     ; CHECK: [[VPMULLDZ128rr:%[0-9]+]]:vr128x = VPMULLDZ128rr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[VPMULLDZ128rr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[VPMULLDZ128rr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -256,19 +256,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
     ; CHECK-LABEL: name: test_mul_v2i64
-    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
     ; CHECK: [[VPMULLQZ128rr:%[0-9]+]]:vr128x = VPMULLQZ128rr [[COPY]], [[COPY1]]
-    ; CHECK: %xmm0 = COPY [[VPMULLQZ128rr]]
-    ; CHECK: RET 0, implicit %xmm0
-    %0(<2 x s64>) = COPY %xmm0
-    %1(<2 x s64>) = COPY %xmm1
+    ; CHECK: $xmm0 = COPY [[VPMULLQZ128rr]]
+    ; CHECK: RET 0, implicit $xmm0
+    %0(<2 x s64>) = COPY $xmm0
+    %1(<2 x s64>) = COPY $xmm1
     %2(<2 x s64>) = G_MUL %0, %1
-    %xmm0 = COPY %2(<2 x s64>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<2 x s64>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -282,19 +282,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
     ; CHECK-LABEL: name: test_mul_v16i16
-    ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY %ymm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY %ymm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
     ; CHECK: [[VPMULLWYrr:%[0-9]+]]:vr256 = VPMULLWYrr [[COPY]], [[COPY1]]
-    ; CHECK: %ymm0 = COPY [[VPMULLWYrr]]
-    ; CHECK: RET 0, implicit %ymm0
-    %0(<16 x s16>) = COPY %ymm0
-    %1(<16 x s16>) = COPY %ymm1
+    ; CHECK: $ymm0 = COPY [[VPMULLWYrr]]
+    ; CHECK: RET 0, implicit $ymm0
+    %0(<16 x s16>) = COPY $ymm0
+    %1(<16 x s16>) = COPY $ymm1
     %2(<16 x s16>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<16 x s16>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<16 x s16>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -308,19 +308,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
     ; CHECK-LABEL: name: test_mul_v16i16_avx512bwvl
-    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
     ; CHECK: [[VPMULLWZ256rr:%[0-9]+]]:vr256x = VPMULLWZ256rr [[COPY]], [[COPY1]]
-    ; CHECK: %ymm0 = COPY [[VPMULLWZ256rr]]
-    ; CHECK: RET 0, implicit %ymm0
-    %0(<16 x s16>) = COPY %ymm0
-    %1(<16 x s16>) = COPY %ymm1
+    ; CHECK: $ymm0 = COPY [[VPMULLWZ256rr]]
+    ; CHECK: RET 0, implicit $ymm0
+    %0(<16 x s16>) = COPY $ymm0
+    %1(<16 x s16>) = COPY $ymm1
     %2(<16 x s16>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<16 x s16>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<16 x s16>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -334,19 +334,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
     ; CHECK-LABEL: name: test_mul_v8i32
-    ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY %ymm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY %ymm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
     ; CHECK: [[VPMULLDYrr:%[0-9]+]]:vr256 = VPMULLDYrr [[COPY]], [[COPY1]]
-    ; CHECK: %ymm0 = COPY [[VPMULLDYrr]]
-    ; CHECK: RET 0, implicit %ymm0
-    %0(<8 x s32>) = COPY %ymm0
-    %1(<8 x s32>) = COPY %ymm1
+    ; CHECK: $ymm0 = COPY [[VPMULLDYrr]]
+    ; CHECK: RET 0, implicit $ymm0
+    %0(<8 x s32>) = COPY $ymm0
+    %1(<8 x s32>) = COPY $ymm1
     %2(<8 x s32>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<8 x s32>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -360,19 +360,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
     ; CHECK-LABEL: name: test_mul_v8i32_avx512vl
-    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
     ; CHECK: [[VPMULLDZ256rr:%[0-9]+]]:vr256x = VPMULLDZ256rr [[COPY]], [[COPY1]]
-    ; CHECK: %ymm0 = COPY [[VPMULLDZ256rr]]
-    ; CHECK: RET 0, implicit %ymm0
-    %0(<8 x s32>) = COPY %ymm0
-    %1(<8 x s32>) = COPY %ymm1
+    ; CHECK: $ymm0 = COPY [[VPMULLDZ256rr]]
+    ; CHECK: RET 0, implicit $ymm0
+    %0(<8 x s32>) = COPY $ymm0
+    %1(<8 x s32>) = COPY $ymm1
     %2(<8 x s32>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<8 x s32>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -386,19 +386,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
     ; CHECK-LABEL: name: test_mul_v4i64
-    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
     ; CHECK: [[VPMULLQZ256rr:%[0-9]+]]:vr256x = VPMULLQZ256rr [[COPY]], [[COPY1]]
-    ; CHECK: %ymm0 = COPY [[VPMULLQZ256rr]]
-    ; CHECK: RET 0, implicit %ymm0
-    %0(<4 x s64>) = COPY %ymm0
-    %1(<4 x s64>) = COPY %ymm1
+    ; CHECK: $ymm0 = COPY [[VPMULLQZ256rr]]
+    ; CHECK: RET 0, implicit $ymm0
+    %0(<4 x s64>) = COPY $ymm0
+    %1(<4 x s64>) = COPY $ymm1
     %2(<4 x s64>) = G_MUL %0, %1
-    %ymm0 = COPY %2(<4 x s64>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<4 x s64>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -412,19 +412,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1
     ; CHECK-LABEL: name: test_mul_v32i16
-    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
     ; CHECK: [[VPMULLWZrr:%[0-9]+]]:vr512 = VPMULLWZrr [[COPY]], [[COPY1]]
-    ; CHECK: %zmm0 = COPY [[VPMULLWZrr]]
-    ; CHECK: RET 0, implicit %zmm0
-    %0(<32 x s16>) = COPY %zmm0
-    %1(<32 x s16>) = COPY %zmm1
+    ; CHECK: $zmm0 = COPY [[VPMULLWZrr]]
+    ; CHECK: RET 0, implicit $zmm0
+    %0(<32 x s16>) = COPY $zmm0
+    %1(<32 x s16>) = COPY $zmm1
     %2(<32 x s16>) = G_MUL %0, %1
-    %zmm0 = COPY %2(<32 x s16>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<32 x s16>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -438,19 +438,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1
     ; CHECK-LABEL: name: test_mul_v16i32
-    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
     ; CHECK: [[VPMULLDZrr:%[0-9]+]]:vr512 = VPMULLDZrr [[COPY]], [[COPY1]]
-    ; CHECK: %zmm0 = COPY [[VPMULLDZrr]]
-    ; CHECK: RET 0, implicit %zmm0
-    %0(<16 x s32>) = COPY %zmm0
-    %1(<16 x s32>) = COPY %zmm1
+    ; CHECK: $zmm0 = COPY [[VPMULLDZrr]]
+    ; CHECK: RET 0, implicit $zmm0
+    %0(<16 x s32>) = COPY $zmm0
+    %1(<16 x s32>) = COPY $zmm1
     %2(<16 x s32>) = G_MUL %0, %1
-    %zmm0 = COPY %2(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<16 x s32>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -464,18 +464,18 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1
     ; CHECK-LABEL: name: test_mul_v8i64
-    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
     ; CHECK: [[VPMULLQZrr:%[0-9]+]]:vr512 = VPMULLQZrr [[COPY]], [[COPY1]]
-    ; CHECK: %zmm0 = COPY [[VPMULLQZrr]]
-    ; CHECK: RET 0, implicit %zmm0
-    %0(<8 x s64>) = COPY %zmm0
-    %1(<8 x s64>) = COPY %zmm1
+    ; CHECK: $zmm0 = COPY [[VPMULLQZrr]]
+    ; CHECK: RET 0, implicit $zmm0
+    %0(<8 x s64>) = COPY $zmm0
+    %1(<8 x s64>) = COPY $zmm1
     %2(<8 x s64>) = G_MUL %0, %1
-    %zmm0 = COPY %2(<8 x s64>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<8 x s64>)
+    RET 0, implicit $zmm0
 ...
Index: test/CodeGen/X86/GlobalISel/select-or-scalar.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-or-scalar.mir
+++ test/CodeGen/X86/GlobalISel/select-or-scalar.mir
@@ -38,19 +38,19 @@
 constants:
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi
     ; ALL-LABEL: name: test_or_i8
-    ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
-    ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
-    ; ALL: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %al = COPY [[OR8rr]]
-    ; ALL: RET 0, implicit %al
-    %0(s8) = COPY %dil
-    %1(s8) = COPY %sil
+    ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+    ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+    ; ALL: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $al = COPY [[OR8rr]]
+    ; ALL: RET 0, implicit $al
+    %0(s8) = COPY $dil
+    %1(s8) = COPY $sil
     %2(s8) = G_OR %0, %1
-    %al = COPY %2(s8)
-    RET 0, implicit %al
+    $al = COPY %2(s8)
+    RET 0, implicit $al
 ...
 ---
@@ -68,19 +68,19 @@
 constants:
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi
     ; ALL-LABEL: name: test_or_i16
-    ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
-    ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
-    ; ALL: [[OR16rr:%[0-9]+]]:gr16 = OR16rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %ax = COPY [[OR16rr]]
-    ; ALL: RET 0, implicit %ax
-    %0(s16) = COPY %di
-    %1(s16) = COPY %si
+    ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+    ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+    ; ALL: [[OR16rr:%[0-9]+]]:gr16 = OR16rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $ax = COPY [[OR16rr]]
+    ; ALL: RET 0, implicit $ax
+    %0(s16) = COPY $di
+    %1(s16) = COPY $si
     %2(s16) = G_OR %0, %1
-    %ax = COPY %2(s16)
-    RET 0, implicit %ax
+    $ax = COPY %2(s16)
+    RET 0, implicit $ax
 ...
 ---
@@ -98,19 +98,19 @@
 constants:
 body: |
   bb.1 (%ir-block.0):
-    liveins: %edi, %esi
+    liveins: $edi, $esi
     ; ALL-LABEL: name: test_or_i32
-    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
-    ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-    ; ALL: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %eax = COPY [[OR32rr]]
-    ; ALL: RET 0, implicit %eax
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; ALL: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $eax = COPY [[OR32rr]]
+    ; ALL: RET 0, implicit $eax
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
     %2(s32) = G_OR %0, %1
-    %eax = COPY %2(s32)
-    RET 0, implicit %eax
+    $eax = COPY %2(s32)
+    RET 0, implicit $eax
 ...
 ---
@@ -128,18 +128,18 @@
 constants:
 body: |
   bb.1 (%ir-block.0):
-    liveins: %rdi, %rsi
+    liveins: $rdi, $rsi
     ; ALL-LABEL: name: test_or_i64
-    ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
-    ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-    ; ALL: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[COPY]], [[COPY1]], implicit-def %eflags
-    ; ALL: %rax = COPY [[OR64rr]]
-    ; ALL: RET 0, implicit %rax
-    %0(s64) = COPY %rdi
-    %1(s64) = COPY %rsi
+    ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+    ; ALL: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[COPY]], [[COPY1]], implicit-def $eflags
+    ; ALL: $rax = COPY [[OR64rr]]
+    ; ALL: RET 0, implicit $rax
+    %0(s64) = COPY $rdi
+    %1(s64) = COPY $rsi
     %2(s64) = G_OR %0, %1
-    %rax = COPY %2(s64)
-    RET 0, implicit %rax
+    $rax = COPY %2(s64)
+    RET 0, implicit $rax
 ...
Index: test/CodeGen/X86/GlobalISel/select-phi.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-phi.mir
+++ test/CodeGen/X86/GlobalISel/select-phi.mir
@@ -119,31 +119,31 @@
   ; ALL-LABEL: name: test_i8
   ; ALL: bb.0.entry:
   ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-  ; ALL: liveins: %edi, %edx, %esi
-  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
-  ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+  ; ALL: liveins: $edi, $edx, $esi
+  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+  ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
   ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
-  ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY %edx
+  ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY $edx
   ; ALL: [[COPY4:%[0-9]+]]:gr8 = COPY [[COPY3]].sub_8bit
-  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
-  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
-  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
-  ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
-  ; ALL: JNE_1 %bb.2, implicit %eflags
+  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+  ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+  ; ALL: JNE_1 %bb.2, implicit $eflags
   ; ALL: bb.1.cond.false:
   ; ALL: successors: %bb.2(0x80000000)
   ; ALL: bb.2.cond.end:
   ; ALL: [[PHI:%[0-9]+]]:gr8 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
-  ; ALL: %al = COPY [[PHI]]
-  ; ALL: RET 0, implicit %al
+  ; ALL: $al = COPY [[PHI]]
+  ; ALL: RET 0, implicit $al
   bb.1.entry:
     successors: %bb.3(0x40000000), %bb.2(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi
-    %0:gpr(s32) = COPY %edi
-    %3:gpr(s32) = COPY %esi
+    %0:gpr(s32) = COPY $edi
+    %3:gpr(s32) = COPY $esi
     %1:gpr(s8) = G_TRUNC %3(s32)
-    %4:gpr(s32) = COPY %edx
+    %4:gpr(s32) = COPY $edx
     %2:gpr(s8) = G_TRUNC %4(s32)
     %5:gpr(s32) = G_CONSTANT i32 0
     %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -155,8 +155,8 @@
   bb.3.cond.end:
     %7:gpr(s8) = G_PHI %2(s8), %bb.2, %1(s8), %bb.1
-    %al = COPY %7(s8)
-    RET 0, implicit %al
+    $al = COPY %7(s8)
+    RET 0, implicit $al
 ...
 ---
@@ -178,31 +178,31 @@
   ; ALL-LABEL: name: test_i16
   ; ALL: bb.0.entry:
   ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
-  ; ALL: liveins: %edi, %edx, %esi
-  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
-  ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+  ; ALL: liveins: $edi, $edx, $esi
+  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+  ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
   ; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit
-  ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY %edx
+  ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY $edx
   ; ALL: [[COPY4:%[0-9]+]]:gr16 = COPY [[COPY3]].sub_16bit
-  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
-  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
-  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
-  ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
-  ; ALL: JNE_1 %bb.2, implicit %eflags
+  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+  ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+  ; ALL: JNE_1 %bb.2, implicit $eflags
   ; ALL: bb.1.cond.false:
   ; ALL: successors: %bb.2(0x80000000)
   ; ALL: bb.2.cond.end:
   ; ALL: [[PHI:%[0-9]+]]:gr16 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
-  ; ALL: %ax = COPY [[PHI]]
-  ; ALL: RET 0, implicit %ax
+  ; ALL: $ax = COPY [[PHI]]
+  ; ALL: RET 0, implicit $ax
   bb.1.entry:
     successors: %bb.3(0x40000000), %bb.2(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi
-    %0:gpr(s32) = COPY %edi
-    %3:gpr(s32) = COPY %esi
+    %0:gpr(s32) = COPY $edi
+    %3:gpr(s32) = COPY $esi
     %1:gpr(s16) = G_TRUNC %3(s32)
-    %4:gpr(s32) = COPY %edx
+    %4:gpr(s32) = COPY $edx
     %2:gpr(s16) = G_TRUNC %4(s32)
     %5:gpr(s32) = G_CONSTANT i32 0
     %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -214,8 +214,8 @@
   bb.3.cond.end:
     %7:gpr(s16) = G_PHI %2(s16), %bb.2, %1(s16), %bb.1
-    %ax = COPY %7(s16)
-    RET 0, implicit %ax
+    $ax = COPY %7(s16)
+    RET 0, implicit $ax
 ...
 ---
@@ -235,15 +235,15 @@
   ; ALL-LABEL: name: test_i32
   ; ALL: bb.0.entry:
   ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; ALL: liveins: %edi, %edx, %esi
-  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
-  ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
-  ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY %edx
-  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
-  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
-  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
-  ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
-  ; ALL: JNE_1 %bb.1, implicit %eflags
+  ; ALL: liveins: $edi, $edx, $esi
+  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+  ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+  ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY $edx
+  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+  ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+  ; ALL: JNE_1 %bb.1, implicit $eflags
   ; ALL: JMP_1 %bb.2
   ; ALL: bb.1.cond.true:
   ; ALL: successors: %bb.3(0x80000000)
@@ -252,15 +252,15 @@
   ; ALL: successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
   ; ALL: [[PHI:%[0-9]+]]:gr32 = PHI [[COPY1]], %bb.1, [[COPY2]], %bb.2
-  ; ALL: %eax = COPY [[PHI]]
-  ; ALL: RET 0, implicit %eax
+  ; ALL: $eax = COPY [[PHI]]
+  ; ALL: RET 0, implicit $eax
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %edx, %esi
+    liveins: $edi, $edx, $esi
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %esi
-    %2(s32) = COPY %edx
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
+    %2(s32) = COPY $edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -277,8 +277,8 @@
   bb.4.cond.end:
     %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
-    %eax = COPY %5(s32)
-    RET 0, implicit %eax
+    $eax = COPY %5(s32)
+    RET 0, implicit $eax
 ...
 ---
@@ -298,15 +298,15 @@
   ; ALL-LABEL: name: test_i64
   ; ALL: bb.0.entry:
   ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; ALL: liveins: %edi, %rdx, %rsi
-  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
-  ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
-  ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY %rdx
-  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
-  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
-  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
-  ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
-  ; ALL: JNE_1 %bb.1, implicit %eflags
+  ; ALL: liveins: $edi, $rdx, $rsi
+  ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+  ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+  ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdx
+  ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+  ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+  ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+  ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+  ; ALL: JNE_1 %bb.1, implicit $eflags
   ; ALL: JMP_1 %bb.2
   ; ALL: bb.1.cond.true:
   ; ALL: successors: %bb.3(0x80000000)
@@ -315,15 +315,15 @@
   ; ALL: successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
   ; ALL: [[PHI:%[0-9]+]]:gr64 = PHI [[COPY1]], %bb.1, [[COPY2]], %bb.2
-  ; ALL: %rax = COPY [[PHI]]
-  ; ALL: RET 0, implicit %rax
+  ; ALL: $rax = COPY [[PHI]]
+  ; ALL: RET 0, implicit $rax
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %rdx, %rsi
+    liveins: $edi, $rdx, $rsi
-    %0(s32) = COPY %edi
-    %1(s64) = COPY %rsi
-    %2(s64) = COPY %rdx
+    %0(s32) = COPY $edi
+    %1(s64) = COPY $rsi
+    %2(s64) = COPY $rdx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -340,8 +340,8 @@
   bb.4.cond.end:
     %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
-    %rax = COPY %5(s64)
-    RET 0, implicit %rax
+    $rax = COPY %5(s64)
+    RET 0, implicit $rax
 ...
 ---
@@ -371,16 +371,16 @@
 constants:
 # ALL-LABEL: bb.3.cond.end:
 # ALL: %5:fr32 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: %xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %5
+# ALL-NEXT: RET 0, implicit $xmm0
 body: |
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %xmm0, %xmm1
+    liveins: $edi, $xmm0, $xmm1
-    %0(s32) = COPY %edi
-    %1(s32) = COPY %xmm0
-    %2(s32) = COPY %xmm1
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $xmm0
+    %2(s32) = COPY $xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -397,8 +397,8 @@
   bb.4.cond.end:
     %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
-    %xmm0 = COPY %5(s32)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %5(s32)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -424,16 +424,16 @@
   - { id: 5, class: vecr, preferred-register: '' }
 # ALL-LABEL: bb.3.cond.end:
 # ALL: %5:fr64 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: %xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %5
+# ALL-NEXT: RET 0, implicit $xmm0
 body: |
   bb.1.entry:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
-    liveins: %edi, %xmm0, %xmm1
+    liveins: $edi, $xmm0, $xmm1
-    %0(s32) = COPY %edi
-    %1(s64) = COPY %xmm0
-    %2(s64) = COPY %xmm1
+    %0(s32) = COPY $edi
+    %1(s64) = COPY $xmm0
+    %2(s64) = COPY $xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
     G_BRCOND %4(s1), %bb.2
@@ -450,7 +450,7 @@
   bb.4.cond.end:
     %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
-    %xmm0 = COPY %5(s64)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %5(s64)
+    RET 0, implicit $xmm0
 ...
Index: test/CodeGen/X86/GlobalISel/select-sub-v128.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-sub-v128.mir
+++ test/CodeGen/X86/GlobalISel/select-sub-v128.mir
@@ -44,13 +44,13 @@
 # AVX512BWVL: %2:vr128x = VPSUBBZ128rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
-    %0(<16 x s8>) = COPY %xmm0
-    %1(<16 x s8>) = COPY %xmm1
+    %0(<16 x s8>) = COPY $xmm0
+    %1(<16 x s8>) = COPY $xmm1
     %2(<16 x s8>) = G_SUB %0, %1
-    %xmm0 = COPY %2(<16 x s8>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<16 x s8>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -72,13 +72,13 @@
 # AVX512BWVL: %2:vr128x = VPSUBWZ128rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
-    %0(<8 x s16>) = COPY %xmm0
-    %1(<8 x s16>) = COPY %xmm1
+    %0(<8 x s16>) = COPY $xmm0
+    %1(<8 x s16>) = COPY $xmm1
     %2(<8 x s16>) = G_SUB %0, %1
-    %xmm0 = COPY %2(<8 x s16>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<8 x s16>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -100,13 +100,13 @@
 # AVX512BWVL: %2:vr128x = VPSUBDZ128rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
-    %0(<4 x s32>) = COPY %xmm0
-    %1(<4 x s32>) = COPY %xmm1
+    %0(<4 x s32>) = COPY $xmm0
+    %1(<4 x s32>) = COPY $xmm1
     %2(<4 x s32>) = G_SUB %0, %1
-    %xmm0 = COPY %2(<4 x s32>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<4 x s32>)
+    RET 0, implicit $xmm0
 ...
 ---
@@ -128,12 +128,12 @@
 # AVX512BWVL: %2:vr128x = VPSUBQZ128rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %xmm0, %xmm1
+    liveins: $xmm0, $xmm1
-    %0(<2 x s64>) = COPY %xmm0
-    %1(<2 x s64>) = COPY %xmm1
+    %0(<2 x s64>) = COPY $xmm0
+    %1(<2 x s64>) = COPY $xmm1
     %2(<2 x s64>) = G_SUB %0, %1
-    %xmm0 = COPY %2(<2 x s64>)
-    RET 0, implicit %xmm0
+    $xmm0 = COPY %2(<2 x s64>)
+    RET 0, implicit $xmm0
 ...
Index: test/CodeGen/X86/GlobalISel/select-sub-v256.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-sub-v256.mir
+++ test/CodeGen/X86/GlobalISel/select-sub-v256.mir
@@ -40,13 +40,13 @@
 # AVX512BWVL: %2:vr256x = VPSUBBZ256rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
-    %0(<32 x s8>) = COPY %ymm0
-    %1(<32 x s8>) = COPY %ymm1
+    %0(<32 x s8>) = COPY $ymm0
+    %1(<32 x s8>) = COPY $ymm1
     %2(<32 x s8>) = G_SUB %0, %1
-    %ymm0 = COPY %2(<32 x s8>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<32 x s8>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -66,13 +66,13 @@
 # AVX512BWVL: %2:vr256x = VPSUBWZ256rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
-    %0(<16 x s16>) = COPY %ymm0
-    %1(<16 x s16>) = COPY %ymm1
+    %0(<16 x s16>) = COPY $ymm0
+    %1(<16 x s16>) = COPY $ymm1
     %2(<16 x s16>) = G_SUB %0, %1
-    %ymm0 = COPY %2(<16 x s16>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<16 x s16>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -92,13 +92,13 @@
 # AVX512BWVL: %2:vr256x = VPSUBDZ256rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
-    %0(<8 x s32>) = COPY %ymm0
-    %1(<8 x s32>) = COPY %ymm1
+    %0(<8 x s32>) = COPY $ymm0
+    %1(<8 x s32>) = COPY $ymm1
     %2(<8 x s32>) = G_SUB %0, %1
-    %ymm0 = COPY %2(<8 x s32>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<8 x s32>)
+    RET 0, implicit $ymm0
 ...
 ---
@@ -118,12 +118,12 @@
 # AVX512BWVL: %2:vr256x = VPSUBQZ256rr %0, %1
 body: |
   bb.1 (%ir-block.0):
-    liveins: %ymm0, %ymm1
+    liveins: $ymm0, $ymm1
-    %0(<4 x s64>) = COPY %ymm0
-    %1(<4 x s64>) = COPY %ymm1
+    %0(<4 x s64>) = COPY $ymm0
+    %1(<4 x s64>) = COPY $ymm1
     %2(<4 x s64>) = G_SUB %0, %1
-    %ymm0 = COPY %2(<4 x s64>)
-    RET 0, implicit %ymm0
+    $ymm0 = COPY %2(<4 x s64>)
+    RET 0, implicit $ymm0
 ...
Index: test/CodeGen/X86/GlobalISel/select-sub-v512.mir
===================================================================
--- test/CodeGen/X86/GlobalISel/select-sub-v512.mir
+++ test/CodeGen/X86/GlobalISel/select-sub-v512.mir
@@ -36,19 +36,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1
     ; ALL-LABEL: name: test_sub_v64i8
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
     ; ALL: [[VPSUBBZrr:%[0-9]+]]:vr512 = VPSUBBZrr [[COPY]], [[COPY1]]
-    ; ALL: %zmm0 = COPY [[VPSUBBZrr]]
-    ; ALL: RET 0, implicit %zmm0
-    %0(<64 x s8>) = COPY %zmm0
-    %1(<64 x s8>) = COPY %zmm1
+    ; ALL: $zmm0 = COPY [[VPSUBBZrr]]
+    ; ALL: RET 0, implicit $zmm0
+    %0(<64 x s8>) = COPY $zmm0
+    %1(<64 x s8>) = COPY $zmm1
     %2(<64 x s8>) = G_SUB %0, %1
-    %zmm0 = COPY %2(<64 x s8>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<64 x s8>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -62,19 +62,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1
     ; ALL-LABEL: name: test_sub_v32i16
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
     ; ALL: [[VPSUBWZrr:%[0-9]+]]:vr512 = VPSUBWZrr [[COPY]], [[COPY1]]
-    ; ALL: %zmm0 = COPY [[VPSUBWZrr]]
-    ; ALL: RET 0, implicit %zmm0
-    %0(<32 x s16>) = COPY %zmm0
-    %1(<32 x s16>) = COPY %zmm1
+    ; ALL: $zmm0 = COPY [[VPSUBWZrr]]
+    ; ALL: RET 0, implicit $zmm0
+    %0(<32 x s16>) = COPY $zmm0
+    %1(<32 x s16>) = COPY $zmm1
     %2(<32 x s16>) = G_SUB %0, %1
-    %zmm0 = COPY %2(<32 x s16>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<32 x s16>)
+    RET 0, implicit $zmm0
 ...
 ---
@@ -88,19 +88,19 @@
   - { id: 2, class: vecr }
 body: |
   bb.1 (%ir-block.0):
-    liveins: %zmm0, %zmm1
+    liveins: $zmm0, $zmm1
     ; ALL-LABEL: name: test_sub_v16i32
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
     ; ALL: [[VPSUBDZrr:%[0-9]+]]:vr512 = VPSUBDZrr [[COPY]], [[COPY1]]
-    ; ALL: %zmm0 = COPY [[VPSUBDZrr]]
-    ; ALL: RET 0, implicit %zmm0
-    %0(<16 x s32>) = COPY %zmm0
-    %1(<16 x s32>) = COPY %zmm1
+    ; ALL: $zmm0 = COPY [[VPSUBDZrr]]
+    ; ALL: RET 0, implicit $zmm0
+    %0(<16 x s32>) = COPY $zmm0
+    %1(<16 x s32>) = COPY $zmm1
     %2(<16 x s32>) = G_SUB %0, %1
-    %zmm0 = COPY %2(<16 x s32>)
-    RET 0, implicit %zmm0
+    $zmm0 = COPY %2(<16 x s32>)
+    RET 0, implicit $zmm0
 ...
--- @@ -114,18 +114,18 @@ - { id: 2, class: vecr } body: | bb.1 (%ir-block.0): - liveins: %zmm0, %zmm1 + liveins: $zmm0, $zmm1 ; ALL-LABEL: name: test_sub_v8i64 - ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0 - ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1 + ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1 ; ALL: [[VPSUBQZrr:%[0-9]+]]:vr512 = VPSUBQZrr [[COPY]], [[COPY1]] - ; ALL: %zmm0 = COPY [[VPSUBQZrr]] - ; ALL: RET 0, implicit %zmm0 - %0(<8 x s64>) = COPY %zmm0 - %1(<8 x s64>) = COPY %zmm1 + ; ALL: $zmm0 = COPY [[VPSUBQZrr]] + ; ALL: RET 0, implicit $zmm0 + %0(<8 x s64>) = COPY $zmm0 + %1(<8 x s64>) = COPY $zmm1 %2(<8 x s64>) = G_SUB %0, %1 - %zmm0 = COPY %2(<8 x s64>) - RET 0, implicit %zmm0 + $zmm0 = COPY %2(<8 x s64>) + RET 0, implicit $zmm0 ... Index: test/CodeGen/X86/GlobalISel/select-sub.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-sub.mir +++ test/CodeGen/X86/GlobalISel/select-sub.mir @@ -33,17 +33,17 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr64 = COPY %rdi -# ALL-NEXT: %1:gr64 = COPY %rsi +# ALL: %0:gr64 = COPY $rdi +# ALL-NEXT: %1:gr64 = COPY $rsi # ALL-NEXT: %2:gr64 = SUB64rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi %2(s64) = G_SUB %0, %1 - %rax = COPY %2(s64) + $rax = COPY %2(s64) ... @@ -55,17 +55,17 @@ - { id: 0, class: gpr } - { id: 1, class: gpr } - { id: 2, class: gpr } -# ALL: %0:gr32 = COPY %edi -# ALL-NEXT: %1:gr32 = COPY %esi +# ALL: %0:gr32 = COPY $edi +# ALL-NEXT: %1:gr32 = COPY $esi # ALL-NEXT: %2:gr32 = SUB32rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi - %0(s32) = COPY %edi - %1(s32) = COPY %esi + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s32) = G_SUB %0, %1 - %eax = COPY %2(s32) + $eax = COPY %2(s32) ... --- @@ -79,23 +79,23 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# NO_AVX512VL: %0:vr128 = COPY %xmm0 -# AVX512VL: %0:vr128x = COPY %xmm0 -# NO_AVX512VL: %1:vr128 = COPY %xmm1 -# AVX512VL: %1:vr128x = COPY %xmm1 +# NO_AVX512VL: %0:vr128 = COPY $xmm0 +# AVX512VL: %0:vr128x = COPY $xmm0 +# NO_AVX512VL: %1:vr128 = COPY $xmm1 +# AVX512VL: %1:vr128x = COPY $xmm1 # SSE-NEXT: %2:vr128 = PSUBDrr %0, %1 # AVX-NEXT: %2:vr128 = VPSUBDrr %0, %1 # AVX512F-NEXT: %2:vr128 = VPSUBDrr %0, %1 # AVX512VL-NEXT: %2:vr128x = VPSUBDZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<4 x s32>) = COPY $xmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<4 x s32>) = G_SUB %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<4 x s32>) + RET 0, implicit $xmm0 ... 
--- @@ -109,23 +109,23 @@ - { id: 0, class: vecr } - { id: 1, class: vecr } - { id: 2, class: vecr } -# NO_AVX512VL: %0:vr128 = COPY %xmm0 -# NO_AVX512VL: %1:vr128 = COPY %xmm1 +# NO_AVX512VL: %0:vr128 = COPY $xmm0 +# NO_AVX512VL: %1:vr128 = COPY $xmm1 # SSE-NEXT: %2:vr128 = SUBPSrr %0, %1 # AVX-NEXT: %2:vr128 = VSUBPSrr %0, %1 # AVX512F-NEXT: %2:vr128 = VSUBPSrr %0, %1 # -# AVX512VL: %0:vr128x = COPY %xmm0 -# AVX512VL: %1:vr128x = COPY %xmm1 +# AVX512VL: %0:vr128x = COPY $xmm0 +# AVX512VL: %1:vr128x = COPY $xmm1 # AVX512VL-NEXT: %2:vr128x = VSUBPSZ128rr %0, %1 body: | bb.1 (%ir-block.0): - liveins: %xmm0, %xmm1 + liveins: $xmm0, $xmm1 - %0(<4 x s32>) = COPY %xmm0 - %1(<4 x s32>) = COPY %xmm1 + %0(<4 x s32>) = COPY $xmm0 + %1(<4 x s32>) = COPY $xmm1 %2(<4 x s32>) = G_FSUB %0, %1 - %xmm0 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %2(<4 x s32>) + RET 0, implicit $xmm0 ... Index: test/CodeGen/X86/GlobalISel/select-trunc.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-trunc.mir +++ test/CodeGen/X86/GlobalISel/select-trunc.mir @@ -43,18 +43,18 @@ - { id: 2, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: trunc_i32toi1 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit - ; CHECK: %al = COPY [[COPY1]] - ; CHECK: RET 0, implicit %al - %0(s32) = COPY %edi + ; CHECK: $al = COPY [[COPY1]] + ; CHECK: RET 0, implicit $al + %0(s32) = COPY $edi %1(s1) = G_TRUNC %0(s32) %2(s8) = G_ANYEXT %1(s1) - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -67,17 +67,17 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: trunc_i32toi8 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit - ; CHECK: %al = COPY [[COPY1]] - ; CHECK: RET 0, implicit %al - %0(s32) = COPY %edi + ; CHECK: $al = COPY [[COPY1]] + ; CHECK: RET 0, implicit $al + %0(s32) = COPY $edi %1(s8) = G_TRUNC %0(s32) - %al = COPY %1(s8) - RET 0, implicit %al + $al = COPY %1(s8) + RET 0, implicit $al ... --- @@ -90,17 +90,17 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; CHECK-LABEL: name: trunc_i32toi16 - ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit - ; CHECK: %ax = COPY [[COPY1]] - ; CHECK: RET 0, implicit %ax - %0(s32) = COPY %edi + ; CHECK: $ax = COPY [[COPY1]] + ; CHECK: RET 0, implicit $ax + %0(s32) = COPY $edi %1(s16) = G_TRUNC %0(s32) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... --- @@ -113,17 +113,17 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; CHECK-LABEL: name: trunc_i64toi8 - ; CHECK: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit - ; CHECK: %al = COPY [[COPY1]] - ; CHECK: RET 0, implicit %al - %0(s64) = COPY %rdi + ; CHECK: $al = COPY [[COPY1]] + ; CHECK: RET 0, implicit $al + %0(s64) = COPY $rdi %1(s8) = G_TRUNC %0(s64) - %al = COPY %1(s8) - RET 0, implicit %al + $al = COPY %1(s8) + RET 0, implicit $al ... 
--- @@ -136,17 +136,17 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; CHECK-LABEL: name: trunc_i64toi16 - ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit - ; CHECK: %ax = COPY [[COPY1]] - ; CHECK: RET 0, implicit %ax - %0(s64) = COPY %rdi + ; CHECK: $ax = COPY [[COPY1]] + ; CHECK: RET 0, implicit $ax + %0(s64) = COPY $rdi %1(s16) = G_TRUNC %0(s64) - %ax = COPY %1(s16) - RET 0, implicit %ax + $ax = COPY %1(s16) + RET 0, implicit $ax ... --- @@ -159,16 +159,16 @@ - { id: 1, class: gpr } body: | bb.1 (%ir-block.0): - liveins: %rdi + liveins: $rdi ; CHECK-LABEL: name: trunc_i64toi32 - ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit - ; CHECK: %eax = COPY [[COPY1]] - ; CHECK: RET 0, implicit %eax - %0(s64) = COPY %rdi + ; CHECK: $eax = COPY [[COPY1]] + ; CHECK: RET 0, implicit $eax + %0(s64) = COPY $rdi %1(s32) = G_TRUNC %0(s64) - %eax = COPY %1(s32) - RET 0, implicit %eax + $eax = COPY %1(s32) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/select-undef.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-undef.mir +++ test/CodeGen/X86/GlobalISel/select-undef.mir @@ -27,11 +27,11 @@ bb.1 (%ir-block.0): ; ALL-LABEL: name: test ; ALL: [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF - ; ALL: %al = COPY [[DEF]] - ; ALL: RET 0, implicit %al + ; ALL: $al = COPY [[DEF]] + ; ALL: RET 0, implicit $al %0(s8) = G_IMPLICIT_DEF - %al = COPY %0(s8) - RET 0, implicit %al + $al = COPY %0(s8) + RET 0, implicit $al ... --- @@ -49,18 +49,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi + liveins: $edi ; ALL-LABEL: name: test2 - ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil + ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil ; ALL: [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF - ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY]], [[DEF]], implicit-def %eflags - ; ALL: %al = COPY [[ADD8rr]] - ; ALL: RET 0, implicit %al - %0(s8) = COPY %dil + ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY]], [[DEF]], implicit-def $eflags + ; ALL: $al = COPY [[ADD8rr]] + ; ALL: RET 0, implicit $al + %0(s8) = COPY $dil %1(s8) = G_IMPLICIT_DEF %2(s8) = G_ADD %0, %1 - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... 
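Note on the MIR hunks above: the rename also covers implicit operands. Status registers such as EFLAGS are physical, so spellings like implicit-def %eflags and implicit %al become implicit-def $eflags and implicit $al, while generic virtual registers (%0(s8), %2(s8)) keep the % sigil. A minimal illustrative pair under that convention (same shape as the select-undef.mir hunk above; the concrete operands here are illustrative, not quoted from a test):

    before: %2:gr8 = ADD8rr %0, %1, implicit-def %eflags
    after:  %2:gr8 = ADD8rr %0, %1, implicit-def $eflags
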
Index: test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir +++ test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir @@ -26,21 +26,21 @@ ; AVX: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm ; AVX: [[VEXTRACTF128rr:%[0-9]+]]:vr128 = VEXTRACTF128rr [[DEF]], 1 - ; AVX: %xmm0 = COPY [[COPY]] - ; AVX: %xmm1 = COPY [[VEXTRACTF128rr]] - ; AVX: RET 0, implicit %xmm0, implicit %xmm1 + ; AVX: $xmm0 = COPY [[COPY]] + ; AVX: $xmm1 = COPY [[VEXTRACTF128rr]] + ; AVX: RET 0, implicit $xmm0, implicit $xmm1 ; AVX512VL-LABEL: name: test_unmerge ; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm ; AVX512VL: [[VEXTRACTF32x4Z256rr:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rr [[DEF]], 1 - ; AVX512VL: %xmm0 = COPY [[COPY]] - ; AVX512VL: %xmm1 = COPY [[VEXTRACTF32x4Z256rr]] - ; AVX512VL: RET 0, implicit %xmm0, implicit %xmm1 + ; AVX512VL: $xmm0 = COPY [[COPY]] + ; AVX512VL: $xmm1 = COPY [[VEXTRACTF32x4Z256rr]] + ; AVX512VL: RET 0, implicit $xmm0, implicit $xmm1 %0(<8 x s32>) = IMPLICIT_DEF %1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>) - %xmm0 = COPY %1(<4 x s32>) - %xmm1 = COPY %2(<4 x s32>) - RET 0, implicit %xmm0, implicit %xmm1 + $xmm0 = COPY %1(<4 x s32>) + $xmm1 = COPY %2(<4 x s32>) + RET 0, implicit $xmm0, implicit $xmm1 ... Index: test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir +++ test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir @@ -30,12 +30,12 @@ ; ALL: [[VEXTRACTF32x4Zrr:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 1 ; ALL: [[VEXTRACTF32x4Zrr1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 2 ; ALL: [[VEXTRACTF32x4Zrr2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 3 - ; ALL: %xmm0 = COPY [[COPY]] - ; ALL: RET 0, implicit %xmm0 + ; ALL: $xmm0 = COPY [[COPY]] + ; ALL: RET 0, implicit $xmm0 %0(<16 x s32>) = IMPLICIT_DEF %1(<4 x s32>), %2(<4 x s32>), %3(<4 x s32>), %4(<4 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>) - %xmm0 = COPY %1(<4 x s32>) - RET 0, implicit %xmm0 + $xmm0 = COPY %1(<4 x s32>) + RET 0, implicit $xmm0 ... --- @@ -54,11 +54,11 @@ ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY [[DEF]].sub_ymm ; ALL: [[VEXTRACTF64x4Zrr:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrr [[DEF]], 1 - ; ALL: %ymm0 = COPY [[COPY]] - ; ALL: RET 0, implicit %ymm0 + ; ALL: $ymm0 = COPY [[COPY]] + ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF %1(<8 x s32>), %2(<8 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>) - %ymm0 = COPY %1(<8 x s32>) - RET 0, implicit %ymm0 + $ymm0 = COPY %1(<8 x s32>) + RET 0, implicit $ymm0 ... 
Index: test/CodeGen/X86/GlobalISel/select-xor-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-xor-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-xor-scalar.mir @@ -38,19 +38,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; ALL-LABEL: name: test_xor_i8 - ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil - ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil - ; ALL: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %al = COPY [[XOR8rr]] - ; ALL: RET 0, implicit %al - %0(s8) = COPY %dil - %1(s8) = COPY %sil + ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil + ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil + ; ALL: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $al = COPY [[XOR8rr]] + ; ALL: RET 0, implicit $al + %0(s8) = COPY $dil + %1(s8) = COPY $sil %2(s8) = G_XOR %0, %1 - %al = COPY %2(s8) - RET 0, implicit %al + $al = COPY %2(s8) + RET 0, implicit $al ... --- @@ -68,19 +68,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; ALL-LABEL: name: test_xor_i16 - ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di - ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si - ; ALL: [[XOR16rr:%[0-9]+]]:gr16 = XOR16rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %ax = COPY [[XOR16rr]] - ; ALL: RET 0, implicit %ax - %0(s16) = COPY %di - %1(s16) = COPY %si + ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di + ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si + ; ALL: [[XOR16rr:%[0-9]+]]:gr16 = XOR16rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $ax = COPY [[XOR16rr]] + ; ALL: RET 0, implicit $ax + %0(s16) = COPY $di + %1(s16) = COPY $si %2(s16) = G_XOR %0, %1 - %ax = COPY %2(s16) - RET 0, implicit %ax + $ax = COPY %2(s16) + RET 0, implicit $ax ... --- @@ -98,19 +98,19 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %edi, %esi + liveins: $edi, $esi ; ALL-LABEL: name: test_xor_i32 - ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi - ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi - ; ALL: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %eax = COPY [[XOR32rr]] - ; ALL: RET 0, implicit %eax - %0(s32) = COPY %edi - %1(s32) = COPY %esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $eax = COPY [[XOR32rr]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi %2(s32) = G_XOR %0, %1 - %eax = COPY %2(s32) - RET 0, implicit %eax + $eax = COPY %2(s32) + RET 0, implicit $eax ... --- @@ -128,18 +128,18 @@ constants: body: | bb.1 (%ir-block.0): - liveins: %rdi, %rsi + liveins: $rdi, $rsi ; ALL-LABEL: name: test_xor_i64 - ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi - ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi - ; ALL: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[COPY1]], implicit-def %eflags - ; ALL: %rax = COPY [[XOR64rr]] - ; ALL: RET 0, implicit %rax - %0(s64) = COPY %rdi - %1(s64) = COPY %rsi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; ALL: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[COPY1]], implicit-def $eflags + ; ALL: $rax = COPY [[XOR64rr]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi %2(s64) = G_XOR %0, %1 - %rax = COPY %2(s64) - RET 0, implicit %rax + $rax = COPY %2(s64) + RET 0, implicit $rax ... 
Index: test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir +++ test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir @@ -24,11 +24,11 @@ body: | bb.1 (%ir-block.0): ; CHECK-LABEL: name: allocai32 - ; CHECK: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r %stack.0.ptr1, 1, %noreg, 0, %noreg - ; CHECK: %eax = COPY [[LEA64_32r]] - ; CHECK: RET 0, implicit %eax + ; CHECK: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r %stack.0.ptr1, 1, $noreg, 0, $noreg + ; CHECK: $eax = COPY [[LEA64_32r]] + ; CHECK: RET 0, implicit $eax %0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1 - %eax = COPY %0(p0) - RET 0, implicit %eax + $eax = COPY %0(p0) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir +++ test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir @@ -19,12 +19,12 @@ registers: - { id: 0, class: _, preferred-register: '' } # ALL: %0:_(p0) = G_GLOBAL_VALUE @g_int -# ALL-NEXT: %eax = COPY %0(p0) -# ALL-NEXT: RET 0, implicit %rax +# ALL-NEXT: $eax = COPY %0(p0) +# ALL-NEXT: RET 0, implicit $rax body: | bb.1.entry: %0(p0) = G_GLOBAL_VALUE @g_int - %eax = COPY %0(p0) - RET 0, implicit %rax + $eax = COPY %0(p0) + RET 0, implicit $rax ... Index: test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir +++ test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir @@ -24,11 +24,11 @@ body: | bb.1 (%ir-block.0): ; CHECK-LABEL: name: allocai32 - ; CHECK: [[LEA32r:%[0-9]+]]:gr32 = LEA32r %stack.0.ptr1, 1, %noreg, 0, %noreg - ; CHECK: %eax = COPY [[LEA32r]] - ; CHECK: RET 0, implicit %eax + ; CHECK: [[LEA32r:%[0-9]+]]:gr32 = LEA32r %stack.0.ptr1, 1, $noreg, 0, $noreg + ; CHECK: $eax = COPY [[LEA32r]] + ; CHECK: RET 0, implicit $eax %0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1 - %eax = COPY %0(p0) - RET 0, implicit %eax + $eax = COPY %0(p0) + RET 0, implicit $eax ... Index: test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir +++ test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir @@ -19,12 +19,12 @@ registers: - { id: 0, class: _, preferred-register: '' } # ALL: %0:_(p0) = G_GLOBAL_VALUE @g_int -# ALL-NEXT: %rax = COPY %0(p0) -# ALL-NEXT: RET 0, implicit %rax +# ALL-NEXT: $rax = COPY %0(p0) +# ALL-NEXT: RET 0, implicit $rax body: | bb.1.entry: %0(p0) = G_GLOBAL_VALUE @g_int - %rax = COPY %0(p0) - RET 0, implicit %rax + $rax = COPY %0(p0) + RET 0, implicit $rax ... Index: test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir +++ test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir @@ -24,11 +24,11 @@ body: | bb.1 (%ir-block.0): ; CHECK-LABEL: name: allocai32 - ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0.ptr1, 1, %noreg, 0, %noreg - ; CHECK: %rax = COPY [[LEA64r]] - ; CHECK: RET 0, implicit %rax + ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0.ptr1, 1, $noreg, 0, $noreg + ; CHECK: $rax = COPY [[LEA64r]] + ; CHECK: RET 0, implicit $rax %0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1 - %rax = COPY %0(p0) - RET 0, implicit %rax + $rax = COPY %0(p0) + RET 0, implicit $rax ... 
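Note: from here on the updated tests are .ll files that check generated assembly rather than MIR. The $ prefix appears only in the MIR-derived comments the printer emits into the asm (e.g. # kill: def $al killed $al killed $eax, # implicit-def: $ymm1); real AT&T operands are untouched, since there % is the assembler's register prefix and $ marks an immediate (as in vinsertf128 $1, %xmm0, %ymm1, %ymm0). A compact sketch of the three spellings, as encoded by the tests in this patch:

    $eax, $xmm0, $noreg, $eflags    MIR physical registers (renamed in this patch)
    %0, %1, %stack.0.ptr1           MIR virtual registers and stack objects (unchanged)
    %eax, %xmm0, $1                 AT&T assembly operands: % registers, $ immediates (unchanged)
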
Index: test/CodeGen/X86/add-sub-nsw-nuw.ll =================================================================== --- test/CodeGen/X86/add-sub-nsw-nuw.ll +++ test/CodeGen/X86/add-sub-nsw-nuw.ll @@ -10,7 +10,7 @@ ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: negl %eax -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retl entry: %or = or i64 %argc, -4294967296 Index: test/CodeGen/X86/add.ll =================================================================== --- test/CodeGen/X86/add.ll +++ test/CodeGen/X86/add.ll @@ -176,14 +176,14 @@ ; ; X64-LINUX-LABEL: test6: ; X64-LINUX: # %bb.0: # %entry -; X64-LINUX-NEXT: # kill: def %esi killed %esi def %rsi +; X64-LINUX-NEXT: # kill: def $esi killed $esi def $rsi ; X64-LINUX-NEXT: shlq $32, %rsi ; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax ; X64-LINUX-NEXT: retq ; ; X64-WIN32-LABEL: test6: ; X64-WIN32: # %bb.0: # %entry -; X64-WIN32-NEXT: # kill: def %edx killed %edx def %rdx +; X64-WIN32-NEXT: # kill: def $edx killed $edx def $rdx ; X64-WIN32-NEXT: shlq $32, %rdx ; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax ; X64-WIN32-NEXT: retq Index: test/CodeGen/X86/addcarry.ll =================================================================== --- test/CodeGen/X86/addcarry.ll +++ test/CodeGen/X86/addcarry.ll @@ -84,7 +84,7 @@ define i8 @e(i32* nocapture %a, i32 %b) nounwind { ; CHECK-LABEL: e: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi ; CHECK-NEXT: movl (%rdi), %ecx ; CHECK-NEXT: leal (%rsi,%rcx), %edx ; CHECK-NEXT: addl %esi, %edx Index: test/CodeGen/X86/and-encoding.ll =================================================================== --- test/CodeGen/X86/and-encoding.ll +++ test/CodeGen/X86/and-encoding.ll @@ -110,7 +110,7 @@ ; CHECK-NEXT: imulq %rcx, %rax # encoding: [0x48,0x0f,0xaf,0xc1] ; CHECK-NEXT: shrq $36, %rax # encoding: [0x48,0xc1,0xe8,0x24] ; CHECK-NEXT: andl $-128, %eax # encoding: [0x83,0xe0,0x80] -; CHECK-NEXT: # kill: def %eax killed %eax killed %rax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-NEXT: retq # encoding: [0xc3] %div = udiv i32 %x, 17 %and = and i32 %div, 268435328 Index: test/CodeGen/X86/anyext.ll =================================================================== --- test/CodeGen/X86/anyext.ll +++ test/CodeGen/X86/anyext.ll @@ -8,7 +8,7 @@ ; X32-LABEL: foo: ; X32: # %bb.0: ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; X32-NEXT: # kill: def %eax killed %eax def %ax +; X32-NEXT: # kill: def $eax killed $eax def $ax ; X32-NEXT: divb {{[0-9]+}}(%esp) ; X32-NEXT: movzbl %al, %eax ; X32-NEXT: andl $1, %eax @@ -17,7 +17,7 @@ ; X64-LABEL: foo: ; X64: # %bb.0: ; X64-NEXT: movzbl %dil, %eax -; X64-NEXT: # kill: def %eax killed %eax def %ax +; X64-NEXT: # kill: def $eax killed $eax def $ax ; X64-NEXT: divb %sil ; X64-NEXT: movzbl %al, %eax ; X64-NEXT: andl $1, %eax @@ -35,7 +35,7 @@ ; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X32-NEXT: xorl %edx, %edx ; X32-NEXT: divw {{[0-9]+}}(%esp) -; X32-NEXT: # kill: def %ax killed %ax def %eax +; X32-NEXT: # kill: def $ax killed $ax def $eax ; X32-NEXT: andl $1, %eax ; X32-NEXT: retl ; @@ -44,7 +44,7 @@ ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: movl %edi, %eax ; X64-NEXT: divw %si -; X64-NEXT: # kill: def %ax killed %ax def %eax +; X64-NEXT: # kill: def $ax killed $ax def $eax ; X64-NEXT: andl $1, %eax ; X64-NEXT: retq %q = trunc i32 %p to i16 Index: test/CodeGen/X86/atomic-eflags-reuse.ll 
=================================================================== --- test/CodeGen/X86/atomic-eflags-reuse.ll +++ test/CodeGen/X86/atomic-eflags-reuse.ll @@ -93,7 +93,7 @@ ; CHECK-NEXT: movl $1, %eax ; CHECK-NEXT: lock xaddq %rax, (%rdi) ; CHECK-NEXT: shrq $63, %rax -; CHECK-NEXT: # kill: def %al killed %al killed %rax +; CHECK-NEXT: # kill: def $al killed $al killed $rax ; CHECK-NEXT: retq entry: %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst Index: test/CodeGen/X86/avx-cast.ll =================================================================== --- test/CodeGen/X86/avx-cast.ll +++ test/CodeGen/X86/avx-cast.ll @@ -9,7 +9,7 @@ define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp { ; AVX-LABEL: castA: ; AVX: ## %bb.0: -; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] ; AVX-NEXT: retq @@ -20,7 +20,7 @@ define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp { ; AVX-LABEL: castB: ; AVX: ## %bb.0: -; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] ; AVX-NEXT: retq @@ -33,7 +33,7 @@ define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp { ; AVX-LABEL: castC: ; AVX: ## %bb.0: -; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] ; AVX-NEXT: retq @@ -47,7 +47,7 @@ define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp { ; AVX-LABEL: castD: ; AVX: ## %bb.0: -; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> @@ -57,7 +57,7 @@ define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp { ; AVX-LABEL: castE: ; AVX: ## %bb.0: -; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> @@ -67,7 +67,7 @@ define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp { ; AVX-LABEL: castF: ; AVX: ## %bb.0: -; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> Index: test/CodeGen/X86/avx-cmp.ll =================================================================== --- test/CodeGen/X86/avx-cmp.ll +++ test/CodeGen/X86/avx-cmp.ll @@ -197,7 +197,7 @@ ; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vmovq %xmm0, %rax ; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def %eax killed %eax killed %rax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-NEXT: retq %cmp29 = fcmp oeq double undef, 0.000000e+00 %res = zext i1 %cmp29 to i32 Index: test/CodeGen/X86/avx-insertelt.ll =================================================================== --- test/CodeGen/X86/avx-insertelt.ll +++ test/CodeGen/X86/avx-insertelt.ll @@ -5,7 +5,7 @@ define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) { ; ALL-LABEL: insert_f32: ; ALL: 
# %bb.0: -; ALL-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7] ; ALL-NEXT: retq %i0 = insertelement <8 x float> %y, float %f, i32 0 @@ -15,7 +15,7 @@ define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) { ; ALL-LABEL: insert_f64: ; ALL: # %bb.0: -; ALL-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7] ; ALL-NEXT: retq %i0 = insertelement <4 x double> %y, double %f, i32 0 Index: test/CodeGen/X86/avx-intrinsics-fast-isel.ll =================================================================== --- test/CodeGen/X86/avx-intrinsics-fast-isel.ll +++ test/CodeGen/X86/avx-intrinsics-fast-isel.ll @@ -316,12 +316,12 @@ define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind { ; X32-LABEL: test_mm256_castpd128_pd256: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_castpd128_pd256: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X64-NEXT: retq %res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> ret <4 x double> %res @@ -330,13 +330,13 @@ define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind { ; X32-LABEL: test_mm256_castpd256_pd128: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_castpd256_pd128: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %res = shufflevector <4 x double> %a0, <4 x double> %a0, <2 x i32> @@ -370,12 +370,12 @@ define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind { ; X32-LABEL: test_mm256_castps128_ps256: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_castps128_ps256: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X64-NEXT: retq %res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> ret <8 x float> %res @@ -384,13 +384,13 @@ define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind { ; X32-LABEL: test_mm256_castps256_ps128: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_castps256_ps128: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %res = shufflevector <8 x float> %a0, <8 x float> %a0, <4 x i32> @@ -400,12 +400,12 @@ define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind { ; X32-LABEL: test_mm256_castsi128_si256: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_castsi128_si256: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X64-NEXT: # kill: def $xmm0 
killed $xmm0 def $ymm0 ; X64-NEXT: retq %res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> ret <4 x i64> %res @@ -438,13 +438,13 @@ define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind { ; X32-LABEL: test_mm256_castsi256_si128: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_castsi256_si128: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %res = shufflevector <4 x i64> %a0, <4 x i64> %a0, <2 x i32> @@ -1043,13 +1043,13 @@ define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind { ; X32-LABEL: test_mm256_insertf128_pd: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_insertf128_pd: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] ; X64-NEXT: retq %ext = shufflevector <2 x double> %a1, <2 x double> %a1, <4 x i32> @@ -1075,13 +1075,13 @@ define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind { ; X32-LABEL: test_mm256_insertf128_si256: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_insertf128_si256: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] ; X64-NEXT: retq %ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> @@ -2188,13 +2188,13 @@ define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind { ; X32-LABEL: test_mm256_set_m128: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_set_m128: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X64-NEXT: retq %res = shufflevector <4 x float> %a1, <4 x float> %a0, <8 x i32> @@ -2204,13 +2204,13 @@ define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind { ; X32-LABEL: test_mm256_set_m128d: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_set_m128d: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X64-NEXT: retq %arg0 = bitcast <2 x double> %a0 to <4 x float> @@ -2223,13 +2223,13 @@ define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind { ; X32-LABEL: test_mm256_set_m128i: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X32-NEXT: 
vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_set_m128i: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X64-NEXT: retq %arg0 = bitcast <2 x i64> %a0 to <4 x float> @@ -2825,13 +2825,13 @@ define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind { ; X32-LABEL: test_mm256_setr_m128: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_setr_m128: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X64-NEXT: retq %res = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> @@ -2841,13 +2841,13 @@ define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind { ; X32-LABEL: test_mm256_setr_m128d: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_setr_m128d: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X64-NEXT: retq %arg0 = bitcast <2 x double> %a0 to <4 x float> @@ -2860,13 +2860,13 @@ define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind { ; X32-LABEL: test_mm256_setr_m128i: ; X32: # %bb.0: -; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_setr_m128i: ; X64: # %bb.0: -; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X64-NEXT: retq %arg0 = bitcast <2 x i64> %a0 to <4 x float> Index: test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll =================================================================== --- test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll +++ test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll @@ -39,7 +39,7 @@ define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) { ; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] ; CHECK-NEXT: ret{{[l|q]}} %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2) @@ -88,7 +88,7 @@ define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) { ; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: ret{{[l|q]}} %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2) Index: test/CodeGen/X86/avx-load-store.ll =================================================================== --- test/CodeGen/X86/avx-load-store.ll +++ test/CodeGen/X86/avx-load-store.ll @@ -85,7 +85,7 @@ ; CHECK_O0-LABEL: mov00: ; CHECK_O0: # %bb.0: ; CHECK_O0-NEXT: vmovss {{.*#+}} 
xmm0 = mem[0],zero,zero,zero -; CHECK_O0-NEXT: # implicit-def: %ymm1 +; CHECK_O0-NEXT: # implicit-def: $ymm1 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1 ; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7] @@ -104,7 +104,7 @@ ; CHECK_O0-LABEL: mov01: ; CHECK_O0: # %bb.0: ; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero -; CHECK_O0-NEXT: # implicit-def: %ymm1 +; CHECK_O0-NEXT: # implicit-def: $ymm1 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1 ; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3] @@ -121,7 +121,7 @@ ; ; CHECK_O0-LABEL: storev16i16: ; CHECK_O0: # %bb.0: -; CHECK_O0-NEXT: # implicit-def: %rax +; CHECK_O0-NEXT: # implicit-def: $rax ; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax) store <16 x i16> %a, <16 x i16>* undef, align 32 unreachable @@ -135,7 +135,7 @@ ; ; CHECK_O0-LABEL: storev16i16_01: ; CHECK_O0: # %bb.0: -; CHECK_O0-NEXT: # implicit-def: %rax +; CHECK_O0-NEXT: # implicit-def: $rax ; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax) store <16 x i16> %a, <16 x i16>* undef, align 4 unreachable @@ -148,7 +148,7 @@ ; ; CHECK_O0-LABEL: storev32i8: ; CHECK_O0: # %bb.0: -; CHECK_O0-NEXT: # implicit-def: %rax +; CHECK_O0-NEXT: # implicit-def: $rax ; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax) store <32 x i8> %a, <32 x i8>* undef, align 32 unreachable @@ -162,7 +162,7 @@ ; ; CHECK_O0-LABEL: storev32i8_01: ; CHECK_O0: # %bb.0: -; CHECK_O0-NEXT: # implicit-def: %rax +; CHECK_O0-NEXT: # implicit-def: $rax ; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax) store <32 x i8> %a, <32 x i8>* undef, align 4 unreachable @@ -179,7 +179,7 @@ ; ; CHECK_O0-LABEL: double_save: ; CHECK_O0: # %bb.0: -; CHECK_O0-NEXT: # implicit-def: %ymm2 +; CHECK_O0-NEXT: # implicit-def: $ymm2 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2 ; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2 ; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi) @@ -211,13 +211,13 @@ ; ; CHECK_O0-LABEL: f_f: ; CHECK_O0: # %bb.0: # %allocas -; CHECK_O0-NEXT: # implicit-def: %al +; CHECK_O0-NEXT: # implicit-def: $al ; CHECK_O0-NEXT: testb $1, %al ; CHECK_O0-NEXT: jne .LBB8_1 ; CHECK_O0-NEXT: jmp .LBB8_2 ; CHECK_O0-NEXT: .LBB8_1: # %cif_mask_all ; CHECK_O0-NEXT: .LBB8_2: # %cif_mask_mixed -; CHECK_O0-NEXT: # implicit-def: %al +; CHECK_O0-NEXT: # implicit-def: $al ; CHECK_O0-NEXT: testb $1, %al ; CHECK_O0-NEXT: jne .LBB8_3 ; CHECK_O0-NEXT: jmp .LBB8_4 @@ -225,8 +225,8 @@ ; CHECK_O0-NEXT: movl $-1, %eax ; CHECK_O0-NEXT: vmovd %eax, %xmm0 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1 -; CHECK_O0-NEXT: # implicit-def: %rcx -; CHECK_O0-NEXT: # implicit-def: %ymm2 +; CHECK_O0-NEXT: # implicit-def: $rcx +; CHECK_O0-NEXT: # implicit-def: $ymm2 ; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx) ; CHECK_O0-NEXT: .LBB8_4: # %cif_mixed_test_any_check allocas: @@ -259,7 +259,7 @@ ; CHECK_O0: # %bb.0: ; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0 ; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1 -; CHECK_O0-NEXT: # implicit-def: %ymm2 +; CHECK_O0-NEXT: # implicit-def: $ymm2 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2 ; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2 ; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi) @@ -304,7 +304,7 @@ ; CHECK_O0: # %bb.0: ; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0 ; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1 -; CHECK_O0-NEXT: # implicit-def: %ymm2 +; CHECK_O0-NEXT: # implicit-def: $ymm2 ; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2 ; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2 ; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi) Index: test/CodeGen/X86/avx-splat.ll =================================================================== --- 
test/CodeGen/X86/avx-splat.ll +++ test/CodeGen/X86/avx-splat.ll @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: # %for_exit499 ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: testb %al, %al -; CHECK-NEXT: # implicit-def: %ymm0 +; CHECK-NEXT: # implicit-def: $ymm0 ; CHECK-NEXT: jne .LBB4_2 ; CHECK-NEXT: # %bb.1: # %load.i1247 ; CHECK-NEXT: pushq %rbp Index: test/CodeGen/X86/avx-vinsertf128.ll =================================================================== --- test/CodeGen/X86/avx-vinsertf128.ll +++ test/CodeGen/X86/avx-vinsertf128.ll @@ -75,7 +75,7 @@ define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) { ; CHECK-LABEL: insert_undef_pd: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ; CHECK-NEXT: retq %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0) @@ -86,7 +86,7 @@ define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) { ; CHECK-LABEL: insert_undef_ps: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ; CHECK-NEXT: retq %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0) @@ -97,7 +97,7 @@ define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) { ; CHECK-LABEL: insert_undef_si: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ; CHECK-NEXT: retq %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0) Index: test/CodeGen/X86/avx-vzeroupper.ll =================================================================== --- test/CodeGen/X86/avx-vzeroupper.ll +++ test/CodeGen/X86/avx-vzeroupper.ll @@ -82,14 +82,14 @@ ; VZ-LABEL: test02: ; VZ: # %bb.0: ; VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0 -; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; VZ-NEXT: vzeroupper ; VZ-NEXT: jmp do_sse # TAILCALL ; ; NO-VZ-LABEL: test02: ; NO-VZ: # %bb.0: ; NO-VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0 -; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; NO-VZ-NEXT: jmp do_sse # TAILCALL %add.i = fadd <8 x float> %a, %b %add.low = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %add.i, i8 0) @@ -222,10 +222,10 @@ ; VZ-LABEL: test04: ; VZ: # %bb.0: ; VZ-NEXT: pushq %rax -; VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; VZ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; VZ-NEXT: callq do_avx -; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; VZ-NEXT: popq %rax ; VZ-NEXT: vzeroupper ; VZ-NEXT: retq @@ -233,10 +233,10 @@ ; NO-VZ-LABEL: test04: ; NO-VZ: # %bb.0: ; NO-VZ-NEXT: pushq %rax -; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; NO-VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; NO-VZ-NEXT: callq do_avx -; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; NO-VZ-NEXT: popq %rax ; NO-VZ-NEXT: retq %shuf = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> Index: test/CodeGen/X86/avx2-conversions.ll 
=================================================================== --- test/CodeGen/X86/avx2-conversions.ll +++ test/CodeGen/X86/avx2-conversions.ll @@ -9,7 +9,7 @@ ; X32-SLOW: # %bb.0: ; X32-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] ; X32-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X32-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-SLOW-NEXT: vzeroupper ; X32-SLOW-NEXT: retl ; @@ -17,7 +17,7 @@ ; X32-FAST: # %bb.0: ; X32-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7] ; X32-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0 -; X32-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-FAST-NEXT: vzeroupper ; X32-FAST-NEXT: retl ; @@ -25,7 +25,7 @@ ; X64-SLOW: # %bb.0: ; X64-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] ; X64-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X64-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-SLOW-NEXT: vzeroupper ; X64-SLOW-NEXT: retq ; @@ -33,7 +33,7 @@ ; X64-FAST: # %bb.0: ; X64-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7] ; X64-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0 -; X64-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-FAST-NEXT: vzeroupper ; X64-FAST-NEXT: retq %B = trunc <4 x i64> %A to <4 x i32> @@ -45,7 +45,7 @@ ; X32: # %bb.0: ; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; @@ -53,7 +53,7 @@ ; X64: # %bb.0: ; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %B = trunc <8 x i32> %A to <8 x i16> Index: test/CodeGen/X86/avx2-intrinsics-fast-isel.ll =================================================================== --- test/CodeGen/X86/avx2-intrinsics-fast-isel.ll +++ test/CodeGen/X86/avx2-intrinsics-fast-isel.ll @@ -355,7 +355,7 @@ define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) { ; CHECK-LABEL: test_mm256_broadcastsi128_si256: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; CHECK-NEXT: ret{{[l|q]}} %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> @@ -1447,7 +1447,7 @@ define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind { ; CHECK-LABEL: test0_mm256_inserti128_si256: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] ; CHECK-NEXT: ret{{[l|q]}} %ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> Index: test/CodeGen/X86/avx2-masked-gather.ll =================================================================== --- test/CodeGen/X86/avx2-masked-gather.ll +++ test/CodeGen/X86/avx2-masked-gather.ll @@ -32,7 +32,7 @@ ; 
NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %xmm2 +; NOGATHER-NEXT: # implicit-def: $xmm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB0_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -82,7 +82,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %xmm2 +; NOGATHER-NEXT: # implicit-def: $xmm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB1_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -134,7 +134,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %xmm2 +; NOGATHER-NEXT: # implicit-def: $xmm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB2_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -182,7 +182,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %xmm2 +; NOGATHER-NEXT: # implicit-def: $xmm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB3_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -229,7 +229,7 @@ ; NOGATHER-LABEL: masked_gather_v4i32: ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax -; NOGATHER-NEXT: # implicit-def: %xmm3 +; NOGATHER-NEXT: # implicit-def: $xmm3 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB4_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -289,7 +289,7 @@ ; NOGATHER-LABEL: masked_gather_v4float: ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax -; NOGATHER-NEXT: # implicit-def: %xmm3 +; NOGATHER-NEXT: # implicit-def: $xmm3 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB5_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -360,7 +360,7 @@ ; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4 ; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %ymm2 +; NOGATHER-NEXT: # implicit-def: $ymm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB6_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -476,7 +476,7 @@ ; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4 ; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %ymm2 +; NOGATHER-NEXT: # implicit-def: $ymm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB7_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -587,7 +587,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %ymm2 +; NOGATHER-NEXT: # implicit-def: $ymm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB8_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -662,7 +662,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %ymm2 +; NOGATHER-NEXT: # implicit-def: $ymm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB9_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -735,7 +735,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: %xmm2 +; NOGATHER-NEXT: # implicit-def: $xmm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB10_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load @@ -782,7 +782,7 @@ ; NOGATHER: # %bb.0: # %entry ; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3 ; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax -; NOGATHER-NEXT: # implicit-def: 
%xmm2 +; NOGATHER-NEXT: # implicit-def: $xmm2 ; NOGATHER-NEXT: testb $1, %al ; NOGATHER-NEXT: je .LBB11_2 ; NOGATHER-NEXT: # %bb.1: # %cond.load Index: test/CodeGen/X86/avx2-shift.ll =================================================================== --- test/CodeGen/X86/avx2-shift.ll +++ test/CodeGen/X86/avx2-shift.ll @@ -532,7 +532,7 @@ ; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; @@ -543,7 +543,7 @@ ; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %res = shl <8 x i16> %lhs, %rhs @@ -582,7 +582,7 @@ ; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; @@ -593,7 +593,7 @@ ; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %res = lshr <8 x i16> %lhs, %rhs Index: test/CodeGen/X86/avx2-vector-shifts.ll =================================================================== --- test/CodeGen/X86/avx2-vector-shifts.ll +++ test/CodeGen/X86/avx2-vector-shifts.ll @@ -431,7 +431,7 @@ ; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; @@ -442,7 +442,7 @@ ; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %shl = shl <8 x i16> %r, %a @@ -639,7 +639,7 @@ ; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; @@ -650,7 +650,7 @@ ; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ; X64-NEXT: vpshufb {{.*#+}} ymm0 = 
ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq %lshr = lshr <8 x i16> %r, %a Index: test/CodeGen/X86/avx512-arith.ll =================================================================== --- test/CodeGen/X86/avx512-arith.ll +++ test/CodeGen/X86/avx512-arith.ll @@ -176,10 +176,10 @@ ; ; AVX512DQ-LABEL: imulq256: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; SKX-LABEL: imulq256: @@ -229,10 +229,10 @@ ; ; AVX512DQ-LABEL: imulq128: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; Index: test/CodeGen/X86/avx512-build-vector.ll =================================================================== --- test/CodeGen/X86/avx512-build-vector.ll +++ test/CodeGen/X86/avx512-build-vector.ll @@ -14,7 +14,7 @@ define <16 x float> @test3(<4 x float> %a) { ; CHECK-LABEL: test3: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15] ; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; CHECK-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1 Index: test/CodeGen/X86/avx512-calling-conv.ll =================================================================== --- test/CodeGen/X86/avx512-calling-conv.ll +++ test/CodeGen/X86/avx512-calling-conv.ll @@ -67,7 +67,7 @@ ; KNL-NEXT: .cfi_def_cfa_offset 16 ; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: callq _func8xi1 ; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; KNL-NEXT: vpslld $31, %ymm0, %ymm0 @@ -95,7 +95,7 @@ ; KNL_X32-NEXT: .cfi_def_cfa_offset 16 ; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 ; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0 -; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL_X32-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL_X32-NEXT: calll _func8xi1 ; KNL_X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; KNL_X32-NEXT: vpslld $31, %ymm0, %ymm0 @@ -195,7 +195,7 @@ ; KNL-NEXT: .cfi_def_cfa_offset 16 ; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: callq 
_func8xi1 ; KNL-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0 ; KNL-NEXT: popq %rax @@ -219,7 +219,7 @@ ; KNL_X32-NEXT: .cfi_def_cfa_offset 16 ; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 ; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0 -; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL_X32-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL_X32-NEXT: calll _func8xi1 ; KNL_X32-NEXT: vandps LCPI7_0, %xmm0, %xmm0 ; KNL_X32-NEXT: addl $12, %esp @@ -378,21 +378,21 @@ ; KNL-LABEL: test13: ; KNL: ## %bb.0: ; KNL-NEXT: movzbl (%rdi), %eax -; KNL-NEXT: ## kill: def %al killed %al killed %eax +; KNL-NEXT: ## kill: def $al killed $al killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: test13: ; SKX: ## %bb.0: ; SKX-NEXT: kmovb (%rdi), %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %al killed %al killed %eax +; SKX-NEXT: ## kill: def $al killed $al killed $eax ; SKX-NEXT: retq ; ; KNL_X32-LABEL: test13: ; KNL_X32: ## %bb.0: ; KNL_X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; KNL_X32-NEXT: movzbl (%eax), %eax -; KNL_X32-NEXT: ## kill: def %al killed %al killed %eax +; KNL_X32-NEXT: ## kill: def $al killed $al killed $eax ; KNL_X32-NEXT: retl %bar = load <1 x i1>, <1 x i1>* %foo ret <1 x i1> %bar Index: test/CodeGen/X86/avx512-cmp-kor-sequence.ll =================================================================== --- test/CodeGen/X86/avx512-cmp-kor-sequence.ll +++ test/CodeGen/X86/avx512-cmp-kor-sequence.ll @@ -19,7 +19,7 @@ ; CHECK-NEXT: korw %k2, %k1, %k1 ; CHECK-NEXT: korw %k1, %k0, %k0 ; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq entry: %0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %x, i32 13, i16 -1, i32 4) Index: test/CodeGen/X86/avx512-cvt.ll =================================================================== --- test/CodeGen/X86/avx512-cvt.ll +++ test/CodeGen/X86/avx512-cvt.ll @@ -80,9 +80,9 @@ ; ; AVX512DQ-LABEL: slto4f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq %b = sitofp <4 x i64> %a to <4 x double> ret <4 x double> %b @@ -105,9 +105,9 @@ ; ; AVX512DQ-LABEL: slto2f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq %b = sitofp <2 x i64> %a to <2 x double> @@ -133,9 +133,9 @@ ; ; AVX512DQ-LABEL: sltof2f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq %b = sitofp <2 x i64> %a to <2 x float> @@ -170,7 +170,7 @@ ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vmovups (%rdi), %ymm0 ; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; 
AVX512DQ-NEXT: retq %a1 = load <4 x i64>, <4 x i64>* %a, align 8 @@ -204,9 +204,9 @@ ; ; AVX512DQ-LABEL: f64to4sl: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq %b = fptosi <4 x double> %a to <4 x i64> ret <4 x i64> %b @@ -238,9 +238,9 @@ ; ; AVX512DQ-LABEL: f32to4sl: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq %b = fptosi <4 x float> %a to <4 x i64> ret <4 x i64> %b @@ -272,9 +272,9 @@ ; ; AVX512DQ-LABEL: slto4f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq %b = sitofp <4 x i64> %a to <4 x float> @@ -307,9 +307,9 @@ ; ; AVX512DQ-LABEL: ulto4f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq %b = uitofp <4 x i64> %a to <4 x float> @@ -484,9 +484,9 @@ define <8 x i32> @f32to8ui(<8 x float> %a) nounwind { ; NOVL-LABEL: f32to8ui: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0 -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: f32to8ui: @@ -500,9 +500,9 @@ define <4 x i32> @f32to4ui(<4 x float> %a) nounwind { ; NOVL-LABEL: f32to4ui: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0 -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -528,7 +528,7 @@ ; NOVL: # %bb.0: ; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0 ; NOVL-NEXT: vpmovdw %zmm0, %ymm0 -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -547,7 +547,7 @@ ; NOVL: # %bb.0: ; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0 ; NOVL-NEXT: vpmovdw %zmm0, %ymm0 -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -564,9 +564,9 @@ define <4 x i32> @f64to4ui(<4 x double> %a) nounwind { ; NOVL-LABEL: f64to4ui: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0 -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; NOVL-NEXT: # kill: def $xmm0 
killed $xmm0 killed $ymm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -705,7 +705,7 @@ ; NOVL-NEXT: vptestmd %zmm1, %zmm1, %k1 ; NOVL-NEXT: vcvtpd2ps %ymm0, %xmm0 ; NOVL-NEXT: vmovaps %zmm0, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -744,12 +744,12 @@ define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) { ; NOVL-LABEL: f32to4f64_mask: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 +; NOVL-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; NOVL-NEXT: vcvtps2pd %xmm0, %ymm0 ; NOVL-NEXT: vcmpltpd %zmm2, %zmm1, %k1 ; NOVL-NEXT: vmovapd %zmm0, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: f32to4f64_mask: @@ -1291,9 +1291,9 @@ define <4 x double> @uito4f64(<4 x i32> %a) nounwind { ; NOVL-LABEL: uito4f64: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0 -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: uito4f64: @@ -1325,9 +1325,9 @@ define <8 x float> @uito8f32(<8 x i32> %a) nounwind { ; NOVL-LABEL: uito8f32: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0 -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: uito8f32: @@ -1341,9 +1341,9 @@ define <4 x float> @uito4f32(<4 x i32> %a) nounwind { ; NOVL-LABEL: uito4f32: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0 -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -1556,7 +1556,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) { ; NOVLDQ-LABEL: sbto8f32: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} @@ -1582,7 +1582,7 @@ ; ; AVX512DQ-LABEL: sbto8f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0 ; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0 @@ -1596,7 +1596,7 @@ define <4 x float> @sbto4f32(<4 x float> %a) { ; NOVLDQ-LABEL: sbto4f32: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} @@ -1623,7 +1623,7 @@ ; ; AVX512DQ-LABEL: sbto4f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def 
$zmm0 ; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0 ; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0 @@ -1638,7 +1638,7 @@ define <4 x double> @sbto4f64(<4 x double> %a) { ; NOVLDQ-LABEL: sbto4f64: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} @@ -1664,7 +1664,7 @@ ; ; AVX512DQ-LABEL: sbto4f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0 ; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0 @@ -1678,7 +1678,7 @@ define <2 x float> @sbto2f32(<2 x float> %a) { ; NOVLDQ-LABEL: sbto2f32: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} @@ -1705,7 +1705,7 @@ ; ; AVX512DQ-LABEL: sbto2f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0 ; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0 @@ -1720,7 +1720,7 @@ define <2 x double> @sbto2f64(<2 x double> %a) { ; NOVLDQ-LABEL: sbto2f64: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} @@ -1747,7 +1747,7 @@ ; ; AVX512DQ-LABEL: sbto2f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0 ; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0 @@ -1961,7 +1961,7 @@ define <8 x float> @ubto8f32(<8 x i32> %a) { ; NOVLDQ-LABEL: ubto8f32: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} @@ -1985,7 +1985,7 @@ ; ; AVX512DQ-LABEL: ubto8f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1 ; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} ; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0 @@ -1998,7 +1998,7 @@ define <8 x double> @ubto8f64(<8 x i32> %a) { ; NOVLDQ-LABEL: ubto8f64: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} @@ -2022,7 +2022,7 @@ ; ; AVX512DQ-LABEL: ubto8f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1 ; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} 
{z} ; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0 @@ -2035,7 +2035,7 @@ define <4 x float> @ubto4f32(<4 x i32> %a) { ; NOVLDQ-LABEL: ubto4f32: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} @@ -2060,7 +2060,7 @@ ; ; AVX512DQ-LABEL: ubto4f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1 ; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} ; AVX512DQ-NEXT: vcvtdq2ps %xmm0, %xmm0 @@ -2074,7 +2074,7 @@ define <4 x double> @ubto4f64(<4 x i32> %a) { ; NOVLDQ-LABEL: ubto4f64: ; NOVLDQ: # %bb.0: -; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 ; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} @@ -2098,7 +2098,7 @@ ; ; AVX512DQ-LABEL: ubto4f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1 ; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} ; AVX512DQ-NEXT: vcvtdq2pd %xmm0, %ymm0 @@ -2140,7 +2140,7 @@ ; NOVL-NEXT: vpcmpltuq %zmm1, %zmm0, %k1 ; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} ; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0 -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -2160,7 +2160,7 @@ define <2 x i64> @test_2f64toub(<2 x double> %a, <2 x i64> %passthru) { ; KNL-LABEL: test_2f64toub: ; KNL: # %bb.0: -; KNL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: vcvttsd2si %xmm0, %eax ; KNL-NEXT: andl $1, %eax ; KNL-NEXT: kmovw %eax, %k0 @@ -2170,7 +2170,7 @@ ; KNL-NEXT: kshiftlw $1, %k1, %k1 ; KNL-NEXT: korw %k1, %k0, %k1 ; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -2184,7 +2184,7 @@ ; ; AVX512DQ-LABEL: test_2f64toub: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0] ; AVX512DQ-NEXT: vcvttsd2si %xmm2, %eax ; AVX512DQ-NEXT: kmovw %eax, %k0 @@ -2195,13 +2195,13 @@ ; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1 ; AVX512DQ-NEXT: korb %k0, %k1, %k1 ; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: test_2f64toub: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512BW-NEXT: vcvttsd2si %xmm0, %eax ; AVX512BW-NEXT: andl $1, %eax ; AVX512BW-NEXT: kmovw %eax, %k0 @@ -2211,7 +2211,7 @@ ; AVX512BW-NEXT: kshiftlw $1, %k1, %k1 ; AVX512BW-NEXT: korw %k1, %k0, %k1 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 
killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %mask = fptoui <2 x double> %a to <2 x i1> @@ -2222,12 +2222,12 @@ define <4 x i64> @test_4f64toub(<4 x double> %a, <4 x i64> %passthru) { ; NOVL-LABEL: test_4f64toub: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 +; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; NOVL-NEXT: vcvttpd2dq %ymm0, %xmm0 ; NOVL-NEXT: vpslld $31, %xmm0, %xmm0 ; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: test_4f64toub: @@ -2266,12 +2266,12 @@ define <2 x i64> @test_2f32toub(<2 x float> %a, <2 x i64> %passthru) { ; NOVL-LABEL: test_2f32toub: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; NOVL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0 ; NOVL-NEXT: vpslld $31, %xmm0, %xmm0 ; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -2290,12 +2290,12 @@ define <4 x i64> @test_4f32toub(<4 x float> %a, <4 x i64> %passthru) { ; NOVL-LABEL: test_4f32toub: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 +; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0 ; NOVL-NEXT: vpslld $31, %xmm0, %xmm0 ; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: test_4f32toub: @@ -2347,7 +2347,7 @@ define <2 x i64> @test_2f64tosb(<2 x double> %a, <2 x i64> %passthru) { ; KNL-LABEL: test_2f64tosb: ; KNL: # %bb.0: -; KNL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: vcvttsd2si %xmm0, %eax ; KNL-NEXT: andl $1, %eax ; KNL-NEXT: kmovw %eax, %k0 @@ -2357,7 +2357,7 @@ ; KNL-NEXT: kshiftlw $1, %k1, %k1 ; KNL-NEXT: korw %k1, %k0, %k1 ; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -2371,7 +2371,7 @@ ; ; AVX512DQ-LABEL: test_2f64tosb: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0] ; AVX512DQ-NEXT: vcvttsd2si %xmm2, %eax ; AVX512DQ-NEXT: kmovw %eax, %k0 @@ -2382,13 +2382,13 @@ ; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1 ; AVX512DQ-NEXT: korb %k0, %k1, %k1 ; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: test_2f64tosb: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512BW-NEXT: vcvttsd2si %xmm0, %eax ; AVX512BW-NEXT: andl $1, %eax ; AVX512BW-NEXT: kmovw %eax, %k0 @@ -2398,7 +2398,7 @@ ; AVX512BW-NEXT: kshiftlw $1, %k1, %k1 ; AVX512BW-NEXT: korw %k1, %k0, %k1 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; AVX512BW-NEXT: # kill: def 
%xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %mask = fptosi <2 x double> %a to <2 x i1> @@ -2409,11 +2409,11 @@ define <4 x i64> @test_4f64tosb(<4 x double> %a, <4 x i64> %passthru) { ; NOVL-LABEL: test_4f64tosb: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 +; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; NOVL-NEXT: vcvttpd2dq %ymm0, %xmm0 ; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: test_4f64tosb: @@ -2449,11 +2449,11 @@ define <2 x i64> @test_2f32tosb(<2 x float> %a, <2 x i64> %passthru) { ; NOVL-LABEL: test_2f32tosb: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; NOVL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0 ; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NOVL-NEXT: vzeroupper ; NOVL-NEXT: retq ; @@ -2471,11 +2471,11 @@ define <4 x i64> @test_4f32tosb(<4 x float> %a, <4 x i64> %passthru) { ; NOVL-LABEL: test_4f32tosb: ; NOVL: # %bb.0: -; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 +; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0 ; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} -; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NOVL-NEXT: retq ; ; VL-LABEL: test_4f32tosb: Index: test/CodeGen/X86/avx512-ext.ll =================================================================== --- test/CodeGen/X86/avx512-ext.ll +++ test/CodeGen/X86/avx512-ext.ll @@ -302,7 +302,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_4x8mem_to_4x32: @@ -324,7 +324,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxbd (%rdi), %xmm0 ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_4x8mem_to_4x32: @@ -347,7 +347,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_8x8mem_to_8x32: @@ -370,7 +370,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxbd (%rdi), %ymm0 ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_8x8mem_to_8x32: @@ -492,7 +492,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxbq 
{{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_2x8mem_to_2x64: @@ -513,7 +513,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxbq (%rdi), %xmm0 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_2x8mem_to_2x64mask: @@ -544,7 +544,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_4x8mem_to_4x64: @@ -566,7 +566,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxbq (%rdi), %ymm0 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_4x8mem_to_4x64mask: @@ -650,7 +650,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_4x16mem_to_4x32: @@ -672,7 +672,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxwd (%rdi), %xmm0 ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_4x16mem_to_4x32mask: @@ -706,7 +706,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_8x16mem_to_8x32: @@ -729,7 +729,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxwd (%rdi), %ymm0 ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_8x16mem_to_8x32mask: @@ -762,7 +762,7 @@ ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 ; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_8x16_to_8x32mask: @@ -872,7 +872,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: 
zext_2x16mem_to_2x64: @@ -894,7 +894,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxwq (%rdi), %xmm0 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_2x16mem_to_2x64mask: @@ -926,7 +926,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_4x16mem_to_4x64: @@ -948,7 +948,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxwq (%rdi), %ymm0 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_4x16mem_to_4x64mask: @@ -1061,7 +1061,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_2x32mem_to_2x64: @@ -1083,7 +1083,7 @@ ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxdq (%rdi), %xmm0 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_2x32mem_to_2x64mask: @@ -1115,7 +1115,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_4x32mem_to_4x64: @@ -1137,7 +1137,7 @@ ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 ; KNL-NEXT: vpmovsxdq (%rdi), %ymm0 ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_4x32mem_to_4x64mask: @@ -1178,7 +1178,7 @@ ; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 ; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z} -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: zext_4x32_to_4x64mask: @@ -1331,7 +1331,7 @@ ; KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: # kill: def %ax killed %ax killed %eax +; KNL-NEXT: # kill: def $ax killed $ax killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: trunc_16i8_to_16i1: @@ -1339,7 +1339,7 @@ ; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 ; SKX-NEXT: vpmovb2m %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: # kill: def %ax killed %ax killed %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax ; SKX-NEXT: retq %mask_b = trunc <16 x i8>%a to <16 x i1> %mask = bitcast <16 x i1> %mask_b to i16 @@ -1352,7 +1352,7 @@ ; KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: # kill: def %ax killed %ax killed %eax +; KNL-NEXT: # kill: def $ax killed $ax 
killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: trunc_16i32_to_16i1: @@ -1360,7 +1360,7 @@ ; SKX-NEXT: vpslld $31, %zmm0, %zmm0 ; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: # kill: def %ax killed %ax killed %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %mask_b = trunc <16 x i32>%a to <16 x i1> @@ -1390,7 +1390,7 @@ ; KNL-NEXT: vpsllq $63, %zmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: # kill: def %al killed %al killed %eax +; KNL-NEXT: # kill: def $al killed $al killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: trunc_8i16_to_8i1: @@ -1398,7 +1398,7 @@ ; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 ; SKX-NEXT: vpmovw2m %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: # kill: def %al killed %al killed %eax +; SKX-NEXT: # kill: def $al killed $al killed $eax ; SKX-NEXT: retq %mask_b = trunc <8 x i16>%a to <8 x i1> %mask = bitcast <8 x i1> %mask_b to i8 @@ -1436,7 +1436,7 @@ ; KNL-NEXT: kmovw %edi, %k1 ; KNL-NEXT: korw %k1, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: # kill: def %ax killed %ax killed %eax +; KNL-NEXT: # kill: def $ax killed $ax killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: trunc_i32_to_i1: @@ -1449,7 +1449,7 @@ ; SKX-NEXT: kmovw %edi, %k1 ; SKX-NEXT: korw %k1, %k0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: # kill: def %ax killed %ax killed %eax +; SKX-NEXT: # kill: def $ax killed $ax killed $eax ; SKX-NEXT: retq %a_i = trunc i32 %a to i1 %maskv = insertelement <16 x i1> , i1 %a_i, i32 0 @@ -1462,7 +1462,7 @@ ; KNL: # %bb.0: ; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: retq ; ; SKX-LABEL: sext_8i1_8i16: Index: test/CodeGen/X86/avx512-extract-subvector.ll =================================================================== --- test/CodeGen/X86/avx512-extract-subvector.ll +++ test/CodeGen/X86/avx512-extract-subvector.ll @@ -15,7 +15,7 @@ define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind { ; SKX-LABEL: extract_subvector128_v32i16_first_element: ; SKX: ## %bb.0: -; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; SKX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> @@ -35,7 +35,7 @@ define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind { ; SKX-LABEL: extract_subvector128_v64i8_first_element: ; SKX: ## %bb.0: -; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; SKX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> Index: test/CodeGen/X86/avx512-hadd-hsub.ll =================================================================== --- test/CodeGen/X86/avx512-hadd-hsub.ll +++ test/CodeGen/X86/avx512-hadd-hsub.ll @@ -63,7 +63,7 @@ ; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0 -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: fhadd_16: @@ -72,7 +72,7 @@ ; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0 -; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; SKX-NEXT: # kill: def 
$xmm0 killed $xmm0 killed $zmm0 ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> @@ -90,7 +90,7 @@ ; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; KNL-NEXT: vsubps %zmm1, %zmm0, %zmm0 -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: fhsub_16: @@ -99,7 +99,7 @@ ; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SKX-NEXT: vsubps %zmm1, %zmm0, %zmm0 -; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> @@ -181,7 +181,7 @@ ; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] ; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] ; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0 -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: fadd_noundef_low: @@ -189,7 +189,7 @@ ; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] ; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] ; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0 -; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; SKX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; SKX-NEXT: retq %x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> %x228 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> @@ -228,7 +228,7 @@ ; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14] ; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15] ; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0 -; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: hadd_16_3_sv: @@ -236,7 +236,7 @@ ; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14] ; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15] ; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0 -; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; SKX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; SKX-NEXT: retq %x226 = shufflevector <16 x i32> %x225, <16 x i32> %x227, <16 x i32> @@ -255,7 +255,7 @@ ; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] ; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] ; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0 -; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL-NEXT: retq ; ; SKX-LABEL: fadd_noundef_eel: @@ -263,7 +263,7 @@ ; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] ; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] ; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0 -; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed 
$zmm0 ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> Index: test/CodeGen/X86/avx512-insert-extract.ll =================================================================== --- test/CodeGen/X86/avx512-insert-extract.ll +++ test/CodeGen/X86/avx512-insert-extract.ll @@ -85,7 +85,7 @@ ; CHECK-NEXT: movq %rsp, %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $15, %edi ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -104,7 +104,7 @@ ; CHECK-NEXT: movq %rsp, %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -123,7 +123,7 @@ ; CHECK-NEXT: movq %rsp, %rbp ; CHECK-NEXT: andq $-32, %rsp ; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %ymm0, (%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -142,7 +142,7 @@ ; CHECK-NEXT: movq %rsp, %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $15, %edi ; CHECK-NEXT: movl (%rsp,%rdi,4), %eax @@ -231,7 +231,7 @@ ; KNL-NEXT: kmovw %eax, %k1 ; KNL-NEXT: korw %k1, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: def %ax killed %ax killed %eax +; KNL-NEXT: ## kill: def $ax killed $ax killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: test13: @@ -246,7 +246,7 @@ ; SKX-NEXT: kmovw %eax, %k1 ; SKX-NEXT: korw %k1, %k0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %ax killed %ax killed %eax +; SKX-NEXT: ## kill: def $ax killed $ax killed $eax ; SKX-NEXT: retq %cmp_res = icmp ult i32 %a, %b %maskv = insertelement <16 x i1> , i1 %cmp_res, i32 0 @@ -309,7 +309,7 @@ ; KNL-NEXT: kshiftrw $5, %k1, %k1 ; KNL-NEXT: kxorw %k1, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: def %ax killed %ax killed %eax +; KNL-NEXT: ## kill: def $ax killed $ax killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: test16: @@ -322,7 +322,7 @@ ; SKX-NEXT: kshiftrw $5, %k0, %k0 ; SKX-NEXT: kxorw %k0, %k1, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %ax killed %ax killed %eax +; SKX-NEXT: ## kill: def $ax killed $ax killed $eax ; SKX-NEXT: retq %x = load i1 , i1 * %addr, align 128 %a1 = bitcast i16 %a to <16 x i1> @@ -343,7 +343,7 @@ ; KNL-NEXT: kshiftrw $11, %k1, %k1 ; KNL-NEXT: kxorw %k1, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: def %al killed %al killed %eax +; KNL-NEXT: ## kill: def $al killed $al killed $eax ; KNL-NEXT: retq ; ; SKX-LABEL: test17: @@ -356,7 +356,7 @@ ; SKX-NEXT: kshiftrb $3, %k0, %k0 ; SKX-NEXT: kxorb %k0, %k1, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %al killed %al killed %eax +; SKX-NEXT: ## kill: def $al killed $al killed $eax ; SKX-NEXT: retq %x = load i1 , i1 * %addr, align 128 %a1 = bitcast i8 %a to <8 x i1> @@ -451,7 +451,7 @@ ; CHECK-NEXT: vpextrw $1, %xmm0, %eax ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi) -; CHECK-NEXT: ## kill: def %ax killed %ax killed 
%eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %r1 = extractelement <32 x i16> %x, i32 1 @@ -466,7 +466,7 @@ ; CHECK-NEXT: vpextrw $1, %xmm0, %eax ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi) -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %r1 = extractelement <16 x i16> %x, i32 1 @@ -480,7 +480,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpextrw $1, %xmm0, %eax ; CHECK-NEXT: vpextrw $3, %xmm0, (%rdi) -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq %r1 = extractelement <8 x i16> %x, i32 1 %r2 = extractelement <8 x i16> %x, i32 3 @@ -494,7 +494,7 @@ ; CHECK-NEXT: vpextrb $1, %xmm0, %eax ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi) -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %r1 = extractelement <64 x i8> %x, i32 1 @@ -509,7 +509,7 @@ ; CHECK-NEXT: vpextrb $1, %xmm0, %eax ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi) -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %r1 = extractelement <32 x i8> %x, i32 1 @@ -523,7 +523,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpextrb $1, %xmm0, %eax ; CHECK-NEXT: vpextrb $3, %xmm0, (%rdi) -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq %r1 = extractelement <16 x i8> %x, i32 1 %r2 = extractelement <16 x i8> %x, i32 3 @@ -825,8 +825,8 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) { ; KNL-LABEL: test_iinsertelement_v4i1: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: cmpl %esi, %edi ; KNL-NEXT: setb %al ; KNL-NEXT: vpcmpltud %zmm1, %zmm0, %k0 @@ -837,7 +837,7 @@ ; KNL-NEXT: kshiftrw $13, %k1, %k1 ; KNL-NEXT: kxorw %k1, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: def %al killed %al killed %eax +; KNL-NEXT: ## kill: def $al killed $al killed $eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -853,7 +853,7 @@ ; SKX-NEXT: kshiftrb $5, %k1, %k1 ; SKX-NEXT: kxorb %k1, %k0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %al killed %al killed %eax +; SKX-NEXT: ## kill: def $al killed $al killed $eax ; SKX-NEXT: retq %cmp_res_i1 = icmp ult i32 %a, %b %cmp_cmp_vec = icmp ult <4 x i32> %x, %y @@ -866,8 +866,8 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) { ; KNL-LABEL: test_iinsertelement_v2i1: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: cmpl %esi, %edi ; KNL-NEXT: setb %al ; KNL-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 @@ -877,7 +877,7 @@ ; KNL-NEXT: kshiftlw $1, %k1, %k1 ; KNL-NEXT: korw %k1, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: def %al killed %al killed %eax +; KNL-NEXT: ## kill: def $al killed $al killed $eax ; 
KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -892,7 +892,7 @@ ; SKX-NEXT: kshiftlb $1, %k1, %k1 ; SKX-NEXT: korb %k1, %k0, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %al killed %al killed %eax +; SKX-NEXT: ## kill: def $al killed $al killed $eax ; SKX-NEXT: retq %cmp_res_i1 = icmp ult i32 %a, %b %cmp_cmp_vec = icmp ult <2 x i64> %x, %y @@ -905,8 +905,8 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) { ; KNL-LABEL: test_extractelement_v2i1: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: andb $1, %al @@ -934,8 +934,8 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) { ; KNL-LABEL: extractelement_v2i1_alt: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: andb $1, %al @@ -964,8 +964,8 @@ define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) { ; KNL-LABEL: test_extractelement_v4i1: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0 ; KNL-NEXT: kshiftrw $3, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax @@ -1091,7 +1091,7 @@ define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) { ; CHECK-LABEL: test_extractelement_variable_v2i64: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: movq -24(%rsp,%rdi,8), %rax @@ -1110,7 +1110,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-32, %rsp ; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %ymm0, (%rsp) ; CHECK-NEXT: andl $3, %edi ; CHECK-NEXT: movq (%rsp,%rdi,8), %rax @@ -1132,7 +1132,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: movq (%rsp,%rdi,8), %rax @@ -1147,7 +1147,7 @@ define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) { ; CHECK-LABEL: test_extractelement_variable_v2f64: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -1166,7 +1166,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-32, %rsp ; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %ymm0, (%rsp) ; CHECK-NEXT: andl $3, %edi ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = 
mem[0],zero @@ -1188,7 +1188,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -1203,7 +1203,7 @@ define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) { ; CHECK-LABEL: test_extractelement_variable_v4i32: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: andl $3, %edi ; CHECK-NEXT: movl -24(%rsp,%rdi,4), %eax @@ -1222,7 +1222,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-32, %rsp ; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %ymm0, (%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: movl (%rsp,%rdi,4), %eax @@ -1244,7 +1244,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $15, %edi ; CHECK-NEXT: movl (%rsp,%rdi,4), %eax @@ -1259,7 +1259,7 @@ define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) { ; CHECK-LABEL: test_extractelement_variable_v4f32: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: andl $3, %edi ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -1278,7 +1278,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-32, %rsp ; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %ymm0, (%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -1300,7 +1300,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-64, %rsp ; CHECK-NEXT: subq $128, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %zmm0, (%rsp) ; CHECK-NEXT: andl $15, %edi ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -1315,7 +1315,7 @@ define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) { ; CHECK-LABEL: test_extractelement_variable_v8i16: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: andl $7, %edi ; CHECK-NEXT: movzwl -24(%rsp,%rdi,2), %eax @@ -1334,7 +1334,7 @@ ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: andq $-32, %rsp ; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi ; CHECK-NEXT: vmovaps %ymm0, (%rsp) ; CHECK-NEXT: andl $15, %edi ; CHECK-NEXT: movzwl (%rsp,%rdi,2), %eax @@ -1356,7 +1356,7 @@ ; KNL-NEXT: .cfi_def_cfa_register %rbp ; KNL-NEXT: andq $-64, %rsp ; KNL-NEXT: subq $128, %rsp -; KNL-NEXT: ## kill: def %edi killed %edi def %rdi +; KNL-NEXT: ## kill: def $edi killed $edi def $rdi ; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) ; KNL-NEXT: vmovaps 
%ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
@@ -1375,7 +1375,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1390,7 +1390,7 @@
define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movb -24(%rsp,%rdi), %al
@@ -1409,7 +1409,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $31, %edi
; CHECK-NEXT: movb (%rsp,%rdi), %al
@@ -1432,7 +1432,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $63, %edi
@@ -1451,7 +1451,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movb (%rsp,%rdi), %al
@@ -1512,9 +1512,9 @@
define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vextracti32x4 $0, %zmm0, -{{[0-9]+}}(%rsp)
@@ -1526,7 +1526,7 @@
;
; SKX-LABEL: test_extractelement_varible_v2i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1543,9 +1543,9 @@
define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vextracti32x4 $0, %zmm0, -{{[0-9]+}}(%rsp)
@@ -1557,7 +1557,7 @@
;
; SKX-LABEL: test_extractelement_varible_v4i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1574,9 +1574,9 @@
define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v8i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
@@ -1589,7 +1589,7 @@
;
; SKX-LABEL: test_extractelement_varible_v8i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1607,7 +1607,7 @@
define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v16i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, -{{[0-9]+}}(%rsp)
@@ -1619,7 +1619,7 @@
;
; SKX-LABEL: test_extractelement_varible_v16i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1644,7 +1644,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -1667,7 +1667,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-32, %rsp
; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vmovdqa %ymm0, (%rsp)
@@ -1706,7 +1706,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
@@ -1738,7 +1738,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-32, %rsp
; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: andl $31, %esi
@@ -1770,7 +1770,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
@@ -1821,7 +1821,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: andl $63, %esi
@@ -2172,7 +2172,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-128, %rsp
; KNL-NEXT: subq $256, %rsp ## imm = 0x100
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm4, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
@@ -2255,7 +2255,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-128, %rsp
; SKX-NEXT: subq $256, %rsp ## imm = 0x100
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpnleub %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpnleub %zmm2, %zmm1, %k1
Index: test/CodeGen/X86/avx512-insert-extract_i1.ll
===================================================================
--- test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -13,7 +13,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
Index: test/CodeGen/X86/avx512-intrinsics-upgrade.ll
===================================================================
--- test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: kunpckbw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
ret i16 %res
@@ -559,7 +559,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -571,7 +571,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -584,7 +584,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -596,7 +596,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -609,7 +609,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -621,7 +621,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -634,7 +634,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -646,7 +646,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -3054,7 +3054,7 @@
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3075,7 +3075,7 @@
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3519,7 +3519,7 @@
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3553,7 +3553,7 @@
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3587,7 +3587,7 @@
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3622,7 +3622,7 @@
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3692,7 +3692,7 @@
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
@@ -3710,7 +3710,7 @@
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
%res1 = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 %m)
@@ -3730,7 +3730,7 @@
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16-1)
@@ -3749,7 +3749,7 @@
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8-1)
Index: test/CodeGen/X86/avx512-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512-intrinsics.ll
+++ test/CodeGen/X86/avx512-intrinsics.ll
@@ -40,7 +40,7 @@
; CHECK-NEXT: kandw %k0, %k1, %k0
; CHECK-NEXT: kandw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
@@ -58,7 +58,7 @@
; CHECK-NEXT: kandnw %k2, %k1, %k1
; CHECK-NEXT: kandnw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kandn.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kandn.w(i16 %t1, i16 %a1)
@@ -72,7 +72,7 @@
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
ret i16 %res
@@ -89,7 +89,7 @@
; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: korw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kor.w(i16 %t1, i16 %a1)
@@ -109,7 +109,7 @@
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxnor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxnor.w(i16 %t1, i16 %a1)
@@ -127,7 +127,7 @@
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxor.w(i16 %t1, i16 %a1)
@@ -803,7 +803,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
ret i16 %res
@@ -815,7 +815,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
ret i8 %res
@@ -3304,7 +3304,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
@@ -3326,7 +3326,7 @@
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
@@ -3348,7 +3348,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
@@ -3371,7 +3371,7 @@
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: andl %edx, %eax
; CHECK-NEXT: andl %ecx, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
Index: test/CodeGen/X86/avx512-mask-op.ll
===================================================================
--- test/CodeGen/X86/avx512-mask-op.ll
+++ test/CodeGen/X86/avx512-mask-op.ll
@@ -11,7 +11,7 @@
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask16:
@@ -19,7 +19,7 @@
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16:
@@ -27,7 +27,7 @@
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16:
@@ -35,7 +35,7 @@
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0,
@@ -84,7 +84,7 @@
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
@@ -92,7 +92,7 @@
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8:
@@ -100,7 +100,7 @@
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8:
@@ -108,7 +108,7 @@
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0,
@@ -235,7 +235,7 @@
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: korw %k0, %k2, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mand16_mem:
@@ -246,7 +246,7 @@
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: korw %k0, %k2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mand16_mem:
@@ -257,7 +257,7 @@
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k2, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mand16_mem:
@@ -268,7 +268,7 @@
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: korw %k0, %k2, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -285,7 +285,7 @@
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
@@ -293,7 +293,7 @@
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: shuf_test1:
@@ -301,7 +301,7 @@
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuf_test1:
@@ -309,7 +309,7 @@
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32>
@@ -366,7 +366,7 @@
; KNL-NEXT: kshiftrw $5, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -376,7 +376,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -386,7 +386,7 @@
; AVX512BW-NEXT: kshiftrw $5, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -396,7 +396,7 @@
; AVX512DQ-NEXT: kshiftrw $5, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -412,7 +412,7 @@
; KNL-NEXT: kshiftrw $5, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -422,7 +422,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andb $1, %al
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -432,7 +432,7 @@
; AVX512BW-NEXT: kshiftrw $5, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andb $1, %al
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -442,7 +442,7 @@
; AVX512DQ-NEXT: kshiftrw $5, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andb $1, %al
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -717,7 +717,7 @@
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: LBB17_3:
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -787,7 +787,7 @@
; AVX512BW-NEXT: LBB18_3:
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -829,7 +829,7 @@
; KNL-NEXT: LBB20_3:
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -859,7 +859,7 @@
; AVX512BW-NEXT: LBB20_3:
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -875,7 +875,7 @@
; AVX512DQ-NEXT: LBB20_3:
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%mask = icmp sgt i32 %a1, %b1
@@ -959,7 +959,7 @@
; AVX512BW-NEXT: cmovgw %ax, %cx
; AVX512BW-NEXT: kmovd %ecx, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1190,7 +1190,7 @@
; KNL-NEXT: korw %k0, %k1, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -1228,7 +1228,7 @@
; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1249,7 +1249,7 @@
; AVX512DQ-NEXT: korb %k0, %k1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -1995,7 +1995,7 @@
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2010,7 +2010,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2018,7 +2018,7 @@
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <2 x i1>, <2 x i1>* %a
@@ -2032,7 +2032,7 @@
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2047,7 +2047,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2055,7 +2055,7 @@
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <4 x i1>, <4 x i1>* %a
@@ -2492,7 +2492,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_add:
@@ -2501,7 +2501,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_add:
@@ -2510,7 +2510,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_add:
@@ -2519,7 +2519,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2535,7 +2535,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_sub:
@@ -2544,7 +2544,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_sub:
@@ -2553,7 +2553,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_sub:
@@ -2562,7 +2562,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2578,7 +2578,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_mul:
@@ -2587,7 +2587,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_mul:
@@ -2596,7 +2596,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_mul:
@@ -2605,7 +2605,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2621,7 +2621,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_add:
@@ -2630,7 +2630,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_add:
@@ -2639,7 +2639,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_add:
@@ -2648,7 +2648,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -2664,7 +2664,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_sub:
@@ -2673,7 +2673,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_sub:
@@ -2682,7 +2682,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_sub:
@@ -2691,7 +2691,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -2707,7 +2707,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_mul:
@@ -2716,7 +2716,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_mul:
@@ -2725,7 +2725,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_mul:
@@ -2734,7 +2734,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
Index: test/CodeGen/X86/avx512-memfold.ll
===================================================================
--- test/CodeGen/X86/avx512-memfold.ll
+++ test/CodeGen/X86/avx512-memfold.ll
@@ -7,7 +7,7 @@
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
Index: test/CodeGen/X86/avx512-regcall-Mask.ll
===================================================================
--- test/CodeGen/X86/avx512-regcall-Mask.ll
+++ test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -310,9 +310,9 @@
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; X32-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; X32-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; X32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; X32-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; X32-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; X32-NEXT: calll _test_argv32i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm5 # 16-byte Reload
@@ -340,9 +340,9 @@
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; WIN64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; WIN64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; WIN64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; WIN64-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; WIN64-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; WIN64-NEXT: callq test_argv32i1helper
; WIN64-NEXT: nop
; WIN64-NEXT: addq $32, %rsp
@@ -384,9 +384,9 @@
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; LINUXOSX64-NEXT: callq test_argv32i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
@@ -538,9 +538,9 @@
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; X32-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv16i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -568,9 +568,9 @@
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; WIN64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; WIN64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv16i1helper
; WIN64-NEXT: nop
@@ -612,9 +612,9 @@
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv16i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -705,9 +705,9 @@
; X32-LABEL: caller_retv16i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv16i1
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv16i1:
@@ -724,9 +724,9 @@
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv16i1
-; WIN64-NEXT: # kill: def %ax killed %ax def %eax
+; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -742,9 +742,9 @@
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv16i1
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rcx
; LINUXOSX64-NEXT: retq
entry:
@@ -771,9 +771,9 @@
; X32-NEXT: vpmovm2w %k2, %zmm0
; X32-NEXT: vpmovm2w %k1, %zmm1
; X32-NEXT: vpmovm2w %k0, %zmm2
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; X32-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv8i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -801,9 +801,9 @@
; WIN64-NEXT: vpmovm2w %k2, %zmm0
; WIN64-NEXT: vpmovm2w %k1, %zmm1
; WIN64-NEXT: vpmovm2w %k0, %zmm2
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; WIN64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; WIN64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv8i1helper
; WIN64-NEXT: nop
@@ -845,9 +845,9 @@
; LINUXOSX64-NEXT: vpmovm2w %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2w %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv8i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -938,10 +938,10 @@
; X32-LABEL: caller_retv8i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv8i1
-; X32-NEXT: # kill: def %al killed %al def %eax
+; X32-NEXT: # kill: def $al killed $al def $eax
; X32-NEXT: kmovd %eax, %k0
; X32-NEXT: vpmovm2w %k0, %zmm0
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -959,10 +959,10 @@
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv8i1
-; WIN64-NEXT: # kill: def %al killed %al def %eax
+; WIN64-NEXT: # kill: def $al killed $al def $eax
; WIN64-NEXT: kmovd %eax, %k0
; WIN64-NEXT: vpmovm2w %k0, %zmm0
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -979,10 +979,10 @@
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv8i1
-; LINUXOSX64-NEXT: # kill: def %al killed %al def %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al def $eax
; LINUXOSX64-NEXT: kmovd %eax, %k0
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; LINUXOSX64-NEXT: popq %rax
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: retq
Index: test/CodeGen/X86/avx512-regcall-NoMask.ll
===================================================================
--- test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -8,19 +8,19 @@
; X32-LABEL: test_argReti1:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: def %al killed %al killed %eax
+; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i1 %a, 1
ret i1 %add
@@ -75,19 +75,19 @@
; X32-LABEL: test_argReti8:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: def %al killed %al killed %eax
+; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i8 %a, 1
ret i8 %add
@@ -142,19 +142,19 @@
; X32-LABEL: test_argReti16:
; X32: # %bb.0:
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: retq
%add = add i16 %a, 1
ret i16 %add
@@ -167,9 +167,9 @@
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
@@ -180,9 +180,9 @@
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti16
-; WIN64-NEXT: # kill: def %ax killed %ax def %eax
+; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
@@ -196,9 +196,9 @@
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti16
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: retq
%b = add i16 %a, 1
Index: test/CodeGen/X86/avx512-schedule.ll
===================================================================
--- test/CodeGen/X86/avx512-schedule.ll
+++ test/CodeGen/X86/avx512-schedule.ll
@@ -4281,7 +4281,7 @@
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -4289,7 +4289,7 @@
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -4302,7 +4302,7 @@
; GENERIC-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -4311,7 +4311,7 @@
; SKX-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -4347,7 +4347,7 @@
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -4355,7 +4355,7 @@
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -4392,7 +4392,7 @@
; GENERIC-NEXT: kmovw %edi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -4405,7 +4405,7 @@
; SKX-NEXT: kmovw %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> , i1 %a_i, i32 0
@@ -6666,7 +6666,7 @@
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16:
@@ -6674,7 +6674,7 @@
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0,
@@ -6709,7 +6709,7 @@
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8:
@@ -6717,7 +6717,7 @@
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0,
@@ -6826,7 +6826,7 @@
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16_mem:
@@ -6837,7 +6837,7 @@
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -6854,7 +6854,7 @@
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kshiftrw $8, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuf_test1:
@@ -6862,7 +6862,7 @@
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $8, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32>
@@ -6901,7 +6901,7 @@
; GENERIC-NEXT: kshiftrw $5, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andl $1, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6911,7 +6911,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andl $1, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -6927,7 +6927,7 @@
; GENERIC-NEXT: kshiftrw $5, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andb $1, %al # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6937,7 +6937,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andb $1, %al # sched: [1:0.25]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -8027,7 +8027,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_add:
@@ -8036,7 +8036,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8052,7 +8052,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_sub:
@@ -8061,7 +8061,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8077,7 +8077,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_mul:
@@ -8086,7 +8086,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8102,7 +8102,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_add:
@@ -8111,7 +8111,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8127,7 +8127,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_sub:
@@ -8136,7 +8136,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8152,7 +8152,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_mul:
@@ -8161,7 +8161,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
Index: test/CodeGen/X86/avx512-select.ll
===================================================================
--- test/CodeGen/X86/avx512-select.ll
+++ test/CodeGen/X86/avx512-select.ll
@@ -155,7 +155,7 @@
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: korw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select05_mem:
@@ -166,7 +166,7 @@
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: korw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -205,7 +205,7 @@
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: kandw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select06_mem:
@@ -216,7 +216,7 @@
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: kandw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -237,7 +237,7 @@
; X86-NEXT: kandw %k0, %k1, %k0
; X86-NEXT: korw %k2, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select07:
@@ -249,7 +249,7 @@
; X64-NEXT: kandw %k0, %k1, %k0
; X64-NEXT: korw %k2, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = bitcast i8 %m to <8 x i1>
%a = bitcast i8 %a.0 to <8 x i1>
Index: test/CodeGen/X86/avx512-shift.ll
===================================================================
--- test/CodeGen/X86/avx512-shift.ll
+++ test/CodeGen/X86/avx512-shift.ll
@@ -34,7 +34,7 @@
; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: shift_4_i64:
@@ -106,10 +106,10 @@
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-LABEL: variable_sra3:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra3:
@@ -127,7 +127,7 @@
; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra4:
Index: test/CodeGen/X86/avx512-shuffles/partial_permute.ll
===================================================================
--- test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -789,7 +789,7 @@
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -911,7 +911,7 @@
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -1710,7 +1710,7 @@
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -3681,7 +3681,7 @@
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -4565,7 +4565,7 @@
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [1,6,3,6]
; CHECK-NEXT: vpermi2pd %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
Index: test/CodeGen/X86/avx512-trunc.ll
===================================================================
--- test/CodeGen/X86/avx512-trunc.ll
+++ test/CodeGen/X86/avx512-trunc.ll
@@ -57,9 +57,9 @@
define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qb_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -75,7 +75,7 @@
define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
@@ -140,9 +140,9 @@
define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0,
%ymm0 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -158,7 +158,7 @@ define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 { ; KNL-LABEL: trunc_qw_256_mem: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovqd %zmm0, %ymm0 ; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] ; KNL-NEXT: vmovq %xmm0, (%rdi) @@ -223,9 +223,9 @@ define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 { ; KNL-LABEL: trunc_qd_256: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovqd %zmm0, %ymm0 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -241,7 +241,7 @@ define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 { ; KNL-LABEL: trunc_qd_256_mem: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovqd %zmm0, %ymm0 ; KNL-NEXT: vmovdqa %xmm0, (%rdi) ; KNL-NEXT: vzeroupper @@ -305,9 +305,9 @@ define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 { ; KNL-LABEL: trunc_db_256: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -323,7 +323,7 @@ define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 { ; KNL-LABEL: trunc_db_256_mem: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 ; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] ; KNL-NEXT: vmovq %xmm0, (%rdi) @@ -387,9 +387,9 @@ define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 { ; KNL-LABEL: trunc_dw_256: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 -; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0 +; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -405,7 +405,7 @@ define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 { ; KNL-LABEL: trunc_dw_256_mem: ; KNL: ## %bb.0: -; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vpmovdw %zmm0, %ymm0 ; KNL-NEXT: vmovdqa %xmm0, (%rdi) ; KNL-NEXT: vzeroupper Index: test/CodeGen/X86/avx512-vec-cmp.ll =================================================================== --- test/CodeGen/X86/avx512-vec-cmp.ll +++ test/CodeGen/X86/avx512-vec-cmp.ll @@ -73,12 +73,12 @@ define <4 x float> @test7(<4 x float> %a, <4 x float> %b) { ; AVX512-LABEL: test7: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: 
def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -97,12 +97,12 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) { ; AVX512-LABEL: test8: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2 ; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -120,11 +120,11 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind { ; AVX512-LABEL: test9: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 ; AVX512-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test9: @@ -140,11 +140,11 @@ define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind { ; AVX512-LABEL: test10: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vcmpeqps %zmm1, %zmm0, %k1 ; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test10: @@ -175,7 +175,7 @@ ; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 ; KNL-NEXT: kunpckbw %k0, %k1, %k0 ; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: def %ax killed %ax killed %eax +; KNL-NEXT: ## kill: def $ax killed $ax killed $eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; @@ -185,7 +185,7 @@ ; AVX512BW-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 ; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -195,7 +195,7 @@ ; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 ; SKX-NEXT: kunpckbw %k0, %k1, %k0 ; SKX-NEXT: kmovd %k0, %eax -; SKX-NEXT: ## kill: def %ax killed %ax killed %eax +; SKX-NEXT: ## kill: def $ax killed $ax killed $eax ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %res = icmp eq <16 x i64> %a, %b @@ -503,7 +503,7 @@ ; AVX512-NEXT: vpcmpgtq %zmm3, %zmm2, %k1 ; AVX512-NEXT: kxnorw %k1, %k0, %k1 ; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test28: @@ -537,7 +537,7 @@ ; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k1 ; AVX512BW-NEXT: kxorw %k1, %k0, %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; 
AVX512BW-NEXT: retq ; @@ -559,11 +559,11 @@ define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind { ; AVX512-LABEL: test30: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vcmpeqpd %zmm1, %zmm0, %k1 ; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test30: @@ -580,12 +580,12 @@ define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp) nounwind { ; AVX512-LABEL: test31: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vmovupd (%rdi), %xmm2 ; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -604,12 +604,12 @@ define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp) nounwind { ; AVX512-LABEL: test32: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vmovupd (%rdi), %ymm2 ; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test32: @@ -639,12 +639,12 @@ define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) nounwind { ; AVX512-LABEL: test34: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vmovups (%rdi), %xmm2 ; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -662,12 +662,12 @@ define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind { ; AVX512-LABEL: test35: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vmovups (%rdi), %ymm2 ; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test35: @@ -713,12 +713,12 @@ define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nounwind { ; AVX512-LABEL: test38: ; AVX512: ## %bb.0: -; 
AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vbroadcastsd (%rdi), %ymm2 ; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test38: @@ -739,12 +739,12 @@ define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nounwind { ; AVX512-LABEL: test39: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0] ; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -783,12 +783,12 @@ define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind { ; AVX512-LABEL: test41: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vbroadcastss (%rdi), %ymm2 ; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; SKX-LABEL: test41: @@ -809,12 +809,12 @@ define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) nounwind { ; AVX512-LABEL: test42: ; AVX512: ## %bb.0: -; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vbroadcastss (%rdi), %xmm2 ; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1 ; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; Index: test/CodeGen/X86/avx512-vec3-crash.ll =================================================================== --- test/CodeGen/X86/avx512-vec3-crash.ll +++ test/CodeGen/X86/avx512-vec3-crash.ll @@ -19,9 +19,9 @@ ; CHECK-NEXT: vpextrb $0, %xmm0, %eax ; CHECK-NEXT: vpextrb $4, %xmm0, %edx ; CHECK-NEXT: vpextrb $8, %xmm0, %ecx -; CHECK-NEXT: # kill: def %al killed %al killed %eax -; CHECK-NEXT: # kill: def %dl killed %dl killed %edx -; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx +; CHECK-NEXT: # kill: def $al killed $al killed $eax +; CHECK-NEXT: # kill: def $dl killed $dl killed $edx +; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx ; CHECK-NEXT: retq %cmp.i = icmp slt <3 x i8> %x, %a %res = sext <3 x i1> %cmp.i to <3 x i8> Index: test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll =================================================================== --- test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll +++ 
test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll @@ -1967,7 +1967,7 @@ ; AVX512F-32-NEXT: kmovd %edx, %k7 ; AVX512F-32-NEXT: movl %ebp, %edx ; AVX512F-32-NEXT: shrl $24, %edx -; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax +; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax ; AVX512F-32-NEXT: shrb $7, %al ; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4 ; AVX512F-32-NEXT: kshiftrq $47, %k4, %k4 @@ -1982,7 +1982,7 @@ ; AVX512F-32-NEXT: kshiftrq $18, %k4, %k3 ; AVX512F-32-NEXT: kxorq %k6, %k3, %k6 ; AVX512F-32-NEXT: kmovd %edx, %k3 -; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx +; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx ; AVX512F-32-NEXT: andb $15, %dl ; AVX512F-32-NEXT: andb $2, %al ; AVX512F-32-NEXT: shrb %al @@ -2232,7 +2232,7 @@ ; AVX512F-32-NEXT: kmovd %ecx, %k5 ; AVX512F-32-NEXT: movl %ebx, %edx ; AVX512F-32-NEXT: shrl $24, %edx -; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax +; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax ; AVX512F-32-NEXT: shrb $7, %al ; AVX512F-32-NEXT: kshiftlq $63, %k6, %k6 ; AVX512F-32-NEXT: kshiftrq $15, %k6, %k6 @@ -2248,7 +2248,7 @@ ; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k7 # 8-byte Reload ; AVX512F-32-NEXT: kxorq %k7, %k1, %k7 ; AVX512F-32-NEXT: kmovd %edx, %k1 -; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx +; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx ; AVX512F-32-NEXT: andb $15, %dl ; AVX512F-32-NEXT: andb $2, %al ; AVX512F-32-NEXT: shrb %al @@ -2667,7 +2667,7 @@ ; AVX512F-32-NEXT: kmovd %edx, %k7 ; AVX512F-32-NEXT: movl %ebp, %edx ; AVX512F-32-NEXT: shrl $24, %edx -; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax +; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax ; AVX512F-32-NEXT: shrb $7, %al ; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4 ; AVX512F-32-NEXT: kshiftrq $47, %k4, %k4 @@ -2682,7 +2682,7 @@ ; AVX512F-32-NEXT: kshiftrq $18, %k4, %k3 ; AVX512F-32-NEXT: kxorq %k6, %k3, %k6 ; AVX512F-32-NEXT: kmovd %edx, %k3 -; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx +; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx ; AVX512F-32-NEXT: andb $15, %dl ; AVX512F-32-NEXT: andb $2, %al ; AVX512F-32-NEXT: shrb %al @@ -2932,7 +2932,7 @@ ; AVX512F-32-NEXT: kmovd %ecx, %k5 ; AVX512F-32-NEXT: movl %ebx, %edx ; AVX512F-32-NEXT: shrl $24, %edx -; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax +; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax ; AVX512F-32-NEXT: shrb $7, %al ; AVX512F-32-NEXT: kshiftlq $63, %k6, %k6 ; AVX512F-32-NEXT: kshiftrq $15, %k6, %k6 @@ -2948,7 +2948,7 @@ ; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k7 # 8-byte Reload ; AVX512F-32-NEXT: kxorq %k7, %k1, %k7 ; AVX512F-32-NEXT: kmovd %edx, %k1 -; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx +; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx ; AVX512F-32-NEXT: andb $15, %dl ; AVX512F-32-NEXT: andb $2, %al ; AVX512F-32-NEXT: shrb %al Index: test/CodeGen/X86/avx512bw-mov.ll =================================================================== --- test/CodeGen/X86/avx512bw-mov.ll +++ test/CodeGen/X86/avx512bw-mov.ll @@ -100,7 +100,7 @@ ; CHECK-NEXT: vpmovb2m %zmm0, %k0 ; CHECK-NEXT: kmovw %k0, %k1 ; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z} -; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; CHECK-NEXT: retq %res = call <16 x i8> 
@llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef) ret <16 x i8> %res @@ -114,7 +114,7 @@ ; CHECK-NEXT: vpmovb2m %zmm0, %k0 ; CHECK-NEXT: kmovd %k0, %k1 ; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z} -; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; CHECK-NEXT: retq %res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer) ret <32 x i8> %res @@ -129,7 +129,7 @@ ; CHECK-NEXT: kshiftld $24, %k0, %k0 ; CHECK-NEXT: kshiftrd $24, %k0, %k1 ; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z} -; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; CHECK-NEXT: retq %res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef) ret <8 x i16> %res @@ -143,7 +143,7 @@ ; CHECK-NEXT: vpmovb2m %zmm0, %k0 ; CHECK-NEXT: kmovw %k0, %k1 ; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z} -; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; CHECK-NEXT: retq %res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer) ret <16 x i16> %res @@ -153,7 +153,7 @@ define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) { ; CHECK-LABEL: test_mask_store_16xi8: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 +; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 ; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0 ; CHECK-NEXT: vpmovb2m %zmm0, %k0 ; CHECK-NEXT: kmovw %k0, %k1 @@ -167,7 +167,7 @@ define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) { ; CHECK-LABEL: test_mask_store_32xi8: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 +; CHECK-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 ; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0 ; CHECK-NEXT: vpmovb2m %zmm0, %k0 ; CHECK-NEXT: kmovd %k0, %k1 @@ -181,7 +181,7 @@ define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) { ; CHECK-LABEL: test_mask_store_8xi16: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 +; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 ; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0 ; CHECK-NEXT: vpmovw2m %zmm0, %k0 ; CHECK-NEXT: kshiftld $24, %k0, %k0 @@ -196,7 +196,7 @@ define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) { ; CHECK-LABEL: test_mask_store_16xi16: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 +; CHECK-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 ; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0 ; CHECK-NEXT: vpmovb2m %zmm0, %k0 ; CHECK-NEXT: kmovw %k0, %k1 Index: test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll =================================================================== --- test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll +++ test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll @@ -503,7 +503,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, 
i16 -1) @@ -516,7 +516,7 @@ ; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] ; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask) @@ -555,7 +555,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1) @@ -568,7 +568,7 @@ ; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] ; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask) @@ -582,7 +582,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1) ret i16 %res @@ -594,7 +594,7 @@ ; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] ; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask) ret i16 %res @@ -607,7 +607,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1) ret i8 %res @@ -619,7 +619,7 @@ ; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] ; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1] ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0] -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask) ret i8 %res @@ -632,7 +632,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1] ; 
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
 ret i16 %res
@@ -644,7 +644,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
 ret i16 %res
@@ -657,7 +657,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
 ret i8 %res
@@ -669,7 +669,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
 ; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
 ret i8 %res
@@ -3683,7 +3683,7 @@
 ; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
 ; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
 %res1 = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3721,7 +3721,7 @@
 ; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3740,7 +3740,7 @@
 ; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
 ; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3760,7 +3760,7 @@
 ; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
 ; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
 %res1 = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3798,7 +3798,7 @@
 ; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3817,7 +3817,7 @@
 ; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
 ; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3833,7 +3833,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %x0)
 ret i16 %res
@@ -3859,7 +3859,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %x0)
 ret i8 %res
@@ -3872,7 +3872,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
 ; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16> %x0)
Index: test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
===================================================================
--- test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
+++ test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
@@ -7,7 +7,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -24,7 +24,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1
 ; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -42,7 +42,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -59,7 +59,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1
 ; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -77,7 +77,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -94,7 +94,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1
 ; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -112,7 +112,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -129,7 +129,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1
 ; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 entry:
 %and.i.i = and <2 x i64> %__B, %__A
@@ -182,7 +182,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -200,7 +200,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1
 ; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -254,7 +254,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -272,7 +272,7 @@
 ; CHECK-NEXT: kmovd %edi, %k1
 ; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
Index: test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
===================================================================
--- test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -151,7 +151,7 @@
 define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
 ; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -185,7 +185,7 @@
 define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT: kmovw %edi, %k1
@@ -220,7 +220,7 @@
 define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
 ; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -254,7 +254,7 @@
 define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT: kmovw %edi, %k1
@@ -289,7 +289,7 @@
 define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT: kmovw %edi, %k1
@@ -311,7 +311,7 @@
 define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT: kmovw %edi, %k1
@@ -335,7 +335,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovd2m %zmm0, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %res = call i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32> %x0)
 ret i16 %res
@@ -348,7 +348,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovq2m %zmm0, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %res = call i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64> %x0)
 ret i8 %res
Index: test/CodeGen/X86/avx512dq-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512dq-intrinsics.ll
+++ test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -351,7 +351,7 @@
 ; CHECK-NEXT: vfpclasspd $4, %zmm0, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
 ; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %res = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 2, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 4, i8 -1)
@@ -369,7 +369,7 @@
 ; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
 ; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %res = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 %x1)
 %res1 = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 -1)
@@ -388,7 +388,7 @@
 ; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
 ; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 2, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -401,7 +401,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vfpclasssd $4, (%rdi), %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %x0 = load <2 x double>, <2 x double>* %x0ptr
 %res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -419,7 +419,7 @@
 ; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
 ; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
@@ -432,7 +432,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vfpclassss $4, (%rdi), %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %x0 = load <4 x float>, <4 x float>* %x0ptr
 %res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
Index: test/CodeGen/X86/avx512dq-mask-op.ll
===================================================================
--- test/CodeGen/X86/avx512dq-mask-op.ll
+++ test/CodeGen/X86/avx512dq-mask-op.ll
@@ -7,7 +7,7 @@
 ; CHECK-NEXT: kmovd %edi, %k0
 ; CHECK-NEXT: knotb %k0, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %m0 = bitcast i8 %x to <8 x i1>
 %m1 = xor <8 x i1> %m0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT: kxorb %k1, %k0, %k0
 ; CHECK-NEXT: korb %k0, %k2, %k0
 ; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %ma = load <8 x i1>, <8 x i1>* %x
 %mb = load <8 x i1>, <8 x i1>* %y
Index: test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
===================================================================
--- test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -1673,7 +1673,7 @@
 define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xc8,0x01]
@@ -1708,7 +1708,7 @@
 define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xc8,0x01]
@@ -1743,7 +1743,7 @@
 define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -1764,7 +1764,7 @@
 define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16]
 ; CHECK-NEXT: ## xmm2 = mem[0],zero
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1811,7 +1811,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %x0)
 ret i8 %res
@@ -1824,7 +1824,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32> %x0)
 ret i8 %res
@@ -1837,7 +1837,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64> %x0)
 ret i8 %res
@@ -1850,7 +1850,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64> %x0)
 ret i8 %res
Index: test/CodeGen/X86/avx512dqvl-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -560,7 +560,7 @@
 ; CHECK-NEXT: vfpclassps $4, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x66,0xc0,0x04]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 2, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 4, i8 -1)
@@ -579,7 +579,7 @@
 ; CHECK-NEXT: vfpclassps $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x66,0xc0,0x04]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 2, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 4, i8 -1)
@@ -598,7 +598,7 @@
 ; CHECK-NEXT: vfpclasspd $2, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x66,0xc0,0x02]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 4, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 2, i8 -1)
@@ -617,7 +617,7 @@
 ; CHECK-NEXT: vfpclasspd $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x66,0xc0,0x04]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 2, i8 %x1)
 %res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 4, i8 -1)
Index: test/CodeGen/X86/avx512f-vec-test-testn.ll
===================================================================
--- test/CodeGen/X86/avx512f-vec-test-testn.ll
+++ test/CodeGen/X86/avx512f-vec-test-testn.ll
@@ -7,7 +7,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -23,7 +23,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -41,7 +41,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -60,7 +60,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -79,7 +79,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -95,7 +95,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -113,7 +113,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
@@ -132,7 +132,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
 ; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
Index: test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
===================================================================
--- test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -1064,7 +1064,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
 ret i8 %res
@@ -1076,7 +1076,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
 ret i8 %res
@@ -1089,7 +1089,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
 ret i8 %res
@@ -1101,7 +1101,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
 ret i8 %res
@@ -1114,7 +1114,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
 ret i8 %res
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
 ret i8 %res
@@ -1139,7 +1139,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
 ret i8 %res
@@ -1151,7 +1151,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
 ret i8 %res
@@ -1164,7 +1164,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
 ret i8 %res
@@ -1176,7 +1176,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
 ret i8 %res
@@ -1189,7 +1189,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
 ret i8 %res
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
 ret i8 %res
@@ -1214,7 +1214,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
 ret i8 %res
@@ -1226,7 +1226,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
 ret i8 %res
@@ -1239,7 +1239,7 @@
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
 ret i8 %res
@@ -1251,7 +1251,7 @@
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
 ret i8 %res
@@ -5863,7 +5863,7 @@
 define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0, <8 x float> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -5896,7 +5896,7 @@
 define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
 ; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xc8,0x01]
@@ -5999,7 +5999,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6018,7 +6018,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6037,7 +6037,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6056,7 +6056,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
@@ -6075,7 +6075,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6094,7 +6094,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6113,7 +6113,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6132,7 +6132,7 @@
 ; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
 ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
 %res1 = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
Index: test/CodeGen/X86/avx512vl-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512vl-intrinsics.ll
+++ test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -718,7
+718,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc1,0x02] ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0] -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %a, <8 x float> %b, i32 2, i8 -1) ret i8 %res @@ -730,7 +730,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vcmpleps %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x08,0xc2,0xc1,0x02] ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0] -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %a, <4 x float> %b, i32 2, i8 -1) ret i8 %res @@ -742,7 +742,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vcmplepd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x28,0xc2,0xc1,0x02] ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0] -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %a, <4 x double> %b, i32 2, i8 -1) ret i8 %res @@ -754,7 +754,7 @@ ; CHECK: ## %bb.0: ; CHECK-NEXT: vcmplepd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02] ; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0] -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %a, <2 x double> %b, i32 2, i8 -1) ret i8 %res Index: test/CodeGen/X86/avx512vl-vec-cmp.ll =================================================================== --- test/CodeGen/X86/avx512vl-vec-cmp.ll +++ test/CodeGen/X86/avx512vl-vec-cmp.ll @@ -11,11 +11,11 @@ ; ; NoVLX-LABEL: test256_1: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp eq <4 x i64> %x, %y %max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y @@ -31,12 +31,12 @@ ; ; NoVLX-LABEL: test256_2: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp sgt <4 x i64> %x, %y %max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y @@ -52,12 +52,12 @@ ; ; NoVLX-LABEL: test256_3: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; 
NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1 ; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp sge <8 x i32> %x, %y %max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y @@ -73,12 +73,12 @@ ; ; NoVLX-LABEL: test256_4: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp ugt <4 x i64> %x, %y %max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y @@ -94,12 +94,12 @@ ; ; NoVLX-LABEL: test256_5: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %yp, align 4 %mask = icmp eq <8 x i32> %x, %y @@ -116,12 +116,12 @@ ; ; NoVLX-LABEL: test256_5b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %yp, align 4 %mask = icmp eq <8 x i32> %y, %x @@ -138,12 +138,12 @@ ; ; NoVLX-LABEL: test256_6: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 %mask = icmp sgt <8 x i32> %x, %y @@ -160,12 +160,12 @@ ; ; NoVLX-LABEL: test256_6b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # 
kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 %mask = icmp slt <8 x i32> %y, %x @@ -182,12 +182,12 @@ ; ; NoVLX-LABEL: test256_7: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 %mask = icmp sle <8 x i32> %x, %y @@ -204,12 +204,12 @@ ; ; NoVLX-LABEL: test256_7b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 %mask = icmp sge <8 x i32> %y, %x @@ -226,12 +226,12 @@ ; ; NoVLX-LABEL: test256_8: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 %mask = icmp ule <8 x i32> %x, %y @@ -248,12 +248,12 @@ ; ; NoVLX-LABEL: test256_8b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 %mask = icmp uge <8 x i32> %y, %x @@ -271,14 +271,14 @@ ; ; NoVLX-LABEL: test256_9: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3 -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3 +; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1} ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq 
%mask1 = icmp eq <8 x i32> %x1, %y1 %mask0 = icmp eq <8 x i32> %x, %y @@ -297,14 +297,14 @@ ; ; NoVLX-LABEL: test256_10: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3 -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3 +; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1} ; NoVLX-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp sge <4 x i64> %x1, %y1 %mask0 = icmp sle <4 x i64> %x, %y @@ -323,14 +323,14 @@ ; ; NoVLX-LABEL: test256_11: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm3 ; NoVLX-NEXT: vpcmpgtq %zmm3, %zmm0, %k1 ; NoVLX-NEXT: vpcmpgtq %zmm2, %zmm1, %k1 {%k1} ; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp sgt <4 x i64> %x1, %y1 %y = load <4 x i64>, <4 x i64>* %y.ptr, align 4 @@ -350,14 +350,14 @@ ; ; NoVLX-LABEL: test256_12: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm3 ; NoVLX-NEXT: vpcmpleud %zmm3, %zmm0, %k1 ; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1} ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp sge <8 x i32> %x1, %y1 %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4 @@ -376,12 +376,12 @@ ; ; NoVLX-LABEL: test256_13: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %yb = load i64, i64* %yb.ptr, align 4 %y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0 @@ -400,12 +400,12 @@ ; ; NoVLX-LABEL: test256_14: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpled %zmm2, 
%zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %yb = load i32, i32* %yb.ptr, align 4 %y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0 @@ -482,12 +482,12 @@ ; ; NoVLX-LABEL: test256_17: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %yp, align 4 %mask = icmp ne <8 x i32> %x, %y @@ -504,12 +504,12 @@ ; ; NoVLX-LABEL: test256_18: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %yp, align 4 %mask = icmp ne <8 x i32> %y, %x @@ -526,12 +526,12 @@ ; ; NoVLX-LABEL: test256_19: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %yp, align 4 %mask = icmp uge <8 x i32> %x, %y @@ -548,12 +548,12 @@ ; ; NoVLX-LABEL: test256_20: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %ymm2 ; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <8 x i32>, <8 x i32>* %yp, align 4 %mask = icmp uge <8 x i32> %y, %x @@ -570,11 +570,11 @@ ; ; NoVLX-LABEL: test128_1: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp eq <2 x i64> %x, %y %max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y @@ -590,12 +590,12 @@ ; ; NoVLX-LABEL: test128_2: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm2 killed 
%xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp sgt <2 x i64> %x, %y %max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y @@ -611,12 +611,12 @@ ; ; NoVLX-LABEL: test128_3: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1 ; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp sge <4 x i32> %x, %y %max = select <4 x i1> %mask, <4 x i32> %x1, <4 x i32> %y @@ -632,12 +632,12 @@ ; ; NoVLX-LABEL: test128_4: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask = icmp ugt <2 x i64> %x, %y %max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y @@ -653,12 +653,12 @@ ; ; NoVLX-LABEL: test128_5: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %yp, align 4 %mask = icmp eq <4 x i32> %x, %y @@ -675,12 +675,12 @@ ; ; NoVLX-LABEL: test128_5b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %yp, align 4 %mask = icmp eq <4 x i32> %y, %x @@ -697,12 +697,12 @@ ; ; NoVLX-LABEL: test128_6: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed 
$xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp sgt <4 x i32> %x, %y @@ -719,12 +719,12 @@ ; ; NoVLX-LABEL: test128_6b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp slt <4 x i32> %y, %x @@ -741,12 +741,12 @@ ; ; NoVLX-LABEL: test128_7: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp sle <4 x i32> %x, %y @@ -763,12 +763,12 @@ ; ; NoVLX-LABEL: test128_7b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp sge <4 x i32> %y, %x @@ -785,12 +785,12 @@ ; ; NoVLX-LABEL: test128_8: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp ule <4 x i32> %x, %y @@ -807,12 +807,12 @@ ; ; NoVLX-LABEL: test128_8b: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, 
<4 x i32>* %y.ptr, align 4 %mask = icmp uge <4 x i32> %y, %x @@ -830,14 +830,14 @@ ; ; NoVLX-LABEL: test128_9: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm3 killed %xmm3 def %zmm3 -; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1} ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp eq <4 x i32> %x1, %y1 %mask0 = icmp eq <4 x i32> %x, %y @@ -856,14 +856,14 @@ ; ; NoVLX-LABEL: test128_10: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm3 killed %xmm3 def %zmm3 -; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm1, %zmm0, %k1 ; NoVLX-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1} ; NoVLX-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp sge <2 x i64> %x1, %y1 %mask0 = icmp sle <2 x i64> %x, %y @@ -882,14 +882,14 @@ ; ; NoVLX-LABEL: test128_11: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm3 ; NoVLX-NEXT: vpcmpgtq %zmm3, %zmm0, %k1 ; NoVLX-NEXT: vpcmpgtq %zmm2, %zmm1, %k1 {%k1} ; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp sgt <2 x i64> %x1, %y1 %y = load <2 x i64>, <2 x i64>* %y.ptr, align 4 @@ -909,14 +909,14 @@ ; ; NoVLX-LABEL: test128_12: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm3 ; NoVLX-NEXT: vpcmpleud %zmm3, %zmm0, %k1 ; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1} ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %mask1 = icmp sge <4 x i32> %x1, %y1 %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 @@ -935,12 +935,12 @@ ; ; NoVLX-LABEL: test128_13: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed 
%xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %yb = load i64, i64* %yb.ptr, align 4 %y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0 @@ -959,12 +959,12 @@ ; ; NoVLX-LABEL: test128_14: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %yb = load i32, i32* %yb.ptr, align 4 %y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0 @@ -1041,12 +1041,12 @@ ; ; NoVLX-LABEL: test128_17: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp ne <4 x i32> %x, %y @@ -1063,12 +1063,12 @@ ; ; NoVLX-LABEL: test128_18: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp ne <4 x i32> %y, %x @@ -1085,12 +1085,12 @@ ; ; NoVLX-LABEL: test128_19: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp uge <4 x i32> %x, %y @@ -1107,12 +1107,12 @@ ; ; NoVLX-LABEL: test128_20: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqu (%rdi), %xmm2 ; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1 ; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; 
NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; NoVLX-NEXT: retq %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4 %mask = icmp uge <4 x i32> %y, %x Index: test/CodeGen/X86/avx512vl-vec-masked-cmp.ll =================================================================== --- test/CodeGen/X86/avx512vl-vec-masked-cmp.ll +++ test/CodeGen/X86/avx512vl-vec-masked-cmp.ll @@ -364,7 +364,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask: @@ -373,7 +373,7 @@ ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -390,7 +390,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem: @@ -399,7 +399,7 @@ ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -418,7 +418,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask: @@ -428,7 +428,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -448,7 +448,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem: @@ -458,7 +458,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1572,18 +1572,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1600,18 +1600,18 @@ ; VLX: 
# %bb.0: # %entry ; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1630,19 +1630,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1663,19 +1663,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1697,18 +1697,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1728,19 +1728,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b: ; NoVLX: # 
%bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1763,18 +1763,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1791,18 +1791,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1821,19 +1821,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1854,19 +1854,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, 
%zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1888,18 +1888,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1919,19 +1919,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1958,8 +1958,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -1984,7 +1984,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -2012,8 +2012,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -2043,7 +2043,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2075,7 +2075,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # 
kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -2104,7 +2104,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2137,8 +2137,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -2164,7 +2164,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -2193,8 +2193,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -2225,7 +2225,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2258,7 +2258,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -2288,7 +2288,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2318,19 +2318,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax 
killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2347,19 +2347,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2378,20 +2378,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2411,20 +2411,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2445,19 +2445,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2477,20 +2477,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax 
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2517,8 +2517,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 @@ -2544,7 +2544,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -2573,8 +2573,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -2604,7 +2604,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2636,7 +2636,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -2666,7 +2666,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2699,8 +2699,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 @@ -2727,7 +2727,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 
killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -2757,8 +2757,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -2789,7 +2789,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -2822,7 +2822,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -2853,7 +2853,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} @@ -3208,8 +3208,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -3236,7 +3236,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -3266,8 +3266,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -3299,7 +3299,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -3333,7 +3333,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -3364,7 +3364,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # 
%entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -3395,18 +3395,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3423,18 +3423,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3453,19 +3453,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3486,19 +3486,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: 
@@ -3520,18 +3520,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3551,19 +3551,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3586,18 +3586,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3614,18 +3614,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3644,19 +3644,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; 
NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3677,19 +3677,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3711,18 +3711,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3742,19 +3742,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3781,8 +3781,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -3807,7 +3807,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # 
kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -3835,8 +3835,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -3866,7 +3866,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -3898,7 +3898,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -3927,7 +3927,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -3960,8 +3960,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -3987,7 +3987,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -4016,8 +4016,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -4048,7 +4048,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -4081,7 +4081,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -4111,7 +4111,7 @@ ; 
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -4141,19 +4141,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4170,19 +4170,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4201,20 +4201,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4235,20 +4235,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: 
kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4270,19 +4270,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4302,20 +4302,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4338,19 +4338,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4367,19 +4367,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4398,20 +4398,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: 
vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4432,20 +4432,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4467,19 +4467,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4499,20 +4499,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4540,8 +4540,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed 
%ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -4567,7 +4567,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -4596,8 +4596,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -4628,7 +4628,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -4661,7 +4661,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -4691,7 +4691,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -4725,8 +4725,8 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -4753,7 +4753,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -4783,8 +4783,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -4816,7 +4816,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 
{%k1} @@ -4850,7 +4850,7 @@ ; ; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -4881,7 +4881,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} @@ -4911,7 +4911,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -4919,7 +4919,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4936,7 +4936,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -4944,7 +4944,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4963,7 +4963,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -4972,7 +4972,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4992,7 +4992,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -5001,7 +5001,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5022,7 +5022,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -5030,7 +5030,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5050,7 +5050,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, 
%eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -5059,7 +5059,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5760,7 +5760,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask: @@ -5769,7 +5769,7 @@ ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5786,7 +5786,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem: @@ -5795,7 +5795,7 @@ ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5814,7 +5814,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask: @@ -5824,7 +5824,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5844,7 +5844,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem: @@ -5854,7 +5854,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6968,18 +6968,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al 
killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6996,18 +6996,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7026,19 +7026,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7059,19 +7059,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7093,18 +7093,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7124,19 +7124,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al 
killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7159,18 +7159,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7187,18 +7187,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7217,19 +7217,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7250,19 +7250,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def 
$zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7284,18 +7284,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7315,19 +7315,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7354,8 +7354,8 @@ ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -7380,7 +7380,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -7408,8 +7408,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -7439,7 +7439,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} @@ -7471,7 
+7471,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7500,7 +7500,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7533,8 +7533,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -7560,7 +7560,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7589,8 +7589,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7621,7 +7621,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7654,7 +7654,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7684,7 +7684,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7714,19 +7714,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -7743,19 +7743,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -7774,20 +7774,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -7807,20 +7807,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -7841,19 +7841,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -7873,20 +7873,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -7913,8 +7913,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -7940,7 +7940,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -7969,8 +7969,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8000,7 +8000,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8032,7 +8032,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8062,7 +8062,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8095,8 +8095,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -8123,7 +8123,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8153,8 +8153,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8185,7 +8185,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8218,7 +8218,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8249,7 +8249,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8604,8 +8604,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -8632,7 +8632,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -8662,8 +8662,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -8695,7 +8695,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -8729,7 +8729,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -8760,7 +8760,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -8791,18 +8791,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -8819,18 +8819,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -8849,19 +8849,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -8882,19 +8882,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -8916,18 +8916,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -8947,19 +8947,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -8982,18 +8982,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9010,18 +9010,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9040,19 +9040,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9073,19 +9073,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9107,18 +9107,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9138,19 +9138,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9177,8 +9177,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -9203,7 +9203,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9231,8 +9231,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9262,7 +9262,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9294,7 +9294,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9323,7 +9323,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9356,8 +9356,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -9383,7 +9383,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9412,8 +9412,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9444,7 +9444,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9477,7 +9477,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9507,7 +9507,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9537,19 +9537,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9566,19 +9566,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9597,20 +9597,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9631,20 +9631,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9666,19 +9666,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9698,20 +9698,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9734,19 +9734,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9763,19 +9763,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9794,20 +9794,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9828,20 +9828,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9863,19 +9863,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9895,20 +9895,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -9936,8 +9936,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -9963,7 +9963,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -9992,8 +9992,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10024,7 +10024,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10057,7 +10057,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10087,7 +10087,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10121,8 +10121,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -10149,7 +10149,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10179,8 +10179,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10212,7 +10212,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10246,7 +10246,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10277,7 +10277,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10307,7 +10307,7 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
@@ -10315,7 +10315,7 @@
 ; NoVLX: # %bb.0: # %entry
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -10332,7 +10332,7 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
@@ -10340,7 +10340,7 @@
 ; NoVLX: # %bb.0: # %entry
 ; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -10359,7 +10359,7 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
@@ -10368,7 +10368,7 @@
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -10388,7 +10388,7 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
@@ -10397,7 +10397,7 @@
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -10418,7 +10418,7 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
@@ -10426,7 +10426,7 @@
 ; NoVLX: # %bb.0: # %entry
 ; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -10446,7 +10446,7 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
@@ -10455,7 +10455,7 @@
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -11190,7 +11190,7 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
@@ -11201,7 +11201,7 @@
 ; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -11218,7 +11218,7 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
@@ -11230,7 +11230,7 @@
 ; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -11249,7 +11249,7 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
@@ -11261,7 +11261,7 @@
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -11281,7 +11281,7 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
@@ -11294,7 +11294,7 @@
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12468,18 +12468,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12496,18 +12496,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12526,19 +12526,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12559,19 +12559,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12593,18 +12593,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12624,19 +12624,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12659,18 +12659,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12687,18 +12687,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12717,19 +12717,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12750,19 +12750,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12784,18 +12784,18 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12815,19 +12815,19 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -12854,8 +12854,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -12880,7 +12880,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -12908,8 +12908,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -12939,7 +12939,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -12971,7 +12971,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13000,7 +13000,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13033,8 +13033,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -13060,7 +13060,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13089,8 +13089,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13121,7 +13121,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13154,7 +13154,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13184,7 +13184,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13214,19 +13214,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -13243,19 +13243,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -13274,20 +13274,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -13307,20 +13307,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -13341,19 +13341,19 @@
 ; VLX: # %bb.0: # %entry
 ; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -13373,20 +13373,20 @@
 ; VLX-NEXT: kmovd %edi, %k1
 ; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
 ; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; VLX-NEXT: vzeroupper
 ; VLX-NEXT: retq
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
 ; NoVLX-NEXT: vzeroupper
 ; NoVLX-NEXT: retq
 entry:
@@ -13413,8 +13413,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -13440,7 +13440,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13469,8 +13469,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13500,7 +13500,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13532,7 +13532,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13562,7 +13562,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13595,8 +13595,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -13623,7 +13623,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13653,8 +13653,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13685,7 +13685,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13718,7 +13718,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13749,7 +13749,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -14104,8 +14104,8 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -14132,7 +14132,7 @@
 ;
 ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14162,8 +14162,8 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14195,7 +14195,7 @@
 ;
 ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
 ; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; NoVLX-NEXT: vmovdqa
(%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -14229,7 +14229,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -14260,7 +14260,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -14291,18 +14291,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14319,18 +14319,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14349,19 +14349,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14382,19 +14382,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: 
test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14416,18 +14416,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14447,19 +14447,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14482,18 +14482,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14510,18 +14510,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw 
$14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14540,19 +14540,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14573,19 +14573,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14607,18 +14607,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14638,19 +14638,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -14677,8 +14677,8 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -14703,7 +14703,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -14731,8 +14731,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -14762,7 +14762,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -14794,7 +14794,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -14823,7 +14823,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -14856,8 +14856,8 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -14883,7 +14883,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -14912,8 +14912,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -14944,7 +14944,7 @@ ; ; NoVLX-LABEL: 
test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -14977,7 +14977,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -15007,7 +15007,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -15037,19 +15037,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15066,19 +15066,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15097,20 +15097,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ 
-15131,20 +15131,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15166,19 +15166,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15198,20 +15198,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15234,19 +15234,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15263,19 +15263,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; 
VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15294,20 +15294,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15328,20 +15328,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15363,19 +15363,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15395,20 +15395,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: 
# %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15436,8 +15436,8 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -15463,7 +15463,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -15492,8 +15492,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -15524,7 +15524,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -15557,7 +15557,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -15587,7 +15587,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -15621,8 +15621,8 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -15649,7 +15649,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -15679,8 +15679,8 @@ ; ; NoVLX-LABEL: 
test_masked_vpcmpsgeq_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -15712,7 +15712,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -15746,7 +15746,7 @@ ; ; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -15777,7 +15777,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} @@ -15807,7 +15807,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -15815,7 +15815,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15832,7 +15832,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -15840,7 +15840,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15859,7 +15859,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -15868,7 +15868,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15888,7 +15888,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -15897,7 +15897,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: 
def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15918,7 +15918,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -15926,7 +15926,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15946,7 +15946,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -15955,7 +15955,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16692,7 +16692,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask: @@ -16704,7 +16704,7 @@ ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16721,7 +16721,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem: @@ -16733,7 +16733,7 @@ ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16752,7 +16752,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask: @@ -16765,7 +16765,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16785,7 +16785,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem: @@ -16798,7 +16798,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; 
NoVLX-NEXT: retq entry: @@ -17980,18 +17980,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18008,18 +18008,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18038,19 +18038,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18071,19 +18071,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18105,18 +18105,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # 
kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18136,19 +18136,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18171,18 +18171,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18199,18 +18199,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18229,19 +18229,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; 
NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18262,19 +18262,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18296,18 +18296,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18327,19 +18327,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18366,8 +18366,8 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -18392,7 +18392,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: 
vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -18420,8 +18420,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -18451,7 +18451,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -18483,7 +18483,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -18512,7 +18512,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -18545,8 +18545,8 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -18572,7 +18572,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -18601,8 +18601,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -18633,7 +18633,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -18666,7 +18666,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -18696,7 +18696,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # 
kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -18726,19 +18726,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18755,19 +18755,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18786,20 +18786,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18819,20 +18819,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # 
kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18853,19 +18853,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18885,20 +18885,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18925,8 +18925,8 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 @@ -18952,7 +18952,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -18981,8 +18981,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -19012,7 +19012,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -19044,7 +19044,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd 
(%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -19074,7 +19074,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -19107,8 +19107,8 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 @@ -19135,7 +19135,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -19165,8 +19165,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -19197,7 +19197,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -19230,7 +19230,7 @@ ; ; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -19261,7 +19261,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1} @@ -19616,8 +19616,8 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -19644,7 +19644,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -19674,8 +19674,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask: ; NoVLX: # %bb.0: # %entry -; 
NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -19707,7 +19707,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -19741,7 +19741,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -19772,7 +19772,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -19803,18 +19803,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19831,18 +19831,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19861,19 +19861,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19894,19 +19894,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19928,18 +19928,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19959,19 +19959,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19994,18 +19994,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax 
; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20022,18 +20022,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20052,19 +20052,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20085,19 +20085,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20119,18 +20119,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20150,19 +20150,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax 
killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20189,8 +20189,8 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -20215,7 +20215,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -20243,8 +20243,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -20274,7 +20274,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -20306,7 +20306,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -20335,7 +20335,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -20368,8 +20368,8 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -20395,7 +20395,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %xmm1 ; 
NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -20424,8 +20424,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -20456,7 +20456,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -20489,7 +20489,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -20519,7 +20519,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -20549,19 +20549,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20578,19 +20578,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20609,20 +20609,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # 
%entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20643,20 +20643,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20678,19 +20678,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20710,20 +20710,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20746,19 +20746,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; 
NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20775,19 +20775,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20806,20 +20806,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20840,20 +20840,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20875,19 +20875,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, 
%k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20907,20 +20907,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20948,8 +20948,8 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -20975,7 +20975,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -21004,8 +21004,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -21036,7 +21036,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -21069,7 +21069,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -21099,7 +21099,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -21133,8 +21133,8 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; 
NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -21161,7 +21161,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -21191,8 +21191,8 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -21224,7 +21224,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovdqa (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -21258,7 +21258,7 @@ ; ; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -21289,7 +21289,7 @@ ; ; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} @@ -21319,7 +21319,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -21327,7 +21327,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21344,7 +21344,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -21352,7 +21352,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21371,7 +21371,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -21380,7 +21380,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed 
%ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21400,7 +21400,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -21409,7 +21409,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21430,7 +21430,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -21438,7 +21438,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21458,7 +21458,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -21467,7 +21467,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21812,18 +21812,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21840,18 +21840,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21869,18 +21869,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, 
%eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21900,19 +21900,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21932,19 +21932,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovaps (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21965,19 +21965,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22000,18 +22000,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: 
def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22028,18 +22028,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22057,18 +22057,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22088,19 +22088,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22120,19 +22120,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovaps (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 
; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22153,19 +22153,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22192,8 +22192,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -22218,7 +22218,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -22245,7 +22245,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -22274,8 +22274,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -22304,7 +22304,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovaps (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -22335,7 +22335,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -22368,8 +22368,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def 
$zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -22395,7 +22395,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -22423,7 +22423,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -22453,8 +22453,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -22484,7 +22484,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovaps (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -22516,7 +22516,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -22546,19 +22546,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22575,19 +22575,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax 
killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22605,19 +22605,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22637,20 +22637,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22670,20 +22670,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22704,20 +22704,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22745,8 +22745,8 @@ ; ; NoVLX-LABEL: 
test_vcmpoeqps_v8i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 @@ -22772,7 +22772,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -22800,7 +22800,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -22830,8 +22830,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -22861,7 +22861,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -22893,7 +22893,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -22927,8 +22927,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 @@ -22955,7 +22955,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -22984,7 +22984,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -23015,8 +23015,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def 
$zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $8, %k0, %k0 @@ -23047,7 +23047,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovaps (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -23080,7 +23080,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1} @@ -23530,8 +23530,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -23558,7 +23558,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -23587,7 +23587,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -23618,8 +23618,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -23650,7 +23650,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -23683,7 +23683,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -23714,18 +23714,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 
killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23742,18 +23742,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23771,18 +23771,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23802,19 +23802,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23834,19 +23834,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed 
%eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23867,19 +23867,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23902,18 +23902,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23930,18 +23930,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23959,18 +23959,18 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23990,19 +23990,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # 
kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24022,19 +24022,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24055,19 +24055,19 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24094,8 +24094,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -24120,7 +24120,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -24147,7 +24147,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -24176,8 +24176,8 @@ ; ; NoVLX-LABEL: 
test_masked_vcmpoeqpd_v2i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -24206,7 +24206,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -24237,7 +24237,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -24270,8 +24270,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 ; NoVLX-NEXT: kshiftrw $14, %k0, %k0 @@ -24297,7 +24297,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -24325,7 +24325,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -24355,8 +24355,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $14, %k0, %k0 @@ -24386,7 +24386,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %xmm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -24418,7 +24418,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -24448,19 +24448,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed 
$eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24477,19 +24477,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24507,19 +24507,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24539,20 +24539,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24572,20 +24572,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def 
%ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24606,20 +24606,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24642,19 +24642,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24671,19 +24671,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24701,19 +24701,19 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, 
%k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24733,20 +24733,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24766,20 +24766,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24800,20 +24800,20 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24841,8 +24841,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -24868,7 +24868,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %ymm1 ; 
NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -24896,7 +24896,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -24926,8 +24926,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -24957,7 +24957,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -24989,7 +24989,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -25023,8 +25023,8 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $12, %k0, %k0 @@ -25051,7 +25051,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vmovapd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -25080,7 +25080,7 @@ ; ; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -25111,8 +25111,8 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 @@ -25143,7 +25143,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vmovapd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -25176,7 +25176,7 @@ ; ; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b: ; NoVLX: # %bb.0: # %entry -; NoVLX-NEXT: # 
kill: def %ymm0 killed %ymm0 def %zmm0 +; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} @@ -25206,7 +25206,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25214,7 +25214,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25231,7 +25231,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25239,7 +25239,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25257,7 +25257,7 @@ ; VLX: # %bb.0: # %entry ; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25265,7 +25265,7 @@ ; NoVLX: # %bb.0: # %entry ; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25285,7 +25285,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25294,7 +25294,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25314,7 +25314,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25323,7 +25323,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25344,7 +25344,7 @@ ; VLX-NEXT: kmovd %edi, %k1 ; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25353,7 +25353,7 @@ ; NoVLX-NEXT: kmovw %edi, %k1 ; NoVLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq 
entry: @@ -25377,7 +25377,7 @@ ; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 ; VLX-NEXT: kmovd %k0, %eax ; VLX-NEXT: movzbl %al, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25386,7 +25386,7 @@ ; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax ; NoVLX-NEXT: movzbl %al, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25404,7 +25404,7 @@ ; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1} ; VLX-NEXT: kmovd %k0, %eax ; VLX-NEXT: movzbl %al, %eax -; VLX-NEXT: # kill: def %ax killed %ax killed %eax +; VLX-NEXT: # kill: def $ax killed $ax killed $eax ; VLX-NEXT: vzeroupper ; VLX-NEXT: retq ; @@ -25414,7 +25414,7 @@ ; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1} ; NoVLX-NEXT: kmovw %k0, %eax ; NoVLX-NEXT: movzbl %al, %eax -; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax +; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25878,18 +25878,18 @@ ; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 ; VLX-NEXT: kshiftlb $4, %k0, %k0 ; VLX-NEXT: kmovd %k0, %eax -; VLX-NEXT: # kill: def %al killed %al killed %eax +; VLX-NEXT: # kill: def $al killed $al killed $eax ; VLX-NEXT: retq ; ; NoVLX-LABEL: mask_zero_lower: ; NoVLX: # %bb.0: -; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; NoVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 ; NoVLX-NEXT: kshiftlw $12, %k0, %k0 ; NoVLX-NEXT: kshiftrw $8, %k0, %k0 ; NoVLX-NEXT: kmovw %k0, %eax -; NoVLX-NEXT: # kill: def %al killed %al killed %eax +; NoVLX-NEXT: # kill: def $al killed $al killed $eax ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq %cmp = icmp ult <4 x i32> %a, zeroinitializer Index: test/CodeGen/X86/avx512vl-vec-test-testn.ll =================================================================== --- test/CodeGen/X86/avx512vl-vec-test-testn.ll +++ test/CodeGen/X86/avx512vl-vec-test-testn.ll @@ -8,14 +8,14 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_test_epi64_mask: ; I386: # %bb.0: # %entry ; I386-NEXT: vptestmq %xmm0, %xmm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -31,14 +31,14 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_test_epi32_mask: ; I386: # %bb.0: # %entry ; I386-NEXT: vptestmd %xmm0, %xmm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -55,7 +55,7 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -63,7 
+63,7 @@ ; I386: # %bb.0: # %entry ; I386-NEXT: vptestmq %ymm0, %ymm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: @@ -80,7 +80,7 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -88,7 +88,7 @@ ; I386: # %bb.0: # %entry ; I386-NEXT: vptestmd %ymm0, %ymm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: @@ -106,7 +106,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_mask_test_epi64_mask: @@ -115,7 +115,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -135,7 +135,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_mask_test_epi32_mask: @@ -144,7 +144,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -166,7 +166,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -176,7 +176,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: @@ -197,7 +197,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -207,7 +207,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: @@ -226,14 +226,14 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_testn_epi64_mask: ; I386: # %bb.0: # %entry ; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al 
killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -249,14 +249,14 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_testn_epi32_mask: ; I386: # %bb.0: # %entry ; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -273,7 +273,7 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -281,7 +281,7 @@ ; I386: # %bb.0: # %entry ; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: @@ -298,7 +298,7 @@ ; X86_64: # %bb.0: # %entry ; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0 ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -306,7 +306,7 @@ ; I386: # %bb.0: # %entry ; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0 ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: @@ -324,7 +324,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_mask_testn_epi64_mask: @@ -333,7 +333,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -353,7 +353,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: retq ; ; I386-LABEL: TEST_mm_mask_testn_epi32_mask: @@ -362,7 +362,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: retl entry: %and.i.i = and <2 x i64> %__B, %__A @@ -384,7 +384,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -394,7 +394,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: 
retl entry: @@ -415,7 +415,7 @@ ; X86_64-NEXT: kmovw %edi, %k1 ; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1} ; X86_64-NEXT: kmovw %k0, %eax -; X86_64-NEXT: # kill: def %al killed %al killed %eax +; X86_64-NEXT: # kill: def $al killed $al killed $eax ; X86_64-NEXT: vzeroupper ; X86_64-NEXT: retq ; @@ -425,7 +425,7 @@ ; I386-NEXT: kmovw %eax, %k1 ; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1} ; I386-NEXT: kmovw %k0, %eax -; I386-NEXT: # kill: def %al killed %al killed %eax +; I386-NEXT: # kill: def $al killed $al killed $eax ; I386-NEXT: vzeroupper ; I386-NEXT: retl entry: Index: test/CodeGen/X86/bitcast-and-setcc-128.ll =================================================================== --- test/CodeGen/X86/bitcast-and-setcc-128.ll +++ test/CodeGen/X86/bitcast-and-setcc-128.ll @@ -14,7 +14,7 @@ ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v8i16: @@ -24,7 +24,7 @@ ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v8i16: @@ -36,7 +36,7 @@ ; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -45,7 +45,7 @@ ; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <8 x i16> %a, %b %x1 = icmp sgt <8 x i16> %c, %d @@ -61,7 +61,7 @@ ; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4i32: @@ -70,7 +70,7 @@ ; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4i32: @@ -78,7 +78,7 @@ ; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4i32: @@ -86,7 +86,7 @@ ; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <4 x i32> %a, %b %x1 = icmp sgt <4 x i32> %c, %d @@ -102,7 +102,7 @@ ; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3 ; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3 ; SSE2-SSSE3-NEXT: movmskps %xmm3, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; 
AVX12-LABEL: v4f32: @@ -111,7 +111,7 @@ ; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1 ; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4f32: @@ -119,7 +119,7 @@ ; AVX512F-NEXT: vcmpltps %xmm0, %xmm1, %k1 ; AVX512F-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4f32: @@ -127,7 +127,7 @@ ; AVX512BW-NEXT: vcmpltps %xmm0, %xmm1, %k1 ; AVX512BW-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = fcmp ogt <4 x float> %a, %b %x1 = fcmp ogt <4 x float> %c, %d @@ -143,7 +143,7 @@ ; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax -; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax +; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v16i8: @@ -152,7 +152,7 @@ ; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %ax killed %ax killed %eax +; AVX12-NEXT: # kill: def $ax killed $ax killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v16i8: @@ -164,7 +164,7 @@ ; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0 ; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -173,7 +173,7 @@ ; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <16 x i8> %a, %b %x1 = icmp sgt <16 x i8> %c, %d @@ -236,7 +236,7 @@ ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i8: @@ -265,7 +265,7 @@ ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: retq ; ; AVX2-LABEL: v2i8: @@ -294,7 +294,7 @@ ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: retq ; ; AVX512F-LABEL: v2i8: @@ -310,7 +310,7 @@ ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i8: @@ -326,7 +326,7 @@ ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: 
def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <2 x i8> %a, %b %x1 = icmp sgt <2 x i8> %c, %d @@ -389,7 +389,7 @@ ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i16: @@ -418,7 +418,7 @@ ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: retq ; ; AVX2-LABEL: v2i16: @@ -447,7 +447,7 @@ ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: retq ; ; AVX512F-LABEL: v2i16: @@ -463,7 +463,7 @@ ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i16: @@ -479,7 +479,7 @@ ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <2 x i16> %a, %b %x1 = icmp sgt <2 x i16> %c, %d @@ -534,7 +534,7 @@ ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i32: @@ -559,7 +559,7 @@ ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: retq ; ; AVX2-LABEL: v2i32: @@ -584,7 +584,7 @@ ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: retq ; ; AVX512F-LABEL: v2i32: @@ -600,7 +600,7 @@ ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i32: @@ -616,7 +616,7 @@ ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <2 x i32> %a, %b %x1 = icmp sgt <2 x i32> %c, %d @@ -651,7 +651,7 @@ ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v2i64: @@ -660,7 +660,7 @@ ; AVX12-NEXT: vpcmpgtq 
%xmm3, %xmm2, %xmm1 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskpd %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v2i64: @@ -668,7 +668,7 @@ ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i64: @@ -676,7 +676,7 @@ ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <2 x i64> %a, %b %x1 = icmp sgt <2 x i64> %c, %d @@ -692,7 +692,7 @@ ; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3 ; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3 ; SSE2-SSSE3-NEXT: movmskpd %xmm3, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v2f64: @@ -701,7 +701,7 @@ ; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1 ; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskpd %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v2f64: @@ -709,7 +709,7 @@ ; AVX512F-NEXT: vcmpltpd %xmm0, %xmm1, %k1 ; AVX512F-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2f64: @@ -717,7 +717,7 @@ ; AVX512BW-NEXT: vcmpltpd %xmm0, %xmm1, %k1 ; AVX512BW-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = fcmp ogt <2 x double> %a, %b %x1 = fcmp ogt <2 x double> %c, %d @@ -741,7 +741,7 @@ ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4i8: @@ -758,7 +758,7 @@ ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4i8: @@ -774,7 +774,7 @@ ; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4i8: @@ -790,7 +790,7 @@ ; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <4 x i8> %a, %b %x1 = icmp sgt <4 x i8> %c, %d @@ -814,7 +814,7 @@ ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax -; SSE2-SSSE3-NEXT: # 
kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4i16: @@ -831,7 +831,7 @@ ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4i16: @@ -847,7 +847,7 @@ ; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1 ; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4i16: @@ -863,7 +863,7 @@ ; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <4 x i16> %a, %b %x1 = icmp sgt <4 x i16> %c, %d @@ -888,7 +888,7 @@ ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v8i8: @@ -906,7 +906,7 @@ ; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v8i8: @@ -926,7 +926,7 @@ ; AVX512F-NEXT: vpmovsxwd %xmm2, %ymm0 ; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -943,7 +943,7 @@ ; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1 ; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x0 = icmp sgt <8 x i8> %a, %b %x1 = icmp sgt <8 x i8> %c, %d Index: test/CodeGen/X86/bitcast-and-setcc-256.ll =================================================================== --- test/CodeGen/X86/bitcast-and-setcc-256.ll +++ test/CodeGen/X86/bitcast-and-setcc-256.ll @@ -54,7 +54,7 @@ ; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2] ; SSE2-SSSE3-NEXT: andps %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v4i64: @@ -71,7 +71,7 @@ ; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskps %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -85,7 +85,7 @@ ; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskps %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -94,7 +94,7 @@ ; AVX512F-NEXT: vpcmpgtq %ymm1, %ymm0, %k1 ; AVX512F-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1} ; 
AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -103,7 +103,7 @@ ; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %k1 ; AVX512BW-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = icmp sgt <4 x i64> %a, %b @@ -124,7 +124,7 @@ ; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2] ; SSE2-SSSE3-NEXT: andps %xmm2, %xmm6 ; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4f64: @@ -137,7 +137,7 @@ ; AVX12-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper ; AVX12-NEXT: retq ; @@ -146,7 +146,7 @@ ; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %k1 ; AVX512F-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -155,7 +155,7 @@ ; AVX512BW-NEXT: vcmpltpd %ymm0, %ymm1, %k1 ; AVX512BW-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = fcmp ogt <4 x double> %a, %b @@ -176,7 +176,7 @@ ; SSE2-SSSE3-NEXT: packsswb %xmm5, %xmm4 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4 ; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax -; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax +; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v16i16: @@ -193,7 +193,7 @@ ; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %ax killed %ax killed %eax +; AVX1-NEXT: # kill: def $ax killed $ax killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -207,7 +207,7 @@ ; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %ax killed %ax killed %eax +; AVX2-NEXT: # kill: def $ax killed $ax killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -220,7 +220,7 @@ ; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 ; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -229,7 +229,7 @@ ; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1 ; AVX512BW-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = icmp sgt <16 x i16> %a, %b @@ -251,7 +251,7 @@ ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm4 ; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # 
kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v8i32: @@ -269,7 +269,7 @@ ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -284,7 +284,7 @@ ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -293,7 +293,7 @@ ; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k1 ; AVX512F-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -302,7 +302,7 @@ ; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k1 ; AVX512BW-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = icmp sgt <8 x i32> %a, %b @@ -324,7 +324,7 @@ ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm6 ; SSE2-SSSE3-NEXT: pmovmskb %xmm6, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v8f32: @@ -338,7 +338,7 @@ ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper ; AVX12-NEXT: retq ; @@ -347,7 +347,7 @@ ; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k1 ; AVX512F-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -356,7 +356,7 @@ ; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k1 ; AVX512BW-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = fcmp ogt <8 x float> %a, %b Index: test/CodeGen/X86/bitcast-and-setcc-512.ll =================================================================== --- test/CodeGen/X86/bitcast-and-setcc-512.ll +++ test/CodeGen/X86/bitcast-and-setcc-512.ll @@ -29,7 +29,7 @@ ; SSE-NEXT: pand %xmm0, %xmm8 ; SSE-NEXT: packsswb %xmm0, %xmm8 ; SSE-NEXT: pmovmskb %xmm8, %eax -; SSE-NEXT: # kill: def %al killed %al killed %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax ; SSE-NEXT: retq ; ; AVX1-LABEL: v8i64: @@ -64,7 +64,7 @@ ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -92,7 +92,7 @@ ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; 
AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -101,7 +101,7 @@ ; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 ; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -110,7 +110,7 @@ ; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 ; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = icmp sgt <8 x i64> %a, %b @@ -144,7 +144,7 @@ ; SSE-NEXT: pand %xmm4, %xmm8 ; SSE-NEXT: packsswb %xmm0, %xmm8 ; SSE-NEXT: pmovmskb %xmm8, %eax -; SSE-NEXT: # kill: def %al killed %al killed %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax ; SSE-NEXT: retq ; ; AVX12-LABEL: v8f64: @@ -171,7 +171,7 @@ ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper ; AVX12-NEXT: retq ; @@ -180,7 +180,7 @@ ; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1 ; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -189,7 +189,7 @@ ; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1 ; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = fcmp ogt <8 x double> %a, %b @@ -336,7 +336,7 @@ ; SSE-NEXT: packsswb %xmm10, %xmm8 ; SSE-NEXT: pand %xmm0, %xmm8 ; SSE-NEXT: pmovmskb %xmm8, %eax -; SSE-NEXT: # kill: def %ax killed %ax killed %eax +; SSE-NEXT: # kill: def $ax killed $ax killed $eax ; SSE-NEXT: retq ; ; AVX1-LABEL: v16i32: @@ -365,7 +365,7 @@ ; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %ax killed %ax killed %eax +; AVX1-NEXT: # kill: def $ax killed $ax killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -387,7 +387,7 @@ ; AVX2-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %ax killed %ax killed %eax +; AVX2-NEXT: # kill: def $ax killed $ax killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -396,7 +396,7 @@ ; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 ; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -405,7 +405,7 @@ ; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 ; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = icmp sgt <16 x i32> %a, %b @@ -438,7 +438,7 @@ ; SSE-NEXT: packsswb %xmm10, %xmm8 ; SSE-NEXT: pand %xmm4, %xmm8 ; SSE-NEXT: pmovmskb %xmm8, %eax -; SSE-NEXT: # kill: def %ax killed %ax killed 
%eax +; SSE-NEXT: # kill: def $ax killed $ax killed $eax ; SSE-NEXT: retq ; ; AVX12-LABEL: v16f32: @@ -459,7 +459,7 @@ ; AVX12-NEXT: vpacksswb %xmm1, %xmm2, %xmm1 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %ax killed %ax killed %eax +; AVX12-NEXT: # kill: def $ax killed $ax killed $eax ; AVX12-NEXT: vzeroupper ; AVX12-NEXT: retq ; @@ -468,7 +468,7 @@ ; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1 ; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1} ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -477,7 +477,7 @@ ; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1 ; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x0 = fcmp ogt <16 x float> %a, %b Index: test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll =================================================================== --- test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll +++ test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll @@ -13,7 +13,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) { ; SSE2-SSSE3-LABEL: ext_i2_2i64: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2] @@ -25,7 +25,7 @@ ; ; AVX1-LABEL: ext_i2_2i64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2] @@ -35,7 +35,7 @@ ; ; AVX2-LABEL: ext_i2_2i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2] @@ -189,7 +189,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) { ; SSE2-SSSE3-LABEL: ext_i4_4i64: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2] @@ -207,7 +207,7 @@ ; ; AVX1-LABEL: ext_i4_4i64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 @@ -224,7 +224,7 @@ ; ; AVX2-LABEL: ext_i4_4i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8] @@ -423,7 +423,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i64: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = 
[1,2] @@ -453,7 +453,7 @@ ; ; AVX1-LABEL: ext_i8_8i64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 @@ -477,7 +477,7 @@ ; ; AVX2-LABEL: ext_i8_8i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8] Index: test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll =================================================================== --- test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll +++ test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll @@ -14,7 +14,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) { ; SSE2-SSSE3-LABEL: ext_i2_2i64: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2] @@ -27,7 +27,7 @@ ; ; AVX1-LABEL: ext_i2_2i64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2] @@ -38,7 +38,7 @@ ; ; AVX2-LABEL: ext_i2_2i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2] @@ -51,7 +51,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: kmovw %edi, %k1 ; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -100,7 +100,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: kmovw %edi, %k1 ; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -152,7 +152,7 @@ ; AVX512F-NEXT: kmovw %edi, %k1 ; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -238,7 +238,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) { ; SSE2-SSSE3-LABEL: ext_i4_4i64: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2] @@ -258,7 +258,7 @@ ; ; AVX1-LABEL: ext_i4_4i64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 @@ -277,7 +277,7 @@ ; ; AVX2-LABEL: ext_i4_4i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; 
AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8] @@ -290,7 +290,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: kmovw %edi, %k1 ; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VLBW-LABEL: ext_i4_4i64: @@ -351,7 +351,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: kmovw %edi, %k1 ; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VLBW-LABEL: ext_i8_8i32: @@ -533,7 +533,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) { ; SSE2-SSSE3-LABEL: ext_i8_8i64: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2] @@ -567,7 +567,7 @@ ; ; AVX1-LABEL: ext_i8_8i64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 @@ -595,7 +595,7 @@ ; ; AVX2-LABEL: ext_i8_8i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8] Index: test/CodeGen/X86/bitcast-int-to-vector-bool.ll =================================================================== --- test/CodeGen/X86/bitcast-int-to-vector-bool.ll +++ test/CodeGen/X86/bitcast-int-to-vector-bool.ll @@ -8,7 +8,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) { ; SSE2-SSSE3-LABEL: bitcast_i2_2i1: ; SSE2-SSSE3: # %bb.0: -; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi +; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi ; SSE2-SSSE3-NEXT: movq %rdi, %xmm0 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1] ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2] @@ -21,7 +21,7 @@ ; ; AVX1-LABEL: bitcast_i2_2i1: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %edi killed %edi def %rdi +; AVX1-NEXT: # kill: def $edi killed $edi def $rdi ; AVX1-NEXT: vmovq %rdi, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2] @@ -32,7 +32,7 @@ ; ; AVX2-LABEL: bitcast_i2_2i1: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %edi killed %edi def %rdi +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2] Index: test/CodeGen/X86/bitcast-int-to-vector.ll =================================================================== --- test/CodeGen/X86/bitcast-int-to-vector.ll +++ test/CodeGen/X86/bitcast-int-to-vector.ll @@ -10,7 +10,7 @@ ; X86-NEXT: flds {{[0-9]+}}(%esp) ; X86-NEXT: fucompp ; X86-NEXT: fnstsw %ax -; X86-NEXT: # kill: def %ah killed %ah killed %ax +; X86-NEXT: # kill: def $ah killed $ah killed $ax ; X86-NEXT: sahf ; X86-NEXT: setp %al ; X86-NEXT: retl Index: test/CodeGen/X86/bitcast-setcc-128.ll =================================================================== --- test/CodeGen/X86/bitcast-setcc-128.ll +++ test/CodeGen/X86/bitcast-setcc-128.ll @@ -12,7 +12,7 @@ 
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v8i16: @@ -20,7 +20,7 @@ ; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v8i16: @@ -29,7 +29,7 @@ ; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -37,7 +37,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <8 x i16> %a, %b %res = bitcast <8 x i1> %x to i8 @@ -49,28 +49,28 @@ ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4i32: ; AVX12: # %bb.0: ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4i32: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4i32: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <4 x i32> %a, %b %res = bitcast <4 x i1> %x to i4 @@ -82,28 +82,28 @@ ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4f32: ; AVX12: # %bb.0: ; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4f32: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vcmpltps %xmm0, %xmm1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4f32: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vcmpltps %xmm0, %xmm1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = fcmp ogt <4 x float> %a, %b %res = bitcast <4 x i1> %x to i4 @@ -115,14 +115,14 @@ ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax 
killed %eax +; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v16i8: ; AVX12: # %bb.0: ; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %ax killed %ax killed %eax +; AVX12-NEXT: # kill: def $ax killed $ax killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v16i8: @@ -131,7 +131,7 @@ ; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0 ; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -139,7 +139,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <16 x i8> %a, %b %res = bitcast <16 x i1> %x to i16 @@ -175,7 +175,7 @@ ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i8: @@ -192,7 +192,7 @@ ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: retq ; ; AVX2-LABEL: v2i8: @@ -209,7 +209,7 @@ ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: retq ; ; AVX512F-LABEL: v2i8: @@ -220,7 +220,7 @@ ; AVX512F-NEXT: vpsraq $56, %xmm0, %xmm0 ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i8: @@ -231,7 +231,7 @@ ; AVX512BW-NEXT: vpsraq $56, %xmm0, %xmm0 ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <2 x i8> %a, %b %res = bitcast <2 x i1> %x to i2 @@ -267,7 +267,7 @@ ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i16: @@ -284,7 +284,7 @@ ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: retq ; ; AVX2-LABEL: v2i16: @@ -301,7 +301,7 @@ ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: retq ; ; AVX512F-LABEL: v2i16: @@ -312,7 +312,7 @@ ; 
AVX512F-NEXT: vpsraq $48, %xmm0, %xmm0 ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i16: @@ -323,7 +323,7 @@ ; AVX512BW-NEXT: vpsraq $48, %xmm0, %xmm0 ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <2 x i16> %a, %b %res = bitcast <2 x i1> %x to i2 @@ -355,7 +355,7 @@ ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v2i32: @@ -370,7 +370,7 @@ ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: retq ; ; AVX2-LABEL: v2i32: @@ -385,7 +385,7 @@ ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: retq ; ; AVX512F-LABEL: v2i32: @@ -396,7 +396,7 @@ ; AVX512F-NEXT: vpsraq $32, %xmm0, %xmm0 ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i32: @@ -407,7 +407,7 @@ ; AVX512BW-NEXT: vpsraq $32, %xmm0, %xmm0 ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <2 x i32> %a, %b %res = bitcast <2 x i1> %x to i2 @@ -429,28 +429,28 @@ ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v2i64: ; AVX12: # %bb.0: ; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskpd %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v2i64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2i64: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <2 x i64> %a, %b %res = bitcast <2 x i1> %x to i2 @@ -462,28 +462,28 @@ ; SSE2-SSSE3: # %bb.0: ; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed 
$al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v2f64: ; AVX12: # %bb.0: ; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0 ; AVX12-NEXT: vmovmskpd %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v2f64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vcmpltpd %xmm0, %xmm1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v2f64: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vcmpltpd %xmm0, %xmm1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = fcmp ogt <2 x double> %a, %b %res = bitcast <2 x i1> %x to i2 @@ -499,7 +499,7 @@ ; SSE2-SSSE3-NEXT: psrad $24, %xmm0 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4i8: @@ -510,7 +510,7 @@ ; AVX12-NEXT: vpsrad $24, %xmm0, %xmm0 ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4i8: @@ -521,7 +521,7 @@ ; AVX512F-NEXT: vpsrad $24, %xmm0, %xmm0 ; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4i8: @@ -532,7 +532,7 @@ ; AVX512BW-NEXT: vpsrad $24, %xmm0, %xmm0 ; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <4 x i8> %a, %b %res = bitcast <4 x i1> %x to i4 @@ -548,7 +548,7 @@ ; SSE2-SSSE3-NEXT: psrad $16, %xmm0 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4i16: @@ -559,7 +559,7 @@ ; AVX12-NEXT: vpsrad $16, %xmm0, %xmm0 ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vmovmskps %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v4i16: @@ -570,7 +570,7 @@ ; AVX512F-NEXT: vpsrad $16, %xmm0, %xmm0 ; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: v4i16: @@ -581,7 +581,7 @@ ; AVX512BW-NEXT: vpsrad $16, %xmm0, %xmm0 ; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <4 x i16> %a, %b %res = bitcast <4 x i1> %x to i4 @@ -598,7 +598,7 @@ ; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def 
$al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v8i8: @@ -610,7 +610,7 @@ ; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 ; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX12-NEXT: vpmovmskb %xmm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: retq ; ; AVX512F-LABEL: v8i8: @@ -623,7 +623,7 @@ ; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -635,7 +635,7 @@ ; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0 ; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: retq %x = icmp sgt <8 x i8> %a, %b %res = bitcast <8 x i1> %x to i8 Index: test/CodeGen/X86/bitcast-setcc-256.ll =================================================================== --- test/CodeGen/X86/bitcast-setcc-256.ll +++ test/CodeGen/X86/bitcast-setcc-256.ll @@ -13,7 +13,7 @@ ; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax +; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v16i16: @@ -24,7 +24,7 @@ ; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %ax killed %ax killed %eax +; AVX1-NEXT: # kill: def $ax killed $ax killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -34,7 +34,7 @@ ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %ax killed %ax killed %eax +; AVX2-NEXT: # kill: def $ax killed $ax killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -44,7 +44,7 @@ ; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 ; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -52,7 +52,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = icmp sgt <16 x i16> %a, %b @@ -68,7 +68,7 @@ ; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0 ; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v8i32: @@ -79,7 +79,7 @@ ; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: vmovmskps %ymm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -87,7 +87,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskps %ymm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ 
-95,7 +95,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -103,7 +103,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = icmp sgt <8 x i32> %a, %b @@ -119,14 +119,14 @@ ; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2 ; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v8f32: ; AVX12: # %bb.0: ; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0 ; AVX12-NEXT: vmovmskps %ymm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper ; AVX12-NEXT: retq ; @@ -134,7 +134,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -142,7 +142,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = fcmp ogt <8 x float> %a, %b @@ -233,7 +233,7 @@ ; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 ; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm1 ; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX1-LABEL: v4i64: @@ -244,7 +244,7 @@ ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: vmovmskpd %ymm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -252,7 +252,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskpd %ymm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -260,7 +260,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -268,7 +268,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = icmp sgt <4 x i64> %a, %b @@ -283,14 +283,14 @@ ; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2 ; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2 ; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax -; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax +; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSE2-SSSE3-NEXT: retq ; ; AVX12-LABEL: v4f64: ; AVX12: # %bb.0: ; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, 
%ymm0 ; AVX12-NEXT: vmovmskpd %ymm0, %eax -; AVX12-NEXT: # kill: def %al killed %al killed %eax +; AVX12-NEXT: # kill: def $al killed $al killed $eax ; AVX12-NEXT: vzeroupper ; AVX12-NEXT: retq ; @@ -298,7 +298,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -306,7 +306,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vcmpltpd %ymm0, %ymm1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = fcmp ogt <4 x double> %a, %b Index: test/CodeGen/X86/bitcast-setcc-512.ll =================================================================== --- test/CodeGen/X86/bitcast-setcc-512.ll +++ test/CodeGen/X86/bitcast-setcc-512.ll @@ -86,7 +86,7 @@ ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax -; SSE-NEXT: # kill: def %ax killed %ax killed %eax +; SSE-NEXT: # kill: def $ax killed $ax killed $eax ; SSE-NEXT: retq ; ; AVX1-LABEL: v16i32: @@ -103,7 +103,7 @@ ; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %ax killed %ax killed %eax +; AVX1-NEXT: # kill: def $ax killed $ax killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -116,7 +116,7 @@ ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %ax killed %ax killed %eax +; AVX2-NEXT: # kill: def $ax killed $ax killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -124,7 +124,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -132,7 +132,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = icmp sgt <16 x i32> %a, %b @@ -151,7 +151,7 @@ ; SSE-NEXT: packssdw %xmm5, %xmm4 ; SSE-NEXT: packsswb %xmm6, %xmm4 ; SSE-NEXT: pmovmskb %xmm4, %eax -; SSE-NEXT: # kill: def %ax killed %ax killed %eax +; SSE-NEXT: # kill: def $ax killed $ax killed $eax ; SSE-NEXT: retq ; ; AVX1-LABEL: v16f32: @@ -164,7 +164,7 @@ ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax -; AVX1-NEXT: # kill: def %ax killed %ax killed %eax +; AVX1-NEXT: # kill: def $ax killed $ax killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -177,7 +177,7 @@ ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax -; AVX2-NEXT: # kill: def %ax killed %ax killed %eax +; AVX2-NEXT: # kill: def $ax killed $ax killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -185,7 +185,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ 
-193,7 +193,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax +; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = fcmp ogt <16 x float> %a, %b @@ -1046,7 +1046,7 @@ ; SSE-NEXT: packssdw %xmm2, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax -; SSE-NEXT: # kill: def %al killed %al killed %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax ; SSE-NEXT: retq ; ; AVX1-LABEL: v8i64: @@ -1063,7 +1063,7 @@ ; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: vmovmskps %ymm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -1074,7 +1074,7 @@ ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-NEXT: vmovmskps %ymm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1082,7 +1082,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1090,7 +1090,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = icmp sgt <8 x i64> %a, %b @@ -1110,7 +1110,7 @@ ; SSE-NEXT: packssdw %xmm6, %xmm4 ; SSE-NEXT: packsswb %xmm0, %xmm4 ; SSE-NEXT: pmovmskb %xmm4, %eax -; SSE-NEXT: # kill: def %al killed %al killed %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax ; SSE-NEXT: retq ; ; AVX1-LABEL: v8f64: @@ -1123,7 +1123,7 @@ ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: vmovmskps %ymm0, %eax -; AVX1-NEXT: # kill: def %al killed %al killed %eax +; AVX1-NEXT: # kill: def $al killed $al killed $eax ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -1134,7 +1134,7 @@ ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-NEXT: vmovmskps %ymm0, %eax -; AVX2-NEXT: # kill: def %al killed %al killed %eax +; AVX2-NEXT: # kill: def $al killed $al killed $eax ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1142,7 +1142,7 @@ ; AVX512F: # %bb.0: ; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax -; AVX512F-NEXT: # kill: def %al killed %al killed %eax +; AVX512F-NEXT: # kill: def $al killed $al killed $eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1150,7 +1150,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax -; AVX512BW-NEXT: # kill: def %al killed %al killed %eax +; AVX512BW-NEXT: # kill: def $al killed $al killed $eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %x = fcmp ogt <8 x double> %a, %b Index: test/CodeGen/X86/bitreverse.ll =================================================================== --- test/CodeGen/X86/bitreverse.ll +++ test/CodeGen/X86/bitreverse.ll @@ -46,8 +46,8 @@ ; X86-NEXT: andl $43690, %ecx # imm = 0xAAAA ; X86-NEXT: shrl %ecx ; X86-NEXT: leal (%ecx,%edx,2), %edx -; X86-NEXT: # kill: 
def %ax killed %ax killed %eax -; X86-NEXT: # kill: def %dx killed %dx killed %edx +; X86-NEXT: # kill: def $ax killed $ax killed $eax +; X86-NEXT: # kill: def $dx killed $dx killed $edx ; X86-NEXT: retl ; ; X64-LABEL: test_bitreverse_v2i16: @@ -191,7 +191,7 @@ ; ; X64-LABEL: test_bitreverse_i32: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: bswapl %edi ; X64-NEXT: movl %edi, %eax ; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F @@ -242,7 +242,7 @@ ; ; X64-LABEL: test_bitreverse_i24: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: bswapl %edi ; X64-NEXT: movl %edi, %eax ; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F @@ -289,12 +289,12 @@ ; X86-NEXT: andl $43690, %eax # imm = 0xAAAA ; X86-NEXT: shrl %eax ; X86-NEXT: leal (%eax,%ecx,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_bitreverse_i16: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: rolw $8, %di ; X64-NEXT: movl %edi, %eax ; X64-NEXT: andl $3855, %eax # imm = 0xF0F @@ -312,7 +312,7 @@ ; X64-NEXT: andl $43690, %eax # imm = 0xAAAA ; X64-NEXT: shrl %eax ; X64-NEXT: leal (%rax,%rcx,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %b = call i16 @llvm.bitreverse.i16(i16 %a) ret i16 %b Index: test/CodeGen/X86/block-placement.mir =================================================================== --- test/CodeGen/X86/block-placement.mir +++ test/CodeGen/X86/block-placement.mir @@ -43,10 +43,10 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%esi' } + - { reg: '$rdi' } + - { reg: '$esi' } -# CHECK: %eax = FAULTING_OP 1, %bb.3, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr) +# CHECK: $eax = FAULTING_OP 1, %bb.3, 1684, killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr) # CHECK-NEXT: JMP_1 %bb.2 # CHECK: bb.3.null: # CHECK: bb.4.right: @@ -55,33 +55,33 @@ body: | bb.0.entry: successors: %bb.1(0x7ffff800), %bb.3(0x00000800) - liveins: %esi, %rdi + liveins: $esi, $rdi - frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - TEST8ri %sil, 1, implicit-def %eflags, implicit killed %esi - JE_1 %bb.3, implicit killed %eflags + TEST8ri $sil, 1, implicit-def $eflags, implicit killed $esi + JE_1 %bb.3, implicit killed $eflags bb.1.left: successors: %bb.2(0x7ffff800), %bb.4(0x00000800) - liveins: %rdi + liveins: $rdi - %eax = FAULTING_OP 1, %bb.2, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr) + $eax = FAULTING_OP 1, %bb.2, 1684, killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr) JMP_1 %bb.4 bb.4.not_null: - liveins: %rdi, %eax + liveins: $rdi, $eax - %rcx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + $rcx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax bb.2.null: - liveins: %rdi + liveins: $rdi - CALL64pcrel32 @stub, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp + CALL64pcrel32 @stub, csr_64, implicit $rsp, implicit $rdi, implicit-def $rsp bb.3.right: - dead %edi = XOR32rr undef %edi, undef %edi, implicit-def dead %eflags, implicit-def %rdi - CALL64pcrel32 @stub, csr_64, implicit %rsp, implicit %rdi, 
-    CALL64pcrel32 @stub, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp
+    dead $edi = XOR32rr undef $edi, undef $edi, implicit-def dead $eflags, implicit-def $rdi
+    CALL64pcrel32 @stub, csr_64, implicit $rsp, implicit $rdi, implicit-def $rsp
...
Index: test/CodeGen/X86/bmi-schedule.ll
===================================================================
--- test/CodeGen/X86/bmi-schedule.ll
+++ test/CodeGen/X86/bmi-schedule.ll
@@ -14,7 +14,7 @@
 ; GENERIC-NEXT: notl %edi # sched: [1:0.33]
 ; GENERIC-NEXT: andw (%rdx), %di # sched: [6:0.50]
 ; GENERIC-NEXT: addl %edi, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andn_i16:
@@ -23,7 +23,7 @@
 ; HASWELL-NEXT: notl %edi # sched: [1:0.25]
 ; HASWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
 ; HASWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_andn_i16:
@@ -32,7 +32,7 @@
 ; BROADWELL-NEXT: notl %edi # sched: [1:0.25]
 ; BROADWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
 ; BROADWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andn_i16:
@@ -41,7 +41,7 @@
 ; SKYLAKE-NEXT: notl %edi # sched: [1:0.25]
 ; SKYLAKE-NEXT: andw (%rdx), %di # sched: [6:0.50]
 ; SKYLAKE-NEXT: addl %edi, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andn_i16:
@@ -50,7 +50,7 @@
 ; BTVER2-NEXT: notl %edi # sched: [1:0.50]
 ; BTVER2-NEXT: andw (%rdx), %di # sched: [4:1.00]
 ; BTVER2-NEXT: addl %edi, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andn_i16:
@@ -59,7 +59,7 @@
 ; ZNVER1-NEXT: notl %edi # sched: [1:0.25]
 ; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
 ; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i16, i16 *%a2
 %2 = xor i16 %a0, -1
@@ -581,7 +581,7 @@
 ; GENERIC-NEXT: tzcntw (%rsi), %cx # sched: [7:1.00]
 ; GENERIC-NEXT: tzcntw %di, %ax # sched: [3:1.00]
 ; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cttz_i16:
@@ -589,7 +589,7 @@
 ; HASWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
 ; HASWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
 ; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_cttz_i16:
@@ -597,7 +597,7 @@
 ; BROADWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
 ; BROADWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
 ; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cttz_i16:
@@ -605,7 +605,7 @@
 ; SKYLAKE-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
 ; SKYLAKE-NEXT: tzcntw %di, %ax # sched: [3:1.00]
 ; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cttz_i16:
@@ -613,7 +613,7 @@
 ; BTVER2-NEXT: tzcntw (%rsi), %cx # sched: [6:1.00]
 ; BTVER2-NEXT: tzcntw %di, %ax # sched: [3:1.00]
 ; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cttz_i16:
@@ -621,7 +621,7 @@
 ; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
 ; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
 ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i16, i16 *%a1
 %2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
Index: test/CodeGen/X86/bmi.ll
===================================================================
--- test/CodeGen/X86/bmi.ll
+++ test/CodeGen/X86/bmi.ll
@@ -13,7 +13,7 @@
 ; CHECK-NEXT: movzbl %dil, %eax
 ; CHECK-NEXT: orl $256, %eax # imm = 0x100
 ; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 false )
 ret i8 %tmp
@@ -61,7 +61,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: movzbl %dil, %eax
 ; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 true )
 ret i8 %tmp
@@ -516,7 +516,7 @@
 ; BMI1-NEXT: movl $32, %ecx
 ; BMI1-NEXT: subl %esi, %ecx
 ; BMI1-NEXT: movl $-1, %eax
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BMI1-NEXT: shrl %cl, %eax
 ; BMI1-NEXT: andl %edi, %eax
 ; BMI1-NEXT: retq
@@ -538,7 +538,7 @@
 ; BMI1-NEXT: movl $32, %ecx
 ; BMI1-NEXT: subl %esi, %ecx
 ; BMI1-NEXT: shll %cl, %edi
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BMI1-NEXT: shrl %cl, %edi
 ; BMI1-NEXT: movl %edi, %eax
 ; BMI1-NEXT: retq
@@ -566,7 +566,7 @@
 ;
 ; BMI2-LABEL: bzhi64b:
 ; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
+; BMI2-NEXT: # kill: def $esi killed $esi def $rsi
 ; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
 ; BMI2-NEXT: retq
 entry:
@@ -583,7 +583,7 @@
 ; BMI1-NEXT: movl $64, %ecx
 ; BMI1-NEXT: subl %esi, %ecx
 ; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BMI1-NEXT: shrq %cl, %rax
 ; BMI1-NEXT: andq %rdi, %rax
 ; BMI1-NEXT: retq
@@ -605,14 +605,14 @@
 ; BMI1-NEXT: movl $64, %ecx
 ; BMI1-NEXT: subl %esi, %ecx
 ; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BMI1-NEXT: shrq %cl, %rax
 ; BMI1-NEXT: andq %rdi, %rax
 ; BMI1-NEXT: retq
 ;
 ; BMI2-LABEL: bzhi64d:
 ; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
+; BMI2-NEXT: # kill: def $esi killed $esi def $rsi
 ; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
 ; BMI2-NEXT: retq
 entry:
@@ -629,7 +629,7 @@
 ; BMI1-NEXT: movl $64, %ecx
 ; BMI1-NEXT: subl %esi, %ecx
 ; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BMI1-NEXT: shrq %cl, %rdi
 ; BMI1-NEXT: movq %rdi, %rax
 ; BMI1-NEXT: retq
@@ -651,14 +651,14 @@
 ; BMI1-NEXT: movl $64, %ecx
 ; BMI1-NEXT: subl %esi, %ecx
 ; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BMI1-NEXT: shrq %cl, %rdi
 ; BMI1-NEXT: movq %rdi, %rax
 ; BMI1-NEXT: retq
 ;
 ; BMI2-LABEL: bzhi64f:
 ; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
+; BMI2-NEXT: # kill: def $esi killed $esi def $rsi
 ; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
 ; BMI2-NEXT: retq
 entry:
Index: test/CodeGen/X86/bool-simplify.ll
===================================================================
--- test/CodeGen/X86/bool-simplify.ll
+++ test/CodeGen/X86/bool-simplify.ll
@@ -55,7 +55,7 @@
 ; CHECK-NEXT: rdrandw %cx
 ; CHECK-NEXT: cmovbw %di, %ax
 ; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %1 = tail call { i16, i32 } @llvm.x86.rdrand.16() nounwind
 %2 = extractvalue { i16, i32 } %1, 0
@@ -107,7 +107,7 @@
 ; CHECK-NEXT: rdseedw %cx
 ; CHECK-NEXT: cmovbw %di, %ax
 ; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %1 = tail call { i16, i32 } @llvm.x86.rdseed.16() nounwind
 %2 = extractvalue { i16, i32 } %1, 0
Index: test/CodeGen/X86/bool-vector.ll
===================================================================
--- test/CodeGen/X86/bool-vector.ll
+++ test/CodeGen/X86/bool-vector.ll
@@ -138,10 +138,10 @@
 ;
 ; X64-LABEL: PR15215_good:
 ; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %ecx killed %ecx def %rcx
-; X64-NEXT: # kill: def %edx killed %edx def %rdx
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $ecx killed $ecx def $rcx
+; X64-NEXT: # kill: def $edx killed $edx def $rdx
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: andl $1, %edi
 ; X64-NEXT: andl $1, %esi
 ; X64-NEXT: andl $1, %edx
Index: test/CodeGen/X86/branchfolding-undef.mir
===================================================================
--- test/CodeGen/X86/branchfolding-undef.mir
+++ test/CodeGen/X86/branchfolding-undef.mir
@@ -7,22 +7,22 @@
 ---
 # CHECK-LABEL: name: func
 # CHECK: bb.1:
-# CHECK: %eax = MOV32ri 2
+# CHECK: $eax = MOV32ri 2
 # CHECK-NOT: RET
 # CHECK: bb.2:
-# CHECK-NOT: RET 0, undef %eax
-# CHECK: RET 0, %eax
+# CHECK-NOT: RET 0, undef $eax
+# CHECK: RET 0, $eax
 name: func
 tracksRegLiveness: true
 body: |
   bb.0:
-    JE_1 %bb.1, implicit undef %eflags
+    JE_1 %bb.1, implicit undef $eflags
     JMP_1 %bb.2
   bb.1:
-    %eax = MOV32ri 2
-    RET 0, %eax
+    $eax = MOV32ri 2
+    RET 0, $eax
   bb.2:
-    RET 0, undef %eax
+    RET 0, undef $eax
...
Index: test/CodeGen/X86/broadcastm-lowering.ll
===================================================================
--- test/CodeGen/X86/broadcastm-lowering.ll
+++ test/CodeGen/X86/broadcastm-lowering.ll
@@ -104,8 +104,8 @@
 define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
 ; AVX512CD-LABEL: test_mm512_epi64:
 ; AVX512CD: # %bb.0: # %entry
-; AVX512CD-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512CD-NEXT: vpbroadcastmb2q %k0, %zmm0
 ; AVX512CD-NEXT: retq
@@ -136,8 +136,8 @@
 define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
 ; AVX512CD-LABEL: test_mm256_epi64:
 ; AVX512CD: # %bb.0: # %entry
-; AVX512CD-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512CD-NEXT: kmovw %k0, %eax
 ; AVX512CD-NEXT: vpxor %xmm0, %xmm0, %xmm0
Index: test/CodeGen/X86/bypass-slow-division-32.ll
===================================================================
--- test/CodeGen/X86/bypass-slow-division-32.ll
+++ test/CodeGen/X86/bypass-slow-division-32.ll
@@ -17,7 +17,7 @@
 ; CHECK-NEXT: retl
 ; CHECK-NEXT: .LBB0_1:
 ; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %cl
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: retl
@@ -41,7 +41,7 @@
 ; CHECK-NEXT: retl
 ; CHECK-NEXT: .LBB1_1:
 ; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %cl
 ; CHECK-NEXT: movzbl %ah, %eax
 ; CHECK-NEXT: retl
@@ -65,7 +65,7 @@
 ; CHECK-NEXT: retl
 ; CHECK-NEXT: .LBB2_1:
 ; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %cl
 ; CHECK-NEXT: movzbl %ah, %edx
 ; CHECK-NEXT: movzbl %al, %eax
@@ -103,14 +103,14 @@
 ; CHECK-NEXT: jmp .LBB3_6
 ; CHECK-NEXT: .LBB3_1:
 ; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %bl
 ; CHECK-NEXT: movzbl %al, %esi
 ; CHECK-NEXT: testl $-256, %edi
 ; CHECK-NEXT: jne .LBB3_5
 ; CHECK-NEXT: .LBB3_4:
 ; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %bl
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: .LBB3_6:
@@ -208,7 +208,7 @@
 ; CHECK-NEXT: .LBB8_1:
 ; CHECK-NEXT: movb $4, %al
 ; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %cl
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: retl
@@ -230,7 +230,7 @@
 ; CHECK-NEXT: .LBB9_1:
 ; CHECK-NEXT: movb $4, %al
 ; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
 ; CHECK-NEXT: divb %cl
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: retl
Index: test/CodeGen/X86/bypass-slow-division-64.ll
===================================================================
--- test/CodeGen/X86/bypass-slow-division-64.ll
+++ test/CodeGen/X86/bypass-slow-division-64.ll
@@ -20,7 +20,7 @@
 ; CHECK-NEXT: xorl %edx, %edx
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: def %eax killed %eax def %rax
+; CHECK-NEXT: # kill: def $eax killed $eax def $rax
 ; CHECK-NEXT: retq
 %result = sdiv i64 %a, %b
 ret i64 %result
@@ -43,7 +43,7 @@
 ; CHECK-NEXT: xorl %edx, %edx
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: def %edx killed %edx def %rdx
+; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
 ; CHECK-NEXT: movq %rdx, %rax
 ; CHECK-NEXT: retq
 %result = srem i64 %a, %b
@@ -67,8 +67,8 @@
 ; CHECK-NEXT: xorl %edx, %edx
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: def %edx killed %edx def %rdx
-; CHECK-NEXT: # kill: def %eax killed %eax def %rax
+; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
+; CHECK-NEXT: # kill: def $eax killed $eax def $rax
 ; CHECK-NEXT: addq %rdx, %rax
 ; CHECK-NEXT: retq
 %resultdiv = sdiv i64 %a, %b
Index: test/CodeGen/X86/clz.ll
===================================================================
--- test/CodeGen/X86/clz.ll
+++ test/CodeGen/X86/clz.ll
@@ -19,28 +19,28 @@
 ; X32: # %bb.0:
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: cttz_i8:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: cttz_i8:
 ; X32-CLZ: # %bb.0:
 ; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X32-CLZ-NEXT: retl
 ;
 ; X64-CLZ-LABEL: cttz_i8:
 ; X64-CLZ: # %bb.0:
 ; X64-CLZ-NEXT: movzbl %dil, %eax
 ; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X64-CLZ-NEXT: retq
 %tmp = call i8 @llvm.cttz.i8( i8 %x, i1 true )
 ret i8 %tmp
@@ -144,7 +144,7 @@
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: bsrl %eax, %eax
 ; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: ctlz_i8:
@@ -152,7 +152,7 @@
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: bsrl %eax, %eax
 ; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: ctlz_i8:
@@ -160,7 +160,7 @@
 ; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT: lzcntl %eax, %eax
 ; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X32-CLZ-NEXT: retl
 ;
 ; X64-CLZ-LABEL: ctlz_i8:
@@ -168,7 +168,7 @@
 ; X64-CLZ-NEXT: movzbl %dil, %eax
 ; X64-CLZ-NEXT: lzcntl %eax, %eax
 ; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X64-CLZ-NEXT: retq
 %tmp2 = call i8 @llvm.ctlz.i8( i8 %x, i1 true )
 ret i8 %tmp2
@@ -179,14 +179,14 @@
 ; X32: # %bb.0:
 ; X32-NEXT: bsrw {{[0-9]+}}(%esp), %ax
 ; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: ctlz_i16:
 ; X64: # %bb.0:
 ; X64-NEXT: bsrw %di, %ax
 ; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: ctlz_i16:
@@ -286,11 +286,11 @@
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: bsrl %eax, %eax
 ; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: .LBB8_1:
 ; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: ctlz_i8_zero_test:
@@ -301,11 +301,11 @@
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: bsrl %eax, %eax
 ; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: .LBB8_1:
 ; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: ctlz_i8_zero_test:
@@ -313,7 +313,7 @@
 ; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT: lzcntl %eax, %eax
 ; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X32-CLZ-NEXT: retl
 ;
 ; X64-CLZ-LABEL: ctlz_i8_zero_test:
@@ -321,7 +321,7 @@
 ; X64-CLZ-NEXT: movzbl %dil, %eax
 ; X64-CLZ-NEXT: lzcntl %eax, %eax
 ; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X64-CLZ-NEXT: retq
 %tmp1 = call i8 @llvm.ctlz.i8(i8 %n, i1 false)
 ret i8 %tmp1
@@ -337,11 +337,11 @@
 ; X32-NEXT: # %bb.2: # %cond.false
 ; X32-NEXT: bsrw %ax, %ax
 ; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: .LBB9_1:
 ; X32-NEXT: movw $16, %ax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: ctlz_i16_zero_test:
@@ -351,11 +351,11 @@
 ; X64-NEXT: # %bb.2: # %cond.false
 ; X64-NEXT: bsrw %di, %ax
 ; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: .LBB9_1:
 ; X64-NEXT: movw $16, %ax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: ctlz_i16_zero_test:
@@ -480,11 +480,11 @@
 ; X32-NEXT: # %bb.2: # %cond.false
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: .LBB12_1
 ; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: cttz_i8_zero_test:
@@ -494,11 +494,11 @@
 ; X64-NEXT: # %bb.2: # %cond.false
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: .LBB12_1:
 ; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: cttz_i8_zero_test:
@@ -506,7 +506,7 @@
 ; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT: orl $256, %eax # imm = 0x100
 ; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X32-CLZ-NEXT: retl
 ;
 ; X64-CLZ-LABEL: cttz_i8_zero_test:
@@ -514,7 +514,7 @@
 ; X64-CLZ-NEXT: movzbl %dil, %eax
 ; X64-CLZ-NEXT: orl $256, %eax # imm = 0x100
 ; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X64-CLZ-NEXT: retq
 %tmp1 = call i8 @llvm.cttz.i8(i8 %n, i1 false)
 ret i8 %tmp1
@@ -786,7 +786,7 @@
 ; X32-NEXT: orb $2, %al
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: cttz_i8_knownbits:
@@ -794,7 +794,7 @@
 ; X64-NEXT: orb $2, %dil
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: cttz_i8_knownbits:
@@ -803,7 +803,7 @@
 ; X32-CLZ-NEXT: orb $2, %al
 ; X32-CLZ-NEXT: movzbl %al, %eax
 ; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X32-CLZ-NEXT: retl
 ;
 ; X64-CLZ-LABEL: cttz_i8_knownbits:
@@ -811,7 +811,7 @@
 ; X64-CLZ-NEXT: orb $2, %dil
 ; X64-CLZ-NEXT: movzbl %dil, %eax
 ; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X64-CLZ-NEXT: retq
 %x2 = or i8 %x, 2
 %tmp = call i8 @llvm.cttz.i8(i8 %x2, i1 true )
@@ -827,7 +827,7 @@
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: bsrl %eax, %eax
 ; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: ctlz_i8_knownbits:
@@ -836,7 +836,7 @@
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: bsrl %eax, %eax
 ; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-CLZ-LABEL: ctlz_i8_knownbits:
@@ -846,7 +846,7 @@
 ; X32-CLZ-NEXT: movzbl %al, %eax
 ; X32-CLZ-NEXT: lzcntl %eax, %eax
 ; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X32-CLZ-NEXT: retl
 ;
 ; X64-CLZ-LABEL: ctlz_i8_knownbits:
@@ -855,7 +855,7 @@
 ; X64-CLZ-NEXT: movzbl %dil, %eax
 ; X64-CLZ-NEXT: lzcntl %eax, %eax
 ; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
 ; X64-CLZ-NEXT: retq
 %x2 = or i8 %x, 64
Index: test/CodeGen/X86/cmov-into-branch.ll
===================================================================
--- test/CodeGen/X86/cmov-into-branch.ll
+++ test/CodeGen/X86/cmov-into-branch.ll
@@ -65,7 +65,7 @@
 define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
 ; CHECK-LABEL: test6:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
 ; CHECK-NEXT: testl %edi, %edi
 ; CHECK-NEXT: cmovnsl (%rdx), %esi
 ; CHECK-NEXT: movq %rsi, (%rcx)
Index: test/CodeGen/X86/cmov-promotion.ll
===================================================================
--- test/CodeGen/X86/cmov-promotion.ll
+++ test/CodeGen/X86/cmov-promotion.ll
@@ -12,7 +12,7 @@
 ; CMOV-NEXT: movb $-19, %al
 ; CMOV-NEXT: .LBB0_2:
 ; CMOV-NEXT: movzbl %al, %eax
-; CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; CMOV-NEXT: # kill: def $ax killed $ax killed $eax
 ; CMOV-NEXT: retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_8_to_16:
@@ -24,7 +24,7 @@
 ; NO_CMOV-NEXT: movb $-19, %al
 ; NO_CMOV-NEXT: .LBB0_2:
 ; NO_CMOV-NEXT: movzbl %al, %eax
-; NO_CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; NO_CMOV-NEXT: # kill: def $ax killed $ax killed $eax
 ; NO_CMOV-NEXT: retl
 %t0 = select i1 %c, i8 117, i8 -19
 %ret = zext i8 %t0 to i16
@@ -167,7 +167,7 @@
 ; CMOV-NEXT: movb $-19, %al
 ; CMOV-NEXT: .LBB6_2:
 ; CMOV-NEXT: movsbl %al, %eax
-; CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; CMOV-NEXT: # kill: def $ax killed $ax killed $eax
 ; CMOV-NEXT: retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_8_to_16:
@@ -179,7 +179,7 @@
 ; NO_CMOV-NEXT: movb $-19, %al
 ; NO_CMOV-NEXT: .LBB6_2:
 ; NO_CMOV-NEXT: movsbl %al, %eax
-; NO_CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; NO_CMOV-NEXT: # kill: def $ax killed $ax killed $eax
 ; NO_CMOV-NEXT: retl
 %t0 = select i1 %c, i8 117, i8 -19
 %ret = sext i8 %t0 to i16
Index: test/CodeGen/X86/cmov.ll
===================================================================
--- test/CodeGen/X86/cmov.ll
+++ test/CodeGen/X86/cmov.ll
@@ -83,7 +83,7 @@
 ; CHECK-NEXT: shrb $7, %al
 ; CHECK-NEXT: movzbl %al, %ecx
 ; CHECK-NEXT: xorl $1, %ecx
-; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
 ; CHECK-NEXT: sarl %cl, %edx
 ; CHECK-NEXT: movb {{.*}}(%rip), %al
 ; CHECK-NEXT: testb %al, %al
Index: test/CodeGen/X86/combine-abs.ll
===================================================================
--- test/CodeGen/X86/combine-abs.ll
+++ test/CodeGen/X86/combine-abs.ll
@@ -77,9 +77,9 @@
 ;
 ; AVX512F-LABEL: combine_v4i64_abs_abs:
 ; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; AVX512F-NEXT: vpabsq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: combine_v4i64_abs_abs:
Index: test/CodeGen/X86/compress_expand.ll
===================================================================
--- test/CodeGen/X86/compress_expand.ll
+++ test/CodeGen/X86/compress_expand.ll
@@ -72,11 +72,11 @@
 ;
 ; KNL-LABEL: test4:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: movw $7, %ax
 ; KNL-NEXT: kmovw %eax, %k1
 ; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; KNL-NEXT: retq
 %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> , <4 x float> %src0)
 ret <4 x float>%res
@@ -92,11 +92,11 @@
 ;
 ; KNL-LABEL: test5:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: movb $2, %al
 ; KNL-NEXT: kmovw %eax, %k1
 ; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; KNL-NEXT: retq
 %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> , <2 x i64> %src0)
 ret <2 x i64>%res
@@ -137,7 +137,7 @@
 ;
 ; KNL-LABEL: test7:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
 ; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
 ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -198,7 +198,7 @@
 ;
 ; KNL-LABEL: test10:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; KNL-NEXT: vpslld $31, %xmm1, %xmm1
 ; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; KNL-NEXT: kshiftlw $12, %k0, %k0
@@ -219,7 +219,7 @@
 ;
 ; KNL-LABEL: test11:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
 ; KNL-NEXT: vptestmq %zmm1, %zmm1, %k0
 ; KNL-NEXT: kshiftlw $14, %k0, %k0
@@ -240,7 +240,7 @@
 ;
 ; KNL-LABEL: test12:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vpslld $31, %xmm1, %xmm1
 ; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; KNL-NEXT: kshiftlw $12, %k0, %k0
@@ -287,7 +287,7 @@
 ;
 ; KNL-LABEL: test14:
 ; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
 ; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
Index: test/CodeGen/X86/conditional-tailcall-samedest.mir
===================================================================
--- test/CodeGen/X86/conditional-tailcall-samedest.mir
+++ test/CodeGen/X86/conditional-tailcall-samedest.mir
@@ -9,8 +9,8 @@
 # CHECK: body: |
 # CHECK: bb.0.entry:
 # CHECK: successors: %bb.1(0x40000000)
-# CHECK: liveins: %edi
-# CHECK: CMP32ri8 killed %edi, 2, implicit-def %eflags
+# CHECK: liveins: $edi
+# CHECK: CMP32ri8 killed $edi, 2, implicit-def $eflags
 # CHECK: TCRETURNdi64cc @mergeable_conditional_tailcall

 # This was the unconditional branch to a dead MBB that we left behind before
@@ -78,7 +78,7 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%edi', virtual-reg: '' }
+  - { reg: '$edi', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -102,36 +102,36 @@
 body: |
   bb.0.entry:
     successors: %bb.2(0x40000000), %bb.1(0x40000000)
-    liveins: %edi
+    liveins: $edi

-    CMP32ri8 killed %edi, 2, implicit-def %eflags
-    JB_1 %bb.2, implicit %eflags
+    CMP32ri8 killed $edi, 2, implicit-def $eflags
+    JB_1 %bb.2, implicit $eflags
     JMP_1 %bb.1

   bb.1.entry:
     successors: %bb.4(0x40000000), %bb.5(0x40000000)
-    liveins: %eflags
+    liveins: $eflags

-    JE_1 %bb.4, implicit killed %eflags
+    JE_1 %bb.4, implicit killed $eflags
     JMP_1 %bb.5

   bb.2.sw.bb:
     successors: %bb.3(0x00000800), %bb.6(0x7ffff800)

-    %al = ACQUIRE_MOV8rm %rip, 1, %noreg, @static_local_guard, %noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
-    TEST8rr killed %al, %al, implicit-def %eflags
-    JNE_1 %bb.6, implicit killed %eflags
+    $al = ACQUIRE_MOV8rm $rip, 1, $noreg, @static_local_guard, $noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
+    TEST8rr killed $al, $al, implicit-def $eflags
+    JNE_1 %bb.6, implicit killed $eflags
     JMP_1 %bb.3

   bb.3.init.check.i:
-    dead %edi = MOV32ri64 @static_local_guard, implicit-def %rdi
-    TCRETURNdi64 @initialize_static_local, 0, csr_64, implicit %rsp, implicit %rdi
+    dead $edi = MOV32ri64 @static_local_guard, implicit-def $rdi
+    TCRETURNdi64 @initialize_static_local, 0, csr_64, implicit $rsp, implicit $rdi

   bb.4.sw.bb2:
-    TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit %rsp
+    TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit $rsp

   bb.5.sw.epilog:
-    TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit %rsp
+    TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit $rsp

   bb.6.return:
     RET 0
Index: test/CodeGen/X86/critical-edge-split-2.ll
===================================================================
--- test/CodeGen/X86/critical-edge-split-2.ll
+++ test/CodeGen/X86/critical-edge-split-2.ll
@@ -25,7 +25,7 @@
 ; CHECK-NEXT: divl %esi
 ; CHECK-NEXT: movl %edx, %eax
 ; CHECK-NEXT: .LBB0_2: # %cond.end.i
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 entry:
 br i1 %C, label %cond.end.i, label %cond.false.i
Index: test/CodeGen/X86/ctpop-combine.ll
===================================================================
--- test/CodeGen/X86/ctpop-combine.ll
+++ test/CodeGen/X86/ctpop-combine.ll
@@ -55,7 +55,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andl $127, %edi
 ; CHECK-NEXT: popcntl %edi, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 %x2 = and i8 %x, 127
 %count = tail call i8 @llvm.ctpop.i8(i8 %x2)
Index: test/CodeGen/X86/dagcombine-cse.ll
===================================================================
--- test/CodeGen/X86/dagcombine-cse.ll
+++ test/CodeGen/X86/dagcombine-cse.ll
@@ -19,8 +19,8 @@
 ;
 ; X64-LABEL: t:
 ; X64: ## %bb.0: ## %entry
-; X64-NEXT: ## kill: def %edx killed %edx def %rdx
-; X64-NEXT: ## kill: def %esi killed %esi def %rsi
+; X64-NEXT: ## kill: def $edx killed $edx def $rdx
+; X64-NEXT: ## kill: def $esi killed $esi def $rsi
 ; X64-NEXT: imull %ecx, %esi
 ; X64-NEXT: leal (%rsi,%rdx), %eax
 ; X64-NEXT: cltq
Index: test/CodeGen/X86/divide-by-constant.ll
===================================================================
--- test/CodeGen/X86/divide-by-constant.ll
+++ test/CodeGen/X86/divide-by-constant.ll
@@ -8,14 +8,14 @@
 ; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: imull $63551, %eax, %eax # imm = 0xF83F
 ; X32-NEXT: shrl $21, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test1:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: imull $63551, %edi, %eax # imm = 0xF83F
 ; X64-NEXT: shrl $21, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 entry:
 %div = udiv i16 %x, 33
@@ -28,14 +28,14 @@
 ; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: imull $43691, %eax, %eax # imm = 0xAAAB
 ; X32-NEXT: shrl $17, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test2:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: imull $43691, %esi, %eax # imm = 0xAAAB
 ; X64-NEXT: shrl $17, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 entry:
 %div = udiv i16 %c, 3
@@ -49,14 +49,14 @@
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: imull $171, %eax, %eax
 ; X32-NEXT: shrl $9, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test3:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: imull $171, %esi, %eax
 ; X64-NEXT: shrl $9, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 entry:
 %div = udiv i8 %c, 3
@@ -72,7 +72,7 @@
 ; X32-NEXT: shrl $31, %ecx
 ; X32-NEXT: shrl $16, %eax
 ; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test4:
@@ -82,7 +82,7 @@
 ; X64-NEXT: shrl $31, %ecx
 ; X64-NEXT: shrl $16, %eax
 ; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 entry:
 %div = sdiv i16 %x, 33 ; [#uses=1]
@@ -103,7 +103,7 @@
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: imulq $365384439, %rax, %rax # imm = 0x15C752F7
 ; X64-NEXT: shrq $59, %rax
-; X64-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-NEXT: retq
 %tmp1 = udiv i32 %A, 1577682821 ; [#uses=1]
 ret i32 %tmp1
@@ -118,7 +118,7 @@
 ; X32-NEXT: shrl $31, %ecx
 ; X32-NEXT: sarl $18, %eax
 ; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test6:
@@ -128,7 +128,7 @@
 ; X64-NEXT: shrl $31, %ecx
 ; X64-NEXT: sarl $18, %eax
 ; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 entry:
 %div = sdiv i16 %x, 10
@@ -147,11 +147,11 @@
 ;
 ; X64-LABEL: test7:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: shrl $2, %edi
 ; X64-NEXT: imulq $613566757, %rdi, %rax # imm = 0x24924925
 ; X64-NEXT: shrq $32, %rax
-; X64-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-NEXT: retq
 %div = udiv i32 %x, 28
 ret i32 %div
@@ -166,7 +166,7 @@
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: imull $211, %eax, %eax
 ; X32-NEXT: shrl $13, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test8:
@@ -175,7 +175,7 @@
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: imull $211, %eax, %eax
 ; X64-NEXT: shrl $13, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %div = udiv i8 %x, 78
 ret i8 %div
@@ -189,7 +189,7 @@
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: imull $71, %eax, %eax
 ; X32-NEXT: shrl $11, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test9:
@@ -198,7 +198,7 @@
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: imull $71, %eax, %eax
 ; X64-NEXT: shrl $11, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %div = udiv i8 %x, 116
 ret i8 %div
Index: test/CodeGen/X86/divrem.ll
===================================================================
--- test/CodeGen/X86/divrem.ll
+++ test/CodeGen/X86/divrem.ll
@@ -262,7 +262,7 @@
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
 ; X32-NEXT: movzbl %ah, %ebx
 ; X32-NEXT: movb %al, (%edx)
@@ -273,7 +273,7 @@
 ; X64-LABEL: ui8:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
 ; X64-NEXT: divb %sil
 ; X64-NEXT: movzbl %ah, %esi
 ; X64-NEXT: movb %al, (%rdx)
Index: test/CodeGen/X86/divrem8_ext.ll
===================================================================
--- test/CodeGen/X86/divrem8_ext.ll
+++ test/CodeGen/X86/divrem8_ext.ll
@@ -6,7 +6,7 @@
 ; X32-LABEL: test_udivrem_zext_ah:
 ; X32: # %bb.0:
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
 ; X32-NEXT: movzbl %ah, %ecx
 ; X32-NEXT: movb %al, z
@@ -16,7 +16,7 @@
 ; X64-LABEL: test_udivrem_zext_ah:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
 ; X64-NEXT: divb %sil
 ; X64-NEXT: movzbl %ah, %ecx
 ; X64-NEXT: movb %al, {{.*}}(%rip)
@@ -32,19 +32,19 @@
 ; X32-LABEL: test_urem_zext_ah:
 ; X32: # %bb.0:
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
 ; X32-NEXT: movzbl %ah, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_urem_zext_ah:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
 ; X64-NEXT: divb %sil
 ; X64-NEXT: movzbl %ah, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %1 = urem i8 %x, %y
 ret i8 %1
@@ -55,21 +55,21 @@
 ; X32: # %bb.0:
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
 ; X32-NEXT: divb %cl
 ; X32-NEXT: movzbl %ah, %eax
 ; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_urem_noext_ah:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
 ; X64-NEXT: divb %sil
 ; X64-NEXT: movzbl %ah, %eax
 ; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %1 = urem i8 %x, %y
 %2 = add i8 %1, %y
@@ -80,7 +80,7 @@
 ; X32-LABEL: test_urem_zext64_ah:
 ; X32: # %bb.0:
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
 ; X32-NEXT: movzbl %ah, %eax
 ; X32-NEXT: xorl %edx, %edx
@@ -89,7 +89,7 @@
 ; X64-LABEL: test_urem_zext64_ah:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
 ; X64-NEXT: divb %sil
 ; X64-NEXT: movzbl %ah, %eax
 ; X64-NEXT: retq
@@ -131,7 +131,7 @@
 ; X32-NEXT: cbtw
 ; X32-NEXT: idivb {{[0-9]+}}(%esp)
 ; X32-NEXT: movsbl %ah, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_srem_sext_ah:
@@ -140,7 +140,7 @@
 ; X64-NEXT: cbtw
 ; X64-NEXT: idivb %sil
 ; X64-NEXT: movsbl %ah, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %1 = srem i8 %x, %y
 ret i8 %1
@@ -155,7 +155,7 @@
 ; X32-NEXT: idivb %cl
 ; X32-NEXT: movsbl %ah, %eax
 ; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_srem_noext_ah:
@@ -165,7 +165,7 @@
 ; X64-NEXT: idivb %sil
 ; X64-NEXT: movsbl %ah, %eax
 ; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 %1 = srem i8 %x, %y
 %2 = add i8 %1, %y
@@ -200,7 +200,7 @@
 ; X32-LABEL: pr25754:
 ; X32: # %bb.0:
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
 ; X32-NEXT: movzbl %ah, %ecx
 ; X32-NEXT: movzbl %al, %eax
@@ -211,7 +211,7 @@
 ; X64-LABEL: pr25754:
 ; X64: # %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
 ; X64-NEXT: divb %sil
 ; X64-NEXT: movzbl %ah, %ecx
 ; X64-NEXT: movzbl %al, %eax
Index: test/CodeGen/X86/domain-reassignment.mir
===================================================================
--- test/CodeGen/X86/domain-reassignment.mir
+++ test/CodeGen/X86/domain-reassignment.mir
@@ -80,14 +80,14 @@
   - { id: 21, class: fr128, preferred-register: '' }
   - { id: 22, class: fr32x, preferred-register: '' }
 liveins:
-  - { reg: '%edi', virtual-reg: '%3' }
-  - { reg: '%rsi', virtual-reg: '%4' }
-  - { reg: '%xmm0', virtual-reg: '%5' }
-  - { reg: '%xmm1', virtual-reg: '%6' }
-  - { reg: '%xmm2', virtual-reg: '%7' }
-  - { reg: '%xmm3', virtual-reg: '%8' }
-  - { reg: '%xmm4', virtual-reg: '%9' }
-  - { reg: '%xmm5', virtual-reg: '%10' }
+  - { reg: '$edi', virtual-reg: '%3' }
+  - { reg: '$rsi', virtual-reg: '%4' }
+  - { reg: '$xmm0', virtual-reg: '%5' }
+  - { reg: '$xmm1', virtual-reg: '%6' }
+  - { reg: '$xmm2', virtual-reg: '%7' }
+  - { reg: '$xmm3', virtual-reg: '%8' }
+  - { reg: '$xmm4', virtual-reg: '%9' }
+  - { reg: '$xmm5', virtual-reg: '%10' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -111,19 +111,19 @@
 body: |
   bb.0.entry:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %edi, %rsi, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
+    liveins: $edi, $rsi, $xmm0, $xmm1, $xmm2, $xmm3, $xmm4, $xmm5

-    %10 = COPY %xmm5
-    %9 = COPY %xmm4
-    %8 = COPY %xmm3
-    %7 = COPY %xmm2
-    %6 = COPY %xmm1
-    %5 = COPY %xmm0
-    %4 = COPY %rsi
-    %3 = COPY %edi
+    %10 = COPY $xmm5
+    %9 = COPY $xmm4
+    %8 = COPY $xmm3
+    %7 = COPY $xmm2
+    %6 = COPY $xmm1
+    %5 = COPY $xmm0
+    %4 = COPY $rsi
+    %3 = COPY $edi
     %11 = COPY %3.sub_8bit
-    TEST8ri killed %11, 1, implicit-def %eflags
-    JE_1 %bb.2, implicit %eflags
+    TEST8ri killed %11, 1, implicit-def $eflags
+    JE_1 %bb.2, implicit $eflags
     JMP_1 %bb.1

   bb.1.if:
@@ -165,7 +165,7 @@
     %21 = IMPLICIT_DEF
     %20 = VMOVSSZrrk %19, killed %18, killed %21, %5
     %22 = COPY %20
-    VMOVSSZmr %4, 1, %noreg, 0, %noreg, killed %22 :: (store 4 into %ir.fptr)
+    VMOVSSZmr %4, 1, $noreg, 0, $noreg, killed %22 :: (store 4 into %ir.fptr)
     RET 0
...
@@ -199,11 +199,11 @@
   - { id: 17, class: gr8, preferred-register: '' }
   - { id: 18, class: gr8, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
-  - { reg: '%zmm2', virtual-reg: '%3' }
-  - { reg: '%zmm3', virtual-reg: '%4' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
+  - { reg: '$zmm2', virtual-reg: '%3' }
+  - { reg: '$zmm3', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -226,13 +226,13 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi, %zmm0, %zmm1, %zmm2, %zmm3
+    liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
-    %3 = COPY %zmm2
-    %4 = COPY %zmm3
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1
+    %3 = COPY $zmm2
+    %4 = COPY $zmm3
     %5 = VCMPPDZrri %3, %4, 0

     ; CHECK: %6:vk32 = COPY %5
@@ -247,13 +247,13 @@
     ; CHECK: %16:vk8 = KANDBrr %15, %13
     ; CHECK: %17:vk8 = KXORBrr %16, %12
    ; CHECK: %18:vk8 = KADDBrr %17, %14
-    %12 = SHR8ri %7, 2, implicit-def dead %eflags
-    %13 = SHL8ri %12, 1, implicit-def dead %eflags
+    %12 = SHR8ri %7, 2, implicit-def dead $eflags
+    %13 = SHL8ri %12, 1, implicit-def dead $eflags
     %14 = NOT8r %13
-    %15 = OR8rr %14, %12, implicit-def dead %eflags
-    %16 = AND8rr %15, %13, implicit-def dead %eflags
-    %17 = XOR8rr %16, %12, implicit-def dead %eflags
-    %18 = ADD8rr %17, %14, implicit-def dead %eflags
+    %15 = OR8rr %14, %12, implicit-def dead $eflags
+    %16 = AND8rr %15, %13, implicit-def dead $eflags
+    %17 = XOR8rr %16, %12, implicit-def dead $eflags
+    %18 = ADD8rr %17, %14, implicit-def dead $eflags

     ; CHECK: %9:vk32 = COPY %18
     ; CHECK: %10:vk8wm = COPY %9
@@ -261,11 +261,11 @@
     %9 = INSERT_SUBREG %8, %18, 1
     %10 = COPY %9
     %11 = VMOVAPDZrrk %2, killed %10, %1
-    VMOVAPDZmr %0, 1, %noreg, 0, %noreg, killed %11
+    VMOVAPDZmr %0, 1, $noreg, 0, $noreg, killed %11

-    ; CHECK: KTESTBrr %18, %18, implicit-def %eflags
-    TEST8rr %18, %18, implicit-def %eflags
-    JE_1 %bb.1, implicit %eflags
+    ; CHECK: KTESTBrr %18, %18, implicit-def $eflags
+    TEST8rr %18, %18, implicit-def $eflags
+    JE_1 %bb.1, implicit $eflags
     JMP_1 %bb.2

   bb.1:
@@ -303,11 +303,11 @@
   - { id: 16, class: gr16, preferred-register: '' }
   - { id: 17, class: gr16, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
-  - { reg: '%zmm2', virtual-reg: '%3' }
-  - { reg: '%zmm3', virtual-reg: '%4' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
+  - { reg: '$zmm2', virtual-reg: '%3' }
+  - { reg: '$zmm3', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -330,13 +330,13 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi, %zmm0, %zmm1, %zmm2, %zmm3
+    liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
-    %3 = COPY %zmm2
-    %4 = COPY %zmm3
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1
+    %3 = COPY $zmm2
+    %4 = COPY $zmm3
     %5 = VCMPPSZrri %3, %4, 0

     ; CHECK: %6:vk32 = COPY %5
@@ -350,12 +350,12 @@
     ; CHECK: %15:vk16 = KORWrr %14, %12
     ; CHECK: %16:vk16 = KANDWrr %15, %13
     ; CHECK: %17:vk16 = KXORWrr %16, %12
-    %12 = SHR16ri %7, 2, implicit-def dead %eflags
-    %13 = SHL16ri %12, 1, implicit-def dead %eflags
+    %12 = SHR16ri %7, 2, implicit-def dead $eflags
+    %13 = SHL16ri %12, 1, implicit-def dead $eflags
     %14 = NOT16r %13
-    %15 = OR16rr %14, %12, implicit-def dead %eflags
-    %16 = AND16rr %15, %13, implicit-def dead %eflags
-    %17 = XOR16rr %16, %12, implicit-def dead %eflags
+    %15 = OR16rr %14, %12, implicit-def dead $eflags
+    %16 = AND16rr %15, %13, implicit-def dead $eflags
+    %17 = XOR16rr %16, %12, implicit-def dead $eflags

     ; CHECK: %9:vk32 = COPY %17
     ; CHECK: %10:vk16wm = COPY %9
@@ -363,11 +363,11 @@
     %9 = INSERT_SUBREG %8, %17, 3
     %10 = COPY %9
     %11 = VMOVAPSZrrk %2, killed %10, %1
-    VMOVAPSZmr %0, 1, %noreg, 0, %noreg, killed %11
+    VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %11

-    ; CHECK: KTESTWrr %17, %17, implicit-def %eflags
-    TEST16rr %17, %17, implicit-def %eflags
-    JE_1 %bb.1, implicit %eflags
+    ; CHECK: KTESTWrr %17, %17, implicit-def $eflags
+    TEST16rr %17, %17, implicit-def $eflags
+    JE_1 %bb.1, implicit $eflags
     JMP_1 %bb.2

   bb.1:
@@ -401,9 +401,9 @@
   - { id: 12, class: gr32, preferred-register: '' }
   - { id: 13, class: gr32, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -426,13 +426,13 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi, %zmm0, %zmm1
+    liveins: $rdi, $zmm0, $zmm1

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1

-    ; CHECK: %5:vk32 = KMOVDkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %5:vk32 = KMOVDkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %6:vk32 = KSHIFTRDri %5, 2
     ; CHECK: %7:vk32 = KSHIFTLDri %6, 1
     ; CHECK: %8:vk32 = KNOTDrr %7
@@ -441,24 +441,24 @@
     ; CHECK: %11:vk32 = KXORDrr %10, %6
     ; CHECK: %12:vk32 = KANDNDrr %11, %9
     ; CHECK: %13:vk32 = KADDDrr %12, %11
-    %5 = MOV32rm %0, 1, %noreg, 0, %noreg
-    %6 = SHR32ri %5, 2, implicit-def dead %eflags
-    %7 = SHL32ri %6, 1, implicit-def dead %eflags
+    %5 = MOV32rm %0, 1, $noreg, 0, $noreg
+    %6 = SHR32ri %5, 2, implicit-def dead $eflags
+    %7 = SHL32ri %6, 1, implicit-def dead $eflags
     %8 = NOT32r %7
-    %9 = OR32rr %8, %6, implicit-def dead %eflags
-    %10 = AND32rr %9, %7, implicit-def dead %eflags
-    %11 = XOR32rr %10, %6, implicit-def dead %eflags
-    %12 = ANDN32rr %11, %9, implicit-def dead %eflags
-    %13 = ADD32rr %12, %11, implicit-def dead %eflags
+    %9 = OR32rr %8, %6, implicit-def dead $eflags
+    %10 = AND32rr %9, %7, implicit-def dead $eflags
+    %11 = XOR32rr %10, %6, implicit-def dead $eflags
+    %12 = ANDN32rr %11, %9, implicit-def dead $eflags
+    %13 = ADD32rr %12, %11, implicit-def dead $eflags

     ; CHECK: %3:vk32wm = COPY %13
     %3 = COPY %13
     %4 = VMOVDQU16Zrrk %2, killed %3, %1
-    VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+    VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4

-    ; CHECK: KTESTDrr %13, %13, implicit-def %eflags
-    TEST32rr %13, %13, implicit-def %eflags
-    JE_1 %bb.1, implicit %eflags
+    ; CHECK: KTESTDrr %13, %13, implicit-def $eflags
+    TEST32rr %13, %13, implicit-def $eflags
+    JE_1 %bb.1, implicit $eflags
     JMP_1 %bb.2

   bb.1:
@@ -492,9 +492,9 @@
   - { id: 12, class: gr64, preferred-register: '' }
   - { id: 13, class: gr64, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -517,13 +517,13 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi,
-    liveins: %rdi, %zmm0, %zmm1
+    liveins: $rdi, $zmm0, $zmm1

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1

-    ; CHECK: %5:vk64 = KMOVQkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %5:vk64 = KMOVQkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %6:vk64 = KSHIFTRQri %5, 2
     ; CHECK: %7:vk64 = KSHIFTLQri %6, 1
     ; CHECK: %8:vk64 = KNOTQrr %7
@@ -532,24 +532,24 @@
     ; CHECK: %11:vk64 = KXORQrr %10, %6
     ; CHECK: %12:vk64 = KANDNQrr %11, %9
     ; CHECK: %13:vk64 = KADDQrr %12, %11
-    %5 = MOV64rm %0, 1, %noreg, 0, %noreg
-    %6 = SHR64ri %5, 2, implicit-def dead %eflags
-    %7 = SHL64ri %6, 1, implicit-def dead %eflags
+    %5 = MOV64rm %0, 1, $noreg, 0, $noreg
+    %6 = SHR64ri %5, 2, implicit-def dead $eflags
+    %7 = SHL64ri %6, 1, implicit-def dead $eflags
     %8 = NOT64r %7
-    %9 = OR64rr %8, %6, implicit-def dead %eflags
-    %10 = AND64rr %9, %7, implicit-def dead %eflags
-    %11 = XOR64rr %10, %6, implicit-def dead %eflags
-    %12 = ANDN64rr %11, %9, implicit-def dead %eflags
-    %13 = ADD64rr %12, %11, implicit-def dead %eflags
+    %9 = OR64rr %8, %6, implicit-def dead $eflags
+    %10 = AND64rr %9, %7, implicit-def dead $eflags
+    %11 = XOR64rr %10, %6, implicit-def dead $eflags
+    %12 = ANDN64rr %11, %9, implicit-def dead $eflags
+    %13 = ADD64rr %12, %11, implicit-def dead $eflags

     ; CHECK: %3:vk64wm = COPY %13
     %3 = COPY %13
     %4 = VMOVDQU8Zrrk %2, killed %3, %1
-    VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+    VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4

-    ; CHECK: KTESTQrr %13, %13, implicit-def %eflags
-    TEST64rr %13, %13, implicit-def %eflags
-    JE_1 %bb.1, implicit %eflags
+    ; CHECK: KTESTQrr %13, %13, implicit-def $eflags
+    TEST64rr %13, %13, implicit-def $eflags
+    JE_1 %bb.1, implicit $eflags
     JMP_1 %bb.2

   bb.1:
@@ -576,9 +576,9 @@
   - { id: 5, class: gr16, preferred-register: '' }
   - { id: 6, class: gr16, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -601,22 +601,22 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi, %zmm0, %zmm1
+    liveins: $rdi, $zmm0, $zmm1

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1

-    ; CHECK: %7:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %7:vk8 = KMOVBkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %5:vk16 = COPY %7
     ; CHECK: %6:vk16 = KNOTWrr %5
-    %5 = MOVZX16rm8 %0, 1, %noreg, 0, %noreg
+    %5 = MOVZX16rm8 %0, 1, $noreg, 0, $noreg
     %6 = NOT16r %5

     ; CHECK: %3:vk16wm = COPY %6
     %3 = COPY %6
     %4 = VMOVAPSZrrk %2, killed %3, %1
-    VMOVAPSZmr %0, 1, %noreg, 0, %noreg, killed %4
+    VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %4
     RET 0
...
@@ -639,9 +639,9 @@
   - { id: 6, class: gr32, preferred-register: '' }
   - { id: 7, class: gr32, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -664,25 +664,25 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi, %zmm0, %zmm1
+    liveins: $rdi, $zmm0, $zmm1

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1

-    ; CHECK: %8:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %8:vk8 = KMOVBkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %5:vk32 = COPY %8
-    ; CHECK: %9:vk16 = KMOVWkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %9:vk16 = KMOVWkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %6:vk32 = COPY %9
     ; CHECK: %7:vk32 = KADDDrr %5, %6
-    %5 = MOVZX32rm8 %0, 1, %noreg, 0, %noreg
-    %6 = MOVZX32rm16 %0, 1, %noreg, 0, %noreg
-    %7 = ADD32rr %5, %6, implicit-def dead %eflags
+    %5 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg
+    %6 = MOVZX32rm16 %0, 1, $noreg, 0, $noreg
+    %7 = ADD32rr %5, %6, implicit-def dead $eflags

     ; CHECK: %3:vk64wm = COPY %7
     %3 = COPY %7
     %4 = VMOVDQU16Zrrk %2, killed %3, %1
-    VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+    VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
     RET 0
...
@@ -705,9 +705,9 @@
   - { id: 6, class: gr64, preferred-register: '' }
   - { id: 7, class: gr64, preferred-register: '' }
 liveins:
-  - { reg: '%rdi', virtual-reg: '%0' }
-  - { reg: '%zmm0', virtual-reg: '%1' }
-  - { reg: '%zmm1', virtual-reg: '%2' }
+  - { reg: '$rdi', virtual-reg: '%0' }
+  - { reg: '$zmm0', virtual-reg: '%1' }
+  - { reg: '$zmm1', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -730,25 +730,25 @@
 constants:
 body: |
   bb.0:
-    liveins: %rdi, %zmm0, %zmm1
+    liveins: $rdi, $zmm0, $zmm1

-    %0 = COPY %rdi
-    %1 = COPY %zmm0
-    %2 = COPY %zmm1
+    %0 = COPY $rdi
+    %1 = COPY $zmm0
+    %2 = COPY $zmm1

-    ; CHECK: %8:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %8:vk8 = KMOVBkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %5:vk64 = COPY %8
-    ; CHECK: %9:vk16 = KMOVWkm %0, 1, %noreg, 0, %noreg
+    ; CHECK: %9:vk16 = KMOVWkm %0, 1, $noreg, 0, $noreg
     ; CHECK: %6:vk64 = COPY %9
     ; CHECK: %7:vk64 = KADDQrr %5, %6
-    %5 = MOVZX64rm8 %0, 1, %noreg, 0, %noreg
-    %6 = MOVZX64rm16 %0, 1, %noreg, 0, %noreg
-    %7 = ADD64rr %5, %6, implicit-def dead %eflags
+    %5 = MOVZX64rm8 %0, 1, $noreg, 0, $noreg
+    %6 = MOVZX64rm16 %0, 1, $noreg, 0, $noreg
+    %7 = ADD64rr %5, %6, implicit-def dead $eflags

     ; CHECK: %3:vk64wm = COPY %7
     %3 = COPY %7
     %4 = VMOVDQU8Zrrk %2, killed %3, %1
-    VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+    VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
     RET 0
...
Index: test/CodeGen/X86/dynamic-regmask.ll
===================================================================
--- test/CodeGen/X86/dynamic-regmask.ll
+++ test/CodeGen/X86/dynamic-regmask.ll
@@ -11,8 +11,8 @@
   ret i32 %b2
 }
 ; CHECK: name: caller
-; CHECK: CALL64pcrel32 @callee, CustomRegMask(%bh,%bl,%bp,%bpl,%bx,%ebp,%ebx,%esp,%rbp,%rbx,%rsp,%sp,%spl,%r10,%r11,%r12,%r13,%r14,%r15,%xmm8,%xmm9,%xmm10,%xmm11,%xmm12,%xmm13,%xmm14,%xmm15,%r10b,%r11b,%r12b,%r13b,%r14b,%r15b,%r10d,%r11d,%r12d,%r13d,%r14d,%r15d,%r10w,%r11w,%r12w,%r13w,%r14w,%r15w)
-; CHECK: RET 0, %eax
+; CHECK: CALL64pcrel32 @callee, CustomRegMask($bh,$bl,$bp,$bpl,$bx,$ebp,$ebx,$esp,$rbp,$rbx,$rsp,$sp,$spl,$r10,$r11,$r12,$r13,$r14,$r15,$xmm8,$xmm9,$xmm10,$xmm11,$xmm12,$xmm13,$xmm14,$xmm15,$r10b,$r11b,$r12b,$r13b,$r14b,$r15b,$r10d,$r11d,$r12d,$r13d,$r14d,$r15d,$r10w,$r11w,$r12w,$r13w,$r14w,$r15w)
+; CHECK: RET 0, $eax
 
 define x86_regcallcc {i32, i32, i32} @test_callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind {
   %b1 = mul i32 7, %e0
@@ -24,7 +24,7 @@
   ret {i32, i32, i32} %b6
 }
 ; CHECK: name: test_callee
-; CHECK: calleeSavedRegisters: [ '%rbx', '%rbp', '%rsp', '%r10', '%r11', '%r12',
-; CHECK: '%r13', '%r14', '%r15', '%xmm8', '%xmm9', '%xmm10',
-; CHECK: '%xmm11', '%xmm12', '%xmm13', '%xmm14', '%xmm15' ]
-; CHECK: RET 0, %eax, %ecx, %edx
+; CHECK: calleeSavedRegisters: [ '$rbx', '$rbp', '$rsp', '$r10', '$r11', '$r12',
+; CHECK: '$r13', '$r14', '$r15', '$xmm8', '$xmm9', '$xmm10',
+; CHECK: '$xmm11', '$xmm12', '$xmm13', '$xmm14', '$xmm15' ]
+; CHECK: RET 0, $eax, $ecx, $edx
Index: test/CodeGen/X86/eflags-copy-expansion.mir
===================================================================
--- test/CodeGen/X86/eflags-copy-expansion.mir
+++ test/CodeGen/X86/eflags-copy-expansion.mir
@@ -21,11 +21,11 @@
 name: foo
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0.entry:
-    liveins: %edi
-    NOOP implicit-def %al
+    liveins: $edi
+    NOOP implicit-def $al
 
     ; The bug was triggered only when LivePhysReg is used, which
     ; happens only when the heuristic for the liveness computation
@@ -46,19 +46,19 @@
     NOOP
     NOOP
     ; Save AL.
-    ; CHECK: PUSH32r killed %eax
+    ; CHECK: PUSH32r killed $eax
 
     ; Copy edi into EFLAGS
-    ; CHECK-NEXT: %eax = MOV32rr %edi
-    ; CHECK-NEXT: %al = ADD8ri %al, 127, implicit-def %eflags
-    ; CHECK-NEXT: SAHF implicit-def %eflags, implicit %ah
-    %eflags = COPY %edi
+    ; CHECK-NEXT: $eax = MOV32rr $edi
+    ; CHECK-NEXT: $al = ADD8ri $al, 127, implicit-def $eflags
+    ; CHECK-NEXT: SAHF implicit-def $eflags, implicit $ah
+    $eflags = COPY $edi
 
     ; Restore AL.
-    ; CHECK-NEXT: %eax = POP32r
+    ; CHECK-NEXT: $eax = POP32r
 
   bb.1.false:
-    liveins: %al
-    NOOP implicit %al
+    liveins: $al
+    NOOP implicit $al
     RETQ
...
Index: test/CodeGen/X86/evex-to-vex-compress.mir =================================================================== --- test/CodeGen/X86/evex-to-vex-compress.mir +++ test/CodeGen/X86/evex-to-vex-compress.mir @@ -17,880 +17,880 @@ name: evex_z256_to_vex_test body: | bb.0: - ; CHECK: VMOVAPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVAPDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVAPDYrr %ymm0 - %ymm0 = VMOVAPDZ256rr %ymm0 - ; CHECK: %ymm0 = VMOVAPDYrr_REV %ymm0 - %ymm0 = VMOVAPDZ256rr_REV %ymm0 - ; CHECK: VMOVAPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVAPSYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVAPSYrr %ymm0 - %ymm0 = VMOVAPSZ256rr %ymm0 - ; CHECK: %ymm0 = VMOVAPSYrr_REV %ymm0 - %ymm0 = VMOVAPSZ256rr_REV %ymm0 - ; CHECK: %ymm0 = VMOVDDUPYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDDUPYrr %ymm0 - %ymm0 = VMOVDDUPZ256rr %ymm0 - ; CHECK: VMOVDQAYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDQAYrr %ymm0 - %ymm0 = VMOVDQA32Z256rr %ymm0 - ; CHECK: %ymm0 = VMOVDQAYrr_REV %ymm0 - %ymm0 = VMOVDQA32Z256rr_REV %ymm0 - ; CHECK: VMOVDQAYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDQAYrr %ymm0 - %ymm0 = VMOVDQA64Z256rr %ymm0 - ; CHECK: %ymm0 = VMOVDQAYrr_REV %ymm0 - %ymm0 = VMOVDQA64Z256rr_REV %ymm0 - ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDQUYrr %ymm0 - %ymm0 = VMOVDQU16Z256rr %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0 - %ymm0 = VMOVDQU16Z256rr_REV %ymm0 - ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDQUYrr %ymm0 - %ymm0 = VMOVDQU32Z256rr %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0 - %ymm0 = VMOVDQU32Z256rr_REV %ymm0 - ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDQUYrr %ymm0 - %ymm0 = VMOVDQU64Z256rr %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0 - %ymm0 = VMOVDQU64Z256rr_REV %ymm0 - ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVDQUYrr %ymm0 - %ymm0 = VMOVDQU8Z256rr %ymm0 - ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0 - %ymm0 = VMOVDQU8Z256rr_REV %ymm0 - ; CHECK: %ymm0 = VMOVNTDQAYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: VMOVNTDQYmr %rdi, 1, %noreg, 0, %noreg, 
%ymm0 - VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: VMOVNTPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: VMOVNTPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVSHDUPYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVSHDUPYrr %ymm0 - %ymm0 = VMOVSHDUPZ256rr %ymm0 - ; CHECK: %ymm0 = VMOVSLDUPYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVSLDUPYrr %ymm0 - %ymm0 = VMOVSLDUPZ256rr %ymm0 - ; CHECK: VMOVUPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VMOVUPDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMOVUPDYrr %ymm0 - %ymm0 = VMOVUPDZ256rr %ymm0 - ; CHECK: %ymm0 = VMOVUPDYrr_REV %ymm0 - %ymm0 = VMOVUPDZ256rr_REV %ymm0 - ; CHECK: VMOVUPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0 - VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0 - ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPANDDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1 - %ymm0 = VPANDDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPANDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1 - %ymm0 = VPANDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPANDNDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1 - %ymm0 = VPANDNDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPANDNQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1 - %ymm0 = VPANDNQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPAVGBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPAVGBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPAVGBYrr %ymm0, %ymm1 - %ymm0 = VPAVGBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPAVGWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPAVGWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPAVGWYrr %ymm0, %ymm1 - %ymm0 = VPAVGWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDBYrr %ymm0, %ymm1 - %ymm0 = VPADDBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDDYrr %ymm0, %ymm1 - %ymm0 = VPADDDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDQYrr %ymm0, %ymm1 - %ymm0 = VPADDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDSBYrr %ymm0, %ymm1 - %ymm0 = VPADDSBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDSWYrr %ymm0, %ymm1 - %ymm0 = VPADDSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDUSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDUSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDUSBYrr %ymm0, %ymm1 - %ymm0 = 
VPADDUSBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDUSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDUSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDUSWYrr %ymm0, %ymm1 - %ymm0 = VPADDUSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPADDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPADDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPADDWYrr %ymm0, %ymm1 - %ymm0 = VPADDWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMULPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMULPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMULPDYrr %ymm0, %ymm1 - %ymm0 = VMULPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMULPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMULPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMULPSYrr %ymm0, %ymm1 - %ymm0 = VMULPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VORPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VORPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VORPDYrr %ymm0, %ymm1 - %ymm0 = VORPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VORPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VORPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VORPSYrr %ymm0, %ymm1 - %ymm0 = VORPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMADDUBSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMADDUBSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMADDUBSWYrr %ymm0, %ymm1 - %ymm0 = VPMADDUBSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMADDWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMADDWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMADDWDYrr %ymm0, %ymm1 - %ymm0 = VPMADDWDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMAXSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMAXSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMAXSBYrr %ymm0, %ymm1 - %ymm0 = VPMAXSBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMAXSDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMAXSDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMAXSDYrr %ymm0, %ymm1 - %ymm0 = VPMAXSDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMAXSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMAXSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMAXSWYrr %ymm0, %ymm1 - %ymm0 = VPMAXSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMAXUBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMAXUBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMAXUBYrr %ymm0, %ymm1 - %ymm0 = VPMAXUBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMAXUDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMAXUDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMAXUDYrr %ymm0, %ymm1 - %ymm0 = VPMAXUDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMAXUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMAXUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMAXUWYrr %ymm0, %ymm1 - %ymm0 = VPMAXUWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMINSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMINSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMINSBYrr %ymm0, %ymm1 - %ymm0 = VPMINSBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMINSDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMINSDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMINSDYrr %ymm0, %ymm1 - %ymm0 = VPMINSDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMINSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMINSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMINSWYrr %ymm0, %ymm1 - %ymm0 = VPMINSWZ256rr %ymm0, 
%ymm1 - ; CHECK: %ymm0 = VPMINUBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMINUBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMINUBYrr %ymm0, %ymm1 - %ymm0 = VPMINUBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMINUDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMINUDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMINUDYrr %ymm0, %ymm1 - %ymm0 = VPMINUDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMINUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMINUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMINUWYrr %ymm0, %ymm1 - %ymm0 = VPMINUWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULDQYrr %ymm0, %ymm1 - %ymm0 = VPMULDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULHRSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULHRSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULHRSWYrr %ymm0, %ymm1 - %ymm0 = VPMULHRSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULHUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULHUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULHUWYrr %ymm0, %ymm1 - %ymm0 = VPMULHUWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULHWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULHWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULHWYrr %ymm0, %ymm1 - %ymm0 = VPMULHWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULLDYrr %ymm0, %ymm1 - %ymm0 = VPMULLDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULLWYrr %ymm0, %ymm1 - %ymm0 = VPMULLWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPMULUDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMULUDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMULUDQYrr %ymm0, %ymm1 - %ymm0 = VPMULUDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPORDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPORYrr %ymm0, %ymm1 - %ymm0 = VPORDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPORQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPORYrr %ymm0, %ymm1 - %ymm0 = VPORQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBBYrr %ymm0, %ymm1 - %ymm0 = VPSUBBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBDYrr %ymm0, %ymm1 - %ymm0 = VPSUBDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBQYrr %ymm0, %ymm1 - %ymm0 = VPSUBQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBSBYrr %ymm0, %ymm1 - %ymm0 = VPSUBSBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBSWYrr %ymm0, %ymm1 - %ymm0 = VPSUBSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = 
VPSUBUSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBUSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBUSBYrr %ymm0, %ymm1 - %ymm0 = VPSUBUSBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBUSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBUSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBUSWYrr %ymm0, %ymm1 - %ymm0 = VPSUBUSWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSUBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSUBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSUBWYrr %ymm0, %ymm1 - %ymm0 = VPSUBWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPXORDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPXORYrr %ymm0, %ymm1 - %ymm0 = VPXORDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPXORQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPXORYrr %ymm0, %ymm1 - %ymm0 = VPXORQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VADDPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VADDPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VADDPDYrr %ymm0, %ymm1 - %ymm0 = VADDPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VADDPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VADDPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VADDPSYrr %ymm0, %ymm1 - %ymm0 = VADDPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VANDNPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VANDNPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VANDNPDYrr %ymm0, %ymm1 - %ymm0 = VANDNPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VANDNPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VANDNPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VANDNPSYrr %ymm0, %ymm1 - %ymm0 = VANDNPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VANDPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VANDPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VANDPDYrr %ymm0, %ymm1 - %ymm0 = VANDPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VANDPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VANDPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VANDPSYrr %ymm0, %ymm1 - %ymm0 = VANDPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VDIVPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VDIVPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VDIVPDYrr %ymm0, %ymm1 - %ymm0 = VDIVPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VDIVPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VDIVPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VDIVPSYrr %ymm0, %ymm1 - %ymm0 = VDIVPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMAXCPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1 - %ymm0 = VMAXCPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMAXCPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1 - %ymm0 = VMAXCPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMAXPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1 - %ymm0 = VMAXPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMAXPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1 - %ymm0 = VMAXPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - 
%ymm0 = VMINCPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1 - %ymm0 = VMINCPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMINCPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1 - %ymm0 = VMINCPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMINPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1 - %ymm0 = VMINPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VMINPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1 - %ymm0 = VMINPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VXORPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VXORPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VXORPDYrr %ymm0, %ymm1 - %ymm0 = VXORPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VXORPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VXORPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VXORPSYrr %ymm0, %ymm1 - %ymm0 = VXORPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPACKSSDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPACKSSDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPACKSSDWYrr %ymm0, %ymm1 - %ymm0 = VPACKSSDWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPACKSSWBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPACKSSWBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPACKSSWBYrr %ymm0, %ymm1 - %ymm0 = VPACKSSWBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPACKUSDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPACKUSDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPACKUSDWYrr %ymm0, %ymm1 - %ymm0 = VPACKUSDWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPACKUSWBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPACKUSWBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPACKUSWBYrr %ymm0, %ymm1 - %ymm0 = VPACKUSWBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VUNPCKHPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VUNPCKHPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VUNPCKHPDYrr %ymm0, %ymm1 - %ymm0 = VUNPCKHPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VUNPCKHPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VUNPCKHPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VUNPCKHPSYrr %ymm0, %ymm1 - %ymm0 = VUNPCKHPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VUNPCKLPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VUNPCKLPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VUNPCKLPDYrr %ymm0, %ymm1 - %ymm0 = VUNPCKLPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VUNPCKLPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VUNPCKLPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VUNPCKLPSYrr %ymm0, %ymm1 - %ymm0 = VUNPCKLPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VSUBPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VSUBPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VSUBPDYrr %ymm0, %ymm1 - %ymm0 = VSUBPDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VSUBPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VSUBPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VSUBPSYrr %ymm0, %ymm1 - %ymm0 = VSUBPSZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKHBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKHBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKHBWYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKHBWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = 
VPUNPCKHDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKHDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKHDQYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKHDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKHQDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKHQDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKHQDQYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKHQDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKHWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKHWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKHWDYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKHWDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKLBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKLBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKLBWYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKLBWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKLDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKLDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKLDQYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKLDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKLQDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKLQDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKLQDQYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKLQDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPUNPCKLWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPUNPCKLWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPUNPCKLWDYrr %ymm0, %ymm1 - %ymm0 = VPUNPCKLWDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VFMADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADD132PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADD132PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADD132PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADD132PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADD213PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADD213PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADD213PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADD213PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADD231PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADD231PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADD231PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADD231PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADDSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADDSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADDSUB132PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADDSUB132PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADDSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADDSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADDSUB132PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADDSUB132PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADDSUB213PDYm %ymm0, %ymm0, %rsi, 1, 
%noreg, 0, %noreg - %ymm0 = VFMADDSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADDSUB213PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADDSUB213PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADDSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADDSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADDSUB213PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADDSUB213PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADDSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADDSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADDSUB231PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADDSUB231PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMADDSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMADDSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMADDSUB231PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMADDSUB231PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUB132PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUB132PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUB132PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUB132PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUB213PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUB213PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUB213PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUB213PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUB231PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUB231PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUB231PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUB231PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUBADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUBADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUBADD132PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUBADD132PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUBADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUBADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUBADD132PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUBADD132PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUBADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUBADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUBADD213PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUBADD213PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUBADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUBADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUBADD213PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUBADD213PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUBADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUBADD231PDZ256m %ymm0, %ymm0, %rsi, 1, 
%noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUBADD231PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUBADD231PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFMSUBADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFMSUBADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFMSUBADD231PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFMSUBADD231PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMADD132PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMADD132PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMADD132PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMADD132PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMADD213PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMADD213PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMADD213PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMADD213PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMADD231PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMADD231PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMADD231PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMADD231PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMSUB132PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMSUB132PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMSUB132PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMSUB132PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMSUB213PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMSUB213PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMSUB213PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMSUB213PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMSUB231PDYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMSUB231PDZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VFNMSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - %ymm0 = VFNMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VFNMSUB231PSYr %ymm0, %ymm1, %ymm2 - %ymm0 = VFNMSUB231PSZ256r %ymm0, %ymm1, %ymm2 - ; CHECK: %ymm0 = VPSRADYri %ymm0, 7 - %ymm0 = VPSRADZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSRADYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRADZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRADYrr 
%ymm0, %xmm1 - %ymm0 = VPSRADZ256rr %ymm0, %xmm1 - ; CHECK: %ymm0 = VPSRAVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRAVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRAVDYrr %ymm0, %ymm1 - %ymm0 = VPSRAVDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSRAWYri %ymm0, 7 - %ymm0 = VPSRAWZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSRAWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRAWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRAWYrr %ymm0, %xmm1 - %ymm0 = VPSRAWZ256rr %ymm0, %xmm1 - ; CHECK: %ymm0 = VPSRLDQYri %ymm0, %ymm1 - %ymm0 = VPSRLDQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSRLDYri %ymm0, 7 - %ymm0 = VPSRLDZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSRLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRLDYrr %ymm0, %xmm1 - %ymm0 = VPSRLDZ256rr %ymm0, %xmm1 - ; CHECK: %ymm0 = VPSRLQYri %ymm0, 7 - %ymm0 = VPSRLQZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSRLQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRLQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRLQYrr %ymm0, %xmm1 - %ymm0 = VPSRLQZ256rr %ymm0, %xmm1 - ; CHECK: %ymm0 = VPSRLVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRLVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRLVDYrr %ymm0, %ymm1 - %ymm0 = VPSRLVDZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSRLVQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRLVQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRLVQYrr %ymm0, %ymm1 - %ymm0 = VPSRLVQZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSRLWYri %ymm0, 7 - %ymm0 = VPSRLWZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSRLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSRLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSRLWYrr %ymm0, %xmm1 - %ymm0 = VPSRLWZ256rr %ymm0, %xmm1 - ; CHECK: %ymm0 = VPMOVSXBDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVSXBDYrr %xmm0 - %ymm0 = VPMOVSXBDZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVSXBQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVSXBQYrr %xmm0 - %ymm0 = VPMOVSXBQZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVSXBWYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVSXBWYrr %xmm0 - %ymm0 = VPMOVSXBWZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVSXDQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVSXDQYrr %xmm0 - %ymm0 = VPMOVSXDQZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVSXWDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVSXWDYrr %xmm0 - %ymm0 = VPMOVSXWDZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVSXWQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVSXWQYrr %xmm0 - %ymm0 = VPMOVSXWQZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVZXBDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVZXBDYrr %xmm0 - %ymm0 = VPMOVZXBDZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVZXBQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVZXBQYrr %xmm0 - %ymm0 = VPMOVZXBQZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVZXBWYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVZXBWYrr %xmm0 - %ymm0 = VPMOVZXBWZ256rr %xmm0 - ; CHECK: 
%ymm0 = VPMOVZXDQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVZXDQYrr %xmm0 - %ymm0 = VPMOVZXDQZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVZXWDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVZXWDYrr %xmm0 - %ymm0 = VPMOVZXWDZ256rr %xmm0 - ; CHECK: %ymm0 = VPMOVZXWQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPMOVZXWQYrr %xmm0 - %ymm0 = VPMOVZXWQZ256rr %xmm0 - ; CHECK: %ymm0 = VBROADCASTF128 %rip, 1, %noreg, %rax, %noreg - %ymm0 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0 - %ymm0 = VBROADCASTF32X2Z256r %xmm0 - ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0 - %ymm0 = VBROADCASTSDZ256r %xmm0 - ; CHECK: %ymm0 = VBROADCASTSSYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VBROADCASTSSYrr %xmm0 - %ymm0 = VBROADCASTSSZ256r %xmm0 - ; CHECK: %ymm0 = VPBROADCASTBYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPBROADCASTBYrr %xmm0 - %ymm0 = VPBROADCASTBZ256r %xmm0 - ; CHECK: %ymm0 = VPBROADCASTDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPBROADCASTDYrr %xmm0 - %ymm0 = VPBROADCASTDZ256r %xmm0 - ; CHECK: %ymm0 = VPBROADCASTWYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPBROADCASTWYrr %xmm0 - %ymm0 = VPBROADCASTWZ256r %xmm0 - ; CHECK: %ymm0 = VBROADCASTI128 %rip, 1, %noreg, %rax, %noreg - %ymm0 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0 - %ymm0 = VBROADCASTI32X2Z256r %xmm0 - ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0 - %ymm0 = VPBROADCASTQZ256r %xmm0 - ; CHECK: %ymm0 = VPABSBYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPABSBYrr %ymm0 - %ymm0 = VPABSBZ256rr %ymm0 - ; CHECK: %ymm0 = VPABSDYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPABSDYrr %ymm0 - %ymm0 = VPABSDZ256rr %ymm0 - ; CHECK: %ymm0 = VPABSWYrm %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPABSWYrr %ymm0 - %ymm0 = VPABSWZ256rr %ymm0 - ; CHECK: %ymm0 = VPSADBWYrm %ymm0, 1, %noreg, %rax, %noreg, %noreg - %ymm0 = VPSADBWZ256rm %ymm0, 1, %noreg, %rax, %noreg, %noreg - ; CHECK: %ymm0 = VPSADBWYrr %ymm0, %ymm1 - %ymm0 = VPSADBWZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPERMDYrm %ymm0, %rdi, 1, %noreg, 0, %noreg - %ymm0 = VPERMDZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VPERMDYrr %ymm1, %ymm0 - %ymm0 = VPERMDZ256rr %ymm1, %ymm0 - ; CHECK: %ymm0 = VPERMILPDYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPERMILPDYri %ymm0, 7 - %ymm0 = VPERMILPDZ256ri %ymm0, 7 - ; CHECK: 
%ymm0 = VPERMILPDYrm %ymm0, %rdi, 1, %noreg, 0, %noreg - %ymm0 = VPERMILPDZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VPERMILPDYrr %ymm1, %ymm0 - %ymm0 = VPERMILPDZ256rr %ymm1, %ymm0 - ; CHECK: %ymm0 = VPERMILPSYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPERMILPSYri %ymm0, 7 - %ymm0 = VPERMILPSZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPERMILPSYrm %ymm0, %rdi, 1, %noreg, 0, %noreg - %ymm0 = VPERMILPSZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VPERMILPSYrr %ymm1, %ymm0 - %ymm0 = VPERMILPSZ256rr %ymm1, %ymm0 - ; CHECK: %ymm0 = VPERMPDYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPERMPDYri %ymm0, 7 - %ymm0 = VPERMPDZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPERMPSYrm %ymm0, %rdi, 1, %noreg, 0, %noreg - %ymm0 = VPERMPSZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VPERMPSYrr %ymm1, %ymm0 - %ymm0 = VPERMPSZ256rr %ymm1, %ymm0 - ; CHECK: %ymm0 = VPERMQYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPERMQYri %ymm0, 7 - %ymm0 = VPERMQZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSLLDQYri %ymm0, 14 - %ymm0 = VPSLLDQZ256rr %ymm0, 14 - ; CHECK: %ymm0 = VPSLLDYri %ymm0, 7 - %ymm0 = VPSLLDZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSLLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSLLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSLLDYrr %ymm0, 14 - %ymm0 = VPSLLDZ256rr %ymm0, 14 - ; CHECK: %ymm0 = VPSLLQYri %ymm0, 7 - %ymm0 = VPSLLQZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSLLQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSLLQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSLLQYrr %ymm0, 14 - %ymm0 = VPSLLQZ256rr %ymm0, 14 - ; CHECK: %ymm0 = VPSLLVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSLLVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSLLVDYrr %ymm0, 14 - %ymm0 = VPSLLVDZ256rr %ymm0, 14 - ; CHECK: %ymm0 = VPSLLVQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSLLVQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSLLVQYrr %ymm0, 14 - %ymm0 = VPSLLVQZ256rr %ymm0, 14 - ; CHECK: %ymm0 = VPSLLWYri %ymm0, 7 - %ymm0 = VPSLLWZ256ri %ymm0, 7 - ; CHECK: %ymm0 = VPSLLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg - %ymm0 = VPSLLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm0 = VPSLLWYrr %ymm0, 14 - %ymm0 = VPSLLWZ256rr %ymm0, 14 - ; CHECK: %ymm0 = VCVTDQ2PDYrm %rdi, %ymm0, 1, %noreg, 0 - %ymm0 = VCVTDQ2PDZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %ymm0 = VCVTDQ2PDYrr %xmm0 - %ymm0 = VCVTDQ2PDZ256rr %xmm0 - ; CHECK: %ymm0 = VCVTDQ2PSYrm %rdi, %ymm0, 1, %noreg, 0 - %ymm0 = VCVTDQ2PSZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %ymm0 = VCVTDQ2PSYrr %ymm0 - %ymm0 = VCVTDQ2PSZ256rr %ymm0 - ; CHECK: %xmm0 = VCVTPD2DQYrm %rdi, %ymm0, 1, %noreg, 0 - %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPD2DQYrr %ymm0 - %xmm0 = VCVTPD2DQZ256rr %ymm0 - ; CHECK: %xmm0 = VCVTPD2PSYrm %rdi, %ymm0, 1, %noreg, 0 - %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPD2PSYrr %ymm0 - %xmm0 = VCVTPD2PSZ256rr %ymm0 - ; CHECK: %ymm0 = VCVTPS2DQYrm %rdi, %ymm0, 1, %noreg, 0 - %ymm0 = VCVTPS2DQZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %ymm0 = VCVTPS2DQYrr %ymm0 - %ymm0 = VCVTPS2DQZ256rr %ymm0 - ; CHECK: %ymm0 = VCVTPS2PDYrm %rdi, %ymm0, 1, %noreg, 0 - %ymm0 = VCVTPS2PDZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %ymm0 = VCVTPS2PDYrr %xmm0 - %ymm0 = 
VCVTPS2PDZ256rr %xmm0 - ; CHECK: VCVTPS2PHYmr %rdi, %ymm0, 1, %noreg, 0, %noreg, %noreg - VCVTPS2PHZ256mr %rdi, %ymm0, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VCVTPS2PHYrr %ymm0, %noreg - %xmm0 = VCVTPS2PHZ256rr %ymm0, %noreg - ; CHECK: %ymm0 = VCVTPH2PSYrm %rdi, %ymm0, 1, %noreg, 0 - %ymm0 = VCVTPH2PSZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %ymm0 = VCVTPH2PSYrr %xmm0 - %ymm0 = VCVTPH2PSZ256rr %xmm0 - ; CHECK: %xmm0 = VCVTTPD2DQYrm %rdi, %ymm0, 1, %noreg, 0 - %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTTPD2DQYrr %ymm0 - %xmm0 = VCVTTPD2DQZ256rr %ymm0 - ; CHECK: %ymm0 = VCVTTPS2DQYrm %rdi, %ymm0, 1, %noreg, 0 - %ymm0 = VCVTTPS2DQZ256rm %rdi, %ymm0, 1, %noreg, 0 - ; CHECK: %ymm0 = VCVTTPS2DQYrr %ymm0 - %ymm0 = VCVTTPS2DQZ256rr %ymm0 - ; CHECK: %ymm0 = VSQRTPDYm %rdi, %noreg, %noreg, %noreg, %noreg - %ymm0 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm0 = VSQRTPDYr %ymm0 - %ymm0 = VSQRTPDZ256r %ymm0 - ; CHECK: %ymm0 = VSQRTPSYm %rdi, %noreg, %noreg, %noreg, %noreg - %ymm0 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm0 = VSQRTPSYr %ymm0 - %ymm0 = VSQRTPSZ256r %ymm0 - ; CHECK: %ymm0 = VPALIGNRYrmi %ymm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm0 = VPALIGNRZ256rmi %ymm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm0 = VPALIGNRYrri %ymm0, %ymm1, %noreg - %ymm0 = VPALIGNRZ256rri %ymm0, %ymm1, %noreg - ; CHECK: %ymm0 = VMOVUPSYrm %rdi, 1, %noreg, 0, %noreg - %ymm0 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm0 = VMOVUPSYrr %ymm0 - %ymm0 = VMOVUPSZ256rr %ymm0 - ; CHECK: %ymm0 = VMOVUPSYrr_REV %ymm0 - %ymm0 = VMOVUPSZ256rr_REV %ymm0 - ; CHECK: %ymm0 = VPSHUFBYrm %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm0 = VPSHUFBZ256rm %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm0 = VPSHUFBYrr %ymm0, %ymm1 - %ymm0 = VPSHUFBZ256rr %ymm0, %ymm1 - ; CHECK: %ymm0 = VPSHUFDYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPSHUFDYri %ymm0, -24 - %ymm0 = VPSHUFDZ256ri %ymm0, -24 - ; CHECK: %ymm0 = VPSHUFHWYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPSHUFHWYri %ymm0, -24 - %ymm0 = VPSHUFHWZ256ri %ymm0, -24 - ; CHECK: %ymm0 = VPSHUFLWYmi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm0 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm0 = VPSHUFLWYri %ymm0, -24 - %ymm0 = VPSHUFLWZ256ri %ymm0, -24 - ; CHECK: %ymm0 = VSHUFPDYrmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm0 = VSHUFPDZ256rmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm0 = VSHUFPDYrri %ymm0, %noreg, %noreg - %ymm0 = VSHUFPDZ256rri %ymm0, %noreg, %noreg - ; CHECK: %ymm0 = VSHUFPSYrmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm0 = VSHUFPSZ256rmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm0 = VSHUFPSYrri %ymm0, %noreg, %noreg - %ymm0 = VSHUFPSZ256rri %ymm0, %noreg, %noreg + ; CHECK: VMOVAPDYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVAPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVAPDYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVAPDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVAPDYrr $ymm0 + $ymm0 = VMOVAPDZ256rr $ymm0 + ; CHECK: $ymm0 = VMOVAPDYrr_REV $ymm0 + $ymm0 = VMOVAPDZ256rr_REV $ymm0 + ; CHECK: VMOVAPSYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVAPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVAPSYrm $rip, 1, $noreg, $rax, 
$noreg + $ymm0 = VMOVAPSZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVAPSYrr $ymm0 + $ymm0 = VMOVAPSZ256rr $ymm0 + ; CHECK: $ymm0 = VMOVAPSYrr_REV $ymm0 + $ymm0 = VMOVAPSZ256rr_REV $ymm0 + ; CHECK: $ymm0 = VMOVDDUPYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDDUPZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDDUPYrr $ymm0 + $ymm0 = VMOVDDUPZ256rr $ymm0 + ; CHECK: VMOVDQAYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVDQA32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVDQAYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDQA32Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDQAYrr $ymm0 + $ymm0 = VMOVDQA32Z256rr $ymm0 + ; CHECK: $ymm0 = VMOVDQAYrr_REV $ymm0 + $ymm0 = VMOVDQA32Z256rr_REV $ymm0 + ; CHECK: VMOVDQAYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVDQA64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVDQAYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDQA64Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDQAYrr $ymm0 + $ymm0 = VMOVDQA64Z256rr $ymm0 + ; CHECK: $ymm0 = VMOVDQAYrr_REV $ymm0 + $ymm0 = VMOVDQA64Z256rr_REV $ymm0 + ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVDQU16Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDQU16Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDQUYrr $ymm0 + $ymm0 = VMOVDQU16Z256rr $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0 + $ymm0 = VMOVDQU16Z256rr_REV $ymm0 + ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVDQU32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDQU32Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDQUYrr $ymm0 + $ymm0 = VMOVDQU32Z256rr $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0 + $ymm0 = VMOVDQU32Z256rr_REV $ymm0 + ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVDQU64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDQU64Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDQUYrr $ymm0 + $ymm0 = VMOVDQU64Z256rr $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0 + $ymm0 = VMOVDQU64Z256rr_REV $ymm0 + ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVDQU8Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVDQU8Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVDQUYrr $ymm0 + $ymm0 = VMOVDQU8Z256rr $ymm0 + ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0 + $ymm0 = VMOVDQU8Z256rr_REV $ymm0 + ; CHECK: $ymm0 = VMOVNTDQAYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVNTDQAZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: VMOVNTDQYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVNTDQZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: VMOVNTPDYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVNTPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: VMOVNTPSYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVNTPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVSHDUPYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVSHDUPZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVSHDUPYrr $ymm0 + $ymm0 = VMOVSHDUPZ256rr $ymm0 + ; CHECK: $ymm0 = VMOVSLDUPYrm $rip, 1, $noreg, $rax, $noreg + $ymm0 = VMOVSLDUPZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm0 = VMOVSLDUPYrr $ymm0 + $ymm0 = VMOVSLDUPZ256rr $ymm0 + ; CHECK: VMOVUPDYmr $rdi, 1, $noreg, 0, $noreg, $ymm0 + VMOVUPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0 + ; CHECK: $ymm0 = VMOVUPDYrm $rip, 1, 
$noreg, $rax, $noreg
+ $ymm0 = VMOVUPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVUPDYrr $ymm0
+ $ymm0 = VMOVUPDZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVUPDYrr_REV $ymm0
+ $ymm0 = VMOVUPDZ256rr_REV $ymm0
+ ; CHECK: VMOVUPSYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVUPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VPANDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDYrr $ymm0, $ymm1
+ $ymm0 = VPANDDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPANDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDYrr $ymm0, $ymm1
+ $ymm0 = VPANDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPANDNYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDNDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDNYrr $ymm0, $ymm1
+ $ymm0 = VPANDNDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPANDNYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDNQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDNYrr $ymm0, $ymm1
+ $ymm0 = VPANDNQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPAVGBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPAVGBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPAVGBYrr $ymm0, $ymm1
+ $ymm0 = VPAVGBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPAVGWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPAVGWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPAVGWYrr $ymm0, $ymm1
+ $ymm0 = VPAVGWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDBYrr $ymm0, $ymm1
+ $ymm0 = VPADDBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDDYrr $ymm0, $ymm1
+ $ymm0 = VPADDDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDQYrr $ymm0, $ymm1
+ $ymm0 = VPADDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDSBYrr $ymm0, $ymm1
+ $ymm0 = VPADDSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDSWYrr $ymm0, $ymm1
+ $ymm0 = VPADDSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDUSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDUSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDUSBYrr $ymm0, $ymm1
+ $ymm0 = VPADDUSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDUSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDUSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDUSWYrr $ymm0, $ymm1
+ $ymm0 = VPADDUSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDWYrr $ymm0, $ymm1
+ $ymm0 = VPADDWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMULPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMULPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMULPDYrr $ymm0, $ymm1
+ $ymm0 = VMULPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMULPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMULPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMULPSYrr $ymm0, $ymm1
+ $ymm0 = VMULPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VORPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VORPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VORPDYrr $ymm0, $ymm1
+ $ymm0 = VORPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VORPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VORPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VORPSYrr $ymm0, $ymm1
+ $ymm0 = VORPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMADDUBSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMADDUBSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMADDUBSWYrr $ymm0, $ymm1
+ $ymm0 = VPMADDUBSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMADDWDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMADDWDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMADDWDYrr $ymm0, $ymm1
+ $ymm0 = VPMADDWDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXSBYrr $ymm0, $ymm1
+ $ymm0 = VPMAXSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXSDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXSDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXSDYrr $ymm0, $ymm1
+ $ymm0 = VPMAXSDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXSWYrr $ymm0, $ymm1
+ $ymm0 = VPMAXSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXUBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXUBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXUBYrr $ymm0, $ymm1
+ $ymm0 = VPMAXUBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXUDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXUDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXUDYrr $ymm0, $ymm1
+ $ymm0 = VPMAXUDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXUWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXUWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXUWYrr $ymm0, $ymm1
+ $ymm0 = VPMAXUWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINSBYrr $ymm0, $ymm1
+ $ymm0 = VPMINSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINSDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINSDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINSDYrr $ymm0, $ymm1
+ $ymm0 = VPMINSDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINSWYrr $ymm0, $ymm1
+ $ymm0 = VPMINSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINUBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINUBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINUBYrr $ymm0, $ymm1
+ $ymm0 = VPMINUBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINUDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINUDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINUDYrr $ymm0, $ymm1
+ $ymm0 = VPMINUDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINUWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINUWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINUWYrr $ymm0, $ymm1
+ $ymm0 = VPMINUWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULDQYrr $ymm0, $ymm1
+ $ymm0 = VPMULDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULHRSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULHRSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULHRSWYrr $ymm0, $ymm1
+ $ymm0 = VPMULHRSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULHUWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULHUWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULHUWYrr $ymm0, $ymm1
+ $ymm0 = VPMULHUWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULHWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULHWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULHWYrr $ymm0, $ymm1
+ $ymm0 = VPMULHWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULLDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULLDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULLDYrr $ymm0, $ymm1
+ $ymm0 = VPMULLDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULLWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULLWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULLWYrr $ymm0, $ymm1
+ $ymm0 = VPMULLWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULUDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULUDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULUDQYrr $ymm0, $ymm1
+ $ymm0 = VPMULUDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPORDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPORYrr $ymm0, $ymm1
+ $ymm0 = VPORDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPORQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPORYrr $ymm0, $ymm1
+ $ymm0 = VPORQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBBYrr $ymm0, $ymm1
+ $ymm0 = VPSUBBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBDYrr $ymm0, $ymm1
+ $ymm0 = VPSUBDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBQYrr $ymm0, $ymm1
+ $ymm0 = VPSUBQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBSBYrr $ymm0, $ymm1
+ $ymm0 = VPSUBSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBSWYrr $ymm0, $ymm1
+ $ymm0 = VPSUBSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBUSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBUSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBUSBYrr $ymm0, $ymm1
+ $ymm0 = VPSUBUSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBUSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBUSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBUSWYrr $ymm0, $ymm1
+ $ymm0 = VPSUBUSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBWYrr $ymm0, $ymm1
+ $ymm0 = VPSUBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPXORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPXORDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPXORYrr $ymm0, $ymm1
+ $ymm0 = VPXORDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPXORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPXORQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPXORYrr $ymm0, $ymm1
+ $ymm0 = VPXORQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VADDPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VADDPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VADDPDYrr $ymm0, $ymm1
+ $ymm0 = VADDPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VADDPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VADDPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VADDPSYrr $ymm0, $ymm1
+ $ymm0 = VADDPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDNPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDNPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDNPDYrr $ymm0, $ymm1
+ $ymm0 = VANDNPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDNPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDNPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDNPSYrr $ymm0, $ymm1
+ $ymm0 = VANDNPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDPDYrr $ymm0, $ymm1
+ $ymm0 = VANDPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDPSYrr $ymm0, $ymm1
+ $ymm0 = VANDPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VDIVPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VDIVPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VDIVPDYrr $ymm0, $ymm1
+ $ymm0 = VDIVPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VDIVPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VDIVPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VDIVPSYrr $ymm0, $ymm1
+ $ymm0 = VDIVPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXCPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPDYrr $ymm0, $ymm1
+ $ymm0 = VMAXCPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXCPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPSYrr $ymm0, $ymm1
+ $ymm0 = VMAXCPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPDYrr $ymm0, $ymm1
+ $ymm0 = VMAXPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPSYrr $ymm0, $ymm1
+ $ymm0 = VMAXPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINCPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPDYrr $ymm0, $ymm1
+ $ymm0 = VMINCPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINCPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPSYrr $ymm0, $ymm1
+ $ymm0 = VMINCPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPDYrr $ymm0, $ymm1
+ $ymm0 = VMINPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPSYrr $ymm0, $ymm1
+ $ymm0 = VMINPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VXORPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VXORPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VXORPDYrr $ymm0, $ymm1
+ $ymm0 = VXORPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VXORPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VXORPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VXORPSYrr $ymm0, $ymm1
+ $ymm0 = VXORPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKSSDWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKSSDWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKSSDWYrr $ymm0, $ymm1
+ $ymm0 = VPACKSSDWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKSSWBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKSSWBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKSSWBYrr $ymm0, $ymm1
+ $ymm0 = VPACKSSWBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKUSDWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKUSDWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKUSDWYrr $ymm0, $ymm1
+ $ymm0 = VPACKUSDWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKUSWBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKUSWBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKUSWBYrr $ymm0, $ymm1
+ $ymm0 = VPACKUSWBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKHPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKHPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKHPDYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKHPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKHPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKHPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKHPSYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKHPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKLPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKLPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKLPDYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKLPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKLPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKLPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKLPSYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKLPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VSUBPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VSUBPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VSUBPDYrr $ymm0, $ymm1
+ $ymm0 = VSUBPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VSUBPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VSUBPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VSUBPSYrr $ymm0, $ymm1
+ $ymm0 = VSUBPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHBWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHBWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHBWYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHQDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHQDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHQDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHQDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHWDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHWDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHWDYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHWDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLBWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLBWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLBWYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLQDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLQDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLQDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLQDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLWDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLWDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLWDYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLWDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VFMADD132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VPSRADYri $ymm0, 7
+ $ymm0 = VPSRADZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRADYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRADZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRADYrr $ymm0, $xmm1
+ $ymm0 = VPSRADZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRAVDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRAVDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRAVDYrr $ymm0, $ymm1
+ $ymm0 = VPSRAVDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRAWYri $ymm0, 7
+ $ymm0 = VPSRAWZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRAWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRAWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRAWYrr $ymm0, $xmm1
+ $ymm0 = VPSRAWZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRLDQYri $ymm0, $ymm1
+ $ymm0 = VPSRLDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRLDYri $ymm0, 7
+ $ymm0 = VPSRLDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRLDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLDYrr $ymm0, $xmm1
+ $ymm0 = VPSRLDZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRLQYri $ymm0, 7
+ $ymm0 = VPSRLQZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRLQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLQYrr $ymm0, $xmm1
+ $ymm0 = VPSRLQZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRLVDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLVDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLVDYrr $ymm0, $ymm1
+ $ymm0 = VPSRLVDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRLVQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLVQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLVQYrr $ymm0, $ymm1
+ $ymm0 = VPSRLVQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRLWYri $ymm0, 7
+ $ymm0 = VPSRLWZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRLWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLWYrr $ymm0, $xmm1
+ $ymm0 = VPSRLWZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPMOVSXBDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXBDYrr $xmm0
+ $ymm0 = VPMOVSXBDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXBQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXBQYrr $xmm0
+ $ymm0 = VPMOVSXBQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXBWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXBWYrr $xmm0
+ $ymm0 = VPMOVSXBWZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXDQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXDQYrr $xmm0
+ $ymm0 = VPMOVSXDQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXWDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXWDYrr $xmm0
+ $ymm0 = VPMOVSXWDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXWQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXWQYrr $xmm0
+ $ymm0 = VPMOVSXWQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXBDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXBDYrr $xmm0
+ $ymm0 = VPMOVZXBDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXBQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXBQYrr $xmm0
+ $ymm0 = VPMOVZXBQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXBWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXBWYrr $xmm0
+ $ymm0 = VPMOVZXBWZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXDQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXDQYrr $xmm0
+ $ymm0 = VPMOVZXDQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXWDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXWDYrr $xmm0
+ $ymm0 = VPMOVZXWDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXWQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXWQYrr $xmm0
+ $ymm0 = VPMOVZXWQZ256rr $xmm0
+ ; CHECK: $ymm0 = VBROADCASTF128 $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTF32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTF32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSDYrr $xmm0
+ $ymm0 = VBROADCASTF32X2Z256r $xmm0
+ ; CHECK: $ymm0 = VBROADCASTSDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTSDZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSDYrr $xmm0
+ $ymm0 = VBROADCASTSDZ256r $xmm0
+ ; CHECK: $ymm0 = VBROADCASTSSYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTSSZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSSYrr $xmm0
+ $ymm0 = VBROADCASTSSZ256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTBYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTBZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTBYrr $xmm0
+ $ymm0 = VPBROADCASTBZ256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTDZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTDYrr $xmm0
+ $ymm0 = VPBROADCASTDZ256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTWZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTWYrr $xmm0
+ $ymm0 = VPBROADCASTWZ256r $xmm0
+ ; CHECK: $ymm0 = VBROADCASTI128 $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTI32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTI32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTQYrr $xmm0
+ $ymm0 = VBROADCASTI32X2Z256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTQZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTQYrr $xmm0
+ $ymm0 = VPBROADCASTQZ256r $xmm0
+ ; CHECK: $ymm0 = VPABSBYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPABSBZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPABSBYrr $ymm0
+ $ymm0 = VPABSBZ256rr $ymm0
+ ; CHECK: $ymm0 = VPABSDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPABSDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPABSDYrr $ymm0
+ $ymm0 = VPABSDZ256rr $ymm0
+ ; CHECK: $ymm0 = VPABSWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPABSWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPABSWYrr $ymm0
+ $ymm0 = VPABSWZ256rr $ymm0
+ ; CHECK: $ymm0 = VPSADBWYrm $ymm0, 1, $noreg, $rax, $noreg, $noreg
+ $ymm0 = VPSADBWZ256rm $ymm0, 1, $noreg, $rax, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSADBWYrr $ymm0, $ymm1
+ $ymm0 = VPSADBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPERMDYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMDZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMDYrr $ymm1, $ymm0
+ $ymm0 = VPERMDZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMILPDYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMILPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMILPDYri $ymm0, 7
+ $ymm0 = VPERMILPDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPERMILPDYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMILPDZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMILPDYrr $ymm1, $ymm0
+ $ymm0 = VPERMILPDZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMILPSYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMILPSZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMILPSYri $ymm0, 7
+ $ymm0 = VPERMILPSZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPERMILPSYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMILPSZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMILPSYrr $ymm1, $ymm0
+ $ymm0 = VPERMILPSZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMPDYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMPDYri $ymm0, 7
+ $ymm0 = VPERMPDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPERMPSYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMPSZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMPSYrr $ymm1, $ymm0
+ $ymm0 = VPERMPSZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMQYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMQYri $ymm0, 7
+ $ymm0 = VPERMQZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLDQYri $ymm0, 14
+ $ymm0 = VPSLLDQZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLDYri $ymm0, 7
+ $ymm0 = VPSLLDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLDYrr $ymm0, 14
+ $ymm0 = VPSLLDZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLQYri $ymm0, 7
+ $ymm0 = VPSLLQZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLQYrr $ymm0, 14
+ $ymm0 = VPSLLQZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLVDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLVDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLVDYrr $ymm0, 14
+ $ymm0 = VPSLLVDZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLVQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLVQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLVQYrr $ymm0, 14
+ $ymm0 = VPSLLVQZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLWYri $ymm0, 7
+ $ymm0 = VPSLLWZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLWYrr $ymm0, 14
+ $ymm0 = VPSLLWZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VCVTDQ2PDYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTDQ2PDZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTDQ2PDYrr $xmm0
+ $ymm0 = VCVTDQ2PDZ256rr $xmm0
+ ; CHECK: $ymm0 = VCVTDQ2PSYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTDQ2PSZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTDQ2PSYrr $ymm0
+ $ymm0 = VCVTDQ2PSZ256rr $ymm0
+ ; CHECK: $xmm0 = VCVTPD2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2DQYrr $ymm0
+ $xmm0 = VCVTPD2DQZ256rr $ymm0
+ ; CHECK: $xmm0 = VCVTPD2PSYrm $rdi, $ymm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2PSZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2PSYrr $ymm0
+ $xmm0 = VCVTPD2PSZ256rr $ymm0
+ ; CHECK: $ymm0 = VCVTPS2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTPS2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTPS2DQYrr $ymm0
+ $ymm0 = VCVTPS2DQZ256rr $ymm0
+ ; CHECK: $ymm0 = VCVTPS2PDYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTPS2PDZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTPS2PDYrr $xmm0
+ $ymm0 = VCVTPS2PDZ256rr $xmm0
+ ; CHECK: VCVTPS2PHYmr $rdi, $ymm0, 1, $noreg, 0, $noreg, $noreg
+ VCVTPS2PHZ256mr $rdi, $ymm0, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VCVTPS2PHYrr $ymm0, $noreg
+ $xmm0 = VCVTPS2PHZ256rr $ymm0, $noreg
+ ; CHECK: $ymm0 = VCVTPH2PSYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTPH2PSZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTPH2PSYrr $xmm0
+ $ymm0 = VCVTPH2PSZ256rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPD2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $xmm0 = VCVTTPD2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPD2DQYrr $ymm0
+ $xmm0 = VCVTTPD2DQZ256rr $ymm0
+ ; CHECK: $ymm0 = VCVTTPS2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTTPS2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTTPS2DQYrr $ymm0
+ $ymm0 = VCVTTPS2DQZ256rr $ymm0
+ ; CHECK: $ymm0 = VSQRTPDYm $rdi, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSQRTPDZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSQRTPDYr $ymm0
+ $ymm0 = VSQRTPDZ256r $ymm0
+ ; CHECK: $ymm0 = VSQRTPSYm $rdi, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSQRTPSZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSQRTPSYr $ymm0
+ $ymm0 = VSQRTPSZ256r $ymm0
+ ; CHECK: $ymm0 = VPALIGNRYrmi $ymm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VPALIGNRZ256rmi $ymm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VPALIGNRYrri $ymm0, $ymm1, $noreg
+ $ymm0 = VPALIGNRZ256rri $ymm0, $ymm1, $noreg
+ ; CHECK: $ymm0 = VMOVUPSYrm $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VMOVUPSZ256rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VMOVUPSYrr $ymm0
+ $ymm0 = VMOVUPSZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVUPSYrr_REV $ymm0
+ $ymm0 = VMOVUPSZ256rr_REV $ymm0
+ ; CHECK: $ymm0 = VPSHUFBYrm $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VPSHUFBZ256rm $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFBYrr $ymm0, $ymm1
+ $ymm0 = VPSHUFBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSHUFDYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPSHUFDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFDYri $ymm0, -24
+ $ymm0 = VPSHUFDZ256ri $ymm0, -24
+ ; CHECK: $ymm0 = VPSHUFHWYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPSHUFHWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFHWYri $ymm0, -24
+ $ymm0 = VPSHUFHWZ256ri $ymm0, -24
+ ; CHECK: $ymm0 = VPSHUFLWYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPSHUFLWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFLWYri $ymm0, -24
+ $ymm0 = VPSHUFLWZ256ri $ymm0, -24
+ ; CHECK: $ymm0 = VSHUFPDYrmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSHUFPDZ256rmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSHUFPDYrri $ymm0, $noreg, $noreg
+ $ymm0 = VSHUFPDZ256rri $ymm0, $noreg, $noreg
+ ; CHECK: $ymm0 = VSHUFPSYrmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSHUFPSZ256rmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSHUFPSYrri $ymm0, $noreg, $noreg
+ $ymm0 = VSHUFPSZ256rri $ymm0, $noreg, $noreg
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
---
# CHECK-LABEL: name: evex_z128_to_vex_test
@@ -899,868 +899,868 @@ name: evex_z128_to_vex_test
body: |
  bb.0:
- ; CHECK: VMOVAPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVAPDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVAPDrr %xmm0
- %xmm0 = VMOVAPDZ128rr %xmm0
- ; CHECK: VMOVAPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVAPSrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVAPSrr %xmm0
- %xmm0 = VMOVAPSZ128rr %xmm0
- ; CHECK: VMOVDQAmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQArm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQArr %xmm0
- %xmm0 = VMOVDQA32Z128rr %xmm0
- ; CHECK: VMOVDQAmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQArm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQArr %xmm0
- %xmm0 = VMOVDQA64Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU16Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU32Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU64Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU8Z128rr %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU8Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVNTDQArm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: VMOVUPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVUPDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVUPDrr %xmm0
- %xmm0 = VMOVUPDZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVUPDrr_REV %xmm0
- %xmm0 = VMOVUPDZ128rr_REV %xmm0
- ; CHECK: VMOVUPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVUPSrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVUPSrr %xmm0
- %xmm0 = VMOVUPSZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVUPSrr_REV %xmm0
- %xmm0 = VMOVUPSZ128rr_REV %xmm0
- ; CHECK: VMOVNTDQmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: VMOVNTPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: VMOVNTPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVAPDrr_REV %xmm0
- %xmm0 = VMOVAPDZ128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVAPSrr_REV %xmm0
- %xmm0 = VMOVAPSZ128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQArr_REV %xmm0
- %xmm0 = VMOVDQA32Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQArr_REV %xmm0
- %xmm0 = VMOVDQA64Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU16Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU32Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU64Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VPMOVSXBDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXBDrr %xmm0
- %xmm0 = VPMOVSXBDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXBQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXBQrr %xmm0
- %xmm0 = VPMOVSXBQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXBWrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXBWrr %xmm0
- %xmm0 = VPMOVSXBWZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXDQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXDQrr %xmm0
- %xmm0 = VPMOVSXDQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXWDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXWDrr %xmm0
- %xmm0 = VPMOVSXWDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXWQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXWQrr %xmm0
- %xmm0 = VPMOVSXWQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXBDrr %xmm0
- %xmm0 = VPMOVZXBDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXBQrr %xmm0
- %xmm0 = VPMOVZXBQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBWrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXBWrr %xmm0
- %xmm0 = VPMOVZXBWZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXDQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXDQrr %xmm0
- %xmm0 = VPMOVZXDQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXWDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXWDrr %xmm0
- %xmm0 = VPMOVZXWDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXWQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXWQrr %xmm0
- %xmm0 = VPMOVZXWQZ128rr %xmm0
- ; CHECK: VMOVHPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVHPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVHPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVHPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVHPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVHPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVLPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVLPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVLPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVLPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVLPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVLPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
- %xmm0 = VMAXCPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
- %xmm0 = VMAXCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
- %xmm0 = VMAXPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
- %xmm0 = VMAXPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
- %xmm0 = VMINCPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
- %xmm0 = VMINCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
- %xmm0 = VMINPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
- %xmm0 = VMINPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULPDrr %xmm0, %xmm1
- %xmm0 = VMULPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULPSrr %xmm0, %xmm1
- %xmm0 = VMULPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VORPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VORPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VORPDrr %xmm0, %xmm1
- %xmm0 = VORPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VORPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VORPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VORPSrr %xmm0, %xmm1
- %xmm0 = VORPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDBrr %xmm0, %xmm1
- %xmm0 = VPADDBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDDrr %xmm0, %xmm1
- %xmm0 = VPADDDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDQrr %xmm0, %xmm1
- %xmm0 = VPADDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDSBrr %xmm0, %xmm1
- %xmm0 = VPADDSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDSWrr %xmm0, %xmm1
- %xmm0 = VPADDSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDUSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDUSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDUSBrr %xmm0, %xmm1
- %xmm0 = VPADDUSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDUSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDUSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDUSWrr %xmm0, %xmm1
- %xmm0 = VPADDUSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDWrr %xmm0, %xmm1
- %xmm0 = VPADDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
- %xmm0 = VPANDDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
- %xmm0 = VPANDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDNDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
- %xmm0 = VPANDNDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDNQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
- %xmm0 = VPANDNQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPAVGBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPAVGBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPAVGBrr %xmm0, %xmm1
- %xmm0 = VPAVGBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPAVGWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPAVGWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPAVGWrr %xmm0, %xmm1
- %xmm0 = VPAVGWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXSBrr %xmm0, %xmm1
- %xmm0 = VPMAXSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXSDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXSDrr %xmm0, %xmm1
- %xmm0 = VPMAXSDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXSWrr %xmm0, %xmm1
- %xmm0 = VPMAXSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXUBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXUBrr %xmm0, %xmm1
- %xmm0 = VPMAXUBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXUDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXUDrr %xmm0, %xmm1
- %xmm0 = VPMAXUDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXUWrr %xmm0, %xmm1
- %xmm0 = VPMAXUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINSBrr %xmm0, %xmm1
- %xmm0 = VPMINSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINSDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINSDrr %xmm0, %xmm1
- %xmm0 = VPMINSDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINSWrr %xmm0, %xmm1
- %xmm0 = VPMINSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINUBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINUBrr %xmm0, %xmm1
- %xmm0 = VPMINUBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINUDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINUDrr %xmm0, %xmm1
- %xmm0 = VPMINUDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINUWrr %xmm0, %xmm1
- %xmm0 = VPMINUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULDQrr %xmm0, %xmm1
- %xmm0 = VPMULDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHRSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULHRSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULHRSWrr %xmm0, %xmm1
- %xmm0 = VPMULHRSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULHUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULHUWrr %xmm0, %xmm1
- %xmm0 = VPMULHUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULHWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULHWrr %xmm0, %xmm1
- %xmm0 = VPMULHWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULLDrr %xmm0, %xmm1
- %xmm0 = VPMULLDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULLWrr %xmm0, %xmm1
- %xmm0 = VPMULLWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULUDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULUDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULUDQrr %xmm0, %xmm1
- %xmm0 = VPMULUDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPORDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPORrr %xmm0, %xmm1
- %xmm0 = VPORDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPORQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPORrr %xmm0, %xmm1
- %xmm0 = VPORQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBBrr %xmm0, %xmm1
- %xmm0 = VPSUBBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBDrr %xmm0, %xmm1
- %xmm0 = VPSUBDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBQrr %xmm0, %xmm1
- %xmm0 = VPSUBQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBSBrr %xmm0, %xmm1
- %xmm0 = VPSUBSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBSWrr %xmm0, %xmm1
- %xmm0 = VPSUBSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBUSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBUSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBUSBrr %xmm0, %xmm1
- %xmm0 = VPSUBUSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBUSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBUSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBUSWrr %xmm0, %xmm1
- %xmm0 = VPSUBUSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBWrr %xmm0, %xmm1
- %xmm0 = VPSUBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDPDrr %xmm0, %xmm1
- %xmm0 = VADDPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDPSrr %xmm0, %xmm1
- %xmm0 = VADDPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDNPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDNPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDNPDrr %xmm0, %xmm1
- %xmm0 = VANDNPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDNPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDNPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDNPSrr %xmm0, %xmm1
- %xmm0 = VANDNPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDPDrr %xmm0, %xmm1
- %xmm0 = VANDPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDPSrr %xmm0, %xmm1
- %xmm0 = VANDPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVPDrr %xmm0, %xmm1
- %xmm0 = VDIVPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVPSrr %xmm0, %xmm1
- %xmm0 = VDIVPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPXORDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPXORrr %xmm0, %xmm1
- %xmm0 = VPXORDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPXORQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPXORrr %xmm0, %xmm1
- %xmm0 = VPXORQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBPDrr %xmm0, %xmm1
- %xmm0 = VSUBPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBPSrr %xmm0, %xmm1
- %xmm0 = VSUBPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VXORPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VXORPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VXORPDrr %xmm0, %xmm1
- %xmm0 = VXORPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VXORPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VXORPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VXORPSrr %xmm0, %xmm1
- %xmm0 = VXORPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMADDUBSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMADDUBSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMADDUBSWrr %xmm0, %xmm1
- %xmm0 = VPMADDUBSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMADDWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMADDWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMADDWDrr %xmm0, %xmm1
- %xmm0 = VPMADDWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKSSDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKSSDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKSSDWrr %xmm0, %xmm1
- %xmm0 = VPACKSSDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKSSWBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKSSWBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKSSWBrr %xmm0, %xmm1
- %xmm0 = VPACKSSWBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKUSDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKUSDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKUSDWrr %xmm0, %xmm1
- %xmm0 = VPACKUSDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKUSWBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKUSWBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKUSWBrr %xmm0, %xmm1
- %xmm0 = VPACKUSWBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHBWrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHQDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHQDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHQDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHQDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHWDrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLBWrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLQDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLQDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLQDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLQDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLWDrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKHPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKHPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKHPDrr %xmm0, %xmm1
- %xmm0 = VUNPCKHPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKHPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKHPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKHPSrr %xmm0, %xmm1
- %xmm0 = VUNPCKHPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKLPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKLPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKLPDrr %xmm0, %xmm1
- %xmm0 = VUNPCKLPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKLPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKLPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKLPSrr %xmm0, %xmm1
- %xmm0 = VUNPCKLPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VFMADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUB132PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUB132PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUB213PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUB213PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUB213PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUB213PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUB231PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUB231PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUB231PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUB231PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUBADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUBADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUBADD132PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUBADD132PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUBADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUBADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUBADD132PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUBADD132PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUBADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUBADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUBADD213PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUBADD213PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUBADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUBADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUBADD213PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUBADD213PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUBADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUBADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUBADD231PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUBADD231PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFMSUBADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFMSUBADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFMSUBADD231PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFMSUBADD231PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMADD132PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMADD132PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMADD132PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMADD132PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMADD213PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMADD213PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 
0, %noreg - %xmm0 = VFNMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMADD213PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMADD213PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMADD231PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMADD231PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMADD231PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMADD231PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMSUB132PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMSUB132PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMSUB132PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMSUB132PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMSUB213PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMSUB213PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMSUB213PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMSUB213PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMSUB231PDr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMSUB231PDZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VFNMSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - %xmm0 = VFNMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VFNMSUB231PSr %xmm0, %xmm1, %xmm2 - %xmm0 = VFNMSUB231PSZ128r %xmm0, %xmm1, %xmm2 - ; CHECK: %xmm0 = VPSLLDri %xmm0, 7 - %xmm0 = VPSLLDZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSLLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSLLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSLLDrr %xmm0, 14 - %xmm0 = VPSLLDZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSLLQri %xmm0, 7 - %xmm0 = VPSLLQZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSLLQrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSLLQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSLLQrr %xmm0, 14 - %xmm0 = VPSLLQZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSLLVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSLLVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSLLVDrr %xmm0, 14 - %xmm0 = VPSLLVDZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSLLVQrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSLLVQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSLLVQrr %xmm0, 14 - %xmm0 = VPSLLVQZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSLLWri %xmm0, 7 - %xmm0 = VPSLLWZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSLLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSLLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSLLWrr %xmm0, 14 - %xmm0 = VPSLLWZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRADri %xmm0, 7 - %xmm0 = VPSRADZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSRADrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRADZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRADrr 
%xmm0, 14 - %xmm0 = VPSRADZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRAVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRAVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRAVDrr %xmm0, 14 - %xmm0 = VPSRAVDZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRAWri %xmm0, 7 - %xmm0 = VPSRAWZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSRAWrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRAWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRAWrr %xmm0, 14 - %xmm0 = VPSRAWZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRLDQri %xmm0, 14 - %xmm0 = VPSRLDQZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRLDri %xmm0, 7 - %xmm0 = VPSRLDZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSRLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRLDrr %xmm0, 14 - %xmm0 = VPSRLDZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRLQri %xmm0, 7 - %xmm0 = VPSRLQZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSRLQrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRLQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRLQrr %xmm0, 14 - %xmm0 = VPSRLQZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRLVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRLVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRLVDrr %xmm0, 14 - %xmm0 = VPSRLVDZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRLVQrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRLVQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRLVQrr %xmm0, 14 - %xmm0 = VPSRLVQZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPSRLWri %xmm0, 7 - %xmm0 = VPSRLWZ128ri %xmm0, 7 - ; CHECK: %xmm0 = VPSRLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPSRLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPSRLWrr %xmm0, 14 - %xmm0 = VPSRLWZ128rr %xmm0, 14 - ; CHECK: %xmm0 = VPERMILPDmi %rdi, 1, %noreg, 0, %noreg, %noreg - %xmm0 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VPERMILPDri %xmm0, 9 - %xmm0 = VPERMILPDZ128ri %xmm0, 9 - ; CHECK: %xmm0 = VPERMILPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg - %xmm0 = VPERMILPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VPERMILPDrr %xmm0, %xmm1 - %xmm0 = VPERMILPDZ128rr %xmm0, %xmm1 - ; CHECK: %xmm0 = VPERMILPSmi %rdi, 1, %noreg, 0, %noreg, %noreg - %xmm0 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VPERMILPSri %xmm0, 9 - %xmm0 = VPERMILPSZ128ri %xmm0, 9 - ; CHECK: %xmm0 = VPERMILPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg - %xmm0 = VPERMILPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VPERMILPSrr %xmm0, %xmm1 - %xmm0 = VPERMILPSZ128rr %xmm0, %xmm1 - ; CHECK: %xmm0 = VCVTPH2PSrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTPH2PSZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPH2PSrr %xmm0 - %xmm0 = VCVTPH2PSZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTDQ2PDrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTDQ2PDZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTDQ2PDrr %xmm0 - %xmm0 = VCVTDQ2PDZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTDQ2PSrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTDQ2PSZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTDQ2PSrr %xmm0 - %xmm0 = VCVTDQ2PSZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTPD2DQrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTPD2DQZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPD2DQrr %xmm0 - %xmm0 = VCVTPD2DQZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTPD2PSrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTPD2PSZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPD2PSrr %xmm0 - %xmm0 = VCVTPD2PSZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTPS2DQrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = 
VCVTPS2DQZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPS2DQrr %xmm0 - %xmm0 = VCVTPS2DQZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTPS2PDrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTPS2PDZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPS2PDrr %xmm0 - %xmm0 = VCVTPS2PDZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTTPD2DQrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTTPD2DQZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTTPD2DQrr %xmm0 - %xmm0 = VCVTTPD2DQZ128rr %xmm0 - ; CHECK: %xmm0 = VCVTTPS2DQrm %rdi, %xmm0, 1, %noreg, 0 - %xmm0 = VCVTTPS2DQZ128rm %rdi, %xmm0, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTTPS2DQrr %xmm0 - %xmm0 = VCVTTPS2DQZ128rr %xmm0 - ; CHECK: %xmm0 = VSQRTPDm %rdi, %noreg, %noreg, %noreg, %noreg - %xmm0 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VSQRTPDr %xmm0 - %xmm0 = VSQRTPDZ128r %xmm0 - ; CHECK: %xmm0 = VSQRTPSm %rdi, %noreg, %noreg, %noreg, %noreg - %xmm0 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VSQRTPSr %xmm0 - %xmm0 = VSQRTPSZ128r %xmm0 - ; CHECK: %xmm0 = VMOVDDUPrm %rdi, 1, %noreg, 0, %noreg - %xmm0 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VMOVDDUPrr %xmm0 - %xmm0 = VMOVDDUPZ128rr %xmm0 - ; CHECK: %xmm0 = VMOVSHDUPrm %rdi, 1, %noreg, 0, %noreg - %xmm0 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VMOVSHDUPrr %xmm0 - %xmm0 = VMOVSHDUPZ128rr %xmm0 - ; CHECK: %xmm0 = VMOVSLDUPrm %rdi, 1, %noreg, 0, %noreg - %xmm0 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg - ; CHECK: %xmm0 = VMOVSLDUPrr %xmm0 - %xmm0 = VMOVSLDUPZ128rr %xmm0 - ; CHECK: %xmm0 = VPSHUFBrm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg - %xmm0 = VPSHUFBZ128rm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VPSHUFBrr %xmm0, %xmm1 - %xmm0 = VPSHUFBZ128rr %xmm0, %xmm1 - ; CHECK: %xmm0 = VPSHUFDmi %rdi, 1, %noreg, 0, %noreg, %noreg - %xmm0 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VPSHUFDri %xmm0, -24 - %xmm0 = VPSHUFDZ128ri %xmm0, -24 - ; CHECK: %xmm0 = VPSHUFHWmi %rdi, 1, %noreg, 0, %noreg, %noreg - %xmm0 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VPSHUFHWri %xmm0, -24 - %xmm0 = VPSHUFHWZ128ri %xmm0, -24 - ; CHECK: %xmm0 = VPSHUFLWmi %rdi, 1, %noreg, 0, %noreg, %noreg - %xmm0 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VPSHUFLWri %xmm0, -24 - %xmm0 = VPSHUFLWZ128ri %xmm0, -24 - ; CHECK: %xmm0 = VPSLLDQri %xmm0, %xmm1 - %xmm0 = VPSLLDQZ128rr %xmm0, %xmm1 - ; CHECK: %xmm0 = VSHUFPDrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %xmm0 = VSHUFPDZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VSHUFPDrri %xmm0, %noreg, %noreg - %xmm0 = VSHUFPDZ128rri %xmm0, %noreg, %noreg - ; CHECK: %xmm0 = VSHUFPSrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %xmm0 = VSHUFPSZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VSHUFPSrri %xmm0, %noreg, %noreg - %xmm0 = VSHUFPSZ128rri %xmm0, %noreg, %noreg - ; CHECK: %xmm0 = VPSADBWrm %xmm0, 1, %noreg, %rax, %noreg, %noreg - %xmm0 = VPSADBWZ128rm %xmm0, 1, %noreg, %rax, %noreg, %noreg - ; CHECK: %xmm0 = VPSADBWrr %xmm0, %xmm1 - %xmm0 = VPSADBWZ128rr %xmm0, %xmm1 - ; CHECK: %xmm0 = VBROADCASTSSrm %rip, %noreg, %noreg, %noreg, %noreg - %xmm0 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VBROADCASTSSrr %xmm0 - %xmm0 = VBROADCASTSSZ128r %xmm0 - ; CHECK: %xmm0 = VPBROADCASTBrm %rip, %noreg, %noreg, %noreg, %noreg - %xmm0 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg - ; 
CHECK: %xmm0 = VPBROADCASTBrr %xmm0 - %xmm0 = VPBROADCASTBZ128r %xmm0 - ; CHECK: %xmm0 = VPBROADCASTDrm %rip, %noreg, %noreg, %noreg, %noreg - %xmm0 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VPBROADCASTDrr %xmm0 - %xmm0 = VPBROADCASTDZ128r %xmm0 - ; CHECK: %xmm0 = VPBROADCASTQrm %rip, %noreg, %noreg, %noreg, %noreg - %xmm0 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VPBROADCASTQrr %xmm0 - %xmm0 = VPBROADCASTQZ128r %xmm0 - ; CHECK: %xmm0 = VPBROADCASTWrm %rip, %noreg, %noreg, %noreg, %noreg - %xmm0 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VPBROADCASTWrr %xmm0 - %xmm0 = VPBROADCASTWZ128r %xmm0 - ; CHECK: %xmm0 = VPBROADCASTQrm %rip, %noreg, %noreg, %noreg, %noreg - %xmm0 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VPBROADCASTQrr %xmm0 - %xmm0 = VBROADCASTI32X2Z128r %xmm0 - ; CHECK: %xmm0 = VCVTPS2PHrr %xmm0, 2 - %xmm0 = VCVTPS2PHZ128rr %xmm0, 2 - ; CHECK: VCVTPS2PHmr %rdi, %xmm0, 1, %noreg, 0, %noreg, %noreg - VCVTPS2PHZ128mr %rdi, %xmm0, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VPABSBrm %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPABSBrr %xmm0 - %xmm0 = VPABSBZ128rr %xmm0 - ; CHECK: %xmm0 = VPABSDrm %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPABSDrr %xmm0 - %xmm0 = VPABSDZ128rr %xmm0 - ; CHECK: %xmm0 = VPABSWrm %rip, 1, %noreg, %rax, %noreg - %xmm0 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm0 = VPABSWrr %xmm0 - %xmm0 = VPABSWZ128rr %xmm0 - ; CHECK: %xmm0 = VPALIGNRrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %xmm0 = VPALIGNRZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %xmm0 = VPALIGNRrri %xmm0, %xmm1, 15 - %xmm0 = VPALIGNRZ128rri %xmm0, %xmm1, 15 + ; CHECK: VMOVAPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVAPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVAPDrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVAPDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVAPDrr $xmm0 + $xmm0 = VMOVAPDZ128rr $xmm0 + ; CHECK: VMOVAPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVAPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVAPSrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVAPSZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVAPSrr $xmm0 + $xmm0 = VMOVAPSZ128rr $xmm0 + ; CHECK: VMOVDQAmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVDQA32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVDQArm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVDQA32Z128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVDQArr $xmm0 + $xmm0 = VMOVDQA32Z128rr $xmm0 + ; CHECK: VMOVDQAmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVDQA64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVDQArm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVDQA64Z128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVDQArr $xmm0 + $xmm0 = VMOVDQA64Z128rr $xmm0 + ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVDQU16Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVDQU16Z128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVDQUrr $xmm0 + $xmm0 = VMOVDQU16Z128rr $xmm0 + ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVDQU32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVDQU32Z128rm $rip, 1, $noreg, $rax, $noreg + ; 
CHECK: $xmm0 = VMOVDQUrr $xmm0 + $xmm0 = VMOVDQU32Z128rr $xmm0 + ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVDQU64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVDQU64Z128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVDQUrr $xmm0 + $xmm0 = VMOVDQU64Z128rr $xmm0 + ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVDQU8Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVDQU8Z128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVDQUrr $xmm0 + $xmm0 = VMOVDQU8Z128rr $xmm0 + ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0 + $xmm0 = VMOVDQU8Z128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVNTDQArm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVNTDQAZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: VMOVUPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVUPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVUPDrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVUPDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVUPDrr $xmm0 + $xmm0 = VMOVUPDZ128rr $xmm0 + ; CHECK: $xmm0 = VMOVUPDrr_REV $xmm0 + $xmm0 = VMOVUPDZ128rr_REV $xmm0 + ; CHECK: VMOVUPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVUPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVUPSrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMOVUPSZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMOVUPSrr $xmm0 + $xmm0 = VMOVUPSZ128rr $xmm0 + ; CHECK: $xmm0 = VMOVUPSrr_REV $xmm0 + $xmm0 = VMOVUPSZ128rr_REV $xmm0 + ; CHECK: VMOVNTDQmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVNTDQZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: VMOVNTPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVNTPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: VMOVNTPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVNTPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVAPDrr_REV $xmm0 + $xmm0 = VMOVAPDZ128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVAPSrr_REV $xmm0 + $xmm0 = VMOVAPSZ128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVDQArr_REV $xmm0 + $xmm0 = VMOVDQA32Z128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVDQArr_REV $xmm0 + $xmm0 = VMOVDQA64Z128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0 + $xmm0 = VMOVDQU16Z128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0 + $xmm0 = VMOVDQU32Z128rr_REV $xmm0 + ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0 + $xmm0 = VMOVDQU64Z128rr_REV $xmm0 + ; CHECK: $xmm0 = VPMOVSXBDrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVSXBDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVSXBDrr $xmm0 + $xmm0 = VPMOVSXBDZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVSXBQrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVSXBQZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVSXBQrr $xmm0 + $xmm0 = VPMOVSXBQZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVSXBWrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVSXBWZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVSXBWrr $xmm0 + $xmm0 = VPMOVSXBWZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVSXDQrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVSXDQZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVSXDQrr $xmm0 + $xmm0 = VPMOVSXDQZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVSXWDrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVSXWDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVSXWDrr $xmm0 + $xmm0 = VPMOVSXWDZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVSXWQrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVSXWQZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVSXWQrr $xmm0 + $xmm0 = VPMOVSXWQZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVZXBDrm $rip, 1, $noreg, $rax, 
$noreg + $xmm0 = VPMOVZXBDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVZXBDrr $xmm0 + $xmm0 = VPMOVZXBDZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVZXBQrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVZXBQZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVZXBQrr $xmm0 + $xmm0 = VPMOVZXBQZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVZXBWrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVZXBWZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVZXBWrr $xmm0 + $xmm0 = VPMOVZXBWZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVZXDQrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVZXDQZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVZXDQrr $xmm0 + $xmm0 = VPMOVZXDQZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVZXWDrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVZXWDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVZXWDrr $xmm0 + $xmm0 = VPMOVZXWDZ128rr $xmm0 + ; CHECK: $xmm0 = VPMOVZXWQrm $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMOVZXWQZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMOVZXWQrr $xmm0 + $xmm0 = VPMOVZXWQZ128rr $xmm0 + ; CHECK: VMOVHPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVHPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVHPDrm $xmm0, $rdi, 1, $noreg, 0, $noreg + $xmm0 = VMOVHPDZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg + ; CHECK: VMOVHPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVHPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVHPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg + $xmm0 = VMOVHPSZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg + ; CHECK: VMOVLPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVLPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVLPDrm $xmm0, $rdi, 1, $noreg, 0, $noreg + $xmm0 = VMOVLPDZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg + ; CHECK: VMOVLPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0 + VMOVLPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0 + ; CHECK: $xmm0 = VMOVLPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg + $xmm0 = VMOVLPSZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VMAXCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMAXCPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMAXCPDrr $xmm0, $xmm1 + $xmm0 = VMAXCPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMAXCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMAXCPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMAXCPSrr $xmm0, $xmm1 + $xmm0 = VMAXCPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMAXCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMAXPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMAXCPDrr $xmm0, $xmm1 + $xmm0 = VMAXPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMAXCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMAXPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMAXCPSrr $xmm0, $xmm1 + $xmm0 = VMAXPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMINCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMINCPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMINCPDrr $xmm0, $xmm1 + $xmm0 = VMINCPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMINCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMINCPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMINCPSrr $xmm0, $xmm1 + $xmm0 = VMINCPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMINCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMINPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMINCPDrr $xmm0, $xmm1 + $xmm0 = VMINPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMINCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMINPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = 
VMINCPSrr $xmm0, $xmm1 + $xmm0 = VMINPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMULPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMULPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMULPDrr $xmm0, $xmm1 + $xmm0 = VMULPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VMULPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VMULPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VMULPSrr $xmm0, $xmm1 + $xmm0 = VMULPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VORPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VORPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VORPDrr $xmm0, $xmm1 + $xmm0 = VORPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VORPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VORPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VORPSrr $xmm0, $xmm1 + $xmm0 = VORPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDBrr $xmm0, $xmm1 + $xmm0 = VPADDBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDDrr $xmm0, $xmm1 + $xmm0 = VPADDDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDQrr $xmm0, $xmm1 + $xmm0 = VPADDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDSBrr $xmm0, $xmm1 + $xmm0 = VPADDSBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDSWrr $xmm0, $xmm1 + $xmm0 = VPADDSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDUSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDUSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDUSBrr $xmm0, $xmm1 + $xmm0 = VPADDUSBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDUSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDUSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDUSWrr $xmm0, $xmm1 + $xmm0 = VPADDUSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPADDWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPADDWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPADDWrr $xmm0, $xmm1 + $xmm0 = VPADDWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPANDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPANDDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPANDrr $xmm0, $xmm1 + $xmm0 = VPANDDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPANDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPANDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPANDrr $xmm0, $xmm1 + $xmm0 = VPANDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPANDNrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPANDNDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPANDNrr $xmm0, $xmm1 + $xmm0 = VPANDNDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPANDNrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPANDNQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPANDNrr $xmm0, $xmm1 + $xmm0 = VPANDNQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPAVGBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPAVGBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPAVGBrr $xmm0, $xmm1 + $xmm0 = VPAVGBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPAVGWrm $xmm0, $rip, 1, 
$noreg, $rax, $noreg + $xmm0 = VPAVGWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPAVGWrr $xmm0, $xmm1 + $xmm0 = VPAVGWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMAXSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMAXSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMAXSBrr $xmm0, $xmm1 + $xmm0 = VPMAXSBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMAXSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMAXSDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMAXSDrr $xmm0, $xmm1 + $xmm0 = VPMAXSDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMAXSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMAXSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMAXSWrr $xmm0, $xmm1 + $xmm0 = VPMAXSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMAXUBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMAXUBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMAXUBrr $xmm0, $xmm1 + $xmm0 = VPMAXUBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMAXUDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMAXUDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMAXUDrr $xmm0, $xmm1 + $xmm0 = VPMAXUDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMAXUWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMAXUWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMAXUWrr $xmm0, $xmm1 + $xmm0 = VPMAXUWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMINSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMINSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMINSBrr $xmm0, $xmm1 + $xmm0 = VPMINSBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMINSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMINSDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMINSDrr $xmm0, $xmm1 + $xmm0 = VPMINSDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMINSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMINSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMINSWrr $xmm0, $xmm1 + $xmm0 = VPMINSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMINUBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMINUBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMINUBrr $xmm0, $xmm1 + $xmm0 = VPMINUBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMINUDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMINUDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMINUDrr $xmm0, $xmm1 + $xmm0 = VPMINUDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMINUWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMINUWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMINUWrr $xmm0, $xmm1 + $xmm0 = VPMINUWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMULDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULDQrr $xmm0, $xmm1 + $xmm0 = VPMULDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULHRSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMULHRSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULHRSWrr $xmm0, $xmm1 + $xmm0 = VPMULHRSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULHUWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMULHUWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULHUWrr $xmm0, $xmm1 + $xmm0 = VPMULHUWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULHWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMULHWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULHWrr $xmm0, $xmm1 + $xmm0 = VPMULHWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULLDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = 
VPMULLDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULLDrr $xmm0, $xmm1 + $xmm0 = VPMULLDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULLWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMULLWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULLWrr $xmm0, $xmm1 + $xmm0 = VPMULLWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMULUDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMULUDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMULUDQrr $xmm0, $xmm1 + $xmm0 = VPMULUDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPORrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPORDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPORrr $xmm0, $xmm1 + $xmm0 = VPORDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPORrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPORQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPORrr $xmm0, $xmm1 + $xmm0 = VPORQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBBrr $xmm0, $xmm1 + $xmm0 = VPSUBBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBDrr $xmm0, $xmm1 + $xmm0 = VPSUBDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBQrr $xmm0, $xmm1 + $xmm0 = VPSUBQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBSBrr $xmm0, $xmm1 + $xmm0 = VPSUBSBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBSWrr $xmm0, $xmm1 + $xmm0 = VPSUBSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBUSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBUSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBUSBrr $xmm0, $xmm1 + $xmm0 = VPSUBUSBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBUSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBUSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBUSWrr $xmm0, $xmm1 + $xmm0 = VPSUBUSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPSUBWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPSUBWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPSUBWrr $xmm0, $xmm1 + $xmm0 = VPSUBWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VADDPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VADDPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VADDPDrr $xmm0, $xmm1 + $xmm0 = VADDPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VADDPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VADDPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VADDPSrr $xmm0, $xmm1 + $xmm0 = VADDPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VANDNPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VANDNPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VANDNPDrr $xmm0, $xmm1 + $xmm0 = VANDNPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VANDNPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VANDNPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VANDNPSrr $xmm0, $xmm1 + $xmm0 = VANDNPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VANDPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VANDPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VANDPDrr $xmm0, 
$xmm1 + $xmm0 = VANDPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VANDPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VANDPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VANDPSrr $xmm0, $xmm1 + $xmm0 = VANDPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VDIVPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VDIVPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VDIVPDrr $xmm0, $xmm1 + $xmm0 = VDIVPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VDIVPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VDIVPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VDIVPSrr $xmm0, $xmm1 + $xmm0 = VDIVPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPXORrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPXORDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPXORrr $xmm0, $xmm1 + $xmm0 = VPXORDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPXORrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPXORQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPXORrr $xmm0, $xmm1 + $xmm0 = VPXORQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VSUBPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VSUBPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VSUBPDrr $xmm0, $xmm1 + $xmm0 = VSUBPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VSUBPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VSUBPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VSUBPSrr $xmm0, $xmm1 + $xmm0 = VSUBPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VXORPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VXORPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VXORPDrr $xmm0, $xmm1 + $xmm0 = VXORPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VXORPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VXORPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VXORPSrr $xmm0, $xmm1 + $xmm0 = VXORPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMADDUBSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMADDUBSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMADDUBSWrr $xmm0, $xmm1 + $xmm0 = VPMADDUBSWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPMADDWDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPMADDWDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPMADDWDrr $xmm0, $xmm1 + $xmm0 = VPMADDWDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPACKSSDWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPACKSSDWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPACKSSDWrr $xmm0, $xmm1 + $xmm0 = VPACKSSDWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPACKSSWBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPACKSSWBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPACKSSWBrr $xmm0, $xmm1 + $xmm0 = VPACKSSWBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPACKUSDWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPACKUSDWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPACKUSDWrr $xmm0, $xmm1 + $xmm0 = VPACKUSDWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPACKUSWBrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPACKUSWBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPACKUSWBrr $xmm0, $xmm1 + $xmm0 = VPACKUSWBZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKHBWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKHBWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKHBWrr $xmm0, $xmm1 + $xmm0 = VPUNPCKHBWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKHDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKHDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKHDQrr $xmm0, $xmm1 + $xmm0 = 
VPUNPCKHDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKHQDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKHQDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKHQDQrr $xmm0, $xmm1 + $xmm0 = VPUNPCKHQDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKHWDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKHWDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKHWDrr $xmm0, $xmm1 + $xmm0 = VPUNPCKHWDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKLBWrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKLBWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKLBWrr $xmm0, $xmm1 + $xmm0 = VPUNPCKLBWZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKLDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKLDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKLDQrr $xmm0, $xmm1 + $xmm0 = VPUNPCKLDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKLQDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKLQDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKLQDQrr $xmm0, $xmm1 + $xmm0 = VPUNPCKLQDQZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VPUNPCKLWDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VPUNPCKLWDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VPUNPCKLWDrr $xmm0, $xmm1 + $xmm0 = VPUNPCKLWDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VUNPCKHPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VUNPCKHPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VUNPCKHPDrr $xmm0, $xmm1 + $xmm0 = VUNPCKHPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VUNPCKHPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VUNPCKHPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VUNPCKHPSrr $xmm0, $xmm1 + $xmm0 = VUNPCKHPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VUNPCKLPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VUNPCKLPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VUNPCKLPDrr $xmm0, $xmm1 + $xmm0 = VUNPCKLPDZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VUNPCKLPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg + $xmm0 = VUNPCKLPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm0 = VUNPCKLPSrr $xmm0, $xmm1 + $xmm0 = VUNPCKLPSZ128rr $xmm0, $xmm1 + ; CHECK: $xmm0 = VFMADD132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADD132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADD132PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADD132PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADD132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADD132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADD132PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADD132PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADD213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADD213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADD213PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADD213PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADD213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADD213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADD213PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADD213PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADD231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADD231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADD231PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADD231PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADD231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADD231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: 
$xmm0 = VFMADD231PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADD231PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADDSUB132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADDSUB132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADDSUB132PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADDSUB132PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADDSUB132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADDSUB132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADDSUB132PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADDSUB132PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADDSUB213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADDSUB213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADDSUB213PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADDSUB213PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADDSUB213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADDSUB213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADDSUB213PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADDSUB213PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADDSUB231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADDSUB231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADDSUB231PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADDSUB231PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMADDSUB231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMADDSUB231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMADDSUB231PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMADDSUB231PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUB132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUB132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUB132PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUB132PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUB132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUB132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUB132PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUB132PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUB213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUB213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUB213PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUB213PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUB213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUB213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUB213PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUB213PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUB231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUB231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUB231PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUB231PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUB231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUB231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUB231PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUB231PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUBADD132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUBADD132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUBADD132PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUBADD132PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUBADD132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUBADD132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUBADD132PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUBADD132PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: 
$xmm0 = VFMSUBADD213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUBADD213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUBADD213PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUBADD213PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUBADD213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUBADD213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUBADD213PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUBADD213PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUBADD231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUBADD231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUBADD231PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUBADD231PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFMSUBADD231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFMSUBADD231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFMSUBADD231PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFMSUBADD231PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMADD132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMADD132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMADD132PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMADD132PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMADD132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMADD132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMADD132PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMADD132PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMADD213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMADD213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMADD213PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMADD213PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMADD213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMADD213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMADD213PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMADD213PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMADD231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMADD231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMADD231PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMADD231PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMADD231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMADD231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMADD231PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMADD231PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMSUB132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMSUB132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMSUB132PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMSUB132PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMSUB132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMSUB132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMSUB132PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMSUB132PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMSUB213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMSUB213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMSUB213PDr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMSUB213PDZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMSUB213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMSUB213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $xmm0 = VFNMSUB213PSr $xmm0, $xmm1, $xmm2 + $xmm0 = VFNMSUB213PSZ128r $xmm0, $xmm1, $xmm2 + ; CHECK: $xmm0 = VFNMSUB231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg + $xmm0 = VFNMSUB231PDZ128m $xmm0, $xmm0, 
$rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VPSLLDri $xmm0, 7
+ $xmm0 = VPSLLDZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSLLDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLDrr $xmm0, 14
+ $xmm0 = VPSLLDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLQri $xmm0, 7
+ $xmm0 = VPSLLQZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSLLQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLQrr $xmm0, 14
+ $xmm0 = VPSLLQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLVDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLVDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLVDrr $xmm0, 14
+ $xmm0 = VPSLLVDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLVQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLVQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLVQrr $xmm0, 14
+ $xmm0 = VPSLLVQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLWri $xmm0, 7
+ $xmm0 = VPSLLWZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSLLWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLWrr $xmm0, 14
+ $xmm0 = VPSLLWZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRADri $xmm0, 7
+ $xmm0 = VPSRADZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRADrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRADZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRADrr $xmm0, 14
+ $xmm0 = VPSRADZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRAVDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRAVDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRAVDrr $xmm0, 14
+ $xmm0 = VPSRAVDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRAWri $xmm0, 7
+ $xmm0 = VPSRAWZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRAWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRAWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRAWrr $xmm0, 14
+ $xmm0 = VPSRAWZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLDQri $xmm0, 14
+ $xmm0 = VPSRLDQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLDri $xmm0, 7
+ $xmm0 = VPSRLDZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRLDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLDrr $xmm0, 14
+ $xmm0 = VPSRLDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLQri $xmm0, 7
+ $xmm0 = VPSRLQZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRLQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLQrr $xmm0, 14
+ $xmm0 = VPSRLQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLVDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLVDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLVDrr $xmm0, 14
+ $xmm0 = VPSRLVDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLVQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLVQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLVQrr $xmm0, 14
+ $xmm0 = VPSRLVQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLWri $xmm0, 7
+ $xmm0 = VPSRLWZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRLWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLWrr $xmm0, 14
+ $xmm0 = VPSRLWZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPERMILPDmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPERMILPDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPERMILPDri $xmm0, 9
+ $xmm0 = VPERMILPDZ128ri $xmm0, 9
+ ; CHECK: $xmm0 = VPERMILPDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VPERMILPDZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VPERMILPDrr $xmm0, $xmm1
+ $xmm0 = VPERMILPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPERMILPSmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPERMILPSZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPERMILPSri $xmm0, 9
+ $xmm0 = VPERMILPSZ128ri $xmm0, 9
+ ; CHECK: $xmm0 = VPERMILPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VPERMILPSZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VPERMILPSrr $xmm0, $xmm1
+ $xmm0 = VPERMILPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VCVTPH2PSrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPH2PSZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPH2PSrr $xmm0
+ $xmm0 = VCVTPH2PSZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTDQ2PDrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTDQ2PDZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTDQ2PDrr $xmm0
+ $xmm0 = VCVTDQ2PDZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTDQ2PSrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTDQ2PSZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTDQ2PSrr $xmm0
+ $xmm0 = VCVTDQ2PSZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPD2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2DQrr $xmm0
+ $xmm0 = VCVTPD2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPD2PSrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2PSZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2PSrr $xmm0
+ $xmm0 = VCVTPD2PSZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPS2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPS2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPS2DQrr $xmm0
+ $xmm0 = VCVTPS2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPS2PDrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPS2PDZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPS2PDrr $xmm0
+ $xmm0 = VCVTPS2PDZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPD2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTTPD2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPD2DQrr $xmm0
+ $xmm0 = VCVTTPD2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPS2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTTPS2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPS2DQrr $xmm0
+ $xmm0 = VCVTTPS2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VSQRTPDm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTPDZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTPDr $xmm0
+ $xmm0 = VSQRTPDZ128r $xmm0
+ ; CHECK: $xmm0 = VSQRTPSm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTPSZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTPSr $xmm0
+ $xmm0 = VSQRTPSZ128r $xmm0
+ ; CHECK: $xmm0 = VMOVDDUPrm $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVDDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMOVDDUPrr $xmm0
+ $xmm0 = VMOVDDUPZ128rr $xmm0
+ ; CHECK: $xmm0 = VMOVSHDUPrm $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVSHDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMOVSHDUPrr $xmm0
+ $xmm0 = VMOVSHDUPZ128rr $xmm0
+ ; CHECK: $xmm0 = VMOVSLDUPrm $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVSLDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMOVSLDUPrr $xmm0
+ $xmm0 = VMOVSLDUPZ128rr $xmm0
+ ; CHECK: $xmm0 = VPSHUFBrm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPSHUFBZ128rm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFBrr $xmm0, $xmm1
+ $xmm0 = VPSHUFBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSHUFDmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPSHUFDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFDri $xmm0, -24
+ $xmm0 = VPSHUFDZ128ri $xmm0, -24
+ ; CHECK: $xmm0 = VPSHUFHWmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPSHUFHWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFHWri $xmm0, -24
+ $xmm0 = VPSHUFHWZ128ri $xmm0, -24
+ ; CHECK: $xmm0 = VPSHUFLWmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFLWri $xmm0, -24
+ $xmm0 = VPSHUFLWZ128ri $xmm0, -24
+ ; CHECK: $xmm0 = VPSLLDQri $xmm0, $xmm1
+ $xmm0 = VPSLLDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSHUFPDrmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSHUFPDZ128rmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSHUFPDrri $xmm0, $noreg, $noreg
+ $xmm0 = VSHUFPDZ128rri $xmm0, $noreg, $noreg
+ ; CHECK: $xmm0 = VSHUFPSrmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSHUFPSZ128rmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSHUFPSrri $xmm0, $noreg, $noreg
+ $xmm0 = VSHUFPSZ128rri $xmm0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSADBWrm $xmm0, 1, $noreg, $rax, $noreg, $noreg
+ $xmm0 = VPSADBWZ128rm $xmm0, 1, $noreg, $rax, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSADBWrr $xmm0, $xmm1
+ $xmm0 = VPSADBWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VBROADCASTSSrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VBROADCASTSSZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VBROADCASTSSrr $xmm0
+ $xmm0 = VBROADCASTSSZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTBrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTBZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTBrr $xmm0
+ $xmm0 = VPBROADCASTBZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTDrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTDZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTDrr $xmm0
+ $xmm0 = VPBROADCASTDZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTQrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTQZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTQrr $xmm0
+ $xmm0 = VPBROADCASTQZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTWrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTWZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTWrr $xmm0
+ $xmm0 = VPBROADCASTWZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTQrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VBROADCASTI32X2Z128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTQrr $xmm0
+ $xmm0 = VBROADCASTI32X2Z128r $xmm0
+ ; CHECK: $xmm0 = VCVTPS2PHrr $xmm0, 2
+ $xmm0 = VCVTPS2PHZ128rr $xmm0, 2
+ ; CHECK: VCVTPS2PHmr $rdi, $xmm0, 1, $noreg, 0, $noreg, $noreg
+ VCVTPS2PHZ128mr $rdi, $xmm0, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPABSBrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPABSBZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPABSBrr $xmm0
+ $xmm0 = VPABSBZ128rr $xmm0
+ ; CHECK: $xmm0 = VPABSDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPABSDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPABSDrr $xmm0
+ $xmm0 = VPABSDZ128rr $xmm0
+ ; CHECK: $xmm0 = VPABSWrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPABSWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPABSWrr $xmm0
+ $xmm0 = VPABSWZ128rr $xmm0
+ ; CHECK: $xmm0 = VPALIGNRrmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPALIGNRZ128rmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPALIGNRrri $xmm0, $xmm1, 15
+ $xmm0 = VPALIGNRZ128rri $xmm0, $xmm1, 15
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
 ...
 ---
 # CHECK-LABEL: name: evex_scalar_to_vex_test
@@ -1770,554 +1770,554 @@
 body: |
   bb.0:
- ; CHECK: %xmm0 = VADDSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSDrr %xmm0, %xmm1
- %xmm0 = VADDSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSDrr_Int %xmm0, %xmm1
- %xmm0 = VADDSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSSrr %xmm0, %xmm1
- %xmm0 = VADDSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSSrr_Int %xmm0, %xmm1
- %xmm0 = VADDSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSDrr %xmm0, %xmm1
- %xmm0 = VDIVSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSDrr_Int %xmm0, %xmm1
- %xmm0 = VDIVSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSSrr %xmm0, %xmm1
- %xmm0 = VDIVSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSSrr_Int %xmm0, %xmm1
- %xmm0 = VDIVSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
- %xmm0 = VMAXCSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
- %xmm0 = VMAXCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
- %xmm0 = VMAXSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXSDrr_Int %xmm0, %xmm1
- %xmm0 = VMAXSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
- %xmm0 = VMAXSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXSSrr_Int %xmm0, %xmm1
- %xmm0 = VMAXSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
- %xmm0 = VMINCSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
- %xmm0 = VMINCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
- %xmm0 = VMINSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINSDrr_Int %xmm0, %xmm1
- %xmm0 = VMINSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
- %xmm0 = VMINSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINSSrr_Int %xmm0, %xmm1
- %xmm0 = VMINSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSDrr %xmm0, %xmm1
- %xmm0 = VMULSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSDrr_Int %xmm0, %xmm1
- %xmm0 = VMULSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSSrr %xmm0, %xmm1
- %xmm0 = VMULSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSSrr_Int %xmm0, %xmm1
- %xmm0 = VMULSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSDrr %xmm0, %xmm1
- %xmm0 = VSUBSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSDrr_Int %xmm0, %xmm1
- %xmm0 = VSUBSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSSrr %xmm0, %xmm1
- %xmm0 = VSUBSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSSrr_Int %xmm0, %xmm1
- %xmm0 = VSUBSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VFMADD132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: VPEXTRBmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %eax = VPEXTRBrr %xmm0, 1
- %eax = VPEXTRBZrr %xmm0, 1
- ; CHECK: VPEXTRDmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %eax = VPEXTRDrr %xmm0, 1
- %eax = VPEXTRDZrr %xmm0, 1
- ; CHECK: VPEXTRQmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %rax = VPEXTRQrr %xmm0, 1
- %rax = VPEXTRQZrr %xmm0, 1
- ; CHECK: VPEXTRWmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %eax = VPEXTRWrr %xmm0, 1
- %eax = VPEXTRWZrr %xmm0, 1
- ; CHECK: %eax = VPEXTRWrr_REV %xmm0, 1
- %eax = VPEXTRWZrr_REV %xmm0, 1
- ; CHECK: %xmm0 = VPINSRBrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRBZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRBrr %xmm0, %edi, 5
- %xmm0 = VPINSRBZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VPINSRDrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRDZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRDrr %xmm0, %edi, 5
- %xmm0 = VPINSRDZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VPINSRQrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRQZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRQrr %xmm0, %rdi, 5
- %xmm0 = VPINSRQZrr %xmm0, %rdi, 5
- ; CHECK: %xmm0 = VPINSRWrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRWZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRWrr %xmm0, %edi, 5
- %xmm0 = VPINSRWZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VSQRTSDm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSDZm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSDm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSDZm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSDr %xmm0, %noreg
- %xmm0 = VSQRTSDZr %xmm0, %noreg
- ; CHECK: %xmm0 = VSQRTSDr_Int %xmm0, %noreg
- %xmm0 = VSQRTSDZr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VSQRTSSm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSSZm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSSm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSSZm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSSr %xmm0, %noreg
- %xmm0 = VSQRTSSZr %xmm0, %noreg
- ; CHECK: %xmm0 = VSQRTSSr_Int %xmm0, %noreg
- %xmm0 = VSQRTSSZr_Int %xmm0, %noreg
- ; CHECK: %rdi = VCVTSD2SI64rr_Int %xmm0
- %rdi = VCVTSD2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTSD2SIrr_Int %xmm0
- %edi = VCVTSD2SIZrr_Int %xmm0
- ; CHECK: %xmm0 = VCVTSD2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSD2SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSD2SSrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSD2SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSD2SSrr %xmm0, %noreg
- %xmm0 = VCVTSD2SSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSD2SSrr_Int %xmm0, %noreg
- %xmm0 = VCVTSD2SSZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrr %xmm0, %noreg
- %xmm0 = VCVTSI2SDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI2SDZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrr %xmm0, %noreg
- %xmm0 = VCVTSI2SSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI2SSZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrr %xmm0, %noreg
- %xmm0 = VCVTSI642SDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI642SDZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrr %xmm0, %noreg
- %xmm0 = VCVTSI642SSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI642SSZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSS2SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSS2SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrr %xmm0, %noreg
- %xmm0 = VCVTSS2SDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrr_Int %xmm0, %noreg
- %xmm0 = VCVTSS2SDZrr_Int %xmm0, %noreg
- ; CHECK: %rdi = VCVTSS2SI64rm_Int %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTSS2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTSS2SI64rr_Int %xmm0
- %rdi = VCVTSS2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTSS2SIrm_Int %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTSS2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTSS2SIrr_Int %xmm0
- %edi = VCVTSS2SIZrr_Int %xmm0
- ; CHECK: %rdi = VCVTTSD2SI64rm %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64rm_Int %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64rr %xmm0
- %rdi = VCVTTSD2SI64Zrr %xmm0
- ; CHECK: %rdi = VCVTTSD2SI64rr_Int %xmm0
- %rdi = VCVTTSD2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTTSD2SIrm %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIrm_Int %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIrr %xmm0
- %edi = VCVTTSD2SIZrr %xmm0
- ; CHECK: %edi = VCVTTSD2SIrr_Int %xmm0
- %edi = VCVTTSD2SIZrr_Int %xmm0
- ; CHECK: %rdi = VCVTTSS2SI64rm %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64rm_Int %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64rr %xmm0
- %rdi = VCVTTSS2SI64Zrr %xmm0
- ; CHECK: %rdi = VCVTTSS2SI64rr_Int %xmm0
- %rdi = VCVTTSS2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTTSS2SIrm %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIrm_Int %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIrr %xmm0
- %edi = VCVTTSS2SIZrr %xmm0
- ; CHECK: %edi = VCVTTSS2SIrr_Int %xmm0
- %edi = VCVTTSS2SIZrr_Int %xmm0
- ; CHECK: %xmm0 = VMOV64toSDrr %rdi
- %xmm0 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm0 = VMOVDI2SSrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVDI2SSrr %eax
- %xmm0 = VMOVDI2SSZrr %eax
- ; CHECK: VMOVSDmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSDZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSDrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSDrr %xmm0, %noreg
- %xmm0 = VMOVSDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VMOVSDrr_REV %xmm0, %noreg
- %xmm0 = VMOVSDZrr_REV %xmm0, %noreg
- ; CHECK: %rax = VMOVSDto64rr %xmm0
- %rax = VMOVSDto64Zrr %xmm0
- ; CHECK: VMOVSDto64mr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSDto64Zmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: VMOVSSmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSSZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSSrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSSrr %xmm0, %noreg
- %xmm0 = VMOVSSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VMOVSSrr_REV %xmm0, %noreg
- %xmm0 = VMOVSSZrr_REV %xmm0, %noreg
- ; CHECK: VMOVSS2DImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSS2DIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %eax = VMOVSS2DIrr %xmm0
- %eax = VMOVSS2DIZrr %xmm0
- ; CHECK: %xmm0 = VMOV64toPQIrr %rdi
- %xmm0 = VMOV64toPQIZrr %rdi
- ; CHECK: %xmm0 = VMOV64toPQIrm %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOV64toSDrr %rdi
- %xmm0 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm0 = VMOVDI2PDIrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVDI2PDIrr %edi
- %xmm0 = VMOVDI2PDIZrr %edi
- ; CHECK: %xmm0 = VMOVLHPSrr %xmm0, %noreg
- %xmm0 = VMOVLHPSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VMOVHLPSrr %xmm0, %noreg
- %xmm0 = VMOVHLPSZrr %xmm0, %noreg
- ; CHECK: VMOVPDI2DImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVPDI2DIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %edi = VMOVPDI2DIrr %xmm0
- %edi = VMOVPDI2DIZrr %xmm0
- ; CHECK: %xmm0 = VMOVPQI2QIrr %xmm0
- %xmm0 = VMOVPQI2QIZrr %xmm0
- ; CHECK: VMOVPQI2QImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVPQI2QIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %rdi = VMOVPQIto64rr %xmm0
- %rdi = VMOVPQIto64Zrr %xmm0
- ; CHECK: VMOVPQIto64mr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVPQIto64Zmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVQI2PQIrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVZPQILo2PQIrr %xmm0
- %xmm0 = VMOVZPQILo2PQIZrr %xmm0
- ; CHECK: VCOMISDrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDrr_Int %xmm0, %xmm1, implicit-def %eflags
- VCOMISDZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSrr_Int %xmm0, %xmm1, implicit-def %eflags
- VCOMISSZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDrr_Int %xmm0, %xmm1, implicit-def %eflags
- VUCOMISDZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSrr_Int %xmm0, %xmm1, implicit-def %eflags
- VUCOMISSZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDrr %xmm0, %xmm1, implicit-def %eflags
- VCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSrr %xmm0, %xmm1, implicit-def %eflags
- VCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDrr %xmm0, %xmm1, implicit-def %eflags
- VUCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSrr %xmm0, %xmm1, implicit-def %eflags
- VUCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VEXTRACTPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0, %noreg
- VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, %noreg
- ; CHECK: %eax = VEXTRACTPSrr %xmm0, %noreg
- %eax = VEXTRACTPSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VINSERTPSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VINSERTPSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VINSERTPSrr %xmm0, %xmm0, %noreg
- %xmm0 = VINSERTPSZrr %xmm0, %xmm0, %noreg
+ ; CHECK: $xmm0 = VADDSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSDrr $xmm0, $xmm1
+ $xmm0 = VADDSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDSDrr_Int $xmm0, $xmm1
+ $xmm0 = VADDSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSSrr $xmm0, $xmm1
+ $xmm0 = VADDSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDSSrr_Int $xmm0, $xmm1
+ $xmm0 = VADDSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSDrr $xmm0, $xmm1
+ $xmm0 = VDIVSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSDrr_Int $xmm0, $xmm1
+ $xmm0 = VDIVSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSSrr $xmm0, $xmm1
+ $xmm0 = VDIVSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSSrr_Int $xmm0, $xmm1
+ $xmm0 = VDIVSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXCSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSDrr $xmm0, $xmm1
+ $xmm0 = VMAXCSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXCSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSSrr $xmm0, $xmm1
+ $xmm0 = VMAXCSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSDrr $xmm0, $xmm1
+ $xmm0 = VMAXSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXSDrr_Int $xmm0, $xmm1
+ $xmm0 = VMAXSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSSrr $xmm0, $xmm1
+ $xmm0 = VMAXSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXSSrr_Int $xmm0, $xmm1
+ $xmm0 = VMAXSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINCSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSDrr $xmm0, $xmm1
+ $xmm0 = VMINCSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINCSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSSrr $xmm0, $xmm1
+ $xmm0 = VMINCSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSDrr $xmm0, $xmm1
+ $xmm0 = VMINSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINSDrr_Int $xmm0, $xmm1
+ $xmm0 = VMINSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSSrr $xmm0, $xmm1
+ $xmm0 = VMINSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINSSrr_Int $xmm0, $xmm1
+ $xmm0 = VMINSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSDrr $xmm0, $xmm1
+ $xmm0 = VMULSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSDrr_Int $xmm0, $xmm1
+ $xmm0 = VMULSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSSrr $xmm0, $xmm1
+ $xmm0 = VMULSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSSrr_Int $xmm0, $xmm1
+ $xmm0 = VMULSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSDrr $xmm0, $xmm1
+ $xmm0 = VSUBSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSDrr_Int $xmm0, $xmm1
+ $xmm0 = VSUBSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSSrr $xmm0, $xmm1
+ $xmm0 = VSUBSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSSrr_Int $xmm0, $xmm1
+ $xmm0 = VSUBSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VFMADD132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: VPEXTRBmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $eax = VPEXTRBrr $xmm0, 1
+ $eax = VPEXTRBZrr $xmm0, 1
+ ; CHECK: VPEXTRDmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $eax = VPEXTRDrr $xmm0, 1
+ $eax = VPEXTRDZrr $xmm0, 1
+ ; CHECK: VPEXTRQmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $rax = VPEXTRQrr $xmm0, 1
+ $rax = VPEXTRQZrr $xmm0, 1
+ ; CHECK: VPEXTRWmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $eax = VPEXTRWrr $xmm0, 1
+ $eax = VPEXTRWZrr $xmm0, 1
+ ; CHECK: $eax = VPEXTRWrr_REV $xmm0, 1
+ $eax = VPEXTRWZrr_REV $xmm0, 1
+ ; CHECK: $xmm0 = VPINSRBrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRBZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRBrr $xmm0, $edi, 5
+ $xmm0 = VPINSRBZrr $xmm0, $edi, 5
+ ; CHECK: $xmm0 = VPINSRDrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRDZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRDrr $xmm0, $edi, 5
+ $xmm0 = VPINSRDZrr $xmm0, $edi, 5
+ ; CHECK: $xmm0 = VPINSRQrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRQZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRQrr $xmm0, $rdi, 5
+ $xmm0 = VPINSRQZrr $xmm0, $rdi, 5
+ ; CHECK: $xmm0 = VPINSRWrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRWZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRWrr $xmm0, $edi, 5
+ $xmm0 = VPINSRWZrr $xmm0, $edi, 5
+ ; CHECK: $xmm0 = VSQRTSDm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSDZm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSDm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSDZm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSDr $xmm0, $noreg
+ $xmm0 = VSQRTSDZr $xmm0, $noreg
+ ; CHECK: $xmm0 = VSQRTSDr_Int $xmm0, $noreg
+ $xmm0 = VSQRTSDZr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VSQRTSSm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSSZm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSSm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSSZm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSSr $xmm0, $noreg
+ $xmm0 = VSQRTSSZr $xmm0, $noreg
+ ; CHECK: $xmm0 = VSQRTSSr_Int $xmm0, $noreg
+ $xmm0 = VSQRTSSZr_Int $xmm0, $noreg
+ ; CHECK: $rdi = VCVTSD2SI64rr_Int $xmm0
+ $rdi = VCVTSD2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTSD2SIrr_Int $xmm0
+ $edi = VCVTSD2SIZrr_Int $xmm0
+ ; CHECK: $xmm0 = VCVTSD2SSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSD2SSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSD2SSrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSD2SSZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSD2SSrr $xmm0, $noreg
+ $xmm0 = VCVTSD2SSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSD2SSrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSD2SSZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SDZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SDZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrr $xmm0, $noreg
+ $xmm0 = VCVTSI2SDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI2SDZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SSZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrr $xmm0, $noreg
+ $xmm0 = VCVTSI2SSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI2SSZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SDZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SDZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrr $xmm0, $noreg
+ $xmm0 = VCVTSI642SDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI642SDZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SSZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrr $xmm0, $noreg
+ $xmm0 = VCVTSI642SSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI642SSZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSS2SDZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSS2SDZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrr $xmm0, $noreg
+ $xmm0 = VCVTSS2SDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSS2SDZrr_Int $xmm0, $noreg
+ ; CHECK: $rdi = VCVTSS2SI64rm_Int $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTSS2SI64Zrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTSS2SI64rr_Int $xmm0
+ $rdi = VCVTSS2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTSS2SIrm_Int $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTSS2SIZrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTSS2SIrr_Int $xmm0
+ $edi = VCVTSS2SIZrr_Int $xmm0
+ ; CHECK: $rdi = VCVTTSD2SI64rm $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSD2SI64Zrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSD2SI64rm_Int $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSD2SI64Zrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSD2SI64rr $xmm0
+ $rdi = VCVTTSD2SI64Zrr $xmm0
+ ; CHECK: $rdi = VCVTTSD2SI64rr_Int $xmm0
+ $rdi = VCVTTSD2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTTSD2SIrm $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSD2SIZrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSD2SIrm_Int $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSD2SIZrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSD2SIrr $xmm0
+ $edi = VCVTTSD2SIZrr $xmm0
+ ; CHECK: $edi = VCVTTSD2SIrr_Int $xmm0
+ $edi = VCVTTSD2SIZrr_Int $xmm0
+ ; CHECK: $rdi = VCVTTSS2SI64rm $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSS2SI64Zrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSS2SI64rm_Int $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSS2SI64Zrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSS2SI64rr $xmm0
+ $rdi = VCVTTSS2SI64Zrr $xmm0
+ ; CHECK: $rdi = VCVTTSS2SI64rr_Int $xmm0
+ $rdi = VCVTTSS2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTTSS2SIrm $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSS2SIZrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSS2SIrm_Int $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSS2SIZrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSS2SIrr $xmm0
+ $edi = VCVTTSS2SIZrr $xmm0
+ ; CHECK: $edi = VCVTTSS2SIrr_Int $xmm0
+ $edi = VCVTTSS2SIZrr_Int $xmm0
+ ; CHECK: $xmm0 = VMOV64toSDrr $rdi
+ $xmm0 = VMOV64toSDZrr $rdi
+ ; CHECK: $xmm0 = VMOVDI2SSrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVDI2SSrr $eax
+ $xmm0 = VMOVDI2SSZrr $eax
+ ; CHECK: VMOVSDmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSDZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSDrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSDrr $xmm0, $noreg
+ $xmm0 = VMOVSDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VMOVSDrr_REV $xmm0, $noreg
+ $xmm0 = VMOVSDZrr_REV $xmm0, $noreg
+ ; CHECK: $rax = VMOVSDto64rr $xmm0
+ $rax = VMOVSDto64Zrr $xmm0
+ ; CHECK: VMOVSDto64mr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSDto64Zmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: VMOVSSmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSSZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSSrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVSSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSSrr $xmm0, $noreg
+ $xmm0 = VMOVSSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VMOVSSrr_REV $xmm0, $noreg
+ $xmm0 = VMOVSSZrr_REV $xmm0, $noreg
+ ; CHECK: VMOVSS2DImr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSS2DIZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $eax = VMOVSS2DIrr $xmm0
+ $eax = VMOVSS2DIZrr $xmm0
+ ; CHECK: $xmm0 = VMOV64toPQIrr $rdi
+ $xmm0 = VMOV64toPQIZrr $rdi
+ ; CHECK: $xmm0 = VMOV64toPQIrm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOV64toSDrr $rdi
+ $xmm0 = VMOV64toSDZrr $rdi
+ ; CHECK: $xmm0 = VMOVDI2PDIrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVDI2PDIrr $edi
+ $xmm0 = VMOVDI2PDIZrr $edi
+ ; CHECK: $xmm0 = VMOVLHPSrr $xmm0, $noreg
+ $xmm0 = VMOVLHPSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VMOVHLPSrr $xmm0, $noreg
+ $xmm0 = VMOVHLPSZrr $xmm0, $noreg
+ ; CHECK: VMOVPDI2DImr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVPDI2DIZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $edi = VMOVPDI2DIrr $xmm0
+ $edi = VMOVPDI2DIZrr $xmm0
+ ; CHECK: $xmm0 = VMOVPQI2QIrr $xmm0
+ $xmm0 = VMOVPQI2QIZrr $xmm0
+ ; CHECK: VMOVPQI2QImr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVPQI2QIZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $rdi = VMOVPQIto64rr $xmm0
+ $rdi = VMOVPQIto64Zrr $xmm0
+ ; CHECK: VMOVPQIto64mr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVPQIto64Zmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVQI2PQIrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVQI2PQIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVZPQILo2PQIrr $xmm0
+ $xmm0 = VMOVZPQILo2PQIZrr $xmm0
+ ; CHECK: VCOMISDrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISDZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISDrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VCOMISDZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISSrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISSZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISSrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VCOMISSZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISDrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISDZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISDrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISDZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISSrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISSZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISSrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISSZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISDrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISDZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISDrr $xmm0, $xmm1, implicit-def $eflags
+ VCOMISDZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISSrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISSZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISSrr $xmm0, $xmm1, implicit-def $eflags
+ VCOMISSZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISDrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISDZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISDrr $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISDZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISSrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISSZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISSrr $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISSZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VEXTRACTPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0, $noreg
+ VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, $noreg
+ ; CHECK: $eax = VEXTRACTPSrr $xmm0, $noreg
+ $eax = VEXTRACTPSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VINSERTPSrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VINSERTPSZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VINSERTPSrr $xmm0, $xmm0, $noreg
+ $xmm0 = VINSERTPSZrr $xmm0, $xmm0, $noreg
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
 ...
 ---
 # CHECK-LABEL: name: evex_z256_to_evex_test
@@ -2326,880 +2326,880 @@
 name: evex_z256_to_evex_test
 body: |
   bb.0:
- ; CHECK: VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVAPDZ256rr %ymm16
- %ymm16 = VMOVAPDZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVAPDZ256rr_REV %ymm16
- %ymm16 = VMOVAPDZ256rr_REV %ymm16
- ; CHECK: VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVAPSZ256rr %ymm16
- %ymm16 = VMOVAPSZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVAPSZ256rr_REV %ymm16
- %ymm16 = VMOVAPSZ256rr_REV %ymm16
- ; CHECK: %ymm16 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDDUPZ256rr %ymm16
- %ymm16 = VMOVDDUPZ256rr %ymm16
- ; CHECK: VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQA32Z256rr %ymm16
- %ymm16 = VMOVDQA32Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQA32Z256rr_REV %ymm16
- %ymm16 = VMOVDQA32Z256rr_REV %ymm16
- ; CHECK: VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQA64Z256rr %ymm16
- %ymm16 = VMOVDQA64Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQA64Z256rr_REV %ymm16
- %ymm16 = VMOVDQA64Z256rr_REV %ymm16
- ; CHECK: VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU16Z256rr %ymm16
- %ymm16 = VMOVDQU16Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU16Z256rr_REV %ymm16
- %ymm16 = VMOVDQU16Z256rr_REV %ymm16
- ; CHECK: VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU32Z256rr %ymm16
- %ymm16 = VMOVDQU32Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU32Z256rr_REV %ymm16
- %ymm16 = VMOVDQU32Z256rr_REV %ymm16
- ; CHECK: VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU64Z256rr %ymm16
- %ymm16 = VMOVDQU64Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU64Z256rr_REV %ymm16
- %ymm16 = VMOVDQU64Z256rr_REV %ymm16
- ; CHECK: VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU8Z256rr %ymm16
- %ymm16 = VMOVDQU8Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU8Z256rr_REV %ymm16
- %ymm16 = VMOVDQU8Z256rr_REV %ymm16
- ; CHECK: %ymm16 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax,
%noreg - %ymm16 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - ; CHECK: VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - ; CHECK: VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - ; CHECK: %ymm16 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMOVSHDUPZ256rr %ymm16 - %ymm16 = VMOVSHDUPZ256rr %ymm16 - ; CHECK: %ymm16 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMOVSLDUPZ256rr %ymm16 - %ymm16 = VMOVSLDUPZ256rr %ymm16 - ; CHECK: VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - ; CHECK: %ymm16 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMOVUPDZ256rr %ymm16 - %ymm16 = VMOVUPDZ256rr %ymm16 - ; CHECK: %ymm16 = VMOVUPDZ256rr_REV %ymm16 - %ymm16 = VMOVUPDZ256rr_REV %ymm16 - ; CHECK: VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16 - ; CHECK: %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPANDDZ256rr %ymm16, %ymm1 - %ymm16 = VPANDDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPANDQZ256rr %ymm16, %ymm1 - %ymm16 = VPANDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPANDNDZ256rr %ymm16, %ymm1 - %ymm16 = VPANDNDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPANDNQZ256rr %ymm16, %ymm1 - %ymm16 = VPANDNQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPAVGBZ256rr %ymm16, %ymm1 - %ymm16 = VPAVGBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPAVGWZ256rr %ymm16, %ymm1 - %ymm16 = VPAVGWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDBZ256rr %ymm16, %ymm1 - %ymm16 = VPADDBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDDZ256rr %ymm16, %ymm1 - %ymm16 = VPADDDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDQZ256rr %ymm16, %ymm1 - %ymm16 = VPADDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDSBZ256rr %ymm16, %ymm1 - %ymm16 = VPADDSBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, %noreg, %rax, 
%noreg - %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDSWZ256rr %ymm16, %ymm1 - %ymm16 = VPADDSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDUSBZ256rr %ymm16, %ymm1 - %ymm16 = VPADDUSBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDUSWZ256rr %ymm16, %ymm1 - %ymm16 = VPADDUSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPADDWZ256rr %ymm16, %ymm1 - %ymm16 = VPADDWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMULPDZ256rr %ymm16, %ymm1 - %ymm16 = VMULPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMULPSZ256rr %ymm16, %ymm1 - %ymm16 = VMULPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VORPDZ256rr %ymm16, %ymm1 - %ymm16 = VORPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VORPSZ256rr %ymm16, %ymm1 - %ymm16 = VORPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMADDUBSWZ256rr %ymm16, %ymm1 - %ymm16 = VPMADDUBSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMADDWDZ256rr %ymm16, %ymm1 - %ymm16 = VPMADDWDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMAXSBZ256rr %ymm16, %ymm1 - %ymm16 = VPMAXSBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMAXSDZ256rr %ymm16, %ymm1 - %ymm16 = VPMAXSDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMAXSWZ256rr %ymm16, %ymm1 - %ymm16 = VPMAXSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMAXUBZ256rr %ymm16, %ymm1 - %ymm16 = VPMAXUBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMAXUDZ256rr %ymm16, %ymm1 - %ymm16 = VPMAXUDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMAXUWZ256rr %ymm16, %ymm1 - %ymm16 = VPMAXUWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg 
- %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMINSBZ256rr %ymm16, %ymm1 - %ymm16 = VPMINSBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMINSDZ256rr %ymm16, %ymm1 - %ymm16 = VPMINSDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMINSWZ256rr %ymm16, %ymm1 - %ymm16 = VPMINSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMINUBZ256rr %ymm16, %ymm1 - %ymm16 = VPMINUBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMINUDZ256rr %ymm16, %ymm1 - %ymm16 = VPMINUDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMINUWZ256rr %ymm16, %ymm1 - %ymm16 = VPMINUWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULDQZ256rr %ymm16, %ymm1 - %ymm16 = VPMULDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULHRSWZ256rr %ymm16, %ymm1 - %ymm16 = VPMULHRSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULHUWZ256rr %ymm16, %ymm1 - %ymm16 = VPMULHUWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULHWZ256rr %ymm16, %ymm1 - %ymm16 = VPMULHWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULLDZ256rr %ymm16, %ymm1 - %ymm16 = VPMULLDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULLWZ256rr %ymm16, %ymm1 - %ymm16 = VPMULLWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMULUDQZ256rr %ymm16, %ymm1 - %ymm16 = VPMULUDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPORDZ256rr %ymm16, %ymm1 - %ymm16 = VPORDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPORQZ256rr %ymm16, %ymm1 - %ymm16 = VPORQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBBZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - 
%ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBDZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBQZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBSBZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBSBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBSWZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBUSBZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBUSBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBUSWZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBUSWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSUBWZ256rr %ymm16, %ymm1 - %ymm16 = VPSUBWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPXORDZ256rr %ymm16, %ymm1 - %ymm16 = VPXORDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPXORQZ256rr %ymm16, %ymm1 - %ymm16 = VPXORQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VADDPDZ256rr %ymm16, %ymm1 - %ymm16 = VADDPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VADDPSZ256rr %ymm16, %ymm1 - %ymm16 = VADDPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VANDNPDZ256rr %ymm16, %ymm1 - %ymm16 = VANDNPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VANDNPSZ256rr %ymm16, %ymm1 - %ymm16 = VANDNPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VANDPDZ256rr %ymm16, %ymm1 - %ymm16 = VANDPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VANDPSZ256rr %ymm16, %ymm1 - %ymm16 = VANDPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VDIVPDZ256rr %ymm16, %ymm1 - %ymm16 = VDIVPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VDIVPSZ256rm %ymm16, 
%rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VDIVPSZ256rr %ymm16, %ymm1 - %ymm16 = VDIVPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMAXCPDZ256rr %ymm16, %ymm1 - %ymm16 = VMAXCPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMAXCPSZ256rr %ymm16, %ymm1 - %ymm16 = VMAXCPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMAXPDZ256rr %ymm16, %ymm1 - %ymm16 = VMAXPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMAXPSZ256rr %ymm16, %ymm1 - %ymm16 = VMAXPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMINCPDZ256rr %ymm16, %ymm1 - %ymm16 = VMINCPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMINCPSZ256rr %ymm16, %ymm1 - %ymm16 = VMINCPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMINPDZ256rr %ymm16, %ymm1 - %ymm16 = VMINPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VMINPSZ256rr %ymm16, %ymm1 - %ymm16 = VMINPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VXORPDZ256rr %ymm16, %ymm1 - %ymm16 = VXORPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VXORPSZ256rr %ymm16, %ymm1 - %ymm16 = VXORPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPACKSSDWZ256rr %ymm16, %ymm1 - %ymm16 = VPACKSSDWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPACKSSWBZ256rr %ymm16, %ymm1 - %ymm16 = VPACKSSWBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPACKUSDWZ256rr %ymm16, %ymm1 - %ymm16 = VPACKUSDWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPACKUSWBZ256rr %ymm16, %ymm1 - %ymm16 = VPACKUSWBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VUNPCKHPDZ256rr %ymm16, %ymm1 - %ymm16 = VUNPCKHPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = 
VUNPCKHPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VUNPCKHPSZ256rr %ymm16, %ymm1 - %ymm16 = VUNPCKHPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VUNPCKLPDZ256rr %ymm16, %ymm1 - %ymm16 = VUNPCKLPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VUNPCKLPSZ256rr %ymm16, %ymm1 - %ymm16 = VUNPCKLPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VSUBPDZ256rr %ymm16, %ymm1 - %ymm16 = VSUBPDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VSUBPSZ256rr %ymm16, %ymm1 - %ymm16 = VSUBPSZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKHBWZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKHBWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKHDQZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKHDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKHQDQZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKHQDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKHWDZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKHWDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKLBWZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKLBWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKLDQZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKLDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKLQDQZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKLQDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPUNPCKLWDZ256rr %ymm16, %ymm1 - %ymm16 = VPUNPCKLWDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADD132PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADD132PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADD132PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADD132PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADD213PDZ256m %ymm16, 
%ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADD213PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADD213PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADD213PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADD213PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADD231PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADD231PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADD231PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADD231PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADDSUB132PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADDSUB132PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADDSUB132PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADDSUB132PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADDSUB213PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADDSUB213PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADDSUB213PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADDSUB213PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADDSUB231PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADDSUB231PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMADDSUB231PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMADDSUB231PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUB132PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUB132PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUB132PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUB132PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUB213PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUB213PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUB213PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUB213PSZ256r %ymm16, 
%ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUB231PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUB231PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUB231PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUB231PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUBADD132PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUBADD132PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUBADD132PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUBADD132PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUBADD213PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUBADD213PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUBADD213PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUBADD213PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUBADD231PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUBADD231PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFMSUBADD231PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFMSUBADD231PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMADD132PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMADD132PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMADD132PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMADD132PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMADD213PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMADD213PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMADD213PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMADD213PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMADD231PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMADD231PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = 
VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMADD231PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMADD231PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMSUB132PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMSUB132PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMSUB132PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMSUB132PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMSUB213PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMSUB213PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMSUB213PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMSUB213PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMSUB231PDZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMSUB231PDZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VFNMSUB231PSZ256r %ymm16, %ymm1, %ymm2 - %ymm16 = VFNMSUB231PSZ256r %ymm16, %ymm1, %ymm2 - ; CHECK: %ymm16 = VPSRADZ256ri %ymm16, 7 - %ymm16 = VPSRADZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRADZ256rr %ymm16, %xmm1 - %ymm16 = VPSRADZ256rr %ymm16, %xmm1 - ; CHECK: %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRAVDZ256rr %ymm16, %ymm1 - %ymm16 = VPSRAVDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSRAWZ256ri %ymm16, 7 - %ymm16 = VPSRAWZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRAWZ256rr %ymm16, %xmm1 - %ymm16 = VPSRAWZ256rr %ymm16, %xmm1 - ; CHECK: %ymm16 = VPSRLDQZ256rr %ymm16, %ymm1 - %ymm16 = VPSRLDQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSRLDZ256ri %ymm16, 7 - %ymm16 = VPSRLDZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRLDZ256rr %ymm16, %xmm1 - %ymm16 = VPSRLDZ256rr %ymm16, %xmm1 - ; CHECK: %ymm16 = VPSRLQZ256ri %ymm16, 7 - %ymm16 = VPSRLQZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRLQZ256rr %ymm16, %xmm1 - %ymm16 = VPSRLQZ256rr %ymm16, %xmm1 - ; CHECK: %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRLVDZ256rr %ymm16, %ymm1 - %ymm16 = VPSRLVDZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRLVQZ256rm 
%ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRLVQZ256rr %ymm16, %ymm1 - %ymm16 = VPSRLVQZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSRLWZ256ri %ymm16, 7 - %ymm16 = VPSRLWZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSRLWZ256rr %ymm16, %xmm1 - %ymm16 = VPSRLWZ256rr %ymm16, %xmm1 - ; CHECK: %ymm16 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVSXBDZ256rr %xmm0 - %ymm16 = VPMOVSXBDZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVSXBQZ256rr %xmm0 - %ymm16 = VPMOVSXBQZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVSXBWZ256rr %xmm0 - %ymm16 = VPMOVSXBWZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVSXDQZ256rr %xmm0 - %ymm16 = VPMOVSXDQZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVSXWDZ256rr %xmm0 - %ymm16 = VPMOVSXWDZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVSXWQZ256rr %xmm0 - %ymm16 = VPMOVSXWQZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVZXBDZ256rr %xmm0 - %ymm16 = VPMOVZXBDZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVZXBQZ256rr %xmm0 - %ymm16 = VPMOVZXBQZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVZXBWZ256rr %xmm0 - %ymm16 = VPMOVZXBWZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVZXDQZ256rr %xmm0 - %ymm16 = VPMOVZXDQZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVZXWDZ256rr %xmm0 - %ymm16 = VPMOVZXWDZ256rr %xmm0 - ; CHECK: %ymm16 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPMOVZXWQZ256rr %xmm0 - %ymm16 = VPMOVZXWQZ256rr %xmm0 - ; CHECK: %ymm16 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VBROADCASTF32X2Z256r %xmm16 - %ymm16 = VBROADCASTF32X2Z256r %xmm16 - ; CHECK: %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VBROADCASTSDZ256r %xmm0 - %ymm16 = VBROADCASTSDZ256r %xmm0 - ; CHECK: %ymm16 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VBROADCASTSSZ256r %xmm0 - %ymm16 = 
VBROADCASTSSZ256r %xmm0 - ; CHECK: %ymm16 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPBROADCASTBZ256r %xmm0 - %ymm16 = VPBROADCASTBZ256r %xmm0 - ; CHECK: %ymm16 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPBROADCASTDZ256r %xmm0 - %ymm16 = VPBROADCASTDZ256r %xmm0 - ; CHECK: %ymm16 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPBROADCASTWZ256r %xmm0 - %ymm16 = VPBROADCASTWZ256r %xmm0 - ; CHECK: %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VBROADCASTI32X2Z256r %xmm16 - %ymm16 = VBROADCASTI32X2Z256r %xmm16 - ; CHECK: %ymm16 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPBROADCASTQZ256r %xmm0 - %ymm16 = VPBROADCASTQZ256r %xmm0 - ; CHECK: %ymm16 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPABSBZ256rr %ymm16 - %ymm16 = VPABSBZ256rr %ymm16 - ; CHECK: %ymm16 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPABSDZ256rr %ymm16 - %ymm16 = VPABSDZ256rr %ymm16 - ; CHECK: %ymm16 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPABSWZ256rr %ymm16 - %ymm16 = VPABSWZ256rr %ymm16 - ; CHECK: %ymm16 = VPSADBWZ256rm %ymm16, 1, %noreg, %rax, %noreg, %noreg - %ymm16 = VPSADBWZ256rm %ymm16, 1, %noreg, %rax, %noreg, %noreg - ; CHECK: %ymm16 = VPSADBWZ256rr %ymm16, %ymm1 - %ymm16 = VPSADBWZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VPERMDZ256rr %ymm1, %ymm16 - %ymm16 = VPERMDZ256rr %ymm1, %ymm16 - ; CHECK: %ymm16 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm16 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPERMILPDZ256ri %ymm16, 7 - %ymm16 = VPERMILPDZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VPERMILPDZ256rr %ymm1, %ymm16 - %ymm16 = VPERMILPDZ256rr %ymm1, %ymm16 - ; CHECK: %ymm16 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm16 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPERMILPSZ256ri %ymm16, 7 - %ymm16 = VPERMILPSZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VPERMILPSZ256rr %ymm1, %ymm16 - %ymm16 = VPERMILPSZ256rr %ymm1, %ymm16 - ; CHECK: %ymm16 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm16 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPERMPDZ256ri %ymm16, 7 - %ymm16 = VPERMPDZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VPERMPSZ256rr %ymm1, %ymm16 - %ymm16 = VPERMPSZ256rr %ymm1, %ymm16 - ; CHECK: %ymm16 = VPERMQZ256mi %rdi, 1, 
%noreg, 0, %noreg, %noreg - %ymm16 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPERMQZ256ri %ymm16, 7 - %ymm16 = VPERMQZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSLLDQZ256rr %ymm16, 14 - %ymm16 = VPSLLDQZ256rr %ymm16, 14 - ; CHECK: %ymm16 = VPSLLDZ256ri %ymm16, 7 - %ymm16 = VPSLLDZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSLLDZ256rr %ymm16, 14 - %ymm16 = VPSLLDZ256rr %ymm16, 14 - ; CHECK: %ymm16 = VPSLLQZ256ri %ymm16, 7 - %ymm16 = VPSLLQZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSLLQZ256rr %ymm16, 14 - %ymm16 = VPSLLQZ256rr %ymm16, 14 - ; CHECK: %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSLLVDZ256rr %ymm16, 14 - %ymm16 = VPSLLVDZ256rr %ymm16, 14 - ; CHECK: %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSLLVQZ256rr %ymm16, 14 - %ymm16 = VPSLLVQZ256rr %ymm16, 14 - ; CHECK: %ymm16 = VPSLLWZ256ri %ymm16, 7 - %ymm16 = VPSLLWZ256ri %ymm16, 7 - ; CHECK: %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %ymm16 = VPSLLWZ256rr %ymm16, 14 - %ymm16 = VPSLLWZ256rr %ymm16, 14 - ; CHECK: %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, %noreg, 0 - %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %ymm16 = VCVTDQ2PDZ256rr %xmm0 - %ymm16 = VCVTDQ2PDZ256rr %xmm0 - ; CHECK: %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, %noreg, 0 - %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %ymm16 = VCVTDQ2PSZ256rr %ymm16 - %ymm16 = VCVTDQ2PSZ256rr %ymm16 - ; CHECK: %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPD2DQZ256rr %ymm16 - %xmm0 = VCVTPD2DQZ256rr %ymm16 - ; CHECK: %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, %noreg, 0 - %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTPD2PSZ256rr %ymm16 - %xmm0 = VCVTPD2PSZ256rr %ymm16 - ; CHECK: %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %ymm16 = VCVTPS2DQZ256rr %ymm16 - %ymm16 = VCVTPS2DQZ256rr %ymm16 - ; CHECK: %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, %noreg, 0 - %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %ymm16 = VCVTPS2PDZ256rr %xmm0 - %ymm16 = VCVTPS2PDZ256rr %xmm0 - ; CHECK: VCVTPS2PHZ256mr %rdi, %ymm16, 1, %noreg, 0, %noreg, %noreg - VCVTPS2PHZ256mr %rdi, %ymm16, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %xmm0 = VCVTPS2PHZ256rr %ymm16, %noreg - %xmm0 = VCVTPS2PHZ256rr %ymm16, %noreg - ; CHECK: %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, %noreg, 0 - %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %ymm16 = VCVTPH2PSZ256rr %xmm0 - %ymm16 = VCVTPH2PSZ256rr %xmm0 - ; CHECK: %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %xmm0 = VCVTTPD2DQZ256rr %ymm16 - %xmm0 = VCVTTPD2DQZ256rr %ymm16 - ; CHECK: %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0 - ; CHECK: %ymm16 = VCVTTPS2DQZ256rr %ymm16 - %ymm16 = VCVTTPS2DQZ256rr %ymm16 - ; CHECK: %ymm16 = VSQRTPDZ256m %rdi, %noreg, %noreg, 
%noreg, %noreg - %ymm16 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm16 = VSQRTPDZ256r %ymm16 - %ymm16 = VSQRTPDZ256r %ymm16 - ; CHECK: %ymm16 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg - %ymm16 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm16 = VSQRTPSZ256r %ymm16 - %ymm16 = VSQRTPSZ256r %ymm16 - ; CHECK: %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, %noreg - %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, %noreg - ; CHECK: %ymm16 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg - %ymm16 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg - ; CHECK: %ymm16 = VMOVUPSZ256rr %ymm16 - %ymm16 = VMOVUPSZ256rr %ymm16 - ; CHECK: %ymm16 = VMOVUPSZ256rr_REV %ymm16 - %ymm16 = VMOVUPSZ256rr_REV %ymm16 - ; CHECK: %ymm16 = VPSHUFBZ256rm %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm16 = VPSHUFBZ256rm %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm16 = VPSHUFBZ256rr %ymm16, %ymm1 - %ymm16 = VPSHUFBZ256rr %ymm16, %ymm1 - ; CHECK: %ymm16 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm16 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPSHUFDZ256ri %ymm16, -24 - %ymm16 = VPSHUFDZ256ri %ymm16, -24 - ; CHECK: %ymm16 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm16 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPSHUFHWZ256ri %ymm16, -24 - %ymm16 = VPSHUFHWZ256ri %ymm16, -24 - ; CHECK: %ymm16 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - %ymm16 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg - ; CHECK: %ymm16 = VPSHUFLWZ256ri %ymm16, -24 - %ymm16 = VPSHUFLWZ256ri %ymm16, -24 - ; CHECK: %ymm16 = VSHUFPDZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm16 = VSHUFPDZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm16 = VSHUFPDZ256rri %ymm16, %noreg, %noreg - %ymm16 = VSHUFPDZ256rri %ymm16, %noreg, %noreg - ; CHECK: %ymm16 = VSHUFPSZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - %ymm16 = VSHUFPSZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg - ; CHECK: %ymm16 = VSHUFPSZ256rri %ymm16, %noreg, %noreg - %ymm16 = VSHUFPSZ256rri %ymm16, %noreg, %noreg + ; CHECK: VMOVAPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVAPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVAPDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVAPDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVAPDZ256rr $ymm16 + $ymm16 = VMOVAPDZ256rr $ymm16 + ; CHECK: $ymm16 = VMOVAPDZ256rr_REV $ymm16 + $ymm16 = VMOVAPDZ256rr_REV $ymm16 + ; CHECK: VMOVAPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVAPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVAPSZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVAPSZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVAPSZ256rr $ymm16 + $ymm16 = VMOVAPSZ256rr $ymm16 + ; CHECK: $ymm16 = VMOVAPSZ256rr_REV $ymm16 + $ymm16 = VMOVAPSZ256rr_REV $ymm16 + ; CHECK: $ymm16 = VMOVDDUPZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDDUPZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVDDUPZ256rr $ymm16 + $ymm16 = VMOVDDUPZ256rr $ymm16 + ; CHECK: VMOVDQA32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVDQA32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVDQA32Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDQA32Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = 
VMOVDQA32Z256rr $ymm16 + $ymm16 = VMOVDQA32Z256rr $ymm16 + ; CHECK: $ymm16 = VMOVDQA32Z256rr_REV $ymm16 + $ymm16 = VMOVDQA32Z256rr_REV $ymm16 + ; CHECK: VMOVDQA64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVDQA64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVDQA64Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDQA64Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVDQA64Z256rr $ymm16 + $ymm16 = VMOVDQA64Z256rr $ymm16 + ; CHECK: $ymm16 = VMOVDQA64Z256rr_REV $ymm16 + $ymm16 = VMOVDQA64Z256rr_REV $ymm16 + ; CHECK: VMOVDQU16Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVDQU16Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVDQU16Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDQU16Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVDQU16Z256rr $ymm16 + $ymm16 = VMOVDQU16Z256rr $ymm16 + ; CHECK: $ymm16 = VMOVDQU16Z256rr_REV $ymm16 + $ymm16 = VMOVDQU16Z256rr_REV $ymm16 + ; CHECK: VMOVDQU32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVDQU32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVDQU32Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDQU32Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVDQU32Z256rr $ymm16 + $ymm16 = VMOVDQU32Z256rr $ymm16 + ; CHECK: $ymm16 = VMOVDQU32Z256rr_REV $ymm16 + $ymm16 = VMOVDQU32Z256rr_REV $ymm16 + ; CHECK: VMOVDQU64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVDQU64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVDQU64Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDQU64Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVDQU64Z256rr $ymm16 + $ymm16 = VMOVDQU64Z256rr $ymm16 + ; CHECK: $ymm16 = VMOVDQU64Z256rr_REV $ymm16 + $ymm16 = VMOVDQU64Z256rr_REV $ymm16 + ; CHECK: VMOVDQU8Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVDQU8Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVDQU8Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVDQU8Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVDQU8Z256rr $ymm16 + $ymm16 = VMOVDQU8Z256rr $ymm16 + ; CHECK: $ymm16 = VMOVDQU8Z256rr_REV $ymm16 + $ymm16 = VMOVDQU8Z256rr_REV $ymm16 + ; CHECK: $ymm16 = VMOVNTDQAZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVNTDQAZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: VMOVNTDQZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVNTDQZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: VMOVNTPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVNTPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: VMOVNTPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVNTPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVSHDUPZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVSHDUPZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVSHDUPZ256rr $ymm16 + $ymm16 = VMOVSHDUPZ256rr $ymm16 + ; CHECK: $ymm16 = VMOVSLDUPZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVSLDUPZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVSLDUPZ256rr $ymm16 + $ymm16 = VMOVSLDUPZ256rr $ymm16 + ; CHECK: VMOVUPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVUPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VMOVUPDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMOVUPDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMOVUPDZ256rr $ymm16 + $ymm16 = VMOVUPDZ256rr $ymm16 + ; CHECK: $ymm16 = VMOVUPDZ256rr_REV $ymm16 + $ymm16 = VMOVUPDZ256rr_REV $ymm16 + ; CHECK: VMOVUPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + VMOVUPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16 + ; CHECK: $ymm16 = VPANDDZ256rm $ymm16, $rip, 1, $noreg, $rax, 
$noreg + $ymm16 = VPANDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPANDDZ256rr $ymm16, $ymm1 + $ymm16 = VPANDDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPANDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPANDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPANDQZ256rr $ymm16, $ymm1 + $ymm16 = VPANDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPANDNDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPANDNDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPANDNDZ256rr $ymm16, $ymm1 + $ymm16 = VPANDNDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPANDNQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPANDNQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPANDNQZ256rr $ymm16, $ymm1 + $ymm16 = VPANDNQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPAVGBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPAVGBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPAVGBZ256rr $ymm16, $ymm1 + $ymm16 = VPAVGBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPAVGWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPAVGWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPAVGWZ256rr $ymm16, $ymm1 + $ymm16 = VPAVGWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDBZ256rr $ymm16, $ymm1 + $ymm16 = VPADDBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDDZ256rr $ymm16, $ymm1 + $ymm16 = VPADDDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDQZ256rr $ymm16, $ymm1 + $ymm16 = VPADDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDSBZ256rr $ymm16, $ymm1 + $ymm16 = VPADDSBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDSWZ256rr $ymm16, $ymm1 + $ymm16 = VPADDSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDUSBZ256rr $ymm16, $ymm1 + $ymm16 = VPADDUSBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDUSWZ256rr $ymm16, $ymm1 + $ymm16 = VPADDUSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPADDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPADDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPADDWZ256rr $ymm16, $ymm1 + $ymm16 = VPADDWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMULPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMULPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMULPDZ256rr $ymm16, $ymm1 + $ymm16 = VMULPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMULPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMULPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMULPSZ256rr $ymm16, $ymm1 + $ymm16 = VMULPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VORPDZ256rm 
$ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VORPDZ256rr $ymm16, $ymm1 + $ymm16 = VORPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VORPSZ256rr $ymm16, $ymm1 + $ymm16 = VORPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMADDUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMADDUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMADDUBSWZ256rr $ymm16, $ymm1 + $ymm16 = VPMADDUBSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMADDWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMADDWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMADDWDZ256rr $ymm16, $ymm1 + $ymm16 = VPMADDWDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMAXSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMAXSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMAXSBZ256rr $ymm16, $ymm1 + $ymm16 = VPMAXSBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMAXSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMAXSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMAXSDZ256rr $ymm16, $ymm1 + $ymm16 = VPMAXSDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMAXSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMAXSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMAXSWZ256rr $ymm16, $ymm1 + $ymm16 = VPMAXSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMAXUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMAXUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMAXUBZ256rr $ymm16, $ymm1 + $ymm16 = VPMAXUBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMAXUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMAXUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMAXUDZ256rr $ymm16, $ymm1 + $ymm16 = VPMAXUDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMAXUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMAXUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMAXUWZ256rr $ymm16, $ymm1 + $ymm16 = VPMAXUWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMINSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMINSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMINSBZ256rr $ymm16, $ymm1 + $ymm16 = VPMINSBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMINSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMINSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMINSDZ256rr $ymm16, $ymm1 + $ymm16 = VPMINSDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMINSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMINSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMINSWZ256rr $ymm16, $ymm1 + $ymm16 = VPMINSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMINUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMINUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMINUBZ256rr $ymm16, $ymm1 + $ymm16 = VPMINUBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMINUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMINUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMINUDZ256rr $ymm16, $ymm1 + $ymm16 = VPMINUDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMINUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMINUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMINUWZ256rr $ymm16, $ymm1 + $ymm16 = VPMINUWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = 
VPMULDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULDQZ256rr $ymm16, $ymm1 + $ymm16 = VPMULDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULHRSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMULHRSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULHRSWZ256rr $ymm16, $ymm1 + $ymm16 = VPMULHRSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULHUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMULHUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULHUWZ256rr $ymm16, $ymm1 + $ymm16 = VPMULHUWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULHWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMULHWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULHWZ256rr $ymm16, $ymm1 + $ymm16 = VPMULHWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMULLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULLDZ256rr $ymm16, $ymm1 + $ymm16 = VPMULLDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMULLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULLWZ256rr $ymm16, $ymm1 + $ymm16 = VPMULLWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPMULUDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMULUDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMULUDQZ256rr $ymm16, $ymm1 + $ymm16 = VPMULUDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPORDZ256rr $ymm16, $ymm1 + $ymm16 = VPORDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPORQZ256rr $ymm16, $ymm1 + $ymm16 = VPORQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBBZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBDZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBQZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBSBZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBSBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBSWZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBUSBZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBUSBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSUBUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBUSWZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBUSWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSUBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = 
VPSUBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSUBWZ256rr $ymm16, $ymm1 + $ymm16 = VPSUBWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPXORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPXORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPXORDZ256rr $ymm16, $ymm1 + $ymm16 = VPXORDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPXORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPXORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPXORQZ256rr $ymm16, $ymm1 + $ymm16 = VPXORQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VADDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VADDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VADDPDZ256rr $ymm16, $ymm1 + $ymm16 = VADDPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VADDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VADDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VADDPSZ256rr $ymm16, $ymm1 + $ymm16 = VADDPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VANDNPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VANDNPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VANDNPDZ256rr $ymm16, $ymm1 + $ymm16 = VANDNPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VANDNPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VANDNPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VANDNPSZ256rr $ymm16, $ymm1 + $ymm16 = VANDNPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VANDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VANDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VANDPDZ256rr $ymm16, $ymm1 + $ymm16 = VANDPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VANDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VANDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VANDPSZ256rr $ymm16, $ymm1 + $ymm16 = VANDPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VDIVPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VDIVPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VDIVPDZ256rr $ymm16, $ymm1 + $ymm16 = VDIVPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VDIVPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VDIVPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VDIVPSZ256rr $ymm16, $ymm1 + $ymm16 = VDIVPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMAXCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMAXCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMAXCPDZ256rr $ymm16, $ymm1 + $ymm16 = VMAXCPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMAXCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMAXCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMAXCPSZ256rr $ymm16, $ymm1 + $ymm16 = VMAXCPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMAXPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMAXPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMAXPDZ256rr $ymm16, $ymm1 + $ymm16 = VMAXPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMAXPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMAXPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMAXPSZ256rr $ymm16, $ymm1 + $ymm16 = VMAXPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMINCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMINCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMINCPDZ256rr $ymm16, $ymm1 + $ymm16 = VMINCPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMINCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMINCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, 
$noreg + ; CHECK: $ymm16 = VMINCPSZ256rr $ymm16, $ymm1 + $ymm16 = VMINCPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMINPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMINPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMINPDZ256rr $ymm16, $ymm1 + $ymm16 = VMINPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VMINPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VMINPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VMINPSZ256rr $ymm16, $ymm1 + $ymm16 = VMINPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VXORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VXORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VXORPDZ256rr $ymm16, $ymm1 + $ymm16 = VXORPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VXORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VXORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VXORPSZ256rr $ymm16, $ymm1 + $ymm16 = VXORPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPACKSSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPACKSSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPACKSSDWZ256rr $ymm16, $ymm1 + $ymm16 = VPACKSSDWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPACKSSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPACKSSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPACKSSWBZ256rr $ymm16, $ymm1 + $ymm16 = VPACKSSWBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPACKUSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPACKUSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPACKUSDWZ256rr $ymm16, $ymm1 + $ymm16 = VPACKUSDWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPACKUSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPACKUSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPACKUSWBZ256rr $ymm16, $ymm1 + $ymm16 = VPACKUSWBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VUNPCKHPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VUNPCKHPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VUNPCKHPDZ256rr $ymm16, $ymm1 + $ymm16 = VUNPCKHPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VUNPCKHPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VUNPCKHPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VUNPCKHPSZ256rr $ymm16, $ymm1 + $ymm16 = VUNPCKHPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VUNPCKLPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VUNPCKLPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VUNPCKLPDZ256rr $ymm16, $ymm1 + $ymm16 = VUNPCKLPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VUNPCKLPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VUNPCKLPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VUNPCKLPSZ256rr $ymm16, $ymm1 + $ymm16 = VUNPCKLPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VSUBPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VSUBPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VSUBPDZ256rr $ymm16, $ymm1 + $ymm16 = VSUBPDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VSUBPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VSUBPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VSUBPSZ256rr $ymm16, $ymm1 + $ymm16 = VSUBPSZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKHBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKHBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKHBWZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKHBWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKHDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + 
$ymm16 = VPUNPCKHDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKHDQZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKHDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKHQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKHQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKHQDQZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKHQDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKHWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKHWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKHWDZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKHWDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKLBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKLBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKLBWZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKLBWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKLDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKLDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKLDQZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKLDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKLQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKLQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKLQDQZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKLQDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPUNPCKLWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPUNPCKLWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPUNPCKLWDZ256rr $ymm16, $ymm1 + $ymm16 = VPUNPCKLWDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VFMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADD132PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADD132PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADD132PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADD132PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADD213PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADD213PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADD213PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADD213PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADD231PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADD231PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADD231PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADD231PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADDSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADDSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADDSUB132PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADDSUB132PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADDSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADDSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: 
$ymm16 = VFMADDSUB132PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADDSUB132PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADDSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADDSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADDSUB213PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADDSUB213PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADDSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADDSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADDSUB213PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADDSUB213PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADDSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADDSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADDSUB231PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADDSUB231PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMADDSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMADDSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMADDSUB231PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMADDSUB231PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUB132PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUB132PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUB132PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUB132PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUB213PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUB213PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUB213PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUB213PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUB231PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUB231PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUB231PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUB231PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUBADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUBADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUBADD132PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUBADD132PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUBADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUBADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUBADD132PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUBADD132PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUBADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUBADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUBADD213PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUBADD213PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: 
$ymm16 = VFMSUBADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUBADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUBADD213PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUBADD213PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUBADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUBADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUBADD231PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUBADD231PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFMSUBADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFMSUBADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFMSUBADD231PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFMSUBADD231PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMADD132PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMADD132PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMADD132PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMADD132PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMADD213PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMADD213PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMADD213PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMADD213PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMADD231PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMADD231PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMADD231PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMADD231PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMSUB132PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMSUB132PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMSUB132PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMSUB132PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMSUB213PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMSUB213PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMSUB213PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMSUB213PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, 
$noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMSUB231PDZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMSUB231PDZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VFNMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + $ymm16 = VFNMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VFNMSUB231PSZ256r $ymm16, $ymm1, $ymm2 + $ymm16 = VFNMSUB231PSZ256r $ymm16, $ymm1, $ymm2 + ; CHECK: $ymm16 = VPSRADZ256ri $ymm16, 7 + $ymm16 = VPSRADZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSRADZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRADZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRADZ256rr $ymm16, $xmm1 + $ymm16 = VPSRADZ256rr $ymm16, $xmm1 + ; CHECK: $ymm16 = VPSRAVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRAVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRAVDZ256rr $ymm16, $ymm1 + $ymm16 = VPSRAVDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSRAWZ256ri $ymm16, 7 + $ymm16 = VPSRAWZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSRAWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRAWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRAWZ256rr $ymm16, $xmm1 + $ymm16 = VPSRAWZ256rr $ymm16, $xmm1 + ; CHECK: $ymm16 = VPSRLDQZ256rr $ymm16, $ymm1 + $ymm16 = VPSRLDQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSRLDZ256ri $ymm16, 7 + $ymm16 = VPSRLDZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSRLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRLDZ256rr $ymm16, $xmm1 + $ymm16 = VPSRLDZ256rr $ymm16, $xmm1 + ; CHECK: $ymm16 = VPSRLQZ256ri $ymm16, 7 + $ymm16 = VPSRLQZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSRLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRLQZ256rr $ymm16, $xmm1 + $ymm16 = VPSRLQZ256rr $ymm16, $xmm1 + ; CHECK: $ymm16 = VPSRLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRLVDZ256rr $ymm16, $ymm1 + $ymm16 = VPSRLVDZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSRLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRLVQZ256rr $ymm16, $ymm1 + $ymm16 = VPSRLVQZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSRLWZ256ri $ymm16, 7 + $ymm16 = VPSRLWZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSRLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSRLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSRLWZ256rr $ymm16, $xmm1 + $ymm16 = VPSRLWZ256rr $ymm16, $xmm1 + ; CHECK: $ymm16 = VPMOVSXBDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVSXBDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVSXBDZ256rr $xmm0 + $ymm16 = VPMOVSXBDZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVSXBQZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVSXBQZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVSXBQZ256rr $xmm0 + $ymm16 = VPMOVSXBQZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVSXBWZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVSXBWZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVSXBWZ256rr $xmm0 + $ymm16 = VPMOVSXBWZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVSXDQZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVSXDQZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVSXDQZ256rr $xmm0 + $ymm16 = VPMOVSXDQZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVSXWDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVSXWDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = 
VPMOVSXWDZ256rr $xmm0 + $ymm16 = VPMOVSXWDZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVSXWQZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVSXWQZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVSXWQZ256rr $xmm0 + $ymm16 = VPMOVSXWQZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVZXBDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVZXBDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVZXBDZ256rr $xmm0 + $ymm16 = VPMOVZXBDZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVZXBQZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVZXBQZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVZXBQZ256rr $xmm0 + $ymm16 = VPMOVZXBQZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVZXBWZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVZXBWZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVZXBWZ256rr $xmm0 + $ymm16 = VPMOVZXBWZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVZXDQZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVZXDQZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVZXDQZ256rr $xmm0 + $ymm16 = VPMOVZXDQZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVZXWDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVZXWDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVZXWDZ256rr $xmm0 + $ymm16 = VPMOVZXWDZ256rr $xmm0 + ; CHECK: $ymm16 = VPMOVZXWQZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPMOVZXWQZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPMOVZXWQZ256rr $xmm0 + $ymm16 = VPMOVZXWQZ256rr $xmm0 + ; CHECK: $ymm16 = VBROADCASTF32X2Z256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VBROADCASTF32X2Z256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VBROADCASTF32X2Z256r $xmm16 + $ymm16 = VBROADCASTF32X2Z256r $xmm16 + ; CHECK: $ymm16 = VBROADCASTF32X4Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VBROADCASTF32X4Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VBROADCASTSDZ256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VBROADCASTSDZ256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VBROADCASTSDZ256r $xmm0 + $ymm16 = VBROADCASTSDZ256r $xmm0 + ; CHECK: $ymm16 = VBROADCASTSSZ256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VBROADCASTSSZ256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VBROADCASTSSZ256r $xmm0 + $ymm16 = VBROADCASTSSZ256r $xmm0 + ; CHECK: $ymm16 = VPBROADCASTBZ256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPBROADCASTBZ256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPBROADCASTBZ256r $xmm0 + $ymm16 = VPBROADCASTBZ256r $xmm0 + ; CHECK: $ymm16 = VPBROADCASTDZ256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPBROADCASTDZ256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPBROADCASTDZ256r $xmm0 + $ymm16 = VPBROADCASTDZ256r $xmm0 + ; CHECK: $ymm16 = VPBROADCASTWZ256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPBROADCASTWZ256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPBROADCASTWZ256r $xmm0 + $ymm16 = VPBROADCASTWZ256r $xmm0 + ; CHECK: $ymm16 = VBROADCASTI32X4Z256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VBROADCASTI32X4Z256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VBROADCASTI32X2Z256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VBROADCASTI32X2Z256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VBROADCASTI32X2Z256r $xmm16 + $ymm16 = VBROADCASTI32X2Z256r $xmm16 + ; CHECK: $ymm16 = VPBROADCASTQZ256m $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPBROADCASTQZ256m $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPBROADCASTQZ256r $xmm0 + $ymm16 = VPBROADCASTQZ256r $xmm0 + ; CHECK: $ymm16 = VPABSBZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPABSBZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPABSBZ256rr $ymm16 + $ymm16 = 
VPABSBZ256rr $ymm16 + ; CHECK: $ymm16 = VPABSDZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPABSDZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPABSDZ256rr $ymm16 + $ymm16 = VPABSDZ256rr $ymm16 + ; CHECK: $ymm16 = VPABSWZ256rm $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPABSWZ256rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPABSWZ256rr $ymm16 + $ymm16 = VPABSWZ256rr $ymm16 + ; CHECK: $ymm16 = VPSADBWZ256rm $ymm16, 1, $noreg, $rax, $noreg, $noreg + $ymm16 = VPSADBWZ256rm $ymm16, 1, $noreg, $rax, $noreg, $noreg + ; CHECK: $ymm16 = VPSADBWZ256rr $ymm16, $ymm1 + $ymm16 = VPSADBWZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPERMDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + $ymm16 = VPERMDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VPERMDZ256rr $ymm1, $ymm16 + $ymm16 = VPERMDZ256rr $ymm1, $ymm16 + ; CHECK: $ymm16 = VPERMILPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPERMILPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPERMILPDZ256ri $ymm16, 7 + $ymm16 = VPERMILPDZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPERMILPDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + $ymm16 = VPERMILPDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VPERMILPDZ256rr $ymm1, $ymm16 + $ymm16 = VPERMILPDZ256rr $ymm1, $ymm16 + ; CHECK: $ymm16 = VPERMILPSZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPERMILPSZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPERMILPSZ256ri $ymm16, 7 + $ymm16 = VPERMILPSZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPERMILPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + $ymm16 = VPERMILPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VPERMILPSZ256rr $ymm1, $ymm16 + $ymm16 = VPERMILPSZ256rr $ymm1, $ymm16 + ; CHECK: $ymm16 = VPERMPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPERMPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPERMPDZ256ri $ymm16, 7 + $ymm16 = VPERMPDZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPERMPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + $ymm16 = VPERMPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VPERMPSZ256rr $ymm1, $ymm16 + $ymm16 = VPERMPSZ256rr $ymm1, $ymm16 + ; CHECK: $ymm16 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPERMQZ256ri $ymm16, 7 + $ymm16 = VPERMQZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSLLDQZ256rr $ymm16, 14 + $ymm16 = VPSLLDQZ256rr $ymm16, 14 + ; CHECK: $ymm16 = VPSLLDZ256ri $ymm16, 7 + $ymm16 = VPSLLDZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSLLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSLLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSLLDZ256rr $ymm16, 14 + $ymm16 = VPSLLDZ256rr $ymm16, 14 + ; CHECK: $ymm16 = VPSLLQZ256ri $ymm16, 7 + $ymm16 = VPSLLQZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSLLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSLLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSLLQZ256rr $ymm16, 14 + $ymm16 = VPSLLQZ256rr $ymm16, 14 + ; CHECK: $ymm16 = VPSLLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSLLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSLLVDZ256rr $ymm16, 14 + $ymm16 = VPSLLVDZ256rr $ymm16, 14 + ; CHECK: $ymm16 = VPSLLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSLLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSLLVQZ256rr $ymm16, 14 + $ymm16 = VPSLLVQZ256rr $ymm16, 14 + ; CHECK: $ymm16 = VPSLLWZ256ri $ymm16, 7 + $ymm16 = VPSLLWZ256ri $ymm16, 7 + ; CHECK: $ymm16 = VPSLLWZ256rm $ymm16, 
$rip, 1, $noreg, $rax, $noreg + $ymm16 = VPSLLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $ymm16 = VPSLLWZ256rr $ymm16, 14 + $ymm16 = VPSLLWZ256rr $ymm16, 14 + ; CHECK: $ymm16 = VCVTDQ2PDZ256rm $rdi, $ymm16, 1, $noreg, 0 + $ymm16 = VCVTDQ2PDZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $ymm16 = VCVTDQ2PDZ256rr $xmm0 + $ymm16 = VCVTDQ2PDZ256rr $xmm0 + ; CHECK: $ymm16 = VCVTDQ2PSZ256rm $rdi, $ymm16, 1, $noreg, 0 + $ymm16 = VCVTDQ2PSZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $ymm16 = VCVTDQ2PSZ256rr $ymm16 + $ymm16 = VCVTDQ2PSZ256rr $ymm16 + ; CHECK: $xmm0 = VCVTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + $xmm0 = VCVTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $xmm0 = VCVTPD2DQZ256rr $ymm16 + $xmm0 = VCVTPD2DQZ256rr $ymm16 + ; CHECK: $xmm0 = VCVTPD2PSZ256rm $rdi, $ymm16, 1, $noreg, 0 + $xmm0 = VCVTPD2PSZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $xmm0 = VCVTPD2PSZ256rr $ymm16 + $xmm0 = VCVTPD2PSZ256rr $ymm16 + ; CHECK: $ymm16 = VCVTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + $ymm16 = VCVTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $ymm16 = VCVTPS2DQZ256rr $ymm16 + $ymm16 = VCVTPS2DQZ256rr $ymm16 + ; CHECK: $ymm16 = VCVTPS2PDZ256rm $rdi, $ymm16, 1, $noreg, 0 + $ymm16 = VCVTPS2PDZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $ymm16 = VCVTPS2PDZ256rr $xmm0 + $ymm16 = VCVTPS2PDZ256rr $xmm0 + ; CHECK: VCVTPS2PHZ256mr $rdi, $ymm16, 1, $noreg, 0, $noreg, $noreg + VCVTPS2PHZ256mr $rdi, $ymm16, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm0 = VCVTPS2PHZ256rr $ymm16, $noreg + $xmm0 = VCVTPS2PHZ256rr $ymm16, $noreg + ; CHECK: $ymm16 = VCVTPH2PSZ256rm $rdi, $ymm16, 1, $noreg, 0 + $ymm16 = VCVTPH2PSZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $ymm16 = VCVTPH2PSZ256rr $xmm0 + $ymm16 = VCVTPH2PSZ256rr $xmm0 + ; CHECK: $xmm0 = VCVTTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + $xmm0 = VCVTTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $xmm0 = VCVTTPD2DQZ256rr $ymm16 + $xmm0 = VCVTTPD2DQZ256rr $ymm16 + ; CHECK: $ymm16 = VCVTTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + $ymm16 = VCVTTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0 + ; CHECK: $ymm16 = VCVTTPS2DQZ256rr $ymm16 + $ymm16 = VCVTTPS2DQZ256rr $ymm16 + ; CHECK: $ymm16 = VSQRTPDZ256m $rdi, $noreg, $noreg, $noreg, $noreg + $ymm16 = VSQRTPDZ256m $rdi, $noreg, $noreg, $noreg, $noreg + ; CHECK: $ymm16 = VSQRTPDZ256r $ymm16 + $ymm16 = VSQRTPDZ256r $ymm16 + ; CHECK: $ymm16 = VSQRTPSZ256m $rdi, $noreg, $noreg, $noreg, $noreg + $ymm16 = VSQRTPSZ256m $rdi, $noreg, $noreg, $noreg, $noreg + ; CHECK: $ymm16 = VSQRTPSZ256r $ymm16 + $ymm16 = VSQRTPSZ256r $ymm16 + ; CHECK: $ymm16 = VPALIGNRZ256rmi $ymm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg + $ymm16 = VPALIGNRZ256rmi $ymm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $ymm16 = VPALIGNRZ256rri $ymm16, $ymm1, $noreg + $ymm16 = VPALIGNRZ256rri $ymm16, $ymm1, $noreg + ; CHECK: $ymm16 = VMOVUPSZ256rm $rdi, 1, $noreg, 0, $noreg + $ymm16 = VMOVUPSZ256rm $rdi, 1, $noreg, 0, $noreg + ; CHECK: $ymm16 = VMOVUPSZ256rr $ymm16 + $ymm16 = VMOVUPSZ256rr $ymm16 + ; CHECK: $ymm16 = VMOVUPSZ256rr_REV $ymm16 + $ymm16 = VMOVUPSZ256rr_REV $ymm16 + ; CHECK: $ymm16 = VPSHUFBZ256rm $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg + $ymm16 = VPSHUFBZ256rm $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $ymm16 = VPSHUFBZ256rr $ymm16, $ymm1 + $ymm16 = VPSHUFBZ256rr $ymm16, $ymm1 + ; CHECK: $ymm16 = VPSHUFDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPSHUFDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPSHUFDZ256ri $ymm16, -24 + $ymm16 = VPSHUFDZ256ri $ymm16, -24 + ; CHECK: $ymm16 = 
VPSHUFHWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPSHUFHWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPSHUFHWZ256ri $ymm16, -24 + $ymm16 = VPSHUFHWZ256ri $ymm16, -24 + ; CHECK: $ymm16 = VPSHUFLWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + $ymm16 = VPSHUFLWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $ymm16 = VPSHUFLWZ256ri $ymm16, -24 + $ymm16 = VPSHUFLWZ256ri $ymm16, -24 + ; CHECK: $ymm16 = VSHUFPDZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + $ymm16 = VSHUFPDZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $ymm16 = VSHUFPDZ256rri $ymm16, $noreg, $noreg + $ymm16 = VSHUFPDZ256rri $ymm16, $noreg, $noreg + ; CHECK: $ymm16 = VSHUFPSZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + $ymm16 = VSHUFPSZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $ymm16 = VSHUFPSZ256rri $ymm16, $noreg, $noreg + $ymm16 = VSHUFPSZ256rri $ymm16, $noreg, $noreg - RET 0, %zmm0, %zmm1 + RET 0, $zmm0, $zmm1 ... --- # CHECK-LABEL: name: evex_z128_to_evex_test @@ -3208,876 +3208,876 @@ name: evex_z128_to_evex_test body: | bb.0: - ; CHECK: VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVAPDZ128rr %xmm16 - %xmm16 = VMOVAPDZ128rr %xmm16 - ; CHECK: VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVAPSZ128rr %xmm16 - %xmm16 = VMOVAPSZ128rr %xmm16 - ; CHECK: VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVDQA32Z128rr %xmm16 - %xmm16 = VMOVDQA32Z128rr %xmm16 - ; CHECK: VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVDQA64Z128rr %xmm16 - %xmm16 = VMOVDQA64Z128rr %xmm16 - ; CHECK: VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVDQU16Z128rr %xmm16 - %xmm16 = VMOVDQU16Z128rr %xmm16 - ; CHECK: VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVDQU32Z128rr %xmm16 - %xmm16 = VMOVDQU32Z128rr %xmm16 - ; CHECK: VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVDQU64Z128rr %xmm16 - %xmm16 = VMOVDQU64Z128rr %xmm16 - ; CHECK: VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg - ; 
CHECK: %xmm16 = VMOVDQU8Z128rr %xmm16 - %xmm16 = VMOVDQU8Z128rr %xmm16 - ; CHECK: %xmm16 = VMOVDQU8Z128rr_REV %xmm16 - %xmm16 = VMOVDQU8Z128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVUPDZ128rr %xmm16 - %xmm16 = VMOVUPDZ128rr %xmm16 - ; CHECK: %xmm16 = VMOVUPDZ128rr_REV %xmm16 - %xmm16 = VMOVUPDZ128rr_REV %xmm16 - ; CHECK: VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMOVUPSZ128rr %xmm16 - %xmm16 = VMOVUPSZ128rr %xmm16 - ; CHECK: %xmm16 = VMOVUPSZ128rr_REV %xmm16 - %xmm16 = VMOVUPSZ128rr_REV %xmm16 - ; CHECK: VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVAPDZ128rr_REV %xmm16 - %xmm16 = VMOVAPDZ128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVAPSZ128rr_REV %xmm16 - %xmm16 = VMOVAPSZ128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVDQA32Z128rr_REV %xmm16 - %xmm16 = VMOVDQA32Z128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVDQA64Z128rr_REV %xmm16 - %xmm16 = VMOVDQA64Z128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVDQU16Z128rr_REV %xmm16 - %xmm16 = VMOVDQU16Z128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVDQU32Z128rr_REV %xmm16 - %xmm16 = VMOVDQU32Z128rr_REV %xmm16 - ; CHECK: %xmm16 = VMOVDQU64Z128rr_REV %xmm16 - %xmm16 = VMOVDQU64Z128rr_REV %xmm16 - ; CHECK: %xmm16 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVSXBDZ128rr %xmm16 - %xmm16 = VPMOVSXBDZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVSXBQZ128rr %xmm16 - %xmm16 = VPMOVSXBQZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVSXBWZ128rr %xmm16 - %xmm16 = VPMOVSXBWZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVSXDQZ128rr %xmm16 - %xmm16 = VPMOVSXDQZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVSXWDZ128rr %xmm16 - %xmm16 = VPMOVSXWDZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVSXWQZ128rr %xmm16 - %xmm16 = VPMOVSXWQZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVZXBDZ128rr %xmm16 - %xmm16 = VPMOVZXBDZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVZXBQZ128rr %xmm16 - %xmm16 = 
VPMOVZXBQZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVZXBWZ128rr %xmm16 - %xmm16 = VPMOVZXBWZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVZXDQZ128rr %xmm16 - %xmm16 = VPMOVZXDQZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVZXWDZ128rr %xmm16 - %xmm16 = VPMOVZXWDZ128rr %xmm16 - ; CHECK: %xmm16 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPMOVZXWQZ128rr %xmm16 - %xmm16 = VPMOVZXWQZ128rr %xmm16 - ; CHECK: VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16 - ; CHECK: %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg - ; CHECK: %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMAXCPDZ128rr %xmm16, %xmm1 - %xmm16 = VMAXCPDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMAXCPSZ128rr %xmm16, %xmm1 - %xmm16 = VMAXCPSZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMAXPDZ128rr %xmm16, %xmm1 - %xmm16 = VMAXPDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMAXPSZ128rr %xmm16, %xmm1 - %xmm16 = VMAXPSZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMINCPDZ128rr %xmm16, %xmm1 - %xmm16 = VMINCPDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMINCPSZ128rr %xmm16, %xmm1 - %xmm16 = VMINCPSZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMINPDZ128rr %xmm16, %xmm1 - %xmm16 = VMINPDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMINPSZ128rr %xmm16, %xmm1 - %xmm16 = VMINPSZ128rr %xmm16, %xmm1 - ; CHECK: 
%xmm16 = VMULPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMULPDZ128rr %xmm16, %xmm1 - %xmm16 = VMULPDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VMULPSZ128rr %xmm16, %xmm1 - %xmm16 = VMULPSZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VORPDZ128rr %xmm16, %xmm1 - %xmm16 = VORPDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VORPSZ128rr %xmm16, %xmm1 - %xmm16 = VORPSZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDBZ128rr %xmm16, %xmm1 - %xmm16 = VPADDBZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDDZ128rr %xmm16, %xmm1 - %xmm16 = VPADDDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDQZ128rr %xmm16, %xmm1 - %xmm16 = VPADDQZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDSBZ128rr %xmm16, %xmm1 - %xmm16 = VPADDSBZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDSWZ128rr %xmm16, %xmm1 - %xmm16 = VPADDSWZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDUSBZ128rr %xmm16, %xmm1 - %xmm16 = VPADDUSBZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDUSWZ128rr %xmm16, %xmm1 - %xmm16 = VPADDUSWZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPADDWZ128rr %xmm16, %xmm1 - %xmm16 = VPADDWZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPANDDZ128rr %xmm16, %xmm1 - %xmm16 = VPANDDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPANDQZ128rr %xmm16, %xmm1 - %xmm16 = VPANDQZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPANDNDZ128rr %xmm16, %xmm1 - %xmm16 = VPANDNDZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg - ; CHECK: %xmm16 = VPANDNQZ128rr %xmm16, %xmm1 - %xmm16 = VPANDNQZ128rr %xmm16, %xmm1 - ; CHECK: %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, 
%noreg, %rax, %noreg
- %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPAVGBZ128rr %xmm16, %xmm1
- %xmm16 = VPAVGBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPAVGWZ128rr %xmm16, %xmm1
- %xmm16 = VPAVGWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXSBZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXSDZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXSDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXUBZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXUBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXUDZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXUDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXUWZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINSBZ128rr %xmm16, %xmm1
- %xmm16 = VPMINSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINSDZ128rr %xmm16, %xmm1
- %xmm16 = VPMINSDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMINSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINUBZ128rr %xmm16, %xmm1
- %xmm16 = VPMINUBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINUDZ128rr %xmm16, %xmm1
- %xmm16 = VPMINUDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINUWZ128rr %xmm16, %xmm1
- %xmm16 = VPMINUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULDQZ128rr %xmm16, %xmm1
- %xmm16 = VPMULDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULHRSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULHRSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULHUWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULHUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULHWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULHWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULLDZ128rr %xmm16, %xmm1
- %xmm16 = VPMULLDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULLWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULLWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULUDQZ128rr %xmm16, %xmm1
- %xmm16 = VPMULUDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPORDZ128rr %xmm16, %xmm1
- %xmm16 = VPORDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPORQZ128rr %xmm16, %xmm1
- %xmm16 = VPORQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBBZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBDZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBQZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBSBZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBSWZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBUSBZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBUSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBUSWZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBUSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBWZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDPDZ128rr %xmm16, %xmm1
- %xmm16 = VADDPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDPSZ128rr %xmm16, %xmm1
- %xmm16 = VADDPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDNPDZ128rr %xmm16, %xmm1
- %xmm16 = VANDNPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDNPSZ128rr %xmm16, %xmm1
- %xmm16 = VANDNPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDPDZ128rr %xmm16, %xmm1
- %xmm16 = VANDPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDPSZ128rr %xmm16, %xmm1
- %xmm16 = VANDPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVPDZ128rr %xmm16, %xmm1
- %xmm16 = VDIVPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVPSZ128rr %xmm16, %xmm1
- %xmm16 = VDIVPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPXORDZ128rr %xmm16, %xmm1
- %xmm16 = VPXORDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPXORQZ128rr %xmm16, %xmm1
- %xmm16 = VPXORQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBPDZ128rr %xmm16, %xmm1
- %xmm16 = VSUBPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBPSZ128rr %xmm16, %xmm1
- %xmm16 = VSUBPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VXORPDZ128rr %xmm16, %xmm1
- %xmm16 = VXORPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VXORPSZ128rr %xmm16, %xmm1
- %xmm16 = VXORPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMADDUBSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMADDUBSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMADDWDZ128rr %xmm16, %xmm1
- %xmm16 = VPMADDWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKSSDWZ128rr %xmm16, %xmm1
- %xmm16 = VPACKSSDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKSSWBZ128rr %xmm16, %xmm1
- %xmm16 = VPACKSSWBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKUSDWZ128rr %xmm16, %xmm1
- %xmm16 = VPACKUSDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKUSWBZ128rr %xmm16, %xmm1
- %xmm16 = VPACKUSWBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHBWZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHQDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHQDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHWDZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLBWZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLQDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLQDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLWDZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKHPDZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKHPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKHPSZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKHPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKLPDZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKLPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKLPSZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKLPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VPSLLDZ128ri %xmm16, 7
- %xmm16 = VPSLLDZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLDZ128rr %xmm16, 14
- %xmm16 = VPSLLDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLQZ128ri %xmm16, 7
- %xmm16 = VPSLLQZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLQZ128rr %xmm16, 14
- %xmm16 = VPSLLQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLVDZ128rr %xmm16, 14
- %xmm16 = VPSLLVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLVQZ128rr %xmm16, 14
- %xmm16 = VPSLLVQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLWZ128ri %xmm16, 7
- %xmm16 = VPSLLWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLWZ128rr %xmm16, 14
- %xmm16 = VPSLLWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRADZ128ri %xmm16, 7
- %xmm16 = VPSRADZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRADZ128rr %xmm16, 14
- %xmm16 = VPSRADZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRAVDZ128rr %xmm16, 14
- %xmm16 = VPSRAVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRAWZ128ri %xmm16, 7
- %xmm16 = VPSRAWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRAWZ128rr %xmm16, 14
- %xmm16 = VPSRAWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLDQZ128rr %xmm16, 14
- %xmm16 = VPSRLDQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLDZ128ri %xmm16, 7
- %xmm16 = VPSRLDZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLDZ128rr %xmm16, 14
- %xmm16 = VPSRLDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLQZ128ri %xmm16, 7
- %xmm16 = VPSRLQZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLQZ128rr %xmm16, 14
- %xmm16 = VPSRLQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLVDZ128rr %xmm16, 14
- %xmm16 = VPSRLVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLVQZ128rr %xmm16, 14
- %xmm16 = VPSRLVQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLWZ128ri %xmm16, 7
- %xmm16 = VPSRLWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLWZ128rr %xmm16, 14
- %xmm16 = VPSRLWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPERMILPDZ128ri %xmm16, 9
- %xmm16 = VPERMILPDZ128ri %xmm16, 9
- ; CHECK: %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VPERMILPDZ128rr %xmm16, %xmm1
- %xmm16 = VPERMILPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPERMILPSZ128ri %xmm16, 9
- %xmm16 = VPERMILPSZ128ri %xmm16, 9
- ; CHECK: %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VPERMILPSZ128rr %xmm16, %xmm1
- %xmm16 = VPERMILPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPH2PSZ128rr %xmm16
- %xmm16 = VCVTPH2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTDQ2PDZ128rr %xmm16
- %xmm16 = VCVTDQ2PDZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTDQ2PSZ128rr %xmm16
- %xmm16 = VCVTDQ2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPD2DQZ128rr %xmm16
- %xmm16 = VCVTPD2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPD2PSZ128rr %xmm16
- %xmm16 = VCVTPD2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPS2DQZ128rr %xmm16
- %xmm16 = VCVTPS2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPS2PDZ128rr %xmm16
- %xmm16 = VCVTPS2PDZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTTPD2DQZ128rr %xmm16
- %xmm16 = VCVTTPD2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTTPS2DQZ128rr %xmm16
- %xmm16 = VCVTTPS2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTPDZ128r %xmm16
- %xmm16 = VSQRTPDZ128r %xmm16
- ; CHECK: %xmm16 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTPSZ128r %xmm16
- %xmm16 = VSQRTPSZ128r %xmm16
- ; CHECK: %xmm16 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMOVDDUPZ128rr %xmm16
- %xmm16 = VMOVDDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMOVSHDUPZ128rr %xmm16
- %xmm16 = VMOVSHDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMOVSLDUPZ128rr %xmm16
- %xmm16 = VMOVSLDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VPSHUFBZ128rm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPSHUFBZ128rm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFBZ128rr %xmm16, %xmm1
- %xmm16 = VPSHUFBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFDZ128ri %xmm16, -24
- %xmm16 = VPSHUFDZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFHWZ128ri %xmm16, -24
- %xmm16 = VPSHUFHWZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFLWZ128ri %xmm16, -24
- %xmm16 = VPSHUFLWZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSLLDQZ128rr %xmm16, %xmm1
- %xmm16 = VPSLLDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSHUFPDZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSHUFPDZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSHUFPDZ128rri %xmm16, %noreg, %noreg
- %xmm16 = VSHUFPDZ128rri %xmm16, %noreg, %noreg
- ; CHECK: %xmm16 = VSHUFPSZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSHUFPSZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSHUFPSZ128rri %xmm16, %noreg, %noreg
- %xmm16 = VSHUFPSZ128rri %xmm16, %noreg, %noreg
- ; CHECK: %xmm16 = VPSADBWZ128rm %xmm16, 1, %noreg, %rax, %noreg, %noreg
- %xmm16 = VPSADBWZ128rm %xmm16, 1, %noreg, %rax, %noreg, %noreg
- ; CHECK: %xmm16 = VPSADBWZ128rr %xmm16, %xmm1
- %xmm16 = VPSADBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VBROADCASTSSZ128r %xmm16
- %xmm16 = VBROADCASTSSZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTBZ128r %xmm16
- %xmm16 = VPBROADCASTBZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTDZ128r %xmm16
- %xmm16 = VPBROADCASTDZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTQZ128r %xmm16
- %xmm16 = VPBROADCASTQZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTWZ128r %xmm16
- %xmm16 = VPBROADCASTWZ128r %xmm16
- ; CHECK: %xmm16 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VBROADCASTI32X2Z128r %xmm0
- %xmm16 = VBROADCASTI32X2Z128r %xmm0
- ; CHECK: %xmm16 = VCVTPS2PHZ128rr %xmm16, 2
- %xmm16 = VCVTPS2PHZ128rr %xmm16, 2
- ; CHECK: VCVTPS2PHZ128mr %rdi, %xmm16, 1, %noreg, 0, %noreg, %noreg
- VCVTPS2PHZ128mr %rdi, %xmm16, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPABSBZ128rr %xmm16
- %xmm16 = VPABSBZ128rr %xmm16
- ; CHECK: %xmm16 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPABSDZ128rr %xmm16
- %xmm16 = VPABSDZ128rr %xmm16
- ; CHECK: %xmm16 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPABSWZ128rr %xmm16
- %xmm16 = VPABSWZ128rr %xmm16
- ; CHECK: %xmm16 = VPALIGNRZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPALIGNRZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
- %xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
- ; CHECK: VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, %noreg
- VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, %noreg
- ; CHECK: %eax = VEXTRACTPSZrr %xmm16, %noreg
- %eax = VEXTRACTPSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VINSERTPSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VINSERTPSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VINSERTPSZrr %xmm16, %xmm16, %noreg
- %xmm16 = VINSERTPSZrr %xmm16, %xmm16, %noreg
+ ; CHECK: VMOVAPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVAPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVAPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVAPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVAPDZ128rr $xmm16
+ $xmm16 = VMOVAPDZ128rr $xmm16
+ ; CHECK: VMOVAPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVAPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVAPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVAPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVAPSZ128rr $xmm16
+ $xmm16 = VMOVAPSZ128rr $xmm16
+ ; CHECK: VMOVDQA32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQA32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQA32Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQA32Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQA32Z128rr $xmm16
+ $xmm16 = VMOVDQA32Z128rr $xmm16
+ ; CHECK: VMOVDQA64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQA64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQA64Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQA64Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQA64Z128rr $xmm16
+ $xmm16 = VMOVDQA64Z128rr $xmm16
+ ; CHECK: VMOVDQU16Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU16Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU16Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU16Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU16Z128rr $xmm16
+ $xmm16 = VMOVDQU16Z128rr $xmm16
+ ; CHECK: VMOVDQU32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU32Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU32Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU32Z128rr $xmm16
+ $xmm16 = VMOVDQU32Z128rr $xmm16
+ ; CHECK: VMOVDQU64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU64Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU64Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU64Z128rr $xmm16
+ $xmm16 = VMOVDQU64Z128rr $xmm16
+ ; CHECK: VMOVDQU8Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU8Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU8Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU8Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU8Z128rr $xmm16
+ $xmm16 = VMOVDQU8Z128rr $xmm16
+ ; CHECK: $xmm16 = VMOVDQU8Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU8Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVNTDQAZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVNTDQAZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: VMOVUPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVUPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVUPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVUPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVUPDZ128rr $xmm16
+ $xmm16 = VMOVUPDZ128rr $xmm16
+ ; CHECK: $xmm16 = VMOVUPDZ128rr_REV $xmm16
+ $xmm16 = VMOVUPDZ128rr_REV $xmm16
+ ; CHECK: VMOVUPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVUPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVUPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVUPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVUPSZ128rr $xmm16
+ $xmm16 = VMOVUPSZ128rr $xmm16
+ ; CHECK: $xmm16 = VMOVUPSZ128rr_REV $xmm16
+ $xmm16 = VMOVUPSZ128rr_REV $xmm16
+ ; CHECK: VMOVNTDQZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVNTDQZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: VMOVNTPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVNTPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: VMOVNTPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVNTPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVAPDZ128rr_REV $xmm16
+ $xmm16 = VMOVAPDZ128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVAPSZ128rr_REV $xmm16
+ $xmm16 = VMOVAPSZ128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQA32Z128rr_REV $xmm16
+ $xmm16 = VMOVDQA32Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQA64Z128rr_REV $xmm16
+ $xmm16 = VMOVDQA64Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQU16Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU16Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQU32Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU32Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQU64Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU64Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VPMOVSXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXBDZ128rr $xmm16
+ $xmm16 = VPMOVSXBDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXBQZ128rr $xmm16
+ $xmm16 = VPMOVSXBQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXBWZ128rr $xmm16
+ $xmm16 = VPMOVSXBWZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXDQZ128rr $xmm16
+ $xmm16 = VPMOVSXDQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXWDZ128rr $xmm16
+ $xmm16 = VPMOVSXWDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXWQZ128rr $xmm16
+ $xmm16 = VPMOVSXWQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXBDZ128rr $xmm16
+ $xmm16 = VPMOVZXBDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXBQZ128rr $xmm16
+ $xmm16 = VPMOVZXBQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXBWZ128rr $xmm16
+ $xmm16 = VPMOVZXBWZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXDQZ128rr $xmm16
+ $xmm16 = VPMOVZXDQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXWDZ128rr $xmm16
+ $xmm16 = VPMOVZXWDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXWQZ128rr $xmm16
+ $xmm16 = VPMOVZXWQZ128rr $xmm16
+ ; CHECK: VMOVHPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVHPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVHPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVHPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVHPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVHPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVHPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVHPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVLPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVLPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVLPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVLPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVLPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVLPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVLPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVLPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VMAXCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXCPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXCPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMINCPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMINCPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMINPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMINPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMULPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMULPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VORPDZ128rr $xmm16, $xmm1
+ $xmm16 = VORPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VORPSZ128rr $xmm16, $xmm1
+ $xmm16 = VORPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDBZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDDZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDUSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDUSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDUSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDUSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDWZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDDZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDNDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDNDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDNDZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDNDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDNQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDNQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDNQZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDNQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPAVGBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPAVGBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPAVGBZ128rr $xmm16, $xmm1
+ $xmm16 = VPAVGBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPAVGWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPAVGWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPAVGWZ128rr $xmm16, $xmm1
+ $xmm16 = VPAVGWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXSDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXSDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXUBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXUBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXUDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXUDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXUWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXUWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINSDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINSDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINUBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINUBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINUDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINUDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINUWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINUWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULHRSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULHRSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULHRSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULHRSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULHUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULHUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULHUWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULHUWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULHWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULHWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULHWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULHWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULLDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULLDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULLWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULLWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULUDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULUDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULUDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULUDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPORDZ128rr $xmm16, $xmm1
+ $xmm16 = VPORDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPORQZ128rr $xmm16, $xmm1
+ $xmm16 = VPORQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBDZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBQZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBUSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBUSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBUSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBUSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDPDZ128rr $xmm16, $xmm1
+ $xmm16 = VADDPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDPSZ128rr $xmm16, $xmm1
+ $xmm16 = VADDPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDNPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDNPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDNPDZ128rr $xmm16, $xmm1
+ $xmm16 = VANDNPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDNPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDNPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDNPSZ128rr $xmm16, $xmm1
+ $xmm16 = VANDNPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDPDZ128rr $xmm16, $xmm1
+ $xmm16 = VANDPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDPSZ128rr $xmm16, $xmm1
+ $xmm16 = VANDPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVPDZ128rr $xmm16, $xmm1
+ $xmm16 = VDIVPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVPSZ128rr $xmm16, $xmm1
+ $xmm16 = VDIVPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPXORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPXORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPXORDZ128rr $xmm16, $xmm1
+ $xmm16 = VPXORDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPXORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPXORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPXORQZ128rr $xmm16, $xmm1
+ $xmm16 = VPXORQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBPDZ128rr $xmm16, $xmm1
+ $xmm16 = VSUBPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBPSZ128rr $xmm16, $xmm1
+ $xmm16 = VSUBPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VXORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VXORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VXORPDZ128rr $xmm16, $xmm1
+ $xmm16 = VXORPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VXORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VXORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VXORPSZ128rr $xmm16, $xmm1
+ $xmm16 = VXORPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMADDUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMADDUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMADDUBSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMADDUBSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMADDWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMADDWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMADDWDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMADDWDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKSSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKSSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKSSDWZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKSSDWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKSSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKSSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKSSWBZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKSSWBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKUSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKUSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKUSDWZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKUSDWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKUSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKUSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKUSWBZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKUSWBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHQDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHQDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHWDZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHWDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLQDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLQDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLWDZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLWDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKHPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKHPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKHPDZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKHPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKHPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKHPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKHPSZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKHPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKLPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKLPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKLPDZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKLPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKLPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKLPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKLPSZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKLPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VFMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VPSLLDZ128ri $xmm16, 7
+ $xmm16 = VPSLLDZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSLLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLDZ128rr $xmm16, 14
+ $xmm16 = VPSLLDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLQZ128ri $xmm16, 7
+ $xmm16 = VPSLLQZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSLLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLQZ128rr $xmm16, 14
+ $xmm16 = VPSLLQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLVDZ128rr $xmm16, 14
+ $xmm16 = VPSLLVDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLVQZ128rr $xmm16, 14
+ $xmm16 = VPSLLVQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLWZ128ri $xmm16, 7
+ $xmm16 = VPSLLWZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSLLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLWZ128rr $xmm16, 14
+ $xmm16 = VPSLLWZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRADZ128ri $xmm16, 7
+ $xmm16 = VPSRADZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRADZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRADZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRADZ128rr $xmm16, 14
+ $xmm16 = VPSRADZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRAVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRAVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRAVDZ128rr $xmm16, 14
+ $xmm16 = VPSRAVDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRAWZ128ri $xmm16, 7
+ $xmm16 = VPSRAWZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRAWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRAWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRAWZ128rr $xmm16, 14
+ $xmm16 = VPSRAWZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLDQZ128rr $xmm16, 14
+ $xmm16 = VPSRLDQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLDZ128ri $xmm16, 7
+ $xmm16 = VPSRLDZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLDZ128rr $xmm16, 14
+ $xmm16 = VPSRLDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLQZ128ri $xmm16, 7
+ $xmm16 = VPSRLQZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLQZ128rr
$xmm16, 14 + $xmm16 = VPSRLQZ128rr $xmm16, 14 + ; CHECK: $xmm16 = VPSRLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg + $xmm16 = VPSRLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm16 = VPSRLVDZ128rr $xmm16, 14 + $xmm16 = VPSRLVDZ128rr $xmm16, 14 + ; CHECK: $xmm16 = VPSRLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg + $xmm16 = VPSRLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm16 = VPSRLVQZ128rr $xmm16, 14 + $xmm16 = VPSRLVQZ128rr $xmm16, 14 + ; CHECK: $xmm16 = VPSRLWZ128ri $xmm16, 7 + $xmm16 = VPSRLWZ128ri $xmm16, 7 + ; CHECK: $xmm16 = VPSRLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg + $xmm16 = VPSRLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm16 = VPSRLWZ128rr $xmm16, 14 + $xmm16 = VPSRLWZ128rr $xmm16, 14 + ; CHECK: $xmm16 = VPERMILPDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + $xmm16 = VPERMILPDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm16 = VPERMILPDZ128ri $xmm16, 9 + $xmm16 = VPERMILPDZ128ri $xmm16, 9 + ; CHECK: $xmm16 = VPERMILPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VPERMILPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VPERMILPDZ128rr $xmm16, $xmm1 + $xmm16 = VPERMILPDZ128rr $xmm16, $xmm1 + ; CHECK: $xmm16 = VPERMILPSZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + $xmm16 = VPERMILPSZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm16 = VPERMILPSZ128ri $xmm16, 9 + $xmm16 = VPERMILPSZ128ri $xmm16, 9 + ; CHECK: $xmm16 = VPERMILPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VPERMILPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VPERMILPSZ128rr $xmm16, $xmm1 + $xmm16 = VPERMILPSZ128rr $xmm16, $xmm1 + ; CHECK: $xmm16 = VCVTPH2PSZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTPH2PSZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTPH2PSZ128rr $xmm16 + $xmm16 = VCVTPH2PSZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTDQ2PDZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTDQ2PDZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTDQ2PDZ128rr $xmm16 + $xmm16 = VCVTDQ2PDZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTDQ2PSZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTDQ2PSZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTDQ2PSZ128rr $xmm16 + $xmm16 = VCVTDQ2PSZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTPD2DQZ128rr $xmm16 + $xmm16 = VCVTPD2DQZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTPD2PSZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTPD2PSZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTPD2PSZ128rr $xmm16 + $xmm16 = VCVTPD2PSZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTPS2DQZ128rr $xmm16 + $xmm16 = VCVTPS2DQZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTPS2PDZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTPS2PDZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTPS2PDZ128rr $xmm16 + $xmm16 = VCVTPS2PDZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTTPD2DQZ128rr $xmm16 + $xmm16 = VCVTTPD2DQZ128rr $xmm16 + ; CHECK: $xmm16 = VCVTTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + $xmm16 = VCVTTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $xmm16 = VCVTTPS2DQZ128rr $xmm16 + $xmm16 = VCVTTPS2DQZ128rr $xmm16 + ; CHECK: $xmm16 = VSQRTPDZ128m $rdi, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSQRTPDZ128m $rdi, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = 
VSQRTPDZ128r $xmm16 + $xmm16 = VSQRTPDZ128r $xmm16 + ; CHECK: $xmm16 = VSQRTPSZ128m $rdi, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSQRTPSZ128m $rdi, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSQRTPSZ128r $xmm16 + $xmm16 = VSQRTPSZ128r $xmm16 + ; CHECK: $xmm16 = VMOVDDUPZ128rm $rdi, 1, $noreg, 0, $noreg + $xmm16 = VMOVDDUPZ128rm $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VMOVDDUPZ128rr $xmm16 + $xmm16 = VMOVDDUPZ128rr $xmm16 + ; CHECK: $xmm16 = VMOVSHDUPZ128rm $rdi, 1, $noreg, 0, $noreg + $xmm16 = VMOVSHDUPZ128rm $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VMOVSHDUPZ128rr $xmm16 + $xmm16 = VMOVSHDUPZ128rr $xmm16 + ; CHECK: $xmm16 = VMOVSLDUPZ128rm $rdi, 1, $noreg, 0, $noreg + $xmm16 = VMOVSLDUPZ128rm $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VMOVSLDUPZ128rr $xmm16 + $xmm16 = VMOVSLDUPZ128rr $xmm16 + ; CHECK: $xmm16 = VPSHUFBZ128rm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VPSHUFBZ128rm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VPSHUFBZ128rr $xmm16, $xmm1 + $xmm16 = VPSHUFBZ128rr $xmm16, $xmm1 + ; CHECK: $xmm16 = VPSHUFDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + $xmm16 = VPSHUFDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm16 = VPSHUFDZ128ri $xmm16, -24 + $xmm16 = VPSHUFDZ128ri $xmm16, -24 + ; CHECK: $xmm16 = VPSHUFHWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + $xmm16 = VPSHUFHWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm16 = VPSHUFHWZ128ri $xmm16, -24 + $xmm16 = VPSHUFHWZ128ri $xmm16, -24 + ; CHECK: $xmm16 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + $xmm16 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm16 = VPSHUFLWZ128ri $xmm16, -24 + $xmm16 = VPSHUFLWZ128ri $xmm16, -24 + ; CHECK: $xmm16 = VPSLLDQZ128rr $xmm16, $xmm1 + $xmm16 = VPSLLDQZ128rr $xmm16, $xmm1 + ; CHECK: $xmm16 = VSHUFPDZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSHUFPDZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSHUFPDZ128rri $xmm16, $noreg, $noreg + $xmm16 = VSHUFPDZ128rri $xmm16, $noreg, $noreg + ; CHECK: $xmm16 = VSHUFPSZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSHUFPSZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSHUFPSZ128rri $xmm16, $noreg, $noreg + $xmm16 = VSHUFPSZ128rri $xmm16, $noreg, $noreg + ; CHECK: $xmm16 = VPSADBWZ128rm $xmm16, 1, $noreg, $rax, $noreg, $noreg + $xmm16 = VPSADBWZ128rm $xmm16, 1, $noreg, $rax, $noreg, $noreg + ; CHECK: $xmm16 = VPSADBWZ128rr $xmm16, $xmm1 + $xmm16 = VPSADBWZ128rr $xmm16, $xmm1 + ; CHECK: $xmm16 = VBROADCASTSSZ128m $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VBROADCASTSSZ128m $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VBROADCASTSSZ128r $xmm16 + $xmm16 = VBROADCASTSSZ128r $xmm16 + ; CHECK: $xmm16 = VPBROADCASTBZ128m $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VPBROADCASTBZ128m $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VPBROADCASTBZ128r $xmm16 + $xmm16 = VPBROADCASTBZ128r $xmm16 + ; CHECK: $xmm16 = VPBROADCASTDZ128m $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VPBROADCASTDZ128m $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VPBROADCASTDZ128r $xmm16 + $xmm16 = VPBROADCASTDZ128r $xmm16 + ; CHECK: $xmm16 = VPBROADCASTQZ128m $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VPBROADCASTQZ128m $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VPBROADCASTQZ128r $xmm16 + $xmm16 = VPBROADCASTQZ128r $xmm16 + ; CHECK: $xmm16 = VPBROADCASTWZ128m $rip, $noreg, $noreg, $noreg, $noreg + 
$xmm16 = VPBROADCASTWZ128m $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VPBROADCASTWZ128r $xmm16 + $xmm16 = VPBROADCASTWZ128r $xmm16 + ; CHECK: $xmm16 = VBROADCASTI32X2Z128m $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VBROADCASTI32X2Z128m $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VBROADCASTI32X2Z128r $xmm0 + $xmm16 = VBROADCASTI32X2Z128r $xmm0 + ; CHECK: $xmm16 = VCVTPS2PHZ128rr $xmm16, 2 + $xmm16 = VCVTPS2PHZ128rr $xmm16, 2 + ; CHECK: VCVTPS2PHZ128mr $rdi, $xmm16, 1, $noreg, 0, $noreg, $noreg + VCVTPS2PHZ128mr $rdi, $xmm16, 1, $noreg, 0, $noreg, $noreg + ; CHECK: $xmm16 = VPABSBZ128rm $rip, 1, $noreg, $rax, $noreg + $xmm16 = VPABSBZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm16 = VPABSBZ128rr $xmm16 + $xmm16 = VPABSBZ128rr $xmm16 + ; CHECK: $xmm16 = VPABSDZ128rm $rip, 1, $noreg, $rax, $noreg + $xmm16 = VPABSDZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm16 = VPABSDZ128rr $xmm16 + $xmm16 = VPABSDZ128rr $xmm16 + ; CHECK: $xmm16 = VPABSWZ128rm $rip, 1, $noreg, $rax, $noreg + $xmm16 = VPABSWZ128rm $rip, 1, $noreg, $rax, $noreg + ; CHECK: $xmm16 = VPABSWZ128rr $xmm16 + $xmm16 = VPABSWZ128rr $xmm16 + ; CHECK: $xmm16 = VPALIGNRZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VPALIGNRZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VPALIGNRZ128rri $xmm16, $xmm1, 15 + $xmm16 = VPALIGNRZ128rri $xmm16, $xmm1, 15 + ; CHECK: VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, $noreg + VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, $noreg + ; CHECK: $eax = VEXTRACTPSZrr $xmm16, $noreg + $eax = VEXTRACTPSZrr $xmm16, $noreg + ; CHECK: $xmm16 = VINSERTPSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VINSERTPSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VINSERTPSZrr $xmm16, $xmm16, $noreg + $xmm16 = VINSERTPSZrr $xmm16, $xmm16, $noreg - RET 0, %zmm0, %zmm1 + RET 0, $zmm0, $zmm1 ... 
---
# CHECK-LABEL: name: evex_scalar_to_evex_test
@@ -4086,548 +4086,548 @@ name: evex_scalar_to_evex_test
body: |
  bb.0:
- ; CHECK: %xmm16 = VADDSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSDZrr %xmm16, %xmm1
- %xmm16 = VADDSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSDZrr_Int %xmm16, %xmm1
- %xmm16 = VADDSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSSZrr %xmm16, %xmm1
- %xmm16 = VADDSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSSZrr_Int %xmm16, %xmm1
- %xmm16 = VADDSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSDZrr %xmm16, %xmm1
- %xmm16 = VDIVSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSDZrr_Int %xmm16, %xmm1
- %xmm16 = VDIVSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSSZrr %xmm16, %xmm1
- %xmm16 = VDIVSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSSZrr_Int %xmm16, %xmm1
- %xmm16 = VDIVSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXCSDZrr %xmm16, %xmm1
- %xmm16 = VMAXCSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXCSSZrr %xmm16, %xmm1
- %xmm16 = VMAXCSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSDZrr %xmm16, %xmm1
- %xmm16 = VMAXSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSDZrr_Int %xmm16, %xmm1
- %xmm16 = VMAXSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSSZrr %xmm16, %xmm1
- %xmm16 = VMAXSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSSZrr_Int %xmm16, %xmm1
- %xmm16 = VMAXSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINCSDZrr %xmm16, %xmm1
- %xmm16 = VMINCSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINCSSZrr %xmm16, %xmm1
- %xmm16 = VMINCSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSDZrr %xmm16, %xmm1
- %xmm16 = VMINSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSDZrr_Int %xmm16, %xmm1
- %xmm16 = VMINSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSSZrr %xmm16, %xmm1
- %xmm16 = VMINSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSSZrr_Int %xmm16, %xmm1
- %xmm16 = VMINSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSDZrr %xmm16, %xmm1
- %xmm16 = VMULSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSDZrr_Int %xmm16, %xmm1
- %xmm16 = VMULSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSSZrr %xmm16, %xmm1
- %xmm16 = VMULSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSSZrr_Int %xmm16, %xmm1
- %xmm16 = VMULSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSDZrr %xmm16, %xmm1
- %xmm16 = VSUBSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSDZrr_Int %xmm16, %xmm1
- %xmm16 = VSUBSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSSZrr %xmm16, %xmm1
- %xmm16 = VSUBSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSSZrr_Int %xmm16, %xmm1
- %xmm16 = VSUBSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %eax = VPEXTRBZrr %xmm16, 1
- %eax = VPEXTRBZrr %xmm16, 1
- ; CHECK: VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %eax = VPEXTRDZrr %xmm16, 1
- %eax = VPEXTRDZrr %xmm16, 1
- ; CHECK: VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %rax = VPEXTRQZrr %xmm16, 1
- %rax = VPEXTRQZrr %xmm16, 1
- ; CHECK: VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %eax = VPEXTRWZrr %xmm16, 1
- %eax = VPEXTRWZrr %xmm16, 1
- ; CHECK: %eax = VPEXTRWZrr_REV %xmm16, 1
- %eax = VPEXTRWZrr_REV %xmm16, 1
- ; CHECK: %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRBZrr %xmm16, %edi, 5
- %xmm16 = VPINSRBZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRDZrr %xmm16, %edi, 5
- %xmm16 = VPINSRDZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRQZrr %xmm16, %rdi, 5
- %xmm16 = VPINSRQZrr %xmm16, %rdi, 5
- ; CHECK: %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRWZrr %xmm16, %edi, 5
- %xmm16 = VPINSRWZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VSQRTSDZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSDZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSDZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSDZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSDZr %xmm16, %noreg
- %xmm16 = VSQRTSDZr %xmm16, %noreg
- ; CHECK: %xmm16 = VSQRTSDZr_Int %xmm16, %noreg
- %xmm16 = VSQRTSDZr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VSQRTSSZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSSZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSSZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSSZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSSZr %xmm16, %noreg
- %xmm16 = VSQRTSSZr %xmm16, %noreg
- ; CHECK: %xmm16 = VSQRTSSZr_Int %xmm16, %noreg
- %xmm16 = VSQRTSSZr_Int %xmm16, %noreg
- ; CHECK: %rdi = VCVTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTSD2SI64Zrr_Int %xmm16
- %rdi = VCVTSD2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTSD2SIZrr_Int %xmm16
- %edi = VCVTSD2SIZrr_Int %xmm16
- ; CHECK: %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSD2SSZrr %xmm16, %noreg
- %xmm16 = VCVTSD2SSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSD2SSZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSD2SSZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrr %xmm16, %noreg
- %xmm16 = VCVTSI2SDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI2SDZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrr %xmm16, %noreg
- %xmm16 = VCVTSI2SSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI2SSZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrr %xmm16, %noreg
- %xmm16 = VCVTSI642SDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI642SDZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrr %xmm16, %noreg
- %xmm16 = VCVTSI642SSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI642SSZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrr %xmm16, %noreg
- %xmm16 = VCVTSS2SDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSS2SDZrr_Int %xmm16, %noreg
- ; CHECK: %rdi = VCVTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTSS2SI64Zrr_Int %xmm16
- %rdi = VCVTSS2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTSS2SIZrr_Int %xmm16
- %edi = VCVTSS2SIZrr_Int %xmm16
- ; CHECK: %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64Zrr %xmm16
- %rdi = VCVTTSD2SI64Zrr %xmm16
- ; CHECK: %rdi = VCVTTSD2SI64Zrr_Int %xmm16
- %rdi = VCVTTSD2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIZrr %xmm16
- %edi = VCVTTSD2SIZrr %xmm16
- ; CHECK: %edi = VCVTTSD2SIZrr_Int %xmm16
- %edi = VCVTTSD2SIZrr_Int %xmm16
- ; CHECK: %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64Zrr %xmm16
- %rdi = VCVTTSS2SI64Zrr %xmm16
- ; CHECK: %rdi = VCVTTSS2SI64Zrr_Int %xmm16
- %rdi = VCVTTSS2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIZrr %xmm16
- %edi = VCVTTSS2SIZrr %xmm16
- ; CHECK: %edi = VCVTTSS2SIZrr_Int %xmm16
- %edi = VCVTTSS2SIZrr_Int %xmm16
- ; CHECK: %xmm16 = VMOV64toSDZrr %rdi
- %xmm16 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm16 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVDI2SSZrr %eax
- %xmm16 = VMOVDI2SSZrr %eax
- ; CHECK: VMOVSDZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSDZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSDZrr %xmm16, %noreg
- %xmm16 = VMOVSDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VMOVSDZrr_REV %xmm16, %noreg
- %xmm16 = VMOVSDZrr_REV %xmm16, %noreg
- ; CHECK: %rax = VMOVSDto64Zrr %xmm16
- %rax = VMOVSDto64Zrr %xmm16
- ; CHECK: VMOVSDto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSDto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: VMOVSSZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSSZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSSZrr %xmm16, %noreg
- %xmm16 = VMOVSSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VMOVSSZrr_REV %xmm16, %noreg
- %xmm16 = VMOVSSZrr_REV %xmm16, %noreg
- ; CHECK: VMOVSS2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSS2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %eax = VMOVSS2DIZrr %xmm16
- %eax = VMOVSS2DIZrr %xmm16
- ; CHECK: %xmm16 = VMOV64toPQIZrr %rdi
- %xmm16 = VMOV64toPQIZrr %rdi
- ; CHECK: %xmm16 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOV64toSDZrr %rdi
- %xmm16 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm16 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVDI2PDIZrr %edi
- %xmm16 = VMOVDI2PDIZrr %edi
- ; CHECK: %xmm16 = VMOVLHPSZrr %xmm16, %noreg
- %xmm16 = VMOVLHPSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VMOVHLPSZrr %xmm16, %noreg
- %xmm16 = VMOVHLPSZrr %xmm16, %noreg
- ; CHECK: VMOVPDI2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVPDI2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %edi = VMOVPDI2DIZrr %xmm16
- %edi = VMOVPDI2DIZrr %xmm16
- ; CHECK: %xmm16 = VMOVPQI2QIZrr %xmm16
- %xmm16 = VMOVPQI2QIZrr %xmm16
- ; CHECK: VMOVPQI2QIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVPQI2QIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %rdi = VMOVPQIto64Zrr %xmm16
- %rdi = VMOVPQIto64Zrr %xmm16
- ; CHECK: VMOVPQIto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVPQIto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVZPQILo2PQIZrr %xmm16
- %xmm16 = VMOVZPQILo2PQIZrr %xmm16
- ; CHECK: VCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VUCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VUCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
+ ; CHECK: $xmm16 = VADDSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSDZrr $xmm16, $xmm1
+ $xmm16 = VADDSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VADDSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSSZrr $xmm16, $xmm1
+ $xmm16 = VADDSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VADDSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSDZrr $xmm16, $xmm1
+ $xmm16 = VDIVSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VDIVSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSSZrr $xmm16, $xmm1
+ $xmm16 = VDIVSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VDIVSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCSDZrr $xmm16, $xmm1
+ $xmm16 = VMAXCSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCSSZrr $xmm16, $xmm1
+ $xmm16 = VMAXCSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSDZrr $xmm16, $xmm1
+ $xmm16 = VMAXSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VMAXSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSSZrr $xmm16, $xmm1
+ $xmm16 = VMAXSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VMAXSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCSDZrr $xmm16, $xmm1
+ $xmm16 = VMINCSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCSSZrr $xmm16, $xmm1
+ $xmm16 = VMINCSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSDZrr $xmm16, $xmm1
+ $xmm16 = VMINSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VMINSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSSZrr $xmm16, $xmm1
+ $xmm16 = VMINSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VMINSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSDZrr $xmm16, $xmm1
+ $xmm16 = VMULSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VMULSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSSZrr $xmm16, $xmm1
+ $xmm16 = VMULSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VMULSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSDZrr $xmm16, $xmm1
+ $xmm16 = VSUBSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VSUBSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSSZrr $xmm16, $xmm1
+ $xmm16 = VSUBSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VSUBSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VFMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ ; CHECK: $eax = VPEXTRBZrr $xmm16, 1
+ $eax = VPEXTRBZrr $xmm16, 1
+ ; CHECK: VPEXTRDZmr $rdi, 1,
$noreg, 0, $noreg, $xmm16, 3 + VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $eax = VPEXTRDZrr $xmm16, 1 + $eax = VPEXTRDZrr $xmm16, 1 + ; CHECK: VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $rax = VPEXTRQZrr $xmm16, 1 + $rax = VPEXTRQZrr $xmm16, 1 + ; CHECK: VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $eax = VPEXTRWZrr $xmm16, 1 + $eax = VPEXTRWZrr $xmm16, 1 + ; CHECK: $eax = VPEXTRWZrr_REV $xmm16, 1 + $eax = VPEXTRWZrr_REV $xmm16, 1 + ; CHECK: $xmm16 = VPINSRBZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRBZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRBZrr $xmm16, $edi, 5 + $xmm16 = VPINSRBZrr $xmm16, $edi, 5 + ; CHECK: $xmm16 = VPINSRDZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRDZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRDZrr $xmm16, $edi, 5 + $xmm16 = VPINSRDZrr $xmm16, $edi, 5 + ; CHECK: $xmm16 = VPINSRQZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRQZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRQZrr $xmm16, $rdi, 5 + $xmm16 = VPINSRQZrr $xmm16, $rdi, 5 + ; CHECK: $xmm16 = VPINSRWZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRWZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRWZrr $xmm16, $edi, 5 + $xmm16 = VPINSRWZrr $xmm16, $edi, 5 + ; CHECK: $xmm16 = VSQRTSDZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSQRTSDZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSQRTSDZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSQRTSDZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSQRTSDZr $xmm16, $noreg + $xmm16 = VSQRTSDZr $xmm16, $noreg + ; CHECK: $xmm16 = VSQRTSDZr_Int $xmm16, $noreg + $xmm16 = VSQRTSDZr_Int $xmm16, $noreg + ; CHECK: $xmm16 = VSQRTSSZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSQRTSSZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSQRTSSZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + $xmm16 = VSQRTSSZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VSQRTSSZr $xmm16, $noreg + $xmm16 = VSQRTSSZr $xmm16, $noreg + ; CHECK: $xmm16 = VSQRTSSZr_Int $xmm16, $noreg + $xmm16 = VSQRTSSZr_Int $xmm16, $noreg + ; CHECK: $rdi = VCVTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + $rdi = VCVTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $rdi = VCVTSD2SI64Zrr_Int $xmm16 + $rdi = VCVTSD2SI64Zrr_Int $xmm16 + ; CHECK: $edi = VCVTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + $edi = VCVTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $edi = VCVTSD2SIZrr_Int $xmm16 + $edi = VCVTSD2SIZrr_Int $xmm16 + ; CHECK: $xmm16 = VCVTSD2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSD2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSD2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSD2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSD2SSZrr $xmm16, $noreg + $xmm16 = VCVTSD2SSZrr $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSD2SSZrr_Int $xmm16, $noreg + $xmm16 = VCVTSD2SSZrr_Int $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI2SDZrr $xmm16, $noreg + $xmm16 = VCVTSI2SDZrr $xmm16, 
$noreg + ; CHECK: $xmm16 = VCVTSI2SDZrr_Int $xmm16, $noreg + $xmm16 = VCVTSI2SDZrr_Int $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI2SSZrr $xmm16, $noreg + $xmm16 = VCVTSI2SSZrr $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI2SSZrr_Int $xmm16, $noreg + $xmm16 = VCVTSI2SSZrr_Int $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI642SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI642SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI642SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI642SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI642SDZrr $xmm16, $noreg + $xmm16 = VCVTSI642SDZrr $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI642SDZrr_Int $xmm16, $noreg + $xmm16 = VCVTSI642SDZrr_Int $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI642SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI642SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI642SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSI642SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSI642SSZrr $xmm16, $noreg + $xmm16 = VCVTSI642SSZrr $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSI642SSZrr_Int $xmm16, $noreg + $xmm16 = VCVTSI642SSZrr_Int $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSS2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSS2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSS2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + $xmm16 = VCVTSS2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg + ; CHECK: $xmm16 = VCVTSS2SDZrr $xmm16, $noreg + $xmm16 = VCVTSS2SDZrr $xmm16, $noreg + ; CHECK: $xmm16 = VCVTSS2SDZrr_Int $xmm16, $noreg + $xmm16 = VCVTSS2SDZrr_Int $xmm16, $noreg + ; CHECK: $rdi = VCVTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + $rdi = VCVTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $rdi = VCVTSS2SI64Zrr_Int $xmm16 + $rdi = VCVTSS2SI64Zrr_Int $xmm16 + ; CHECK: $edi = VCVTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + $edi = VCVTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $edi = VCVTSS2SIZrr_Int $xmm16 + $edi = VCVTSS2SIZrr_Int $xmm16 + ; CHECK: $rdi = VCVTTSD2SI64Zrm $rdi, $xmm16, 1, $noreg, 0 + $rdi = VCVTTSD2SI64Zrm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $rdi = VCVTTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + $rdi = VCVTTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $rdi = VCVTTSD2SI64Zrr $xmm16 + $rdi = VCVTTSD2SI64Zrr $xmm16 + ; CHECK: $rdi = VCVTTSD2SI64Zrr_Int $xmm16 + $rdi = VCVTTSD2SI64Zrr_Int $xmm16 + ; CHECK: $edi = VCVTTSD2SIZrm $rdi, $xmm16, 1, $noreg, 0 + $edi = VCVTTSD2SIZrm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $edi = VCVTTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + $edi = VCVTTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $edi = VCVTTSD2SIZrr $xmm16 + $edi = VCVTTSD2SIZrr $xmm16 + ; CHECK: $edi = VCVTTSD2SIZrr_Int $xmm16 + $edi = VCVTTSD2SIZrr_Int $xmm16 + ; CHECK: $rdi = VCVTTSS2SI64Zrm $rdi, $xmm16, 1, $noreg, 0 + $rdi = VCVTTSS2SI64Zrm $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $rdi = VCVTTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + $rdi = VCVTTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $rdi = VCVTTSS2SI64Zrr $xmm16 + $rdi = VCVTTSS2SI64Zrr $xmm16 + ; CHECK: $rdi = VCVTTSS2SI64Zrr_Int $xmm16 + $rdi = VCVTTSS2SI64Zrr_Int $xmm16 + ; CHECK: $edi = VCVTTSS2SIZrm $rdi, $xmm16, 1, $noreg, 0 + $edi = VCVTTSS2SIZrm $rdi, $xmm16, 1, $noreg, 0 
+ ; CHECK: $edi = VCVTTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + $edi = VCVTTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0 + ; CHECK: $edi = VCVTTSS2SIZrr $xmm16 + $edi = VCVTTSS2SIZrr $xmm16 + ; CHECK: $edi = VCVTTSS2SIZrr_Int $xmm16 + $edi = VCVTTSS2SIZrr_Int $xmm16 + ; CHECK: $xmm16 = VMOV64toSDZrr $rdi + $xmm16 = VMOV64toSDZrr $rdi + ; CHECK: $xmm16 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVDI2SSZrr $eax + $xmm16 = VMOVDI2SSZrr $eax + ; CHECK: VMOVSDZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVSDZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVSDZrr $xmm16, $noreg + $xmm16 = VMOVSDZrr $xmm16, $noreg + ; CHECK: $xmm16 = VMOVSDZrr_REV $xmm16, $noreg + $xmm16 = VMOVSDZrr_REV $xmm16, $noreg + ; CHECK: $rax = VMOVSDto64Zrr $xmm16 + $rax = VMOVSDto64Zrr $xmm16 + ; CHECK: VMOVSDto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVSDto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: VMOVSSZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVSSZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVSSZrm $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VMOVSSZrm $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVSSZrr $xmm16, $noreg + $xmm16 = VMOVSSZrr $xmm16, $noreg + ; CHECK: $xmm16 = VMOVSSZrr_REV $xmm16, $noreg + $xmm16 = VMOVSSZrr_REV $xmm16, $noreg + ; CHECK: VMOVSS2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVSS2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: $eax = VMOVSS2DIZrr $xmm16 + $eax = VMOVSS2DIZrr $xmm16 + ; CHECK: $xmm16 = VMOV64toPQIZrr $rdi + $xmm16 = VMOV64toPQIZrr $rdi + ; CHECK: $xmm16 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg + $xmm16 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOV64toSDZrr $rdi + $xmm16 = VMOV64toSDZrr $rdi + ; CHECK: $xmm16 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVDI2PDIZrr $edi + $xmm16 = VMOVDI2PDIZrr $edi + ; CHECK: $xmm16 = VMOVLHPSZrr $xmm16, $noreg + $xmm16 = VMOVLHPSZrr $xmm16, $noreg + ; CHECK: $xmm16 = VMOVHLPSZrr $xmm16, $noreg + $xmm16 = VMOVHLPSZrr $xmm16, $noreg + ; CHECK: VMOVPDI2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVPDI2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: $edi = VMOVPDI2DIZrr $xmm16 + $edi = VMOVPDI2DIZrr $xmm16 + ; CHECK: $xmm16 = VMOVPQI2QIZrr $xmm16 + $xmm16 = VMOVPQI2QIZrr $xmm16 + ; CHECK: VMOVPQI2QIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVPQI2QIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: $rdi = VMOVPQIto64Zrr $xmm16 + $rdi = VMOVPQIto64Zrr $xmm16 + ; CHECK: VMOVPQIto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + VMOVPQIto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVQI2PQIZrm $rip, $noreg, $noreg, $noreg, $noreg + $xmm16 = VMOVQI2PQIZrm $rip, $noreg, $noreg, $noreg, $noreg + ; CHECK: $xmm16 = VMOVZPQILo2PQIZrr $xmm16 + $xmm16 = VMOVZPQILo2PQIZrr $xmm16 + ; CHECK: VCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags + VCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VCOMISSZrm_Int 
$xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags + VCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VUCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VUCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VUCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags + VUCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VUCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VUCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VUCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags + VUCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VCOMISDZrr $xmm16, $xmm1, implicit-def $eflags + VCOMISDZrr $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VCOMISSZrr $xmm16, $xmm1, implicit-def $eflags + VCOMISSZrr $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VUCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VUCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VUCOMISDZrr $xmm16, $xmm1, implicit-def $eflags + VUCOMISDZrr $xmm16, $xmm1, implicit-def $eflags + ; CHECK: VUCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + VUCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags + ; CHECK: VUCOMISSZrr $xmm16, $xmm1, implicit-def $eflags + VUCOMISSZrr $xmm16, $xmm1, implicit-def $eflags - RET 0, %zmm0, %zmm1 + RET 0, $zmm0, $zmm1 ... Index: test/CodeGen/X86/expand-vr64-gr64-copy.mir =================================================================== --- test/CodeGen/X86/expand-vr64-gr64-copy.mir +++ test/CodeGen/X86/expand-vr64-gr64-copy.mir @@ -20,17 +20,17 @@ tracksRegLiveness: true body: | bb.0.entry: - liveins: %xmm0 + liveins: $xmm0 - %xmm0 = PSHUFDri killed %xmm0, -24 - MOVPQI2QImr %rsp, 1, %noreg, -8, %noreg, killed %xmm0 - %mm0 = PSWAPDrm %rsp, 1, %noreg, -8, %noreg - ; CHECK: %rax = MMX_MOVD64from64rr %mm0 - ; CHECK-NEXT: %mm0 = MMX_MOVD64to64rr %rax - %rax = COPY %mm0 - %mm0 = COPY %rax - MMX_MOVQ64mr %rsp, 1, %noreg, -16, %noreg, killed %mm0 - %xmm0 = MOVQI2PQIrm %rsp, 1, %noreg, -16, %noreg - %xmm0 = PSHUFDri killed %xmm0, -44 - RETQ %xmm0 + $xmm0 = PSHUFDri killed $xmm0, -24 + MOVPQI2QImr $rsp, 1, $noreg, -8, $noreg, killed $xmm0 + $mm0 = PSWAPDrm $rsp, 1, $noreg, -8, $noreg + ; CHECK: $rax = MMX_MOVD64from64rr $mm0 + ; CHECK-NEXT: $mm0 = MMX_MOVD64to64rr $rax + $rax = COPY $mm0 + $mm0 = COPY $rax + MMX_MOVQ64mr $rsp, 1, $noreg, -16, $noreg, killed $mm0 + $xmm0 = MOVQI2PQIrm $rsp, 1, $noreg, -16, $noreg + $xmm0 = PSHUFDri killed $xmm0, -44 + RETQ $xmm0 ... 
Index: test/CodeGen/X86/extractelement-index.ll
===================================================================
--- test/CodeGen/X86/extractelement-index.ll
+++ test/CodeGen/X86/extractelement-index.ll
@@ -13,19 +13,19 @@
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: extractelement_v16i8_1:
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v16i8_1:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
 ; AVX-NEXT: retq
 %b = extractelement <16 x i8> %a, i256 1
 ret i8 %b
@@ -36,19 +36,19 @@
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: pextrw $5, %xmm0, %eax
 ; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: extractelement_v16i8_11:
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v16i8_11:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
 ; AVX-NEXT: retq
 %b = extractelement <16 x i8> %a, i256 11
 ret i8 %b
@@ -58,19 +58,19 @@
 ; SSE2-LABEL: extractelement_v16i8_14:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: extractelement_v16i8_14:
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v16i8_14:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
 ; AVX-NEXT: retq
 %b = extractelement <16 x i8> %a, i256 14
 ret i8 %b
@@ -81,19 +81,19 @@
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: extractelement_v32i8_1:
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v32i8_1:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
 %b = extractelement <32 x i8> %a, i256 1
@@ -105,20 +105,20 @@
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movd %xmm1, %eax
 ; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: extractelement_v32i8_17:
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: pextrb $1, %xmm1, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: extractelement_v32i8_17:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpextrb $1, %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
@@ -126,7 +126,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 %b = extractelement <32 x i8> %a, i256 17
@@ -137,13 +137,13 @@
 ; SSE-LABEL: extractelement_v8i16_0:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v8i16_0:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT: retq
 %b = extractelement <8 x i16> %a, i256 0
 ret i16 %b
@@ -153,13 +153,13 @@
 ; SSE-LABEL: extractelement_v8i16_3:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pextrw $3, %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v8i16_3:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT: retq
 %b = extractelement <8 x i16> %a, i256 3
 ret i16 %b
@@ -169,13 +169,13 @@
 ; SSE-LABEL: extractelement_v16i16_0:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: extractelement_v16i16_0:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
 %b = extractelement <16 x i16> %a, i256 0
@@ -186,14 +186,14 @@
 ; SSE-LABEL: extractelement_v16i16_13:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pextrw $5, %xmm1, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: extractelement_v16i16_13:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
@@ -201,7 +201,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 %b = extractelement <16 x i16> %a, i256 13
Index: test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -43,7 +43,7 @@
 ; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0
 ; X32-NEXT: vmovd %xmm0, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_cvtss_sh:
@@ -52,7 +52,7 @@
 ; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
 ; X64-NEXT: vmovd %xmm0, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 %ins0 = insertelement <4 x float> undef, float %a0, i32 0
 %ins1 = insertelement <4 x float> %ins0, float 0.000000e+00, i32 1
Index: test/CodeGen/X86/fast-isel-cmp.ll
===================================================================
--- test/CodeGen/X86/fast-isel-cmp.ll
+++ test/CodeGen/X86/fast-isel-cmp.ll
@@ -10,7 +10,7 @@
 ; SDAG-NEXT: cmpeqss %xmm1, %xmm0
 ; SDAG-NEXT: movd %xmm0, %eax
 ; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
 ; SDAG-NEXT: retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oeq:
@@ -354,7 +354,7 @@
 ; SDAG-NEXT: cmpneqss %xmm1, %xmm0
 ; SDAG-NEXT: movd %xmm0, %eax
 ; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
 ; SDAG-NEXT: retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_une:
@@ -594,7 +594,7 @@
 ; SDAG-NEXT: cmpeqss %xmm0, %xmm1
 ; SDAG-NEXT: movd %xmm1, %eax
 ; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
 ; SDAG-NEXT: retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oeq3:
@@ -1249,7 +1249,7 @@
 ; SDAG-NEXT: cmpneqss %xmm0, %xmm1
 ; SDAG-NEXT: movd %xmm1, %eax
 ; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
 ; SDAG-NEXT: retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_une3:
Index: test/CodeGen/X86/fast-isel-nontemporal.ll
===================================================================
--- test/CodeGen/X86/fast-isel-nontemporal.ll
+++ test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -547,7 +547,7 @@
 ; AVX1-LABEL: test_load_nt8xfloat:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -589,7 +589,7 @@
 ; AVX1-LABEL: test_load_nt4xdouble:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -631,7 +631,7 @@
 ; AVX1-LABEL: test_load_nt32xi8:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -673,7 +673,7 @@
 ; AVX1-LABEL: test_load_nt16xi16:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -715,7 +715,7 @@
 ; AVX1-LABEL: test_load_nt8xi32:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -757,7 +757,7 @@
 ; AVX1-LABEL: test_load_nt4xi64:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -997,12 +997,12 @@
 ; AVX1-LABEL: test_load_nt16xfloat:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm2, %xmm1
 ; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1051,12 +1051,12 @@
 ; AVX1-LABEL: test_load_nt8xdouble:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm2, %xmm1
 ; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1105,12 +1105,12 @@
 ; AVX1-LABEL: test_load_nt64xi8:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm2, %xmm1
 ; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1171,12 +1171,12 @@
 ; AVX1-LABEL: test_load_nt32xi16:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm2, %xmm1
 ; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1237,12 +1237,12 @@
 ; AVX1-LABEL: test_load_nt16xi32:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm2, %xmm1
 ; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1291,12 +1291,12 @@
 ; AVX1-LABEL: test_load_nt8xi64:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm0, %xmm1
 ; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
 ; AVX1-NEXT: vmovaps %xmm2, %xmm1
 ; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
Index: test/CodeGen/X86/fast-isel-sext-zext.ll
===================================================================
--- test/CodeGen/X86/fast-isel-sext-zext.ll
+++ test/CodeGen/X86/fast-isel-sext-zext.ll
@@ -30,7 +30,7 @@
 ; X32-NEXT: andb $1, %al
 ; X32-NEXT: negb %al
 ; X32-NEXT: movsbl %al, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: ## -- End function
 ;
@@ -39,7 +39,7 @@
 ; X64-NEXT: andb $1, %dil
 ; X64-NEXT: negb %dil
 ; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: ## -- End function
 %z = trunc i16 %x to i1
@@ -116,7 +116,7 @@
 ; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: andb $1, %al
 ; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: ## -- End function
 ;
@@ -124,7 +124,7 @@
 ; X64: ## %bb.0:
 ; X64-NEXT: andb $1, %dil
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: ## -- End function
 %z = trunc i16 %x to i1
@@ -176,14 +176,14 @@
 ; X32-LABEL: test9:
 ; X32: ## %bb.0:
 ; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: ## -- End function
 ;
 ; X64-LABEL: test9:
 ; X64: ## %bb.0:
 ; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: ## -- End function
 %u = sext i8 %x to i16
@@ -228,14 +228,14 @@
 ; X32-LABEL: test12:
 ; X32: ## %bb.0:
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X32-NEXT: retl
 ; X32-NEXT: ## -- End function
 ;
 ; X64-LABEL: test12:
 ; X64: ## %bb.0:
 ; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
 ; X64-NEXT: ## -- End function
 %u = zext i8 %x to i16
Index: test/CodeGen/X86/fast-isel-shift.ll
===================================================================
--- test/CodeGen/X86/fast-isel-shift.ll
+++ test/CodeGen/X86/fast-isel-shift.ll
@@ -16,7 +16,7 @@
 ; CHECK-LABEL: shl_i16:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %cx
+; CHECK-NEXT: ## kill: def $cl killed $cx
 ; CHECK-NEXT: shlw %cl, %di
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: retq
@@ -28,7 +28,7 @@
 ; CHECK-LABEL: shl_i32:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %ecx
+; CHECK-NEXT: ## kill: def $cl killed $ecx
 ; CHECK-NEXT: shll %cl, %edi
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: retq
@@ -40,7 +40,7 @@
 ; CHECK-LABEL: shl_i64:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: def %cl killed %rcx
+; CHECK-NEXT: ## kill: def $cl killed $rcx
 ; CHECK-NEXT: shlq %cl, %rdi
 ; CHECK-NEXT: movq %rdi, %rax
 ; CHECK-NEXT: retq
@@ -63,7 +63,7 @@
 ; CHECK-LABEL: lshr_i16:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %cx
+; CHECK-NEXT: ## kill: def $cl killed $cx
 ; CHECK-NEXT: shrw %cl, %di
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: retq
@@ -75,7 +75,7 @@
 ; CHECK-LABEL: lshr_i32:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %ecx
+; CHECK-NEXT: ## kill: def $cl killed $ecx
 ; CHECK-NEXT: shrl %cl, %edi
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: retq
@@ -87,7 +87,7 @@
 ; CHECK-LABEL: lshr_i64:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: def %cl killed %rcx
+; CHECK-NEXT: ## kill: def $cl killed $rcx
 ; CHECK-NEXT: shrq %cl, %rdi
 ; CHECK-NEXT: movq %rdi, %rax
 ; CHECK-NEXT: retq
@@ -110,7 +110,7 @@
 ; CHECK-LABEL: ashr_i16:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %cx
+; CHECK-NEXT: ## kill: def $cl killed $cx
 ; CHECK-NEXT: sarw %cl, %di
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: retq
@@ -122,7 +122,7 @@
 ; CHECK-LABEL: ashr_i32:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %ecx
+; CHECK-NEXT: ## kill: def $cl killed $ecx
 ; CHECK-NEXT: sarl %cl, %edi
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: retq
@@ -134,7 +134,7 @@
 ; CHECK-LABEL: ashr_i64:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: def %cl killed %rcx
+; CHECK-NEXT: ## kill: def $cl killed $rcx
 ; CHECK-NEXT: sarq %cl, %rdi
 ; CHECK-NEXT: movq %rdi, %rax
 ; CHECK-NEXT: retq
@@ -155,9 +155,9 @@
 define i16 @shl_imm1_i16(i16 %a) {
 ; CHECK-LABEL: shl_imm1_i16:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: leal (,%rdi,2), %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %c = shl i16 %a, 1
 ret i16 %c
@@ -166,7 +166,7 @@
 define i32 @shl_imm1_i32(i32 %a) {
 ; CHECK-LABEL: shl_imm1_i32:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: leal (,%rdi,2), %eax
 ; CHECK-NEXT: retq
 %c = shl i32 %a, 1
Index: test/CodeGen/X86/fixup-bw-copy.ll
===================================================================
--- test/CodeGen/X86/fixup-bw-copy.ll
+++ test/CodeGen/X86/fixup-bw-copy.ll
@@ -54,7 +54,7 @@
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shrl $8, %eax
 ; X64-NEXT: addb %dil, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: test_movb_hreg:
Index: test/CodeGen/X86/fixup-bw-copy.mir
===================================================================
--- test/CodeGen/X86/fixup-bw-copy.mir
+++ test/CodeGen/X86/fixup-bw-copy.mir
@@ -40,14 +40,14 @@
 name: test_movb_killed
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
-    %al = MOV8rr killed %dil
-    RETQ killed %al
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+    $al = MOV8rr killed $dil
+    RETQ killed $al
...
@@ -55,14 +55,14 @@
 name: test_movb_impuse
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
-    %al = MOV8rr %dil, implicit %edi
-    RETQ killed %al
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+    $al = MOV8rr $dil, implicit $edi
+    RETQ killed $al
...
@@ -70,14 +70,14 @@
 name: test_movb_impdef_gr64
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %dil, implicit-def %rax
-    %al = MOV8rr %dil, implicit-def %rax
-    RETQ killed %al
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $dil, implicit-def $rax
+    $al = MOV8rr $dil, implicit-def $rax
+    RETQ killed $al
...
@@ -85,14 +85,14 @@
 name: test_movb_impdef_gr32
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
-    %al = MOV8rr %dil, implicit-def %eax
-    RETQ killed %al
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+    $al = MOV8rr $dil, implicit-def $eax
+    RETQ killed $al
...
@@ -100,14 +100,14 @@
 name: test_movb_impdef_gr16
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
-    %al = MOV8rr %dil, implicit-def %ax
-    RETQ killed %al
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+    $al = MOV8rr $dil, implicit-def $ax
+    RETQ killed $al
...
@@ -115,14 +115,14 @@
 name: test_movw_impdef_gr32
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %di
-    %ax = MOV16rr %di, implicit-def %eax
-    RETQ killed %ax
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $di
+    $ax = MOV16rr $di, implicit-def $eax
+    RETQ killed $ax
...
@@ -130,13 +130,13 @@
 name: test_movw_impdef_gr64
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 body: |
   bb.0 (%ir-block.0):
-    liveins: %edi
+    liveins: $edi
-    ; CHECK: %eax = MOV32rr undef %edi, implicit %di, implicit-def %rax
-    %ax = MOV16rr %di, implicit-def %rax
-    RETQ killed %ax
+    ; CHECK: $eax = MOV32rr undef $edi, implicit $di, implicit-def $rax
+    $ax = MOV16rr $di, implicit-def $rax
+    RETQ killed $ax
...
Index: test/CodeGen/X86/fixup-bw-inst.mir
===================================================================
--- test/CodeGen/X86/fixup-bw-inst.mir
+++ test/CodeGen/X86/fixup-bw-inst.mir
@@ -68,15 +68,15 @@
 # Imp-use of any super-register means the register is live before the MOV
 body: |
   bb.0:
-    liveins: %dl, %rbx, %rcx, %r14
+    liveins: $dl, $rbx, $rcx, $r14
-    %cl = MOV8rr killed %dl, implicit killed %rcx, implicit-def %rcx
-    ; CHECK: %cl = MOV8rr killed %dl, implicit killed %rcx, implicit-def %rcx
+    $cl = MOV8rr killed $dl, implicit killed $rcx, implicit-def $rcx
+    ; CHECK: $cl = MOV8rr killed $dl, implicit killed $rcx, implicit-def $rcx
    JMP_1 %bb.1
  bb.1:
-    liveins: %rcx
+    liveins: $rcx
-    RETQ %cl
+    RETQ $cl
...
---
@@ -94,23 +94,23 @@
 body: |
   bb.0.entry:
    successors: %bb.1(0x30000000), %bb.2.if.then(0x50000000)
-    liveins: %rdi
+    liveins: $rdi
-    TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.1, implicit %eflags
+    TEST64rr $rdi, $rdi, implicit-def $eflags
+    JE_1 %bb.1, implicit $eflags
  bb.2.if.then:
-    liveins: %rdi
+    liveins: $rdi
-    %ax = MOV16rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax :: (load 2 from %ir.p)
-    ; CHECK: %eax = MOVZX32rm16 killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax :: (load 2 from %ir.p)
-    %ax = KILL %ax, implicit killed %eax
-    RETQ %ax
+    $ax = MOV16rm killed $rdi, 1, $noreg, 0, $noreg, implicit-def $eax :: (load 2 from %ir.p)
+    ; CHECK: $eax = MOVZX32rm16 killed $rdi, 1, $noreg, 0, $noreg, implicit-def $eax :: (load 2 from %ir.p)
+    $ax = KILL $ax, implicit killed $eax
+    RETQ $ax
  bb.1:
-    %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
-    %ax = KILL %ax, implicit killed %eax
-    RETQ %ax
+    $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+    $ax = KILL $ax, implicit killed $eax
+    RETQ $ax
...
---
Index: test/CodeGen/X86/gpr-to-mask.ll
===================================================================
--- test/CodeGen/X86/gpr-to-mask.ll
+++ test/CodeGen/X86/gpr-to-mask.ll
@@ -260,8 +260,8 @@
 define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; X86-64-LABEL: test_shl1:
 ; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-64-NEXT: testb $1, %dil
 ; X86-64-NEXT: je .LBB5_2
 ; X86-64-NEXT: # %bb.1: # %if
@@ -278,8 +278,8 @@
 ;
 ; X86-32-LABEL: test_shl1:
 ; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
 ; X86-32-NEXT: je .LBB5_2
@@ -319,8 +319,8 @@
 define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; X86-64-LABEL: test_shr1:
 ; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-64-NEXT: testb $1, %dil
 ; X86-64-NEXT: je .LBB6_2
 ; X86-64-NEXT: # %bb.1: # %if
@@ -338,8 +338,8 @@
 ;
 ; X86-32-LABEL: test_shr1:
 ; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
 ; X86-32-NEXT: je .LBB6_2
@@ -380,8 +380,8 @@
 define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; X86-64-LABEL: test_shr2:
 ; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-64-NEXT: testb $1, %dil
 ; X86-64-NEXT: je .LBB7_2
 ; X86-64-NEXT: # %bb.1: # %if
@@ -398,8 +398,8 @@
 ;
 ; X86-32-LABEL: test_shr2:
 ; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
 ; X86-32-NEXT: je .LBB7_2
@@ -439,8 +439,8 @@
 define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; X86-64-LABEL: test_shl:
 ; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-64-NEXT: testb $1, %dil
 ; X86-64-NEXT: je .LBB8_2
 ; X86-64-NEXT: # %bb.1: # %if
@@ -457,8 +457,8 @@
 ;
 ; X86-32-LABEL: test_shl:
 ; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
 ; X86-32-NEXT: je .LBB8_2
@@ -498,8 +498,8 @@
 define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; X86-64-LABEL: test_add:
 ; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-64-NEXT: kmovb (%rsi), %k0
 ; X86-64-NEXT: kmovb (%rdx), %k1
 ; X86-64-NEXT: testb $1, %dil
@@ -517,8 +517,8 @@
 ;
 ; X86-32-LABEL: test_add:
 ; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-32-NEXT: movl {{[0-9]+}}(%esp), %edx
Index: test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
===================================================================
--- test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
+++ test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
@@ -5,23 +5,23 @@
 ; This test is meant to make sure bad eviction sequence like the one described
 ; below does not occur
 ;
-; movl %ebp, 8(%esp) # 4-byte Spill
+; movl %ebp, 8($esp) # 4-byte Spill
 ; movl %ecx, %ebp
 ; movl %ebx, %ecx
-; movl %edi, %ebx
-; movl %edx, %edi
+; movl $edi, %ebx
+; movl $edx, $edi
 ; cltd
 ; idivl %esi
-; movl %edi, %edx
-; movl %ebx, %edi
+; movl $edi, $edx
+; movl %ebx, $edi
 ; movl %ecx, %ebx
 ; movl %ebp, %ecx
-; movl 16(%esp), %ebp # 4 - byte Reload
+; movl 16($esp), %ebp # 4 - byte Reload
 ; Make sure we have no redundant copies in the problematic code seqtion
 ; CHECK-LABEL: name: bar
 ; CHECK: bb.3.for.body:
-; CHECK: %eax = COPY
+; CHECK: $eax = COPY
 ; CHECK-NEXT: CDQ
 ; CHECK-NEXT: IDIV32r
 ; CHECK-NEXT: ADD32rr
Index: test/CodeGen/X86/half.ll
===================================================================
--- test/CodeGen/X86/half.ll
+++ test/CodeGen/X86/half.ll
@@ -777,7 +777,7 @@
 ; BWON-F16C-NEXT: callq __truncdfhf2
 ; BWON-F16C-NEXT: movl %eax, %r15d
 ; BWON-F16C-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; BWON-F16C-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; BWON-F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; BWON-F16C-NEXT: vzeroupper
 ; BWON-F16C-NEXT: callq __truncdfhf2
 ; BWON-F16C-NEXT: movl %eax, %ebp
Index: test/CodeGen/X86/horizontal-reduce-smax.ll
===================================================================
--- test/CodeGen/X86/horizontal-reduce-smax.ll
+++ test/CodeGen/X86/horizontal-reduce-smax.ll
@@ -206,7 +206,7 @@
 ; X86-SSE2-NEXT: psrld $16, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -216,7 +216,7 @@
 ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
@@ -226,7 +226,7 @@
 ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-AVX-NEXT: retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -239,7 +239,7 @@
 ; X64-SSE2-NEXT: psrld $16, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -249,7 +249,7 @@
 ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT: retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
@@ -259,7 +259,7 @@
 ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX-NEXT: retq
 %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32>
 %2 = icmp sgt <8 x i16> %a0, %1
@@ -304,7 +304,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm1
 ; X86-SSE2-NEXT: por %xmm2, %xmm1
 ; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -317,7 +317,7 @@
 ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
@@ -329,7 +329,7 @@
 ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX-NEXT: retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -361,7 +361,7 @@
 ; X64-SSE2-NEXT: pandn %xmm0, %xmm1
 ; X64-SSE2-NEXT: por %xmm2, %xmm1
 ; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -374,7 +374,7 @@
 ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
@@ -386,7 +386,7 @@
 ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX-NEXT: retq
 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32>
 %2 = icmp sgt <16 x i8> %a0, %1
@@ -736,7 +736,7 @@
 ; X86-SSE2-NEXT: psrld $16, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -747,7 +747,7 @@
 ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT: retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -759,7 +759,7 @@
 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT: vzeroupper
 ; X86-AVX1-NEXT: retl
 ;
@@ -772,7 +772,7 @@
 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT: vzeroupper
 ; X86-AVX2-NEXT: retl
 ;
@@ -787,7 +787,7 @@
 ; X64-SSE2-NEXT: psrld $16, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -798,7 +798,7 @@
 ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT: retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -810,7 +810,7 @@
 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT: vzeroupper
 ; X64-AVX1-NEXT: retq
 ;
@@ -823,7 +823,7 @@
 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
 ;
@@ -836,7 +836,7 @@
 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT: vzeroupper
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32>
@@ -890,7 +890,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm2
 ; X86-SSE2-NEXT: por %xmm1, %xmm2
 ; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -904,7 +904,7 @@
 ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -918,7 +918,7 @@
 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT: vzeroupper
 ; X86-AVX1-NEXT: retl
 ;
@@ -933,7 +933,7 @@
 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT: vzeroupper
 ; X86-AVX2-NEXT: retl
 ;
@@ -971,7 +971,7 @@
 ; X64-SSE2-NEXT: pandn %xmm0, %xmm2
 ; X64-SSE2-NEXT: por %xmm1, %xmm2
 ; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -985,7 +985,7 @@
 ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -999,7 +999,7 @@
 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT: vzeroupper
 ; X64-AVX1-NEXT: retq
 ;
@@ -1014,7 +1014,7 @@
 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
 ;
@@ -1029,7 +1029,7 @@
 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX512-NEXT: vzeroupper
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32>
@@ -1526,7 +1526,7 @@
 ; X86-SSE2-NEXT: psrld $16, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1539,7 +1539,7 @@
 ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE42-NEXT: retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1554,7 +1554,7 @@
 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-AVX1-NEXT: vzeroupper
 ; X86-AVX1-NEXT: retl
 ;
@@ -1568,7 +1568,7 @@
 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-AVX2-NEXT: vzeroupper
 ; X86-AVX2-NEXT: retl
 ;
@@ -1585,7 +1585,7 @@
 ; X64-SSE2-NEXT: psrld $16, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1598,7 +1598,7 @@
 ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE42-NEXT: retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1613,7 +1613,7 @@
 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX1-NEXT: vzeroupper
 ; X64-AVX1-NEXT: retq
 ;
@@ -1627,7 +1627,7 @@
 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
 ;
@@ -1642,7 +1642,7 @@
 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-AVX512-NEXT: vzeroupper
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32>
@@ -1709,7 +1709,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm1
 ; X86-SSE2-NEXT: por %xmm2, %xmm1
 ; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1725,7 +1725,7 @@
 ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-SSE42-NEXT: retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1742,7 +1742,7 @@
 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX1-NEXT: vzeroupper
 ; X86-AVX1-NEXT: retl
 ;
@@ -1758,7 +1758,7 @@
 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
 ; X86-AVX2-NEXT: vzeroupper
 ; X86-AVX2-NEXT: retl
 ;
@@ -1806,7 +1806,7 @@
 ; X64-SSE2-NEXT: pandn %xmm0, %xmm1
 ; X64-SSE2-NEXT: por %xmm2, %xmm1
 ; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -1822,7 +1822,7 @@
 ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-SSE42-NEXT: retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -1839,7 +1839,7 @@
 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
 ; X64-AVX1-NEXT: vzeroupper
 ; X64-AVX1-NEXT: retq
 ;
@@ -1855,7 +1855,7 @@
 ; X64-AVX2-NEXT: vphminposuw %xmm0,
%xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1872,7 +1872,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> Index: test/CodeGen/X86/horizontal-reduce-smin.ll =================================================================== --- test/CodeGen/X86/horizontal-reduce-smin.ll +++ test/CodeGen/X86/horizontal-reduce-smin.ll @@ -208,7 +208,7 @@ ; X86-SSE2-NEXT: psrld $16, %xmm1 ; X86-SSE2-NEXT: pminsw %xmm0, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v8i16: @@ -218,7 +218,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX-LABEL: test_reduce_v8i16: @@ -228,7 +228,7 @@ ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovd %xmm0, %eax -; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_reduce_v8i16: @@ -241,7 +241,7 @@ ; X64-SSE2-NEXT: psrld $16, %xmm1 ; X64-SSE2-NEXT: pminsw %xmm0, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v8i16: @@ -251,7 +251,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX-LABEL: test_reduce_v8i16: @@ -261,7 +261,7 @@ ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX-NEXT: vmovd %xmm0, %eax -; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX-NEXT: retq %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> %2 = icmp slt <8 x i16> %a0, %1 @@ -306,7 +306,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm1 ; X86-SSE2-NEXT: por %xmm2, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v16i8: @@ -319,7 +319,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX-LABEL: test_reduce_v16i8: @@ -331,7 +331,7 @@ ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX-NEXT: ## kill: 
def %al killed %al killed %eax +; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_reduce_v16i8: @@ -363,7 +363,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm1 ; X64-SSE2-NEXT: por %xmm2, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v16i8: @@ -376,7 +376,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX-LABEL: test_reduce_v16i8: @@ -388,7 +388,7 @@ ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX-NEXT: retq %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> %2 = icmp slt <16 x i8> %a0, %1 @@ -740,7 +740,7 @@ ; X86-SSE2-NEXT: psrld $16, %xmm1 ; X86-SSE2-NEXT: pminsw %xmm0, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v16i16: @@ -751,7 +751,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v16i16: @@ -763,7 +763,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovd %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -776,7 +776,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -791,7 +791,7 @@ ; X64-SSE2-NEXT: psrld $16, %xmm1 ; X64-SSE2-NEXT: pminsw %xmm0, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v16i16: @@ -802,7 +802,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v16i16: @@ -814,7 +814,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vmovd %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -827,7 +827,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vmovd %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed 
$eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -840,7 +840,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vmovd %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> @@ -894,7 +894,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm2 ; X86-SSE2-NEXT: por %xmm1, %xmm2 ; X86-SSE2-NEXT: movd %xmm2, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v32i8: @@ -908,7 +908,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v32i8: @@ -922,7 +922,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -937,7 +937,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -975,7 +975,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm2 ; X64-SSE2-NEXT: por %xmm1, %xmm2 ; X64-SSE2-NEXT: movd %xmm2, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v32i8: @@ -989,7 +989,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v32i8: @@ -1003,7 +1003,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1018,7 +1018,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1033,7 +1033,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> @@ -1528,7 +1528,7 @@ ; X86-SSE2-NEXT: psrld $16, %xmm1 ; X86-SSE2-NEXT: pminsw %xmm0, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax 
killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v32i16: @@ -1541,7 +1541,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v32i16: @@ -1556,7 +1556,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovd %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1570,7 +1570,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1587,7 +1587,7 @@ ; X64-SSE2-NEXT: psrld $16, %xmm1 ; X64-SSE2-NEXT: pminsw %xmm0, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v32i16: @@ -1600,7 +1600,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v32i16: @@ -1615,7 +1615,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vmovd %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1629,7 +1629,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vmovd %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1644,7 +1644,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vmovd %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> @@ -1711,7 +1711,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm1 ; X86-SSE2-NEXT: por %xmm2, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v64i8: @@ -1727,7 +1727,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v64i8: @@ -1744,7 +1744,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; 
X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1760,7 +1760,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1808,7 +1808,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm1 ; X64-SSE2-NEXT: por %xmm2, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v64i8: @@ -1824,7 +1824,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v64i8: @@ -1841,7 +1841,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1857,7 +1857,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1874,7 +1874,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> Index: test/CodeGen/X86/horizontal-reduce-umax.ll =================================================================== --- test/CodeGen/X86/horizontal-reduce-umax.ll +++ test/CodeGen/X86/horizontal-reduce-umax.ll @@ -254,7 +254,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm3 ; X86-SSE2-NEXT: por %xmm2, %xmm3 ; X86-SSE2-NEXT: movd %xmm3, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v8i16: @@ -264,7 +264,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX-LABEL: test_reduce_v8i16: @@ -274,7 +274,7 @@ ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovd %xmm0, %eax -; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_reduce_v8i16: @@ -308,7 +308,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm3 ; X64-SSE2-NEXT: por %xmm2, %xmm3 ; X64-SSE2-NEXT: movd %xmm3, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v8i16: @@ -318,7 +318,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; 
X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v8i16: @@ -376,7 +376,7 @@ ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v16i8: @@ -389,7 +389,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX-LABEL: test_reduce_v16i8: @@ -401,7 +401,7 @@ ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_reduce_v16i8: @@ -417,7 +417,7 @@ ; X64-SSE2-NEXT: psrlw $8, %xmm0 ; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0 ; X64-SSE2-NEXT: movd %xmm0, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v16i8: @@ -430,7 +430,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v16i8: @@ -895,7 +895,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm1 ; X86-SSE2-NEXT: por %xmm3, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v16i16: @@ -906,7 +906,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v16i16: @@ -918,7 +918,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovd %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -931,7 +931,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -974,7 +974,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm1 ; X64-SSE2-NEXT: por %xmm3, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v16i16: @@ -985,7 +985,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed 
$eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v16i16: @@ -997,7 +997,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vmovd %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1010,7 +1010,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vmovd %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1022,7 +1022,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 ; X64-AVX512-NEXT: vmovd %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> @@ -1056,7 +1056,7 @@ ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v32i8: @@ -1070,7 +1070,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v32i8: @@ -1084,7 +1084,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1099,7 +1099,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1117,7 +1117,7 @@ ; X64-SSE2-NEXT: psrlw $8, %xmm0 ; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0 ; X64-SSE2-NEXT: movd %xmm0, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v32i8: @@ -1131,7 +1131,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v32i8: @@ -1145,7 +1145,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1160,7 +1160,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; 
X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1174,7 +1174,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> @@ -1801,7 +1801,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm2 ; X86-SSE2-NEXT: por %xmm1, %xmm2 ; X86-SSE2-NEXT: movd %xmm2, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v32i16: @@ -1814,7 +1814,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v32i16: @@ -1829,7 +1829,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovd %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1843,7 +1843,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1902,7 +1902,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm2 ; X64-SSE2-NEXT: por %xmm1, %xmm2 ; X64-SSE2-NEXT: movd %xmm2, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v32i16: @@ -1915,7 +1915,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v32i16: @@ -1930,7 +1930,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vmovd %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1944,7 +1944,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vmovd %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1958,7 +1958,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 ; X64-AVX512-NEXT: vmovd %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> @@ -1997,7 +1997,7 @@ ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed 
$al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v64i8: @@ -2013,7 +2013,7 @@ ; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X86-SSE42-NEXT: pxor %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v64i8: @@ -2030,7 +2030,7 @@ ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -2046,7 +2046,7 @@ ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -2066,7 +2066,7 @@ ; X64-SSE2-NEXT: psrlw $8, %xmm0 ; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0 ; X64-SSE2-NEXT: movd %xmm0, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v64i8: @@ -2082,7 +2082,7 @@ ; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0 ; X64-SSE42-NEXT: pxor %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v64i8: @@ -2099,7 +2099,7 @@ ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -2115,7 +2115,7 @@ ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -2131,7 +2131,7 @@ ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> Index: test/CodeGen/X86/horizontal-reduce-umin.ll =================================================================== --- test/CodeGen/X86/horizontal-reduce-umin.ll +++ test/CodeGen/X86/horizontal-reduce-umin.ll @@ -256,21 +256,21 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm1 ; X86-SSE2-NEXT: por %xmm3, %xmm1 ; X86-SSE2-NEXT: movd %xmm1, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v8i16: ; X86-SSE42: ## %bb.0: ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX-LABEL: test_reduce_v8i16: ; X86-AVX: ## %bb.0: ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; 
X86-AVX-NEXT: vmovd %xmm0, %eax -; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_reduce_v8i16: @@ -304,21 +304,21 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm1 ; X64-SSE2-NEXT: por %xmm3, %xmm1 ; X64-SSE2-NEXT: movd %xmm1, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v8i16: ; X64-SSE42: ## %bb.0: ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX-LABEL: test_reduce_v8i16: ; X64-AVX: ## %bb.0: ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX-NEXT: vmovd %xmm0, %eax -; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX-NEXT: retq %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> %2 = icmp ult <8 x i16> %a0, %1 @@ -347,7 +347,7 @@ ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: pminub %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v16i8: @@ -357,7 +357,7 @@ ; X86-SSE42-NEXT: pminub %xmm0, %xmm1 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX-LABEL: test_reduce_v16i8: @@ -366,7 +366,7 @@ ; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_reduce_v16i8: @@ -382,7 +382,7 @@ ; X64-SSE2-NEXT: psrlw $8, %xmm0 ; X64-SSE2-NEXT: pminub %xmm1, %xmm0 ; X64-SSE2-NEXT: movd %xmm0, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v16i8: @@ -392,7 +392,7 @@ ; X64-SSE42-NEXT: pminub %xmm0, %xmm1 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX-LABEL: test_reduce_v16i8: @@ -401,7 +401,7 @@ ; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX-NEXT: retq %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> %2 = icmp ult <16 x i8> %a0, %1 @@ -835,7 +835,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm2 ; X86-SSE2-NEXT: por %xmm4, %xmm2 ; X86-SSE2-NEXT: movd %xmm2, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v16i16: @@ -843,7 +843,7 @@ ; X86-SSE42-NEXT: pminuw %xmm1, %xmm0 ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; 
X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v16i16: @@ -852,7 +852,7 @@ ; X86-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovd %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -862,7 +862,7 @@ ; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -905,7 +905,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm2 ; X64-SSE2-NEXT: por %xmm4, %xmm2 ; X64-SSE2-NEXT: movd %xmm2, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v16i16: @@ -913,7 +913,7 @@ ; X64-SSE42-NEXT: pminuw %xmm1, %xmm0 ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v16i16: @@ -922,7 +922,7 @@ ; X64-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vmovd %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -932,7 +932,7 @@ ; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vmovd %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -942,7 +942,7 @@ ; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vmovd %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> @@ -976,7 +976,7 @@ ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: pminub %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v32i8: @@ -987,7 +987,7 @@ ; X86-SSE42-NEXT: pminub %xmm0, %xmm1 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v32i8: @@ -998,7 +998,7 @@ ; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1010,7 +1010,7 @@ ; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX2-NEXT: ## kill: def $al killed 
$al killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1028,7 +1028,7 @@ ; X64-SSE2-NEXT: psrlw $8, %xmm0 ; X64-SSE2-NEXT: pminub %xmm1, %xmm0 ; X64-SSE2-NEXT: movd %xmm0, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v32i8: @@ -1039,7 +1039,7 @@ ; X64-SSE42-NEXT: pminub %xmm0, %xmm1 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v32i8: @@ -1050,7 +1050,7 @@ ; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1062,7 +1062,7 @@ ; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1074,7 +1074,7 @@ ; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> @@ -1699,7 +1699,7 @@ ; X86-SSE2-NEXT: pandn %xmm0, %xmm4 ; X86-SSE2-NEXT: por %xmm2, %xmm4 ; X86-SSE2-NEXT: movd %xmm4, %eax -; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v32i16: @@ -1709,7 +1709,7 @@ ; X86-SSE42-NEXT: pminuw %xmm1, %xmm0 ; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X86-SSE42-NEXT: movd %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v32i16: @@ -1721,7 +1721,7 @@ ; X86-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovd %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1732,7 +1732,7 @@ ; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1791,7 +1791,7 @@ ; X64-SSE2-NEXT: pandn %xmm0, %xmm4 ; X64-SSE2-NEXT: por %xmm2, %xmm4 ; X64-SSE2-NEXT: movd %xmm4, %eax -; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v32i16: @@ -1801,7 +1801,7 @@ ; X64-SSE42-NEXT: pminuw %xmm1, %xmm0 ; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0 ; X64-SSE42-NEXT: movd %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-SSE42-NEXT: retq ; 
; X64-AVX1-LABEL: test_reduce_v32i16: @@ -1813,7 +1813,7 @@ ; X64-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vmovd %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1824,7 +1824,7 @@ ; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vmovd %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; @@ -1836,7 +1836,7 @@ ; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vmovd %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax +; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> @@ -1875,7 +1875,7 @@ ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: pminub %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax -; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE2-NEXT: retl ; ; X86-SSE42-LABEL: test_reduce_v64i8: @@ -1888,7 +1888,7 @@ ; X86-SSE42-NEXT: pminub %xmm0, %xmm1 ; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0 ; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X86-SSE42-NEXT: retl ; ; X86-AVX1-LABEL: test_reduce_v64i8: @@ -1902,7 +1902,7 @@ ; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; @@ -1915,7 +1915,7 @@ ; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X86-AVX2-NEXT: vzeroupper ; X86-AVX2-NEXT: retl ; @@ -1935,7 +1935,7 @@ ; X64-SSE2-NEXT: psrlw $8, %xmm0 ; X64-SSE2-NEXT: pminub %xmm1, %xmm0 ; X64-SSE2-NEXT: movd %xmm0, %eax -; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE2-NEXT: retq ; ; X64-SSE42-LABEL: test_reduce_v64i8: @@ -1948,7 +1948,7 @@ ; X64-SSE42-NEXT: pminub %xmm0, %xmm1 ; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0 ; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax -; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax +; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax ; X64-SSE42-NEXT: retq ; ; X64-AVX1-LABEL: test_reduce_v64i8: @@ -1962,7 +1962,7 @@ ; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX1-NEXT: vzeroupper ; X64-AVX1-NEXT: retq ; @@ -1975,7 +1975,7 @@ ; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX2-NEXT: vzeroupper ; 
X64-AVX2-NEXT: retq ; @@ -1989,7 +1989,7 @@ ; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0 ; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0 ; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax -; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax +; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax ; X64-AVX512-NEXT: vzeroupper ; X64-AVX512-NEXT: retq %1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> Index: test/CodeGen/X86/iabs.ll =================================================================== --- test/CodeGen/X86/iabs.ll +++ test/CodeGen/X86/iabs.ll @@ -41,7 +41,7 @@ ; X86-NO-CMOV-NEXT: sarw $15, %cx ; X86-NO-CMOV-NEXT: addl %ecx, %eax ; X86-NO-CMOV-NEXT: xorl %ecx, %eax -; X86-NO-CMOV-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NO-CMOV-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NO-CMOV-NEXT: retl ; ; X86-CMOV-LABEL: test_i16: Index: test/CodeGen/X86/illegal-bitfield-loadstore.ll =================================================================== --- test/CodeGen/X86/illegal-bitfield-loadstore.ll +++ test/CodeGen/X86/illegal-bitfield-loadstore.ll @@ -116,7 +116,7 @@ ; X64-NEXT: movzwl 4(%rdi), %eax ; X64-NEXT: movzbl 6(%rdi), %ecx ; X64-NEXT: movb %cl, 6(%rdi) -; X64-NEXT: # kill: def %ecx killed %ecx killed %rcx def %rcx +; X64-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx ; X64-NEXT: shll $16, %ecx ; X64-NEXT: orl %eax, %ecx ; X64-NEXT: shlq $32, %rcx @@ -148,7 +148,7 @@ ; X64-NEXT: movzwl 4(%rdi), %eax ; X64-NEXT: movzbl 6(%rdi), %ecx ; X64-NEXT: movb %cl, 6(%rdi) -; X64-NEXT: # kill: def %ecx killed %ecx killed %rcx def %rcx +; X64-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx ; X64-NEXT: shll $16, %ecx ; X64-NEXT: orl %eax, %ecx ; X64-NEXT: shlq $32, %rcx @@ -186,7 +186,7 @@ ; X64-NEXT: movzwl 4(%rdi), %ecx ; X64-NEXT: movzbl 6(%rdi), %edx ; X64-NEXT: movb %dl, 6(%rdi) -; X64-NEXT: # kill: def %edx killed %edx killed %rdx def %rdx +; X64-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx ; X64-NEXT: shll $16, %edx ; X64-NEXT: orl %ecx, %edx ; X64-NEXT: shlq $32, %rdx Index: test/CodeGen/X86/implicit-null-checks.mir =================================================================== --- test/CodeGen/X86/implicit-null-checks.mir +++ test/CodeGen/X86/implicit-null-checks.mir @@ -23,7 +23,7 @@ } ;; Negative test. 
The regalloc is such that we cannot hoist the - ;; instruction materializing 2200000 into %eax + ;; instruction materializing 2200000 into $eax define i32 @imp_null_check_with_bitwise_op_1(i32* %x, i32 %val, i32* %ptr) { entry: br i1 undef, label %is_null, label %not_null, !make.implicit !0 @@ -387,39 +387,39 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%esi' } + - { reg: '$rdi' } + - { reg: '$esi' } # CHECK: bb.0.entry: -# CHECK: %eax = MOV32ri 2200000 -# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x) +# CHECK: $eax = MOV32ri 2200000 +# CHECK-NEXT: $eax = FAULTING_OP 1, %bb.3, {{[0-9]+}}, $eax, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags :: (load 4 from %ir.x) # CHECK-NEXT: JMP_1 %bb.1 body: | bb.0.entry: - liveins: %esi, %rdi + liveins: $esi, $rdi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.3, implicit $eflags bb.1.not_null: - liveins: %esi, %rdi + liveins: $esi, $rdi - %eax = MOV32ri 2200000 - %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x) - CMP32rr killed %eax, killed %esi, implicit-def %eflags - JE_1 %bb.4, implicit %eflags + $eax = MOV32ri 2200000 + $eax = AND32rm killed $eax, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x) + CMP32rr killed $eax, killed $esi, implicit-def $eflags + JE_1 %bb.4, implicit $eflags bb.2.ret_200: - %eax = MOV32ri 200 - RETQ %eax + $eax = MOV32ri 200 + RETQ $eax bb.3.is_null: - %eax = MOV32ri 42 - RETQ %eax + $eax = MOV32ri 42 + RETQ $eax bb.4.ret_100: - %eax = MOV32ri 100 - RETQ %eax + $eax = MOV32ri 100 + RETQ $eax ... 
--- @@ -427,42 +427,42 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%esi' } - - { reg: '%rdx' } + - { reg: '$rdi' } + - { reg: '$esi' } + - { reg: '$rdx' } # CHECK: bb.0.entry: -# CHECK: %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr) -# CHECK-NEXT: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.3, implicit %eflags +# CHECK: $eax = MOV32rm killed $rdx, 1, $noreg, 0, $noreg :: (volatile load 4 from %ir.ptr) +# CHECK-NEXT: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.3, implicit $eflags body: | bb.0.entry: - liveins: %esi, %rdi, %rdx + liveins: $esi, $rdi, $rdx - %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr) - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + $eax = MOV32rm killed $rdx, 1, $noreg, 0, $noreg :: (volatile load 4 from %ir.ptr) + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.3, implicit $eflags bb.1.not_null: - liveins: %esi, %rdi + liveins: $esi, $rdi - %eax = MOV32ri 2200000 - %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x) - CMP32rr killed %eax, killed %esi, implicit-def %eflags - JE_1 %bb.4, implicit %eflags + $eax = MOV32ri 2200000 + $eax = AND32rm killed $eax, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x) + CMP32rr killed $eax, killed $esi, implicit-def $eflags + JE_1 %bb.4, implicit $eflags bb.2.ret_200: - %eax = MOV32ri 200 + $eax = MOV32ri 200 bb.3.is_null: - liveins: %eax, %ah, %al, %ax, %bh, %bl, %bp, %bpl, %bx, %eax, %ebp, %ebx, %rax, %rbp, %rbx, %r12, %r13, %r14, %r15, %r12b, %r13b, %r14b, %r15b, %r12d, %r13d, %r14d, %r15d, %r12w, %r13w, %r14w, %r15w + liveins: $eax, $ah, $al, $ax, $bh, $bl, $bp, $bpl, $bx, $eax, $ebp, $ebx, $rax, $rbp, $rbx, $r12, $r13, $r14, $r15, $r12b, $r13b, $r14b, $r15b, $r12d, $r13d, $r14d, $r15d, $r12w, $r13w, $r14w, $r15w - RETQ %eax + RETQ $eax bb.4.ret_100: - %eax = MOV32ri 100 - RETQ %eax + $eax = MOV32ri 100 + RETQ $eax ... --- @@ -471,39 +471,39 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%esi' } + - { reg: '$rdi' } + - { reg: '$esi' } # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.3, implicit %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.3, implicit $eflags body: | bb.0.entry: - liveins: %esi, %rdi + liveins: $esi, $rdi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.3, implicit $eflags bb.1.not_null: - liveins: %esi, %rdi + liveins: $esi, $rdi - %eax = MOV32ri 2200000 - %eax = ADD32ri killed %eax, 100, implicit-def dead %eflags - %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x) - CMP32rr killed %eax, killed %esi, implicit-def %eflags - JE_1 %bb.4, implicit %eflags + $eax = MOV32ri 2200000 + $eax = ADD32ri killed $eax, 100, implicit-def dead $eflags + $eax = AND32rm killed $eax, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x) + CMP32rr killed $eax, killed $esi, implicit-def $eflags + JE_1 %bb.4, implicit $eflags bb.2.ret_200: - %eax = MOV32ri 200 - RETQ %eax + $eax = MOV32ri 200 + RETQ $eax bb.3.is_null: - %eax = MOV32ri 42 - RETQ %eax + $eax = MOV32ri 42 + RETQ $eax bb.4.ret_100: - %eax = MOV32ri 100 - RETQ %eax + $eax = MOV32ri 100 + RETQ $eax ... 
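The two negative cases above fail for different reasons. In the first, bb.3.is_null lists $eax (under every alias) as a live-in because the null path returns the result of the volatile load from bb.0.entry, so hoisting the constant materialization would clobber it; this is the "cannot hoist the instruction materializing 2200000 into $eax" situation described at the top of the file. In the second, the faulting AND32rm is fed by two instructions, and as these tests exercise it the pass moves at most one dependency along with the faulting access:

  $eax = MOV32ri 2200000                                        # one dependency could be hoisted ...
  $eax = ADD32ri killed $eax, 100, implicit-def dead $eflags    # ... but a second one blocks the rewrite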
--- @@ -512,38 +512,38 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.3, implicit %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.3, implicit $eflags body: | bb.0.entry: - liveins: %rsi, %rdi + liveins: $rsi, $rdi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.3, implicit $eflags bb.1.not_null: - liveins: %rsi, %rdi + liveins: $rsi, $rdi - %rdi = MOV64ri 5000 - %rdi = AND64rm killed %rdi, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x) - CMP64rr killed %rdi, killed %rsi, implicit-def %eflags - JE_1 %bb.4, implicit %eflags + $rdi = MOV64ri 5000 + $rdi = AND64rm killed $rdi, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x) + CMP64rr killed $rdi, killed $rsi, implicit-def $eflags + JE_1 %bb.4, implicit $eflags bb.2.ret_200: - %eax = MOV32ri 200 - RETQ %eax + $eax = MOV32ri 200 + RETQ $eax bb.3.is_null: - %eax = MOV32ri 42 - RETQ %eax + $eax = MOV32ri 42 + RETQ $eax bb.4.ret_100: - %eax = MOV32ri 100 - RETQ %eax + $eax = MOV32ri 100 + RETQ $eax ... --- @@ -552,39 +552,39 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } # CHECK: bb.0.entry: -# CHECK: %rbx = MOV64rr %rdx -# CHECK-NEXT: %rbx = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x) +# CHECK: $rbx = MOV64rr $rdx +# CHECK-NEXT: $rbx = FAULTING_OP 1, %bb.3, {{[0-9]+}}, $rbx, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags :: (load 4 from %ir.x) body: | bb.0.entry: - liveins: %rsi, %rdi, %rdx + liveins: $rsi, $rdi, $rdx - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.3, implicit $eflags bb.1.not_null: - liveins: %rsi, %rdi, %rdx + liveins: $rsi, $rdi, $rdx - %rbx = MOV64rr %rdx - %rbx = AND64rm killed %rbx, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x) - %rdx = MOV64ri 0 - CMP64rr killed %rbx, killed %rsi, implicit-def %eflags - JE_1 %bb.4, implicit %eflags + $rbx = MOV64rr $rdx + $rbx = AND64rm killed $rbx, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x) + $rdx = MOV64ri 0 + CMP64rr killed $rbx, killed $rsi, implicit-def $eflags + JE_1 %bb.4, implicit $eflags bb.2.ret_200: - %eax = MOV32ri 200 - RETQ %eax + $eax = MOV32ri 200 + RETQ $eax bb.3.is_null: - %eax = MOV32ri 42 - RETQ %eax + $eax = MOV32ri 42 + RETQ $eax bb.4.ret_100: - %eax = MOV32ri 100 - RETQ %eax + $eax = MOV32ri 100 + RETQ $eax ... 
--- @@ -593,38 +593,38 @@ alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } -calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', - '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15', - '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d', - '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] + - { reg: '$rdi' } +calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx', + '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15', + '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d', + '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ] # CHECK: body: # CHECK-NOT: FAULTING_OP # CHECK: bb.1.stay: # CHECK: CALL64pcrel32 body: | bb.0.entry: - liveins: %rdi, %rbx + liveins: $rdi, $rbx - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbx, -16 - %rbx = MOV64rr %rdi - TEST64rr %rbx, %rbx, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + CFI_INSTRUCTION offset $rbx, -16 + $rbx = MOV64rr $rdi + TEST64rr $rbx, $rbx, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.stay: - liveins: %rbx + liveins: $rbx - CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp - %eax = MOV32rm killed %rbx, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr) - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + CALL64pcrel32 @f, csr_64, implicit $rsp, implicit-def $rsp + $eax = MOV32rm killed $rbx, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr) + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax bb.2.leave: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax ... --- @@ -636,154 +636,154 @@ # Make sure that the BEXTR32rm instruction below is not used to emit # an implicit null check -- hoisting it will require hoisting the move -# to %esi and we cannot do that without clobbering the use of %rsi in +# to $esi and we cannot do that without clobbering the use of $rsi in # the first instruction in bb.1.not_null. alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rcx = MOV64rm killed %rsi, 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr2) - %esi = MOV32ri 3076 - %eax = BEXTR32rm killed %rdi, 1, %noreg, 0, %noreg, killed %esi, implicit-def dead %eflags :: (load 4 from %ir.ptr) - %eax = ADD32rm killed %eax, killed %rcx, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.val) - RETQ %eax + $rcx = MOV64rm killed $rsi, 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr2) + $esi = MOV32ri 3076 + $eax = BEXTR32rm killed $rdi, 1, $noreg, 0, $noreg, killed $esi, implicit-def dead $eflags :: (load 4 from %ir.ptr) + $eax = ADD32rm killed $eax, killed $rcx, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.val) + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ...
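The BEXTR32rm case turns on sub-register aliasing rather than memory: $esi is the low half of $rsi, so even though the two operands name different registers, the move feeding BEXTR32rm cannot be hoisted above the last read of $rsi:

  $rcx = MOV64rm killed $rsi, 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr2)   # last read of $rsi
  $esi = MOV32ri 3076                                                          # writes $esi, clobbering $rsi

Folding BEXTR32rm into a FAULTING_OP would force the MOV32ri above the MOV64rm, overwriting the pointer before it is read, so the explicit TEST/JE_1 survives.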
--- name: use_alternate_load_op # CHECK-LABEL: name: use_alternate_load_op # CHECK: bb.0.entry: -# CHECK: %rax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg +# CHECK: $rax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rcx = MOV64rm killed %rsi, 1, %noreg, 0, %noreg - %rcx = AND64rm killed %rcx, %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags - %rax = MOV64rm killed %rdi, 1, %noreg, 0, %noreg - RETQ %eax + $rcx = MOV64rm killed $rsi, 1, $noreg, 0, $noreg + $rcx = AND64rm killed $rcx, $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags + $rax = MOV64rm killed $rdi, 1, $noreg, 0, $noreg + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... --- name: imp_null_check_gep_load_with_use_dep # CHECK-LABEL: name: imp_null_check_gep_load_with_use_dep # CHECK: bb.0.entry: -# CHECK: %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x) +# CHECK: $eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from %ir.x) # CHECK-NEXT: JMP_1 %bb.1 alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rsi, %rdi + liveins: $rsi, $rdi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.1, implicit %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.1, implicit $eflags bb.2.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags - %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x) - %eax = LEA64_32r killed %rax, 1, killed %rsi, 4, %noreg - RETQ %eax + $rsi = ADD64rr $rsi, $rdi, implicit-def dead $eflags + $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from %ir.x) + $eax = LEA64_32r killed $rax, 1, killed $rsi, 4, $noreg + RETQ $eax bb.1.is_null: - %eax = MOV32ri 42 - RETQ %eax + $eax = MOV32ri 42 + RETQ $eax ... 
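imp_null_check_gep_load_with_use_dep additionally checks that implicit operands survive the rewrite: the 32-bit load also defines $rax (a 32-bit write zero-extends into the 64-bit register, which the MIR models with implicit-def $rax), and the LEA64_32r below reads that super-register, so the FAULTING_OP in the CHECK line must carry the same implicit-def:

  $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from %ir.x)   # also defines $rax
  $eax = LEA64_32r killed $rax, 1, killed $rsi, 4, $noreg                                      # consumes the super-register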
--- name: imp_null_check_load_with_base_sep # CHECK-LABEL: name: imp_null_check_load_with_base_sep # CHECK: bb.0.entry: -# CHECK: %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags -# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags +# CHECK: $rsi = ADD64rr $rsi, $rdi, implicit-def dead $eflags +# CHECK-NEXT: $esi = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $esi, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags # CHECK-NEXT: JMP_1 %bb.1 alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rsi, %rdi + liveins: $rsi, $rdi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.1, implicit %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.1, implicit $eflags bb.2.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags - %esi = AND32rm killed %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags - %eax = MOV32rr %esi - RETQ %eax + $rsi = ADD64rr $rsi, $rdi, implicit-def dead $eflags + $esi = AND32rm killed $esi, $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags + $eax = MOV32rr $esi + RETQ $eax bb.1.is_null: - %eax = MOV32ri 42 - RETQ %eax + $eax = MOV32ri 42 + RETQ $eax ... --- name: inc_store # CHECK-LABEL: name: inc_store # CHECK: bb.0.entry: -# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi +# CHECK: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg, $rsi # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV64mr killed %rdi, 1, %noreg, 0, %noreg, killed %rsi + MOV64mr killed $rdi, 1, $noreg, 0, $noreg, killed $rsi RETQ bb.2.is_null: @@ -794,26 +794,26 @@ name: inc_store_plus_offset # CHECK-LABEL: inc_store_plus_offset # CHECK: bb.0.entry: -# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi +# CHECK: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 16, $noreg, $rsi # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV64mr killed %rdi, 1, %noreg, 16, %noreg, killed %rsi + MOV64mr killed $rdi, 1, $noreg, 16, $noreg, killed $rsi RETQ bb.2.is_null: @@ -824,28 +824,28 @@ name: inc_store_with_dep # CHECK-LABEL: inc_store_with_dep # CHECK: bb.0.entry: -# CHECK: %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags -# CHECK-NEXT: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi +# CHECK: $esi = ADD32rr killed $esi, killed $esi, implicit-def dead $eflags +# CHECK-NEXT: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 16, $noreg, $esi # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 
tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags - MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi + $esi = ADD32rr killed $esi, killed $esi, implicit-def dead $eflags + MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi RETQ bb.2.is_null: @@ -856,61 +856,61 @@ name: inc_store_with_dep_in_null # CHECK-LABEL: inc_store_with_dep_in_null # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %esi = ADD32rr %esi, %esi, implicit-def dead %eflags - MOV32mr killed %rdi, 1, %noreg, 0, %noreg, %esi - %eax = MOV32rr killed %esi - RETQ %eax + $esi = ADD32rr $esi, $esi, implicit-def dead $eflags + MOV32mr killed $rdi, 1, $noreg, 0, $noreg, $esi + $eax = MOV32rr killed $esi + RETQ $eax bb.2.is_null: - liveins: %rsi + liveins: $rsi - %eax = MOV32rr killed %esi - RETQ %eax + $eax = MOV32rr killed $esi + RETQ $eax ... 
--- name: inc_store_with_volatile # CHECK-LABEL: inc_store_with_volatile # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi :: (volatile store 4 into %ir.ptr) + MOV32mr killed $rdi, 1, $noreg, 0, $noreg, killed $esi :: (volatile store 4 into %ir.ptr) RETQ bb.2.is_null: @@ -921,28 +921,28 @@ name: inc_store_with_two_dep # CHECK-LABEL: inc_store_with_two_dep # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags - %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags - MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi + $esi = ADD32rr killed $esi, killed $esi, implicit-def dead $eflags + $esi = ADD32ri killed $esi, 15, implicit-def dead $eflags + MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi RETQ bb.2.is_null: @@ -953,27 +953,27 @@ name: inc_store_with_redefined_base # CHECK-LABEL: inc_store_with_redefined_base # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rdi = ADD64rr killed %rdi, killed %rdi, implicit-def dead %eflags - MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi + $rdi = ADD64rr killed $rdi, killed $rdi, implicit-def dead $eflags + MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi RETQ bb.2.is_null: @@ -984,198 +984,198 @@ name: inc_store_with_reused_base # CHECK-LABEL: inc_store_with_reused_base # CHECK: bb.0.entry: -# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi +# CHECK: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 16, $noreg, $esi # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + 
- { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rax = MOV64rr %rdi - MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi - RETQ %eax + $rax = MOV64rr $rdi + MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi + RETQ $eax bb.2.is_null: - %rax = XOR64rr undef %rax, undef %rax, implicit-def dead %eflags - RETQ %eax + $rax = XOR64rr undef $rax, undef $rax, implicit-def dead $eflags + RETQ $eax ... --- name: inc_store_across_call # CHECK-LABEL: inc_store_across_call # CHECK: bb.0.entry: -# CHECK: TEST64rr %rbx, %rbx, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rbx, $rbx, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } -calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', - '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15', - '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d', - '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] + - { reg: '$rdi' } +calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx', + '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15', + '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d', + '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ] body: | bb.0.entry: - liveins: %rdi, %rbx + liveins: $rdi, $rbx - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbx, -16 - %rbx = MOV64rr killed %rdi - TEST64rr %rbx, %rbx, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + CFI_INSTRUCTION offset $rbx, -16 + $rbx = MOV64rr killed $rdi + TEST64rr $rbx, $rbx, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rbx + liveins: $rbx - CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp - MOV32mi %rbx, 1, %noreg, 0, %noreg, 20 - %rax = MOV64rr killed %rbx - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + CALL64pcrel32 @f, csr_64, implicit $rsp, implicit-def $rsp + MOV32mi $rbx, 1, $noreg, 0, $noreg, 20 + $rax = MOV64rr killed $rbx + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - %rbx = POP64r implicit-def %rsp, implicit %rsp - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + $rbx = POP64r implicit-def $rsp, implicit $rsp + RETQ $eax ... 
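inc_store_across_call stays conservative for yet another reason: the only memory access in bb.1.not_null sits after the call, and the pass does not hoist a memory operation across CALL64pcrel32 (the callee may touch the same memory; only the register $rbx is known to be preserved, not what it points to):

  CALL64pcrel32 @f, csr_64, implicit $rsp, implicit-def $rsp   # intervening call ...
  MOV32mi $rbx, 1, $noreg, 0, $noreg, 20                       # ... so this store cannot become the faulting access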
--- name: inc_store_with_dep_in_dep # CHECK-LABEL: inc_store_with_dep_in_dep # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %eax = MOV32rr %esi - %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags - MOV32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi - RETQ %eax + $eax = MOV32rr $esi + $esi = ADD32ri killed $esi, 15, implicit-def dead $eflags + MOV32mr killed $rdi, 1, $noreg, 0, $noreg, killed $esi + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... --- name: inc_store_with_load_over_store # CHECK-LABEL: inc_store_with_load_over_store # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 2 - %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg - RETQ %eax + MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 2 + $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... --- name: inc_store_with_store_over_load # CHECK-LABEL: inc_store_with_store_over_load # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %eax = MOV32rm killed %rsi, 1, %noreg, 0, %noreg - MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 2 - RETQ %eax + $eax = MOV32rm killed $rsi, 1, $noreg, 0, $noreg + MOV32mi killed $rdi, 1, $noreg, 0, $noreg, 2 + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... 
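inc_store_with_load_over_store and inc_store_with_store_over_load pin down both reordering directions. The MOV32mi/MOV32rm pairs in these two tests carry no memory operands (there is no ":: (...)" annotation), so the pass must assume the accesses alias, and neither the load nor the store may be hoisted over the other to reach the check:

  MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 2        # store with no memory-operand info
  $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg    # may alias the store, so it cannot move above it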
--- name: inc_store_with_store_over_store # CHECK-LABEL: inc_store_with_store_over_store # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 - MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 2 + MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 3 + MOV32mi killed $rdi, 1, $noreg, 0, $noreg, 2 RETQ bb.2.is_null: @@ -1186,27 +1186,27 @@ name: inc_store_with_load_and_store # CHECK-LABEL: inc_store_with_load_and_store # CHECK: bb.0.entry: -# CHECK: %noreg = FAULTING_OP 2, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags +# CHECK: $noreg = FAULTING_OP 2, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg, $esi, implicit-def $eflags # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %esi = ADD32rr %esi, %esi, implicit-def dead %eflags - ADD32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi, implicit-def dead %eflags + $esi = ADD32rr $esi, $esi, implicit-def dead $eflags + ADD32mr killed $rdi, 1, $noreg, 0, $noreg, killed $esi, implicit-def dead $eflags RETQ bb.2.is_null: @@ -1217,72 +1217,72 @@ name: inc_store_and_load_no_alias # CHECK-LABEL: inc_store_and_load_no_alias # CHECK: bb.0.entry: -# CHECK: %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr) +# CHECK: $eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr) # CHECK-NEXT: JMP_1 %bb.1 # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 :: (store 4 into %ir.ptr2) - %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr) - RETQ %eax + MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 3 :: (store 4 into %ir.ptr2) + $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr) + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... 
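inc_store_and_load_no_alias above and inc_store_and_load_alias below are mirror images: the MIR bodies are identical, and the deciding input is the IR-level aliasing of %ir.ptr and %ir.ptr2 recorded in the memory operands. Where the pointers are provably disjoint the load becomes the faulting access; where they may alias the explicit check is kept:

  # no_alias expects the folded form:
  # CHECK: $eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
  # alias expects the explicit test to remain:
  # CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags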
--- name: inc_store_and_load_alias # CHECK-LABEL: inc_store_and_load_alias # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 :: (store 4 into %ir.ptr2) - %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr) - RETQ %eax + MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 3 :: (store 4 into %ir.ptr2) + $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr) + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... --- name: inc_spill_dep # CHECK-LABEL: inc_spill_dep # CHECK: bb.0.entry: -# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags -# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags +# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags +# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags # CHECK: bb.1.not_null alignment: 4 @@ -1290,28 +1290,28 @@ stack: - { id: 0, type: spill-slot, offset: -8, size: 8, alignment: 8} liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } body: | bb.0.entry: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %rsp = frame-setup SUB64ri8 %rsp, 8, implicit-def dead %eflags - MOV32mr %rsp, 1, %noreg, 0, %noreg, %esi :: (store 4 into %stack.0) - TEST64rr %rdi, %rdi, implicit-def %eflags - JE_1 %bb.2, implicit killed %eflags + $rsp = frame-setup SUB64ri8 $rsp, 8, implicit-def dead $eflags + MOV32mr $rsp, 1, $noreg, 0, $noreg, $esi :: (store 4 into %stack.0) + TEST64rr $rdi, $rdi, implicit-def $eflags + JE_1 %bb.2, implicit killed $eflags bb.1.not_null: - liveins: %rdi, %rsi + liveins: $rdi, $rsi - %r14d = MOV32rm %rsp, 1, %noreg, 0, %noreg :: (load 4 from %stack.0) - MOV64mr %rsp, 1, %noreg, 0, %noreg, %rdi :: (store 8 into %stack.0) - %edi = MOV32rm %rdi, 1, %noreg, 8, %noreg :: (load 4 from %ir.ptr) - %eax = MOV32rr %edi - RETQ %eax + $r14d = MOV32rm $rsp, 1, $noreg, 0, $noreg :: (load 4 from %stack.0) + MOV64mr $rsp, 1, $noreg, 0, $noreg, $rdi :: (store 8 into %stack.0) + $edi = MOV32rm $rdi, 1, $noreg, 8, $noreg :: (load 4 from %ir.ptr) + $eax = MOV32rr $edi + RETQ $eax bb.2.is_null: - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - RETQ %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + RETQ $eax ... Index: test/CodeGen/X86/implicit-use-spill.mir =================================================================== --- test/CodeGen/X86/implicit-use-spill.mir +++ test/CodeGen/X86/implicit-use-spill.mir @@ -11,10 +11,10 @@ bb.0: ; CHECK: NOOP implicit-def [[VAL:%[0-9]+]] ; VAL should be spilled before csr_noregs, i.e., before we clobber all the registers - ; CHECK-NEXT: MOV64mr [[SLOT:%stack.[0-9]+]], 1, %noreg, 0, %noreg, [[VAL]] + ; CHECK-NEXT: MOV64mr [[SLOT:%stack.[0-9]+]], 1, $noreg, 0, $noreg, [[VAL]] ; CHECK-NEXT: NOOP csr_noregs ; We need to reload before the (implicit) use. 
- ; CHECK-NEXT: [[RELOADED_VAL:%[0-9]+]]:gr64 = MOV64rm [[SLOT]], 1, %noreg, 0, %noreg + ; CHECK-NEXT: [[RELOADED_VAL:%[0-9]+]]:gr64 = MOV64rm [[SLOT]], 1, $noreg, 0, $noreg ; CHECK-NEXT: NOOP implicit [[RELOADED_VAL]] NOOP implicit-def %0 NOOP csr_noregs Index: test/CodeGen/X86/imul.ll =================================================================== --- test/CodeGen/X86/imul.ll +++ test/CodeGen/X86/imul.ll @@ -218,7 +218,7 @@ define i32 @test2(i32 %a) { ; X64-LABEL: test2: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movl %edi, %eax ; X64-NEXT: shll $5, %eax ; X64-NEXT: leal (%rax,%rdi), %eax @@ -239,7 +239,7 @@ define i32 @test3(i32 %a) { ; X64-LABEL: test3: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movl %edi, %eax ; X64-NEXT: shll $5, %eax ; X64-NEXT: leal (%rax,%rdi), %eax Index: test/CodeGen/X86/invalid-liveness.mir =================================================================== --- test/CodeGen/X86/invalid-liveness.mir +++ test/CodeGen/X86/invalid-liveness.mir @@ -16,7 +16,7 @@ - { id: 0, class: gr32 } body: | bb.0: - JG_1 %bb.2, implicit %eflags + JG_1 %bb.2, implicit $eflags JMP_1 %bb.3 bb.2: @@ -24,6 +24,6 @@ JMP_1 %bb.3 bb.3: - %eax = COPY %0 - RETQ %eax + $eax = COPY %0 + RETQ $eax ... Index: test/CodeGen/X86/ipra-inline-asm.ll =================================================================== --- test/CodeGen/X86/ipra-inline-asm.ll +++ test/CodeGen/X86/ipra-inline-asm.ll @@ -11,7 +11,7 @@ } ; Verifies that inline assembly is correctly handled by giving a list of clobbered registers -; CHECK: foo Clobbered Registers: %ah %al %ax %ch %cl %cx %di %dil %eax %ecx %edi %rax %rcx %rdi +; CHECK: foo Clobbered Registers: $ah $al $ax $ch $cl $cx $di $dil $eax $ecx $edi $rax $rcx $rdi define void @foo() #0 { call void asm sideeffect "", "~{eax},~{ecx},~{edi}"() #0 ret void Index: test/CodeGen/X86/ipra-reg-alias.ll =================================================================== --- test/CodeGen/X86/ipra-reg-alias.ll +++ test/CodeGen/X86/ipra-reg-alias.ll @@ -6,7 +6,7 @@ %inc2 = mul i8 %inc, 5 ; Here only CL is clobbered so CH should not be clobbered, but CX, ECX and RCX ; should be clobbered.
-; CHECK: main Clobbered Registers: %ah %al %ax %cl %cx %eax %ecx %eflags %rax %rcx +; CHECK: main Clobbered Registers: $ah $al $ax $cl $cx $eax $ecx $eflags $rax $rcx ret i8 %inc2 } Index: test/CodeGen/X86/ipra-reg-usage.ll =================================================================== --- test/CodeGen/X86/ipra-reg-usage.ll +++ test/CodeGen/X86/ipra-reg-usage.ll @@ -3,7 +3,7 @@ target triple = "x86_64-unknown-unknown" declare void @bar1() define preserve_allcc void @foo()#0 { -; CHECK: foo Clobbered Registers: %cs %ds %eflags %eip %eiz %es %fpsw %fs %gs %ip %rip %riz %ss %ssp %bnd0 %bnd1 %bnd2 %bnd3 %cr0 %cr1 %cr2 %cr3 %cr4 %cr5 %cr6 %cr7 %cr8 %cr9 %cr10 %cr11 %cr12 %cr13 %cr14 %cr15 %dr0 %dr1 %dr2 %dr3 %dr4 %dr5 %dr6 %dr7 %dr8 %dr9 %dr10 %dr11 %dr12 %dr13 %dr14 %dr15 %fp0 %fp1 %fp2 %fp3 %fp4 %fp5 %fp6 %fp7 %k0 %k1 %k2 %k3 %k4 %k5 %k6 %k7 %mm0 %mm1 %mm2 %mm3 %mm4 %mm5 %mm6 %mm7 %r11 %st0 %st1 %st2 %st3 %st4 %st5 %st6 %st7 %xmm16 %xmm17 %xmm18 %xmm19 %xmm20 %xmm21 %xmm22 %xmm23 %xmm24 %xmm25 %xmm26 %xmm27 %xmm28 %xmm29 %xmm30 %xmm31 %ymm0 %ymm1 %ymm2 %ymm3 %ymm4 %ymm5 %ymm6 %ymm7 %ymm8 %ymm9 %ymm10 %ymm11 %ymm12 %ymm13 %ymm14 %ymm15 %ymm16 %ymm17 %ymm18 %ymm19 %ymm20 %ymm21 %ymm22 %ymm23 %ymm24 %ymm25 %ymm26 %ymm27 %ymm28 %ymm29 %ymm30 %ymm31 %zmm0 %zmm1 %zmm2 %zmm3 %zmm4 %zmm5 %zmm6 %zmm7 %zmm8 %zmm9 %zmm10 %zmm11 %zmm12 %zmm13 %zmm14 %zmm15 %zmm16 %zmm17 %zmm18 %zmm19 %zmm20 %zmm21 %zmm22 %zmm23 %zmm24 %zmm25 %zmm26 %zmm27 %zmm28 %zmm29 %zmm30 %zmm31 %r11b %r11d %r11w +; CHECK: foo Clobbered Registers: $cs $ds $eflags $eip $eiz $es $fpsw $fs $gs $ip $rip $riz $ss $ssp $bnd0 $bnd1 $bnd2 $bnd3 $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $r11b $r11d $r11w call void @bar1() call void @bar2() ret void Index: test/CodeGen/X86/lea-3.ll =================================================================== --- test/CodeGen/X86/lea-3.ll +++ test/CodeGen/X86/lea-3.ll @@ -36,25 +36,25 @@ define i32 @test(i32 %a) { ; LNX1-LABEL: test: ; LNX1: # %bb.0: -; LNX1-NEXT: # kill: def %edi killed %edi def %rdi +; LNX1-NEXT: # kill: def $edi killed $edi def $rdi ; LNX1-NEXT: leal (%rdi,%rdi,2), %eax ; LNX1-NEXT: retq ; ; LNX2-LABEL: test: ; LNX2: # %bb.0: -; LNX2-NEXT: # kill: def %edi killed %edi def %rdi +; LNX2-NEXT: # kill: def $edi killed $edi def $rdi ; LNX2-NEXT: leal (%rdi,%rdi,2), %eax ; LNX2-NEXT: retq ; ; NACL-LABEL: test: ; NACL: # %bb.0: -; NACL-NEXT: # kill: def %edi killed %edi def %rdi +; NACL-NEXT: # kill: def $edi killed $edi def $rdi ; NACL-NEXT: leal (%rdi,%rdi,2), %eax ; NACL-NEXT: retq ; ; WIN-LABEL: test: ; WIN: # %bb.0: -; WIN-NEXT: # kill: def %ecx killed %ecx def %rcx +; WIN-NEXT: # kill: def $ecx killed $ecx def $rcx ; WIN-NEXT: leal (%rcx,%rcx,2), %eax ; WIN-NEXT: retq 
%tmp2 = mul i32 %a, 3 Index: test/CodeGen/X86/lea-opt-cse3.ll =================================================================== --- test/CodeGen/X86/lea-opt-cse3.ll +++ test/CodeGen/X86/lea-opt-cse3.ll @@ -5,8 +5,8 @@ define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 { ; X64-LABEL: foo: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %esi killed %esi def %rsi -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $esi killed $esi def $rsi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx ; X64-NEXT: leal 4(%rdi,%rsi,4), %eax ; X64-NEXT: imull %ecx, %eax @@ -33,8 +33,8 @@ define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 { ; X64-LABEL: foo1: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %esi killed %esi def %rsi -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $esi killed $esi def $rsi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx ; X64-NEXT: leal 4(%rdi,%rsi,8), %eax ; X64-NEXT: imull %ecx, %eax @@ -61,8 +61,8 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 { ; X64-LABEL: foo1_mult_basic_blocks: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %esi killed %esi def %rsi -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $esi killed $esi def $rsi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl $10, %ecx @@ -113,8 +113,8 @@ define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_addr #0 { ; X64-LABEL: foo1_mult_basic_blocks_illegal_scale: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %esi killed %esi def %rsi -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $esi killed $esi def $rsi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl $10, %ecx Index: test/CodeGen/X86/lea-opt-with-debug.mir =================================================================== --- test/CodeGen/X86/lea-opt-with-debug.mir +++ test/CodeGen/X86/lea-opt-with-debug.mir @@ -95,28 +95,28 @@ bb.0 (%ir-block.0): successors: %bb.1(0x80000000) - ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, %noreg, debug-location !13 - ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13 - ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14 - ; CHECK: DBG_VALUE debug-use %4, debug-use %noreg, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15 - - %1 = MOV64rm %rip, 1, %noreg, @c, %noreg, debug-location !13 :: (dereferenceable load 8 from @c) - %2 = MOVSX64rm32 %rip, 1, %noreg, @a, %noreg, debug-location !13 :: (dereferenceable load 4 from @a) - %3 = LEA64r %2, 2, %2, 0, %noreg, debug-location !13 - %4 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13 + ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, $noreg, debug-location !13 + ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, $noreg, debug-location !13 + ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, $noreg, debug-location !14 + ; CHECK: DBG_VALUE debug-use %4, debug-use $noreg, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15 + + %1 = MOV64rm $rip, 1, $noreg, @c, $noreg, debug-location !13 :: (dereferenceable load 8 from @c) + %2 = MOVSX64rm32 $rip, 1, $noreg, @a, $noreg, debug-location !13 :: (dereferenceable load 4 from @a) + %3 = LEA64r %2, 2, %2, 0, $noreg, debug-location !13 + %4 = 
LEA64r %1, 4, %3, 0, $noreg, debug-location !13 %5 = COPY %4.sub_32bit, debug-location !13 - MOV32mr %rip, 1, %noreg, @d, %noreg, killed %5, debug-location !13 :: (store 4 into @d) - %0 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14 - DBG_VALUE debug-use %0, debug-use %noreg, !11, !DIExpression(), debug-location !15 + MOV32mr $rip, 1, $noreg, @d, $noreg, killed %5, debug-location !13 :: (store 4 into @d) + %0 = LEA64r %1, 4, %3, 8, $noreg, debug-location !14 + DBG_VALUE debug-use %0, debug-use $noreg, !11, !DIExpression(), debug-location !15 ; CHECK-LABEL: bb.1 (%ir-block.8): - ; CHECK: %6:gr32 = MOV32rm %4, 1, %noreg, 8, %noreg, debug-location !17 :: (load 4 from %ir.7) + ; CHECK: %6:gr32 = MOV32rm %4, 1, $noreg, 8, $noreg, debug-location !17 :: (load 4 from %ir.7) bb.1 (%ir-block.8): successors: %bb.1(0x80000000) - %6 = MOV32rm %0, 1, %noreg, 0, %noreg, debug-location !17 :: (load 4 from %ir.7) - MOV32mr %rip, 1, %noreg, @d, %noreg, killed %6, debug-location !17 :: (store 4 into @d) + %6 = MOV32rm %0, 1, $noreg, 0, $noreg, debug-location !17 :: (load 4 from %ir.7) + MOV32mr $rip, 1, $noreg, @d, $noreg, killed %6, debug-location !17 :: (store 4 into @d) JMP_1 %bb.1, debug-location !18 ... Index: test/CodeGen/X86/lea32-schedule.ll =================================================================== --- test/CodeGen/X86/lea32-schedule.ll +++ test/CodeGen/X86/lea32-schedule.ll @@ -14,13 +14,13 @@ define i32 @test_lea_offset(i32) { ; GENERIC-LABEL: test_lea_offset: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal -24(%rdi), %eax # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_offset: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -32,43 +32,43 @@ ; ; SLM-LABEL: test_lea_offset: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_offset: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal -24(%rdi), %eax # sched: [1:0.50] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_offset: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_offset: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_offset: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal -24(%rdi), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_offset: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal -24(%rdi), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; 
ZNVER1-LABEL: test_lea_offset: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = add nsw i32 %0, -24 @@ -78,13 +78,13 @@ define i32 @test_lea_offset_big(i32) { ; GENERIC-LABEL: test_lea_offset_big: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_offset_big: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -96,43 +96,43 @@ ; ; SLM-LABEL: test_lea_offset_big: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_offset_big: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_offset_big: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_offset_big: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_offset_big: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_offset_big: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_offset_big: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = add nsw i32 %0, 1024 @@ -143,15 +143,15 @@ define i32 @test_lea_add(i32, i32) { ; GENERIC-LABEL: test_lea_add: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_add: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %esi killed %esi def %rsi -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $esi killed $esi def $rsi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ 
-163,50 +163,50 @@ ; ; SLM-LABEL: test_lea_add: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %esi killed %esi def %rsi -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $esi killed $esi def $rsi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_add: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %esi killed %esi def %rsi -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $esi killed $esi def $rsi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_add: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_add: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_add: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_add: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_add: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = add nsw i32 %1, %0 @@ -216,16 +216,16 @@ define i32 @test_lea_add_offset(i32, i32) { ; GENERIC-LABEL: test_lea_add_offset: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; GENERIC-NEXT: addl $16, %eax # sched: [1:0.33] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_add_offset: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %esi killed %esi def %rsi -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $esi killed $esi def $rsi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -237,54 +237,54 @@ ; ; SLM-LABEL: test_lea_add_offset: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def 
%esi killed %esi def %rsi -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $esi killed $esi def $rsi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_add_offset: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %esi killed %esi def %rsi -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $esi killed $esi def $rsi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; SANDY-NEXT: addl $16, %eax # sched: [1:0.33] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_add_offset: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; HASWELL-NEXT: addl $16, %eax # sched: [1:0.25] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_add_offset: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; BROADWELL-NEXT: addl $16, %eax # sched: [1:0.25] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_add_offset: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: addl $16, %eax # sched: [1:0.25] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_add_offset: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_add_offset: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = add i32 %0, 16 @@ -295,8 +295,8 @@ define i32 @test_lea_add_offset_big(i32, i32) { ; GENERIC-LABEL: test_lea_add_offset_big: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; GENERIC-NEXT: addl $-4096, %eax # imm = 0xF000 ; GENERIC-NEXT: # sched: [1:0.33] @@ -304,8 +304,8 @@ ; ; ATOM-LABEL: test_lea_add_offset_big: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %esi killed %esi def %rsi -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $esi killed $esi def $rsi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: 
[1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -317,15 +317,15 @@ ; ; SLM-LABEL: test_lea_add_offset_big: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %esi killed %esi def %rsi -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $esi killed $esi def $rsi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_add_offset_big: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %esi killed %esi def %rsi -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $esi killed $esi def $rsi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; SANDY-NEXT: addl $-4096, %eax # imm = 0xF000 ; SANDY-NEXT: # sched: [1:0.33] @@ -333,8 +333,8 @@ ; ; HASWELL-LABEL: test_lea_add_offset_big: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; HASWELL-NEXT: addl $-4096, %eax # imm = 0xF000 ; HASWELL-NEXT: # sched: [1:0.25] @@ -342,8 +342,8 @@ ; ; BROADWELL-LABEL: test_lea_add_offset_big: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; BROADWELL-NEXT: addl $-4096, %eax # imm = 0xF000 ; BROADWELL-NEXT: # sched: [1:0.25] @@ -351,8 +351,8 @@ ; ; SKYLAKE-LABEL: test_lea_add_offset_big: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: addl $-4096, %eax # imm = 0xF000 ; SKYLAKE-NEXT: # sched: [1:0.25] @@ -360,15 +360,15 @@ ; ; BTVER2-LABEL: test_lea_add_offset_big: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_add_offset_big: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = add i32 %0, -4096 @@ -379,13 +379,13 @@ define i32 @test_lea_mul(i32) { ; GENERIC-LABEL: test_lea_mul: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_mul: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: 
[1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -397,43 +397,43 @@ ; ; SLM-LABEL: test_lea_mul: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_mul: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_mul: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_mul: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_mul: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_mul: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_mul: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i32 %0, 3 @@ -443,14 +443,14 @@ define i32 @test_lea_mul_offset(i32) { ; GENERIC-LABEL: test_lea_mul_offset: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; GENERIC-NEXT: addl $-32, %eax # sched: [1:0.33] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_mul_offset: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -462,47 +462,47 @@ ; ; SLM-LABEL: test_lea_mul_offset: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_mul_offset: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; SANDY-NEXT: addl $-32, %eax # sched: [1:0.33] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_mul_offset: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; HASWELL-NEXT: addl $-32, %eax # sched: [1:0.25] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_mul_offset: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %edi killed %edi def 
%rdi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; BROADWELL-NEXT: addl $-32, %eax # sched: [1:0.25] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_mul_offset: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: addl $-32, %eax # sched: [1:0.25] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_mul_offset: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_mul_offset: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i32 %0, 3 @@ -513,7 +513,7 @@ define i32 @test_lea_mul_offset_big(i32) { ; GENERIC-LABEL: test_lea_mul_offset_big: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; GENERIC-NEXT: addl $10000, %eax # imm = 0x2710 ; GENERIC-NEXT: # sched: [1:0.33] @@ -521,7 +521,7 @@ ; ; ATOM-LABEL: test_lea_mul_offset_big: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -533,13 +533,13 @@ ; ; SLM-LABEL: test_lea_mul_offset_big: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_mul_offset_big: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; SANDY-NEXT: addl $10000, %eax # imm = 0x2710 ; SANDY-NEXT: # sched: [1:0.33] @@ -547,7 +547,7 @@ ; ; HASWELL-LABEL: test_lea_mul_offset_big: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; HASWELL-NEXT: addl $10000, %eax # imm = 0x2710 ; HASWELL-NEXT: # sched: [1:0.25] @@ -555,7 +555,7 @@ ; ; BROADWELL-LABEL: test_lea_mul_offset_big: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; BROADWELL-NEXT: addl $10000, %eax # imm = 0x2710 ; BROADWELL-NEXT: # sched: [1:0.25] @@ -563,7 +563,7 @@ ; ; SKYLAKE-LABEL: test_lea_mul_offset_big: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: addl $10000, %eax # imm = 0x2710 ; SKYLAKE-NEXT: # sched: [1:0.25] @@ -571,13 +571,13 @@ ; ; BTVER2-LABEL: test_lea_mul_offset_big: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $edi killed 
$edi def $rdi ; BTVER2-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_mul_offset_big: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i32 %0, 9 @@ -588,15 +588,15 @@ define i32 @test_lea_add_scale(i32, i32) { ; GENERIC-LABEL: test_lea_add_scale: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_add_scale: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %esi killed %esi def %rsi -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $esi killed $esi def $rsi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -608,50 +608,50 @@ ; ; SLM-LABEL: test_lea_add_scale: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %esi killed %esi def %rsi -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $esi killed $esi def $rsi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_add_scale: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %esi killed %esi def %rsi -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $esi killed $esi def $rsi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_add_scale: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_add_scale: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_add_scale: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_add_scale: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_add_scale: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # 
kill: def %esi killed %esi def %rsi -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i32 %1, 1 @@ -662,16 +662,16 @@ define i32 @test_lea_add_scale_offset(i32, i32) { ; GENERIC-LABEL: test_lea_add_scale_offset: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50] ; GENERIC-NEXT: addl $96, %eax # sched: [1:0.33] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ATOM-LABEL: test_lea_add_scale_offset: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %esi killed %esi def %rsi -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $esi killed $esi def $rsi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -683,54 +683,54 @@ ; ; SLM-LABEL: test_lea_add_scale_offset: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %esi killed %esi def %rsi -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $esi killed $esi def $rsi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_add_scale_offset: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %esi killed %esi def %rsi -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $esi killed $esi def $rsi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50] ; SANDY-NEXT: addl $96, %eax # sched: [1:0.33] ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_lea_add_scale_offset: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50] ; HASWELL-NEXT: addl $96, %eax # sched: [1:0.25] ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_lea_add_scale_offset: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50] ; BROADWELL-NEXT: addl $96, %eax # sched: [1:0.25] ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_lea_add_scale_offset: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50] ; SKYLAKE-NEXT: addl $96, %eax # sched: [1:0.25] ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_lea_add_scale_offset: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi +; 
BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_add_scale_offset: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i32 %1, 2 @@ -742,8 +742,8 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) { ; GENERIC-LABEL: test_lea_add_scale_offset_big: ; GENERIC: # %bb.0: -; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi -; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi +; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi +; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi ; GENERIC-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50] ; GENERIC-NEXT: addl $-1200, %eax # imm = 0xFB50 ; GENERIC-NEXT: # sched: [1:0.33] @@ -751,8 +751,8 @@ ; ; ATOM-LABEL: test_lea_add_scale_offset_big: ; ATOM: # %bb.0: -; ATOM-NEXT: # kill: def %esi killed %esi def %rsi -; ATOM-NEXT: # kill: def %edi killed %edi def %rdi +; ATOM-NEXT: # kill: def $esi killed $esi def $rsi +; ATOM-NEXT: # kill: def $edi killed $edi def $rdi ; ATOM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00] ; ATOM-NEXT: nop # sched: [1:0.50] ; ATOM-NEXT: nop # sched: [1:0.50] @@ -764,15 +764,15 @@ ; ; SLM-LABEL: test_lea_add_scale_offset_big: ; SLM: # %bb.0: -; SLM-NEXT: # kill: def %esi killed %esi def %rsi -; SLM-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NEXT: # kill: def $esi killed $esi def $rsi +; SLM-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_lea_add_scale_offset_big: ; SANDY: # %bb.0: -; SANDY-NEXT: # kill: def %esi killed %esi def %rsi -; SANDY-NEXT: # kill: def %edi killed %edi def %rdi +; SANDY-NEXT: # kill: def $esi killed $esi def $rsi +; SANDY-NEXT: # kill: def $edi killed $edi def $rdi ; SANDY-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50] ; SANDY-NEXT: addl $-1200, %eax # imm = 0xFB50 ; SANDY-NEXT: # sched: [1:0.33] @@ -780,8 +780,8 @@ ; ; HASWELL-LABEL: test_lea_add_scale_offset_big: ; HASWELL: # %bb.0: -; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi -; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi +; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi +; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi ; HASWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50] ; HASWELL-NEXT: addl $-1200, %eax # imm = 0xFB50 ; HASWELL-NEXT: # sched: [1:0.25] @@ -789,8 +789,8 @@ ; ; BROADWELL-LABEL: test_lea_add_scale_offset_big: ; BROADWELL: # %bb.0: -; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi -; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi +; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi +; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi ; BROADWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50] ; BROADWELL-NEXT: addl $-1200, %eax # imm = 0xFB50 ; BROADWELL-NEXT: # sched: [1:0.25] @@ -798,8 +798,8 @@ ; ; SKYLAKE-LABEL: test_lea_add_scale_offset_big: ; SKYLAKE: # %bb.0: -; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi -; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi +; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi +; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi ; SKYLAKE-NEXT: leal (%rdi,%rsi,8), %eax # sched: 
[1:0.50] ; SKYLAKE-NEXT: addl $-1200, %eax # imm = 0xFB50 ; SKYLAKE-NEXT: # sched: [1:0.25] @@ -807,15 +807,15 @@ ; ; BTVER2-LABEL: test_lea_add_scale_offset_big: ; BTVER2: # %bb.0: -; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi -; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi +; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi +; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi ; BTVER2-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_lea_add_scale_offset_big: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi -; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi +; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi +; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi ; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i32 %1, 3 Index: test/CodeGen/X86/leaFixup32.mir =================================================================== --- test/CodeGen/X86/leaFixup32.mir +++ test/CodeGen/X86/leaFixup32.mir @@ -85,8 +85,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } + - { reg: '$eax' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -103,12 +103,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp - ; CHECK: %eax = ADD32rr %eax, killed %ebp - ; CHECK: %eax = ADD32ri8 %eax, -5 + liveins: $eax, $ebp + ; CHECK: $eax = ADD32rr $eax, killed $ebp + ; CHECK: $eax = ADD32ri8 $eax, -5 - %eax = LEA32r killed %eax, 1, killed %ebp, -5, %noreg - RETQ %eax + $eax = LEA32r killed $eax, 1, killed $ebp, -5, $noreg + RETQ $eax ... --- @@ -120,8 +120,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } + - { reg: '$eax' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -138,12 +138,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp - ; CHECK: %ebp = ADD32rr %ebp, killed %eax - ; CHECK: %ebp = ADD32ri8 %ebp, -5 + liveins: $eax, $ebp + ; CHECK: $ebp = ADD32rr $ebp, killed $eax + ; CHECK: $ebp = ADD32ri8 $ebp, -5 - %ebp = LEA32r killed %ebp, 1, killed %eax, -5, %noreg - RETQ %ebp + $ebp = LEA32r killed $ebp, 1, killed $eax, -5, $noreg + RETQ $ebp ... --- @@ -155,8 +155,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } + - { reg: '$eax' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -173,11 +173,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp - ; CHECK: %ebp = ADD32rr %ebp, killed %eax + liveins: $eax, $ebp + ; CHECK: $ebp = ADD32rr $ebp, killed $eax - %ebp = LEA32r killed %ebp, 1, killed %eax, 0, %noreg - RETQ %ebp + $ebp = LEA32r killed $ebp, 1, killed $eax, 0, $noreg + RETQ $ebp ... 
--- @@ -189,9 +189,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } - - { reg: '%ebx' } + - { reg: '$eax' } + - { reg: '$ebp' } + - { reg: '$ebx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -208,12 +208,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %esi - ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0 - ; CHECK: %ebx = ADD32ri8 %ebx, -5 + liveins: $eax, $ebp, $esi + ; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0 + ; CHECK: $ebx = ADD32ri8 $ebx, -5 - %ebx = LEA32r killed %eax, 1, killed %ebp, -5, %noreg - RETQ %ebx + $ebx = LEA32r killed $eax, 1, killed $ebp, -5, $noreg + RETQ $ebx ... --- @@ -225,9 +225,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } - - { reg: '%ebx' } + - { reg: '$eax' } + - { reg: '$ebp' } + - { reg: '$ebx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -244,12 +244,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp - ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg - ; CHECK: %ebx = ADD32ri8 %ebx, -5 + liveins: $eax, $ebp + ; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0, $noreg + ; CHECK: $ebx = ADD32ri8 $ebx, -5 - %ebx = LEA32r killed %ebp, 1, killed %eax, -5, %noreg - RETQ %ebx + $ebx = LEA32r killed $ebp, 1, killed $eax, -5, $noreg + RETQ $ebx ... --- @@ -261,9 +261,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } - - { reg: '%ebx' } + - { reg: '$eax' } + - { reg: '$ebp' } + - { reg: '$ebx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -280,11 +280,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp - ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg + liveins: $eax, $ebp + ; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0, $noreg - %ebx = LEA32r killed %ebp, 1, killed %eax, 0, %noreg - RETQ %ebx + $ebx = LEA32r killed $ebp, 1, killed $eax, 0, $noreg + RETQ $ebx ... --- @@ -296,8 +296,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebp' } + - { reg: '$eax' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -314,12 +314,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp - ; CHECK: %eax = ADD32rr %eax, killed %ebp - ; CHECK: %eax = ADD32ri %eax, 129 + liveins: $eax, $ebp + ; CHECK: $eax = ADD32rr $eax, killed $ebp + ; CHECK: $eax = ADD32ri $eax, 129 - %eax = LEA32r killed %eax, 1, killed %ebp, 129, %noreg - RETQ %eax + $eax = LEA32r killed $eax, 1, killed $ebp, 129, $noreg + RETQ $eax ... --- @@ -331,9 +331,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%eax' } - - { reg: '%ebx' } - - { reg: '%ebp' } + - { reg: '$eax' } + - { reg: '$ebx' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -350,12 +350,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %ebx - ; CHECK: %ebx = MOV32rr %ebp - ; CHECK: %ebx = ADD32rr %ebx, %ebp + liveins: $eax, $ebp, $ebx + ; CHECK: $ebx = MOV32rr $ebp + ; CHECK: $ebx = ADD32rr $ebx, $ebp - %ebx = LEA32r killed %ebp, 1, %ebp, 0, %noreg - RETQ %ebx + $ebx = LEA32r killed $ebp, 1, $ebp, 0, $noreg + RETQ $ebx ... 
--- @@ -367,8 +367,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%ebx' } - - { reg: '%ebp' } + - { reg: '$ebx' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -385,12 +385,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %ebx - ; CHECK: %ebx = LEA32r %noreg, 1, %ebp, 5, %noreg - ; CHECK: %ebx = ADD32rr %ebx, %ebp + liveins: $eax, $ebp, $ebx + ; CHECK: $ebx = LEA32r $noreg, 1, $ebp, 5, $noreg + ; CHECK: $ebx = ADD32rr $ebx, $ebp - %ebx = LEA32r %ebp, 1, %ebp, 5, %noreg - RETQ %ebx + $ebx = LEA32r $ebp, 1, $ebp, 5, $noreg + RETQ $ebx ... --- @@ -402,8 +402,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%ebx' } - - { reg: '%ebp' } + - { reg: '$ebx' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -420,12 +420,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %ebx - ; CHECK: %ebx = LEA32r %noreg, 4, %ebp, 5, %noreg - ; CHECK: %ebx = ADD32rr %ebx, %ebp + liveins: $eax, $ebp, $ebx + ; CHECK: $ebx = LEA32r $noreg, 4, $ebp, 5, $noreg + ; CHECK: $ebx = ADD32rr $ebx, $ebp - %ebx = LEA32r %ebp, 4, %ebp, 5, %noreg - RETQ %ebx + $ebx = LEA32r $ebp, 4, $ebp, 5, $noreg + RETQ $ebx ... --- @@ -437,8 +437,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%ebx' } - - { reg: '%ebp' } + - { reg: '$ebx' } + - { reg: '$ebp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -455,11 +455,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %ebx - ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg + liveins: $eax, $ebp, $ebx + ; CHECK: $ebp = LEA32r killed $ebp, 4, killed $ebp, 0, $noreg - %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg - RETQ %ebp + $ebp = LEA32r killed $ebp, 4, killed $ebp, 0, $noreg + RETQ $ebp ... --- @@ -471,8 +471,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%ebp' } - - { reg: '%eax' } + - { reg: '$ebp' } + - { reg: '$eax' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -489,19 +489,19 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %ebx - ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg - ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, %noreg - ; CHECK: %ebp = ADD32ri8 %ebp, 5 + liveins: $eax, $ebp, $ebx + ; CHECK: $ebx = LEA32r killed $eax, 4, killed $eax, 5, $noreg + ; CHECK: $ebp = LEA32r killed $ebx, 4, killed $ebx, 0, $noreg + ; CHECK: $ebp = ADD32ri8 $ebp, 5 - CMP32rr %eax, killed %ebx, implicit-def %eflags - %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg - JE_1 %bb.1, implicit %eflags - RETQ %ebx + CMP32rr $eax, killed $ebx, implicit-def $eflags + $ebx = LEA32r killed $eax, 4, killed $eax, 5, $noreg + JE_1 %bb.1, implicit $eflags + RETQ $ebx bb.1: - liveins: %eax, %ebp, %ebx - %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, %noreg - RETQ %ebp + liveins: $eax, $ebp, $ebx + $ebp = LEA32r killed $ebx, 4, killed $ebx, 5, $noreg + RETQ $ebp ... 
Index: test/CodeGen/X86/leaFixup64.mir =================================================================== --- test/CodeGen/X86/leaFixup64.mir +++ test/CodeGen/X86/leaFixup64.mir @@ -158,8 +158,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -176,12 +176,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0 - ; CHECK: %eax = ADD32ri8 %eax, -5 + liveins: $rax, $rbp + ; CHECK: $eax = LEA64_32r killed $rax, 1, killed $rbp, 0 + ; CHECK: $eax = ADD32ri8 $eax, -5 - %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg - RETQ %eax + $eax = LEA64_32r killed $rax, 1, killed $rbp, -5, $noreg + RETQ $eax ... --- @@ -193,8 +193,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -211,12 +211,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0 - ; CHECK: %ebp = ADD32ri8 %ebp, -5 + liveins: $rax, $rbp + ; CHECK: $ebp = LEA64_32r killed $rax, 1, killed $rbp, 0 + ; CHECK: $ebp = ADD32ri8 $ebp, -5 - %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg - RETQ %ebp + $ebp = LEA64_32r killed $rbp, 1, killed $rax, -5, $noreg + RETQ $ebp ... --- @@ -228,8 +228,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -246,11 +246,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0 + liveins: $rax, $rbp + ; CHECK: $ebp = LEA64_32r killed $rax, 1, killed $rbp, 0 - %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg - RETQ %ebp + $ebp = LEA64_32r killed $rbp, 1, killed $rax, 0, $noreg + RETQ $ebp ... --- @@ -262,8 +262,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -280,12 +280,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rax = ADD64rr %rax, killed %rbp - ; CHECK: %rax = ADD64ri8 %rax, -5 + liveins: $rax, $rbp + ; CHECK: $rax = ADD64rr $rax, killed $rbp + ; CHECK: $rax = ADD64ri8 $rax, -5 - %rax = LEA64r killed %rax, 1, killed %rbp, -5, %noreg - RETQ %eax + $rax = LEA64r killed $rax, 1, killed $rbp, -5, $noreg + RETQ $eax ... --- @@ -297,8 +297,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -315,12 +315,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rbp = ADD64rr %rbp, killed %rax - ; CHECK: %rbp = ADD64ri8 %rbp, -5 + liveins: $rax, $rbp + ; CHECK: $rbp = ADD64rr $rbp, killed $rax + ; CHECK: $rbp = ADD64ri8 $rbp, -5 - %rbp = LEA64r killed %rbp, 1, killed %rax, -5, %noreg - RETQ %ebp + $rbp = LEA64r killed $rbp, 1, killed $rax, -5, $noreg + RETQ $ebp ... 
--- @@ -332,8 +332,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -350,11 +350,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rbp = ADD64rr %rbp, killed %rax + liveins: $rax, $rbp + ; CHECK: $rbp = ADD64rr $rbp, killed $rax - %rbp = LEA64r killed %rbp, 1, killed %rax, 0, %noreg - RETQ %ebp + $rbp = LEA64r killed $rbp, 1, killed $rax, 0, $noreg + RETQ $ebp ... --- @@ -366,9 +366,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } - - { reg: '%rbx' } + - { reg: '$rax' } + - { reg: '$rbp' } + - { reg: '$rbx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -385,12 +385,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg - ; CHECK: %ebx = ADD32ri8 %ebx, -5 + liveins: $rax, $rbp + ; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg + ; CHECK: $ebx = ADD32ri8 $ebx, -5 - %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg - RETQ %ebx + $ebx = LEA64_32r killed $rax, 1, killed $rbp, -5, $noreg + RETQ $ebx ... --- @@ -402,9 +402,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } - - { reg: '%rbx' } + - { reg: '$rax' } + - { reg: '$rbp' } + - { reg: '$rbx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -421,12 +421,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg - ; CHECK: %ebx = ADD32ri8 %ebx, -5 + liveins: $rax, $rbp + ; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg + ; CHECK: $ebx = ADD32ri8 $ebx, -5 - %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg - RETQ %ebx + $ebx = LEA64_32r killed $rbp, 1, killed $rax, -5, $noreg + RETQ $ebx ... --- @@ -438,9 +438,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } - - { reg: '%rbx' } + - { reg: '$rax' } + - { reg: '$rbp' } + - { reg: '$rbx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -457,11 +457,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg + liveins: $rax, $rbp + ; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg - %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg - RETQ %ebx + $ebx = LEA64_32r killed $rbp, 1, killed $rax, 0, $noreg + RETQ $ebx ... --- @@ -473,9 +473,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } - - { reg: '%rbx' } + - { reg: '$rax' } + - { reg: '$rbp' } + - { reg: '$rbx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -492,12 +492,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg - ; CHECK: %rbx = ADD64ri8 %rbx, -5 + liveins: $rax, $rbp + ; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg + ; CHECK: $rbx = ADD64ri8 $rbx, -5 - %rbx = LEA64r killed %rax, 1, killed %rbp, -5, %noreg - RETQ %ebx + $rbx = LEA64r killed $rax, 1, killed $rbp, -5, $noreg + RETQ $ebx ... 
--- @@ -509,9 +509,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } - - { reg: '%rbx' } + - { reg: '$rax' } + - { reg: '$rbp' } + - { reg: '$rbx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -528,12 +528,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg - ; CHECK: %rbx = ADD64ri8 %rbx, -5 + liveins: $rax, $rbp + ; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg + ; CHECK: $rbx = ADD64ri8 $rbx, -5 - %rbx = LEA64r killed %rbp, 1, killed %rax, -5, %noreg - RETQ %ebx + $rbx = LEA64r killed $rbp, 1, killed $rax, -5, $noreg + RETQ $ebx ... --- @@ -545,9 +545,9 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } - - { reg: '%rbx' } + - { reg: '$rax' } + - { reg: '$rbp' } + - { reg: '$rbx' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -564,11 +564,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg + liveins: $rax, $rbp + ; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg - %rbx = LEA64r killed %rbp, 1, killed %rax, 0, %noreg - RETQ %ebx + $rbx = LEA64r killed $rbp, 1, killed $rax, 0, $noreg + RETQ $ebx ... --- @@ -580,8 +580,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rbp' } + - { reg: '$rdi' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -598,13 +598,13 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rdi, %rbp - ; CHECK: %r12 = LEA64r %noreg, 2, killed %r13, 5, %noreg - ; CHECK: %r12 = ADD64rr %r12, killed %rbp - %rbp = KILL %rbp, implicit-def %rbp - %r13 = KILL %rdi, implicit-def %r13 - %r12 = LEA64r killed %rbp, 2, killed %r13, 5, %noreg - RETQ %r12 + liveins: $rdi, $rbp + ; CHECK: $r12 = LEA64r $noreg, 2, killed $r13, 5, $noreg + ; CHECK: $r12 = ADD64rr $r12, killed $rbp + $rbp = KILL $rbp, implicit-def $rbp + $r13 = KILL $rdi, implicit-def $r13 + $r12 = LEA64r killed $rbp, 2, killed $r13, 5, $noreg + RETQ $r12 ... --- @@ -616,8 +616,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -634,12 +634,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0 - ; CHECK: %eax = ADD32ri %eax, 129 + liveins: $rax, $rbp + ; CHECK: $eax = LEA64_32r killed $rax, 1, killed $rbp, 0 + ; CHECK: $eax = ADD32ri $eax, 129 - %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, %noreg - RETQ %eax + $eax = LEA64_32r killed $rax, 1, killed $rbp, 129, $noreg + RETQ $eax ... --- @@ -651,8 +651,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -669,11 +669,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg + liveins: $rax, $rbp, $rbx + ; CHECK: $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 0, $noreg - %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg - RETQ %ebx + $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 0, $noreg + RETQ $ebx ... 
--- @@ -685,8 +685,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbx' } - - { reg: '%rbp' } + - { reg: '$rbx' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -703,11 +703,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg + liveins: $rax, $rbp, $rbx + ; CHECK: $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 5, $noreg - %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg - RETQ %ebx + $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 5, $noreg + RETQ $ebx ... --- @@ -719,8 +719,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbx' } - - { reg: '%rbp' } + - { reg: '$rbx' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -737,11 +737,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %eax, %ebp, %ebx - ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg + liveins: $eax, $ebp, $ebx + ; CHECK: $ebx = LEA64_32r killed $rbp, 4, killed $rbp, 5, $noreg - %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg - RETQ %ebx + $ebx = LEA64_32r killed $rbp, 4, killed $rbp, 5, $noreg + RETQ $ebx ... --- @@ -753,8 +753,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -771,12 +771,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp - ; CHECK: %rax = ADD64rr %rax, killed %rbp - ; CHECK: %rax = ADD64ri32 %rax, 129 + liveins: $rax, $rbp + ; CHECK: $rax = ADD64rr $rax, killed $rbp + ; CHECK: $rax = ADD64ri32 $rax, 129 - %rax = LEA64r killed %rax, 1, killed %rbp, 129, %noreg - RETQ %eax + $rax = LEA64r killed $rax, 1, killed $rbp, 129, $noreg + RETQ $eax ... --- @@ -788,8 +788,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rax' } - - { reg: '%rbp' } + - { reg: '$rax' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -806,12 +806,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %rbx = MOV64rr %rbp - ; CHECK: %rbx = ADD64rr %rbx, %rbp + liveins: $rax, $rbp, $rbx + ; CHECK: $rbx = MOV64rr $rbp + ; CHECK: $rbx = ADD64rr $rbx, $rbp - %rbx = LEA64r killed %rbp, 1, %rbp, 0, %noreg - RETQ %ebx + $rbx = LEA64r killed $rbp, 1, $rbp, 0, $noreg + RETQ $ebx ... --- @@ -823,8 +823,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbx' } - - { reg: '%rbp' } + - { reg: '$rbx' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -841,12 +841,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %rbx = LEA64r %noreg, 1, %rbp, 5, %noreg - ; CHECK: %rbx = ADD64rr %rbx, %rbp + liveins: $rax, $rbp, $rbx + ; CHECK: $rbx = LEA64r $noreg, 1, $rbp, 5, $noreg + ; CHECK: $rbx = ADD64rr $rbx, $rbp - %rbx = LEA64r %rbp, 1, %rbp, 5, %noreg - RETQ %ebx + $rbx = LEA64r $rbp, 1, $rbp, 5, $noreg + RETQ $ebx ... 
--- @@ -858,8 +858,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbx' } - - { reg: '%rbp' } + - { reg: '$rbx' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -876,12 +876,12 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %rbx = LEA64r %noreg, 4, %rbp, 5, %noreg - ; CHECK: %rbx = ADD64rr %rbx, %rbp + liveins: $rax, $rbp, $rbx + ; CHECK: $rbx = LEA64r $noreg, 4, $rbp, 5, $noreg + ; CHECK: $rbx = ADD64rr $rbx, $rbp - %rbx = LEA64r %rbp, 4, %rbp, 5, %noreg - RETQ %ebx + $rbx = LEA64r $rbp, 4, $rbp, 5, $noreg + RETQ $ebx ... --- @@ -893,8 +893,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbx' } - - { reg: '%rbp' } + - { reg: '$rbx' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -911,11 +911,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg + liveins: $rax, $rbp, $rbx + ; CHECK: $rbp = LEA64r killed $rbp, 4, killed $rbp, 0, $noreg - %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg - RETQ %ebp + $rbp = LEA64r killed $rbp, 4, killed $rbp, 0, $noreg + RETQ $ebp ... --- @@ -927,8 +927,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbp' } - - { reg: '%rax' } + - { reg: '$rbp' } + - { reg: '$rax' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -945,19 +945,19 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg - ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, %noreg - ; CHECK: %rbp = ADD64ri8 %rbp, 5 + liveins: $rax, $rbp, $rbx + ; CHECK: $rbx = LEA64r killed $rax, 4, killed $rax, 5, $noreg + ; CHECK: $rbp = LEA64r killed $rbx, 4, killed $rbx, 0, $noreg + ; CHECK: $rbp = ADD64ri8 $rbp, 5 - CMP64rr %rax, killed %rbx, implicit-def %eflags - %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg - JE_1 %bb.1, implicit %eflags - RETQ %ebx + CMP64rr $rax, killed $rbx, implicit-def $eflags + $rbx = LEA64r killed $rax, 4, killed $rax, 5, $noreg + JE_1 %bb.1, implicit $eflags + RETQ $ebx bb.1: - liveins: %rax, %rbp, %rbx - %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, %noreg - RETQ %ebp + liveins: $rax, $rbp, $rbx + $rbp = LEA64r killed $rbx, 4, killed $rbx, 5, $noreg + RETQ $ebp ... --- @@ -969,8 +969,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbx' } - - { reg: '%rbp' } + - { reg: '$rbx' } + - { reg: '$rbp' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -987,11 +987,11 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg + liveins: $rax, $rbp, $rbx + ; CHECK: $ebp = LEA64_32r killed $rbp, 4, killed $rbp, 0, $noreg - %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg - RETQ %ebp + $ebp = LEA64_32r killed $rbp, 4, killed $rbp, 0, $noreg + RETQ $ebp ... 
--- @@ -1003,8 +1003,8 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%rbp' } - - { reg: '%rax' } + - { reg: '$rbp' } + - { reg: '$rax' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -1021,19 +1021,19 @@ hasMustTailInVarArgFunc: false body: | bb.0 (%ir-block.0): - liveins: %rax, %rbp, %rbx - ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg - ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, %noreg - ; CHECK: %ebp = ADD32ri8 %ebp, 5 + liveins: $rax, $rbp, $rbx + ; CHECK: $ebx = LEA64_32r killed $rax, 4, killed $rax, 5, $noreg + ; CHECK: $ebp = LEA64_32r killed $rbx, 4, killed $rbx, 0, $noreg + ; CHECK: $ebp = ADD32ri8 $ebp, 5 - CMP64rr %rax, killed %rbx, implicit-def %eflags - %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg - JE_1 %bb.1, implicit %eflags - RETQ %ebx + CMP64rr $rax, killed $rbx, implicit-def $eflags + $ebx = LEA64_32r killed $rax, 4, killed $rax, 5, $noreg + JE_1 %bb.1, implicit $eflags + RETQ $ebx bb.1: - liveins: %rax, %rbp, %rbx - %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, %noreg - RETQ %ebp + liveins: $rax, $rbp, $rbx + $ebp = LEA64_32r killed $rbx, 4, killed $rbx, 5, $noreg + RETQ $ebp ... Index: test/CodeGen/X86/loop-search.ll =================================================================== --- test/CodeGen/X86/loop-search.ll +++ test/CodeGen/X86/loop-search.ll @@ -25,15 +25,15 @@ ; ### FIXME: %bb.3 and LBB0_1 should be merged ; CHECK-NEXT: ## %bb.3: ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ; CHECK-NEXT: LBB0_1: ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq ; CHECK-NEXT: LBB0_6: ; CHECK-NEXT: movb $1, %al -; CHECK-NEXT: ## kill: def %al killed %al killed %eax +; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: retq entry: %cmp5 = icmp sgt i32 %count, 0 Index: test/CodeGen/X86/lzcnt-schedule.ll =================================================================== --- test/CodeGen/X86/lzcnt-schedule.ll +++ test/CodeGen/X86/lzcnt-schedule.ll @@ -13,7 +13,7 @@ ; GENERIC-NEXT: lzcntw (%rsi), %cx # sched: [7:1.00] ; GENERIC-NEXT: lzcntw %di, %ax # sched: [3:1.00] ; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33] -; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax +; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_ctlz_i16: @@ -21,7 +21,7 @@ ; HASWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00] ; HASWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00] ; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25] -; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax +; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_ctlz_i16: @@ -29,7 +29,7 @@ ; BROADWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00] ; BROADWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00] ; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25] -; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax +; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_ctlz_i16: @@ -37,7 +37,7 @@ ; SKYLAKE-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00] ; SKYLAKE-NEXT: lzcntw %di, %ax # sched: [3:1.00] ; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25] -; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax +; SKYLAKE-NEXT: # kill: def 
$ax killed $ax killed $eax ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_ctlz_i16: @@ -45,7 +45,7 @@ ; BTVER2-NEXT: lzcntw (%rsi), %cx # sched: [6:1.00] ; BTVER2-NEXT: lzcntw %di, %ax # sched: [3:1.00] ; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50] -; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax +; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_ctlz_i16: @@ -53,7 +53,7 @@ ; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50] ; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25] ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax +; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i16, i16 *%a1 %2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false ) Index: test/CodeGen/X86/lzcnt-zext-cmp.ll =================================================================== --- test/CodeGen/X86/lzcnt-zext-cmp.ll +++ test/CodeGen/X86/lzcnt-zext-cmp.ll @@ -84,7 +84,7 @@ ; ALL-NEXT: sete %cl ; ALL-NEXT: orb %al, %cl ; ALL-NEXT: movzbl %cl, %eax -; ALL-NEXT: # kill: def %ax killed %ax killed %eax +; ALL-NEXT: # kill: def $ax killed $ax killed $eax ; ALL-NEXT: retq %cmp = icmp eq i16 %a, 0 %cmp1 = icmp eq i16 %b, 0 @@ -128,7 +128,7 @@ ; FASTLZCNT-NEXT: lzcntq %rsi, %rax ; FASTLZCNT-NEXT: orl %ecx, %eax ; FASTLZCNT-NEXT: shrl $6, %eax -; FASTLZCNT-NEXT: # kill: def %eax killed %eax killed %rax +; FASTLZCNT-NEXT: # kill: def $eax killed $eax killed $rax ; FASTLZCNT-NEXT: retq ; ; NOFASTLZCNT-LABEL: test_zext_cmp5: @@ -267,7 +267,7 @@ ; FASTLZCNT-NEXT: shrl $5, %ecx ; FASTLZCNT-NEXT: shrl $6, %eax ; FASTLZCNT-NEXT: orl %ecx, %eax -; FASTLZCNT-NEXT: # kill: def %eax killed %eax killed %rax +; FASTLZCNT-NEXT: # kill: def $eax killed $eax killed $rax ; FASTLZCNT-NEXT: retq ; ; NOFASTLZCNT-LABEL: test_zext_cmp9: Index: test/CodeGen/X86/machine-combiner-int.ll =================================================================== --- test/CodeGen/X86/machine-combiner-int.ll +++ test/CodeGen/X86/machine-combiner-int.ll @@ -34,8 +34,8 @@ ; CHECK-NEXT: retq ; DEAD: ADD32rr -; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead %eflags -; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead %eflags +; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead $eflags +; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead $eflags %t0 = add i32 %x0, %x1 %t1 = mul i32 %x2, %t0 Index: test/CodeGen/X86/machine-copy-prop.mir =================================================================== --- test/CodeGen/X86/machine-copy-prop.mir +++ test/CodeGen/X86/machine-copy-prop.mir @@ -20,196 +20,196 @@ # the kill flag of intermediate instructions. # CHECK-LABEL: name: copyprop_remove_kill0 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rdi -# CHECK-NEXT: NOOP implicit %rdi +# CHECK-NEXT: $rax = COPY $rdi +# CHECK-NEXT: NOOP implicit $rdi # CHECK-NOT: COPY -# CHECK-NEXT: NOOP implicit %rax, implicit %rdi +# CHECK-NEXT: NOOP implicit $rax, implicit $rdi name: copyprop_remove_kill0 body: | bb.0: - %rax = COPY %rdi - NOOP implicit killed %rdi - %rdi = COPY %rax - NOOP implicit %rax, implicit %rdi + $rax = COPY $rdi + NOOP implicit killed $rdi + $rdi = COPY $rax + NOOP implicit $rax, implicit $rdi ... --- # The second copy is redundant and will be removed, check that we also remove # the kill flag of intermediate instructions. 
# CHECK-LABEL: name: copyprop_remove_kill1 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rdi -# CHECK-NEXT: NOOP implicit %edi +# CHECK-NEXT: $rax = COPY $rdi +# CHECK-NEXT: NOOP implicit $edi # CHECK-NOT: COPY -# CHECK-NEXT: NOOP implicit %rax, implicit %rdi +# CHECK-NEXT: NOOP implicit $rax, implicit $rdi name: copyprop_remove_kill1 body: | bb.0: - %rax = COPY %rdi - NOOP implicit killed %edi - %rdi = COPY %rax - NOOP implicit %rax, implicit %rdi + $rax = COPY $rdi + NOOP implicit killed $edi + $rdi = COPY $rax + NOOP implicit $rax, implicit $rdi ... --- # The second copy is redundant and will be removed, check that we also remove # the kill flag of intermediate instructions. # CHECK-LABEL: name: copyprop_remove_kill2 # CHECK: bb.0: -# CHECK-NEXT: %ax = COPY %di -# CHECK-NEXT: NOOP implicit %rdi +# CHECK-NEXT: $ax = COPY $di +# CHECK-NEXT: NOOP implicit $rdi # CHECK-NOT: COPY -# CHECK-NEXT: NOOP implicit %rax, implicit %rdi +# CHECK-NEXT: NOOP implicit $rax, implicit $rdi name: copyprop_remove_kill2 body: | bb.0: - %ax = COPY %di - NOOP implicit killed %rdi - %di = COPY %ax - NOOP implicit %rax, implicit %rdi + $ax = COPY $di + NOOP implicit killed $rdi + $di = COPY $ax + NOOP implicit $rax, implicit $rdi ... --- # The second copy is redundant; the call preserves the source and dest register. # CHECK-LABEL: name: copyprop0 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rdi +# CHECK-NEXT: $rax = COPY $rdi # CHECK-NEXT: CALL64pcrel32 @foo, csr_64_rt_mostregs -# CHECK-NEXT: NOOP implicit %edi +# CHECK-NEXT: NOOP implicit $edi # CHECK-NOT: COPY -# CHECK-NEXT: NOOP implicit %rax, implicit %rdi +# CHECK-NEXT: NOOP implicit $rax, implicit $rdi name: copyprop0 body: | bb.0: - %rax = COPY %rdi + $rax = COPY $rdi CALL64pcrel32 @foo, csr_64_rt_mostregs - NOOP implicit killed %edi - %rdi = COPY %rax - NOOP implicit %rax, implicit %rdi + NOOP implicit killed $edi + $rdi = COPY $rax + NOOP implicit $rax, implicit $rdi ... --- # The 2nd copy is redundant; The call preserves the source and dest register. # CHECK-LABEL: name: copyprop1 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rdi -# CHECK-NEXT: NOOP implicit %rax -# CHECK-NEXT: NOOP implicit %rax, implicit %rdi +# CHECK-NEXT: $rax = COPY $rdi +# CHECK-NEXT: NOOP implicit $rax +# CHECK-NEXT: NOOP implicit $rax, implicit $rdi name: copyprop1 body: | bb.0: - %rax = COPY %rdi - NOOP implicit killed %rax - %rax = COPY %rdi - NOOP implicit %rax, implicit %rdi + $rax = COPY $rdi + NOOP implicit killed $rax + $rax = COPY $rdi + NOOP implicit $rax, implicit $rdi ... --- # CHECK-LABEL: name: copyprop2 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rdi -# CHECK-NEXT: NOOP implicit %ax +# CHECK-NEXT: $rax = COPY $rdi +# CHECK-NEXT: NOOP implicit $ax # CHECK-NEXT: CALL64pcrel32 @foo, csr_64_rt_mostregs -# CHECK-NOT: %rax = COPY %rdi -# CHECK-NEXT: NOOP implicit %rax, implicit %rdi +# CHECK-NOT: $rax = COPY $rdi +# CHECK-NEXT: NOOP implicit $rax, implicit $rdi name: copyprop2 body: | bb.0: - %rax = COPY %rdi - NOOP implicit killed %ax + $rax = COPY $rdi + NOOP implicit killed $ax CALL64pcrel32 @foo, csr_64_rt_mostregs - %rax = COPY %rdi - NOOP implicit %rax, implicit %rdi + $rax = COPY $rdi + NOOP implicit $rax, implicit $rdi ... --- -# The second copy is not redundant if the source register (%rax) is clobbered -# even if the dest (%rbp) is not. +# The second copy is not redundant if the source register ($rax) is clobbered +# even if the dest ($rbp) is not. 
# CHECK-LABEL: name: nocopyprop0 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rbp -# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp -# CHECK-NEXT: %rbp = COPY %rax -# CHECK-NEXT: NOOP implicit %rax, implicit %rbp +# CHECK-NEXT: $rax = COPY $rbp +# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp +# CHECK-NEXT: $rbp = COPY $rax +# CHECK-NEXT: NOOP implicit $rax, implicit $rbp name: nocopyprop0 body: | bb.0: - %rax = COPY %rbp - CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp - %rbp = COPY %rax - NOOP implicit %rax, implicit %rbp + $rax = COPY $rbp + CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp + $rbp = COPY $rax + NOOP implicit $rax, implicit $rbp ... --- -# The second copy is not redundant if the dest register (%rax) is clobbered -# even if the source (%rbp) is not. +# The second copy is not redundant if the dest register ($rax) is clobbered +# even if the source ($rbp) is not. # CHECK-LABEL: name: nocopyprop1 # CHECK: bb.0: -# CHECK-NEXT: %rbp = COPY %rax -# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp -# CHECK-NEXT: %rax = COPY %rbp -# CHECK-NEXT: NOOP implicit %rax, implicit %rbp +# CHECK-NEXT: $rbp = COPY $rax +# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp +# CHECK-NEXT: $rax = COPY $rbp +# CHECK-NEXT: NOOP implicit $rax, implicit $rbp name: nocopyprop1 body: | bb.0: - %rbp = COPY %rax - CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp - %rax = COPY %rbp - NOOP implicit %rax, implicit %rbp + $rbp = COPY $rax + CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp + $rax = COPY $rbp + NOOP implicit $rax, implicit $rbp ... --- -# The second copy is not redundant if the source register (%rax) is clobbered -# even if the dest (%rbp) is not. +# The second copy is not redundant if the source register ($rax) is clobbered +# even if the dest ($rbp) is not. # CHECK-LABEL: name: nocopyprop2 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rbp -# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp -# CHECK-NEXT: %rax = COPY %rbp -# CHECK-NEXT: NOOP implicit %rax, implicit %rbp +# CHECK-NEXT: $rax = COPY $rbp +# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp +# CHECK-NEXT: $rax = COPY $rbp +# CHECK-NEXT: NOOP implicit $rax, implicit $rbp name: nocopyprop2 body: | bb.0: - %rax = COPY %rbp - CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp - %rax = COPY %rbp - NOOP implicit %rax, implicit %rbp + $rax = COPY $rbp + CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp + $rax = COPY $rbp + NOOP implicit $rax, implicit $rbp ... --- -# The second copy is not redundant if the dest register (%rax) is clobbered -# even if the source (%rbp) is not. +# The second copy is not redundant if the dest register ($rax) is clobbered +# even if the source ($rbp) is not. 
# CHECK-LABEL: name: nocopyprop3 # CHECK: bb.0: -# CHECK-NEXT: %rbp = COPY %rax -# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp -# CHECK-NEXT: %rbp = COPY %rax -# CHECK-NEXT: NOOP implicit %rax, implicit %rbp +# CHECK-NEXT: $rbp = COPY $rax +# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp +# CHECK-NEXT: $rbp = COPY $rax +# CHECK-NEXT: NOOP implicit $rax, implicit $rbp name: nocopyprop3 body: | bb.0: - %rbp = COPY %rax - CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp - %rbp = COPY %rax - NOOP implicit %rax, implicit %rbp + $rbp = COPY $rax + CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp + $rbp = COPY $rax + NOOP implicit $rax, implicit $rbp ... --- # A reserved register may change its value so the 2nd copy is not redundant. # CHECK-LABEL: name: nocopyprop4 # CHECK: bb.0: -# CHECK-NEXT: %rax = COPY %rip -# CHECK-NEXT: NOOP implicit %rax -# CHECK-NEXT: %rax = COPY %rip -# CHECK-NEXT: NOOP implicit %rax +# CHECK-NEXT: $rax = COPY $rip +# CHECK-NEXT: NOOP implicit $rax +# CHECK-NEXT: $rax = COPY $rip +# CHECK-NEXT: NOOP implicit $rax name: nocopyprop4 body: | bb.0: - %rax = COPY %rip - NOOP implicit %rax - %rax = COPY %rip - NOOP implicit %rax + $rax = COPY $rip + NOOP implicit $rax + $rax = COPY $rip + NOOP implicit $rax ... --- # Writing to a reserved register may have additional effects (slightly illegal -# testcase because writing to %rip like this should make the instruction a jump) +# testcase because writing to $rip like this should make the instruction a jump) # CHECK-LABEL: name: nocopyprop5 # CHECK: bb.0: -# CHECK-NEXT: %rip = COPY %rax -# CHECK-NEXT: %rip = COPY %rax +# CHECK-NEXT: $rip = COPY $rax +# CHECK-NEXT: $rip = COPY $rax name: nocopyprop5 body: | bb.0: - %rip = COPY %rax - %rip = COPY %rax + $rip = COPY $rax + $rip = COPY $rax ... 
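The machine-copy-prop.mir cases above all pin down one rule: a later COPY may be deleted only when an equivalent copy, in either direction, is still available, that is, neither its source nor its destination register has been clobbered since, and a reserved register such as $rip is never assumed to hold a stable value. The sketch below is a minimal standalone model of that bookkeeping, not the actual MachineCopyPropagation code; register names are plain strings, and the kill-flag maintenance checked by the copyprop_remove_kill tests is deliberately left out.

    #include <iterator>
    #include <map>
    #include <set>
    #include <string>

    // Tracks which "Dst currently holds the same value as Src" facts are
    // still valid, the way the copyprop/nocopyprop tests above exercise them.
    class CopyTracker {
      std::map<std::string, std::string> Avail; // Dst -> Src of live copies
      std::set<std::string> Reserved{"$rip"};   // never assumed stable

    public:
      // Record "Dst = COPY Src"; returns true if the copy is redundant and
      // may simply be erased.
      bool recordCopy(const std::string &Dst, const std::string &Src) {
        bool Trusted = !Reserved.count(Dst) && !Reserved.count(Src);
        if (Trusted) {
          auto Fwd = Avail.find(Dst); // same copy repeated (copyprop1/2)
          if (Fwd != Avail.end() && Fwd->second == Src)
            return true;
          auto Rev = Avail.find(Src); // copy back (remove_kill*, copyprop0)
          if (Rev != Avail.end() && Rev->second == Dst)
            return true;
        }
        clobber(Dst); // Dst is redefined; drop every fact mentioning it
        if (Trusted)
          Avail[Dst] = Src;
        return false;
      }

      // Called for each register an instruction or non-preserving call
      // defines; a clobber of either side kills the fact (nocopyprop0-3).
      void clobber(const std::string &Reg) {
        for (auto It = Avail.begin(); It != Avail.end();)
          It = (It->first == Reg || It->second == Reg) ? Avail.erase(It)
                                                       : std::next(It);
      }
    };

Fed the sequences above, this model reports the trailing copy dead in every copyprop* function and keeps it in every nocopyprop* function, matching the CHECK lines.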
Index: test/CodeGen/X86/machine-cse.ll =================================================================== --- test/CodeGen/X86/machine-cse.ll +++ test/CodeGen/X86/machine-cse.ll @@ -50,8 +50,8 @@ define void @commute(i32 %test_case, i32 %scale) nounwind ssp { ; CHECK-LABEL: commute: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal -1(%rdi), %eax ; CHECK-NEXT: cmpl $2, %eax ; CHECK-NEXT: ja .LBB1_4 @@ -64,7 +64,7 @@ ; CHECK-NEXT: imull %edi, %esi ; CHECK-NEXT: leal (%rsi,%rsi,2), %esi ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: # kill: def %edi killed %edi killed %rdi +; CHECK-NEXT: # kill: def $edi killed $edi killed $rdi ; CHECK-NEXT: callq printf ; CHECK-NEXT: addq $8, %rsp ; CHECK-NEXT: .p2align 4, 0x90 Index: test/CodeGen/X86/machine-region-info.mir =================================================================== --- test/CodeGen/X86/machine-region-info.mir +++ test/CodeGen/X86/machine-region-info.mir @@ -4,45 +4,45 @@ name: fun body: | bb.0: - CMP32ri8 %edi, 40, implicit-def %eflags - JNE_1 %bb.7, implicit killed %eflags + CMP32ri8 $edi, 40, implicit-def $eflags + JNE_1 %bb.7, implicit killed $eflags JMP_1 %bb.1 bb.1: - CMP32ri8 %edi, 1, implicit-def %eflags - JNE_1 %bb.11, implicit killed %eflags + CMP32ri8 $edi, 1, implicit-def $eflags + JNE_1 %bb.11, implicit killed $eflags JMP_1 %bb.2 bb.2: - CMP32ri8 %edi, 2, implicit-def %eflags - JNE_1 %bb.5, implicit killed %eflags + CMP32ri8 $edi, 2, implicit-def $eflags + JNE_1 %bb.5, implicit killed $eflags JMP_1 %bb.3 bb.3: - CMP32ri8 %edi, 90, implicit-def %eflags - JNE_1 %bb.5, implicit killed %eflags + CMP32ri8 $edi, 90, implicit-def $eflags + JNE_1 %bb.5, implicit killed $eflags JMP_1 %bb.4 bb.4: bb.5: - CMP32ri8 %edi, 4, implicit-def %eflags - JNE_1 %bb.11, implicit killed %eflags + CMP32ri8 $edi, 4, implicit-def $eflags + JNE_1 %bb.11, implicit killed $eflags JMP_1 %bb.6 bb.6: JMP_1 %bb.11 bb.7: - CMP32ri8 %edi, 5, implicit-def %eflags - JE_1 %bb.9, implicit killed %eflags + CMP32ri8 $edi, 5, implicit-def $eflags + JE_1 %bb.9, implicit killed $eflags JMP_1 %bb.8 bb.8: bb.9: - CMP32ri8 %edi, 6, implicit-def %eflags - JE_1 %bb.11, implicit killed %eflags + CMP32ri8 $edi, 6, implicit-def $eflags + JE_1 %bb.11, implicit killed $eflags JMP_1 %bb.10 bb.10: Index: test/CodeGen/X86/masked_gather_scatter.ll =================================================================== --- test/CodeGen/X86/masked_gather_scatter.ll +++ test/CodeGen/X86/masked_gather_scatter.ll @@ -299,8 +299,8 @@ ; ; KNL_32-LABEL: test6: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL_32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL_32-NEXT: movw $255, %ax ; KNL_32-NEXT: kmovw %eax, %k1 ; KNL_32-NEXT: kmovw %k1, %k2 @@ -337,7 +337,7 @@ ; ; KNL_64-LABEL: test7: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL_64-NEXT: kmovw %esi, %k0 ; KNL_64-NEXT: kshiftlw $8, %k0, %k0 ; KNL_64-NEXT: kshiftrw $8, %k0, %k1 @@ -350,7 +350,7 @@ ; ; KNL_32-LABEL: test7: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL_32-NEXT: movl {{[0-9]+}}(%esp), 
%eax ; KNL_32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx ; KNL_32-NEXT: kmovw %ecx, %k0 @@ -496,7 +496,7 @@ ; KNL_32-NEXT: movw $255, %ax ; KNL_32-NEXT: kmovw %eax, %k1 ; KNL_32-NEXT: vpgatherdd (,%zmm1), %zmm0 {%k1} -; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL_32-NEXT: retl ; ; SKX_SMALL-LABEL: test9: @@ -582,7 +582,7 @@ ; KNL_32-NEXT: movw $255, %ax ; KNL_32-NEXT: kmovw %eax, %k1 ; KNL_32-NEXT: vpgatherdd (,%zmm1), %zmm0 {%k1} -; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL_32-NEXT: retl ; ; SKX_SMALL-LABEL: test10: @@ -819,7 +819,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) { ; KNL_64-LABEL: test15: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_64-NEXT: kshiftlw $12, %k0, %k0 @@ -831,7 +831,7 @@ ; ; KNL_32-LABEL: test15: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_32-NEXT: kshiftlw $12, %k0, %k0 @@ -869,8 +869,8 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x double> %src0) { ; KNL_64-LABEL: test16: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL_64-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_64-NEXT: kshiftlw $12, %k0, %k0 @@ -881,8 +881,8 @@ ; ; KNL_32-LABEL: test16: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL_32-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_32-NEXT: kshiftlw $12, %k0, %k0 @@ -918,7 +918,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x double> %src0) { ; KNL_64-LABEL: test17: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 +; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 ; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0 ; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0 ; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1 @@ -932,7 +932,7 @@ ; ; KNL_32-LABEL: test17: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 +; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 ; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0 ; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0 ; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1 @@ -981,8 +981,8 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) { ; KNL_64-LABEL: test18: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL_64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL_64-NEXT: vpslld $31, %xmm2, %xmm2 ; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k0 ; KNL_64-NEXT: kshiftlw $12, %k0, %k0 @@ -993,8 +993,8 @@ ; ; KNL_32-LABEL: test18: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 
def %zmm1 -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_32-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_32-NEXT: vpslld $31, %xmm2, %xmm2 ; KNL_32-NEXT: vptestmd %zmm2, %zmm2, %k0 ; KNL_32-NEXT: kshiftlw $12, %k0, %k0 @@ -1024,8 +1024,8 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind) { ; KNL_64-LABEL: test19: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL_64-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_64-NEXT: kshiftlw $12, %k0, %k0 @@ -1036,8 +1036,8 @@ ; ; KNL_32-LABEL: test19: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL_32-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_32-NEXT: kshiftlw $12, %k0, %k0 @@ -1072,8 +1072,8 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) { ; KNL_64-LABEL: test20: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL_64-NEXT: vpsllq $63, %xmm2, %xmm2 ; KNL_64-NEXT: vptestmq %zmm2, %zmm2, %k0 ; KNL_64-NEXT: kshiftlw $14, %k0, %k0 @@ -1084,7 +1084,7 @@ ; ; KNL_32-LABEL: test20: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,2,3] ; KNL_32-NEXT: vpsllq $63, %xmm2, %xmm2 ; KNL_32-NEXT: vptestmq %zmm2, %zmm2, %k0 @@ -1116,7 +1116,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) { ; KNL_64-LABEL: test21: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL_64-NEXT: vpsllq $63, %xmm2, %xmm2 ; KNL_64-NEXT: vptestmq %zmm2, %zmm2, %k0 ; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] @@ -1164,7 +1164,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x float> %src0) { ; KNL_64-LABEL: test22: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 +; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 ; KNL_64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3] ; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1 ; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k0 @@ -1177,7 +1177,7 @@ ; ; KNL_32-LABEL: test22: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 +; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 ; KNL_32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3] ; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k0 @@ -1216,8 +1216,8 @@ define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x float> %src0) { ; KNL_64-LABEL: test22a: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2 -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_64-NEXT: vpsllq $63, %xmm1, 
%xmm1 ; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k0 ; KNL_64-NEXT: kshiftlw $14, %k0, %k0 @@ -1229,8 +1229,8 @@ ; ; KNL_32-LABEL: test22a: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2 -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k0 ; KNL_32-NEXT: kshiftlw $14, %k0, %k0 @@ -1322,7 +1322,7 @@ define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32> %src0) { ; KNL_64-LABEL: test23b: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1 ; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k0 ; KNL_64-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] @@ -1335,7 +1335,7 @@ ; ; KNL_32-LABEL: test23b: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k0 ; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -1419,7 +1419,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %src0) { ; KNL_64-LABEL: test25: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 +; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 ; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0 ; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0 ; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1 @@ -1433,7 +1433,7 @@ ; ; KNL_32-LABEL: test25: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 +; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 ; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0 ; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0 ; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1 @@ -1475,7 +1475,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) { ; KNL_64-LABEL: test26: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0 ; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0 ; KNL_64-NEXT: movb $3, %al @@ -1487,7 +1487,7 @@ ; ; KNL_32-LABEL: test26: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; KNL_32-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0 ; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0 ; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -1530,7 +1530,7 @@ ; KNL_64-NEXT: movw $3, %ax ; KNL_64-NEXT: kmovw %eax, %k1 ; KNL_64-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1} -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL_64-NEXT: vzeroupper ; KNL_64-NEXT: retq ; @@ -1541,7 +1541,7 @@ ; KNL_32-NEXT: movw $3, %cx ; KNL_32-NEXT: kmovw %ecx, %k1 ; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1} -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -1571,7 +1571,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) { ; KNL_64-LABEL: test28: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; KNL_64-NEXT: movb $3, %al ; KNL_64-NEXT: kmovw %eax, %k1 @@ -1665,7 +1665,7 @@ define <3 x i32> 
@test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) { ; KNL_64-LABEL: test30: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm3 killed %xmm3 def %zmm3 +; KNL_64-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3 ; KNL_64-NEXT: vpslld $31, %xmm2, %xmm2 ; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1 ; KNL_64-NEXT: kmovw %k1, %eax @@ -1673,7 +1673,7 @@ ; KNL_64-NEXT: vpsllq $2, %ymm1, %ymm1 ; KNL_64-NEXT: vpaddq %ymm1, %ymm0, %ymm1 ; KNL_64-NEXT: testb $1, %al -; KNL_64-NEXT: # implicit-def: %xmm0 +; KNL_64-NEXT: # implicit-def: $xmm0 ; KNL_64-NEXT: je .LBB31_2 ; KNL_64-NEXT: # %bb.1: # %cond.load ; KNL_64-NEXT: vmovq %xmm1, %rax @@ -1711,7 +1711,7 @@ ; KNL_32-NEXT: vpslld $2, %xmm1, %xmm1 ; KNL_32-NEXT: vpaddd %xmm1, %xmm0, %xmm2 ; KNL_32-NEXT: testb $1, %al -; KNL_32-NEXT: # implicit-def: %xmm1 +; KNL_32-NEXT: # implicit-def: $xmm1 ; KNL_32-NEXT: je .LBB31_2 ; KNL_32-NEXT: # %bb.1: # %cond.load ; KNL_32-NEXT: vmovd %xmm2, %eax @@ -1735,7 +1735,7 @@ ; KNL_32-NEXT: vpinsrd $2, (%eax), %xmm1, %xmm1 ; KNL_32-NEXT: .LBB31_6: # %else5 ; KNL_32-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; KNL_32-NEXT: addl $12, %esp ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl @@ -1749,7 +1749,7 @@ ; SKX-NEXT: vpsllq $2, %ymm1, %ymm1 ; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm1 ; SKX-NEXT: testb $1, %al -; SKX-NEXT: # implicit-def: %xmm0 +; SKX-NEXT: # implicit-def: $xmm0 ; SKX-NEXT: je .LBB31_2 ; SKX-NEXT: # %bb.1: # %cond.load ; SKX-NEXT: vmovq %xmm1, %rax @@ -1787,7 +1787,7 @@ ; SKX_32-NEXT: vpslld $2, %xmm1, %xmm1 ; SKX_32-NEXT: vpaddd %xmm1, %xmm0, %xmm2 ; SKX_32-NEXT: testb $1, %al -; SKX_32-NEXT: # implicit-def: %xmm1 +; SKX_32-NEXT: # implicit-def: $xmm1 ; SKX_32-NEXT: je .LBB31_2 ; SKX_32-NEXT: # %bb.1: # %cond.load ; SKX_32-NEXT: vmovd %xmm2, %eax @@ -2329,7 +2329,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i64> %d) { ; KNL_64-LABEL: test_pr28312: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_64-NEXT: kshiftlw $12, %k0, %k0 @@ -2348,7 +2348,7 @@ ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-32, %esp ; KNL_32-NEXT: subl $32, %esp -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1 ; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL_32-NEXT: kshiftlw $12, %k0, %k0 @@ -2517,7 +2517,7 @@ define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <2 x float> %src0) { ; KNL_64-LABEL: large_index: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; KNL_64-NEXT: vpsllq $63, %xmm0, %xmm0 ; KNL_64-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL_64-NEXT: kshiftlw $14, %k0, %k0 @@ -2532,7 +2532,7 @@ ; ; KNL_32-LABEL: large_index: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1 +; KNL_32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1 ; KNL_32-NEXT: vpsllq $63, %xmm0, %xmm0 ; KNL_32-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL_32-NEXT: kshiftlw $14, %k0, %k0 @@ -2624,7 +2624,7 @@ ; KNL_64-NEXT: movw $255, %ax ; KNL_64-NEXT: kmovw %eax, %k1 ; KNL_64-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1} -; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; 
KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL_64-NEXT: retq ; ; KNL_32-LABEL: sext_v8i8_index: @@ -2636,7 +2636,7 @@ ; KNL_32-NEXT: movw $255, %cx ; KNL_32-NEXT: kmovw %ecx, %k1 ; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1} -; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; KNL_32-NEXT: retl ; ; SKX-LABEL: sext_v8i8_index: @@ -2670,7 +2670,7 @@ define void @test_scatter_2i32_index(<2 x double> %a1, double* %base, <2 x i32> %ind, <2 x i1> %mask) { ; KNL_64-LABEL: test_scatter_2i32_index: ; KNL_64: # %bb.0: -; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_64-NEXT: vpsllq $32, %xmm1, %xmm1 ; KNL_64-NEXT: vpsraq $32, %zmm1, %zmm1 ; KNL_64-NEXT: vpsllq $63, %xmm2, %xmm2 @@ -2683,7 +2683,7 @@ ; ; KNL_32-LABEL: test_scatter_2i32_index: ; KNL_32: # %bb.0: -; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL_32-NEXT: vpsllq $32, %xmm1, %xmm1 ; KNL_32-NEXT: vpsraq $32, %zmm1, %zmm1 ; KNL_32-NEXT: vpsllq $63, %xmm2, %xmm2 Index: test/CodeGen/X86/masked_memop.ll =================================================================== --- test/CodeGen/X86/masked_memop.ll +++ test/CodeGen/X86/masked_memop.ll @@ -12,7 +12,7 @@ ; AVX-LABEL: loadv1: ; AVX: ## %bb.0: ; AVX-NEXT: testq %rdi, %rdi -; AVX-NEXT: ## implicit-def: %xmm1 +; AVX-NEXT: ## implicit-def: $xmm1 ; AVX-NEXT: je LBB0_1 ; AVX-NEXT: ## %bb.2: ## %else ; AVX-NEXT: testq %rdi, %rdi @@ -32,7 +32,7 @@ ; AVX512F-LABEL: loadv1: ; AVX512F: ## %bb.0: ; AVX512F-NEXT: testq %rdi, %rdi -; AVX512F-NEXT: ## implicit-def: %xmm1 +; AVX512F-NEXT: ## implicit-def: $xmm1 ; AVX512F-NEXT: jne LBB0_2 ; AVX512F-NEXT: ## %bb.1: ## %cond.load ; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero @@ -46,7 +46,7 @@ ; SKX-LABEL: loadv1: ; SKX: ## %bb.0: ; SKX-NEXT: testq %rdi, %rdi -; SKX-NEXT: ## implicit-def: %xmm1 +; SKX-NEXT: ## implicit-def: $xmm1 ; SKX-NEXT: jne LBB0_2 ; SKX-NEXT: ## %bb.1: ## %cond.load ; SKX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero @@ -374,12 +374,12 @@ ; ; AVX512F-LABEL: test11b: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1 +; AVX512F-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1 ; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0 ; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0 ; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1 ; AVX512F-NEXT: vpblendmd (%rdi), %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: test11b: @@ -419,7 +419,7 @@ ; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0 ; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1 ; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: test11c: @@ -459,7 +459,7 @@ ; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0 ; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1 ; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: test11d: @@ -535,7 +535,7 @@ ; ; AVX512F-LABEL: test14: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; 
AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] ; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0 @@ -727,7 +727,7 @@ ; AVX512F-NEXT: kshiftlw $14, %k0, %k0 ; AVX512F-NEXT: kshiftrw $14, %k0, %k1 ; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -754,7 +754,7 @@ ; AVX512F-NEXT: movw $15, %ax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -780,11 +780,11 @@ ; ; AVX512F-LABEL: mload_constmask_v4f32: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: movw $13, %ax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -831,11 +831,11 @@ ; ; AVX512F-LABEL: mload_constmask_v4i32: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: movw $14, %ax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -875,11 +875,11 @@ ; ; AVX512F-LABEL: mload_constmask_v8f32: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: movw $7, %ax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: mload_constmask_v8f32: @@ -902,11 +902,11 @@ ; ; AVX512F-LABEL: mload_constmask_v4f64: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: movb $7, %al ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: mload_constmask_v4f64: @@ -929,11 +929,11 @@ ; ; AVX512F-LABEL: mload_constmask_v8i32: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: movw $135, %ax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: mload_constmask_v8i32: @@ -954,11 +954,11 @@ ; ; AVX512F-LABEL: mload_constmask_v4i64: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: movb $9, %al ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed 
%zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: mload_constmask_v4i64: @@ -1011,7 +1011,7 @@ ; AVX512F-NEXT: movb $7, %al ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: mload_constmask_v4f64_undef_passthrough: @@ -1042,7 +1042,7 @@ ; AVX512F-NEXT: movb $6, %al ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z} -; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; SKX-LABEL: mload_constmask_v4i64_undef_passthrough: @@ -1070,7 +1070,7 @@ ; ; AVX512F-LABEL: test21: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512F-NEXT: movw $15, %ax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1} @@ -1290,8 +1290,8 @@ ; ; AVX512F-LABEL: trunc_mask: ; AVX512F: ## %bb.0: -; AVX512F-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2 -; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm1, %k0 ; AVX512F-NEXT: kshiftlw $12, %k0, %k0 Index: test/CodeGen/X86/misched-copy.ll =================================================================== --- test/CodeGen/X86/misched-copy.ll +++ test/CodeGen/X86/misched-copy.ll @@ -9,10 +9,10 @@ ; MUL_HiLo PhysReg def copies should be just below the mul. ; ; CHECK: *** Final schedule for %bb.1 *** -; CHECK: %eax = COPY -; CHECK-NEXT: MUL32r %{{[0-9]+}}:gr32, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax -; CHECK-NEXT: COPY %e{{[ad]}}x -; CHECK-NEXT: COPY %e{{[ad]}}x +; CHECK: $eax = COPY +; CHECK-NEXT: MUL32r %{{[0-9]+}}:gr32, implicit-def $eax, implicit-def $edx, implicit-def dead $eflags, implicit $eax +; CHECK-NEXT: COPY $e{{[ad]}}x +; CHECK-NEXT: COPY $e{{[ad]}}x ; CHECK: DIVSSrm define i64 @mulhoist(i32 %a, i32 %b) #0 { entry: Index: test/CodeGen/X86/movmsk.ll =================================================================== --- test/CodeGen/X86/movmsk.ll +++ test/CodeGen/X86/movmsk.ll @@ -102,7 +102,7 @@ ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: movq %xmm0, %rdi ; CHECK-NEXT: shrq $63, %rdi -; CHECK-NEXT: ## kill: def %edi killed %edi killed %rdi +; CHECK-NEXT: ## kill: def $edi killed $edi killed $rdi ; CHECK-NEXT: jmp _float_call_signbit_callee ## TAILCALL entry: %t0 = bitcast double %n to i64 Index: test/CodeGen/X86/movtopush.mir =================================================================== --- test/CodeGen/X86/movtopush.mir +++ test/CodeGen/X86/movtopush.mir @@ -33,25 +33,25 @@ ... 
--- # CHECK-LABEL: test9 -# CHECK: ADJCALLSTACKDOWN32 16, 0, 16, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp -# CHECK-NEXT: PUSH32i8 4, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32i8 3, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32i8 2, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32i8 1, implicit-def %esp, implicit %esp -# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp -# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp -# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp -# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8) -# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4) -# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg -# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg -# CHECK-NEXT: PUSH32r %4, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32r %5, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32i8 6, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32r %2, implicit-def %esp, implicit %esp -# CHECK-NEXT: PUSH32r %1, implicit-def %esp, implicit %esp -# CHECK-NEXT: CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp -# CHECK-NEXT: ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp +# CHECK: ADJCALLSTACKDOWN32 16, 0, 16, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp +# CHECK-NEXT: PUSH32i8 4, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32i8 3, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32i8 2, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32i8 1, implicit-def $esp, implicit $esp +# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp +# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp +# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp +# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, $noreg, 0, $noreg :: (load 4 from %stack.2.s, align 8) +# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, $noreg, 4, $noreg :: (load 4 from %stack.2.s + 4) +# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, $noreg, 0, $noreg +# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, $noreg, 0, $noreg +# CHECK-NEXT: PUSH32r %4, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32r %5, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32i8 6, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32r %2, implicit-def $esp, implicit $esp +# CHECK-NEXT: PUSH32r %1, implicit-def $esp, implicit $esp +# CHECK-NEXT: CALLpcrel32 @struct, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp +# CHECK-NEXT: ADJCALLSTACKUP32 20, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp # CHECK-NEXT: RET 0 name: test9 alignment: 0 @@ -99,27 +99,27 @@ constants: body: | bb.0.entry: - ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, 
implicit-def dead %ssp, implicit %esp, implicit %ssp - %0 = COPY %esp - MOV32mi %0, 1, %noreg, 12, %noreg, 4 :: (store 4 into stack + 12) - MOV32mi %0, 1, %noreg, 8, %noreg, 3 :: (store 4 into stack + 8) - MOV32mi %0, 1, %noreg, 4, %noreg, 2 :: (store 4 into stack + 4) - MOV32mi %0, 1, %noreg, 0, %noreg, 1 :: (store 4 into stack) - CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp - ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp - ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp - %1 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8) - %2 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4) - %3 = COPY %esp - MOV32mr %3, 1, %noreg, 4, %noreg, killed %2 :: (store 4) - MOV32mr %3, 1, %noreg, 0, %noreg, killed %1 :: (store 4) - %4 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg - MOV32mr %3, 1, %noreg, 16, %noreg, killed %4 :: (store 4 into stack + 16) - %5 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg - MOV32mr %3, 1, %noreg, 12, %noreg, killed %5 :: (store 4 into stack + 12) - MOV32mi %3, 1, %noreg, 8, %noreg, 6 :: (store 4 into stack + 8) - CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp, - ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp + ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp + %0 = COPY $esp + MOV32mi %0, 1, $noreg, 12, $noreg, 4 :: (store 4 into stack + 12) + MOV32mi %0, 1, $noreg, 8, $noreg, 3 :: (store 4 into stack + 8) + MOV32mi %0, 1, $noreg, 4, $noreg, 2 :: (store 4 into stack + 4) + MOV32mi %0, 1, $noreg, 0, $noreg, 1 :: (store 4 into stack) + CALLpcrel32 @good, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp + ADJCALLSTACKUP32 16, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp + ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp + %1 = MOV32rm %stack.2.s, 1, $noreg, 0, $noreg :: (load 4 from %stack.2.s, align 8) + %2 = MOV32rm %stack.2.s, 1, $noreg, 4, $noreg :: (load 4 from %stack.2.s + 4) + %3 = COPY $esp + MOV32mr %3, 1, $noreg, 4, $noreg, killed %2 :: (store 4) + MOV32mr %3, 1, $noreg, 0, $noreg, killed %1 :: (store 4) + %4 = LEA32r %stack.0.p, 1, $noreg, 0, $noreg + MOV32mr %3, 1, $noreg, 16, $noreg, killed %4 :: (store 4 into stack + 16) + %5 = LEA32r %stack.1.q, 1, $noreg, 0, $noreg + MOV32mr %3, 1, $noreg, 12, $noreg, killed %5 :: (store 4 into stack + 12) + MOV32mi %3, 1, $noreg, 8, $noreg, 6 :: (store 4 into stack + 8) + CALLpcrel32 @struct, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, + ADJCALLSTACKUP32 20, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp RET 0 ... 
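The movtopush.mir hunk above is the same mechanical rename, but the transformation it locks in is worth restating: reserving 16 bytes and storing arguments at $esp+0 through $esp+12 leaves exactly the stack image of four pushes issued in reverse argument order, since each PUSH32 both lowers $esp and stores. Below is a small self-checking model of that equivalence, using an invented toy stack rather than the pass's real machinery.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t mov_mem[8] = {0};  // toy stacks; one index = one 4-byte slot
      uint32_t push_mem[8] = {0};

      // ADJCALLSTACKDOWN32 16 + MOV32mi: reserve four slots, then store.
      int esp = 8 - 4;
      for (uint32_t i = 0; i < 4; ++i)
        mov_mem[esp + i] = i + 1;        // MOV32mi [esp + 4*i], i+1

      // PUSH32i8 4; PUSH32i8 3; PUSH32i8 2; PUSH32i8 1.
      int esp2 = 8;
      for (uint32_t v = 4; v >= 1; --v)
        push_mem[--esp2] = v;            // a push lowers esp, then stores

      assert(esp2 == esp);               // same final stack pointer
      for (int i = 0; i < 4; ++i)
        assert(push_mem[esp + i] == mov_mem[esp + i]); // same stack image
      return 0;
    }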
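The mul-constant tests that follow are likewise only renamed, but the assembly they check encodes a neat scheme: small constant multiplies are expanded into LEA, shift, add, and sub, where a single leal can compute base + index*1/2/4/8. test_mul_by_11, for example, checks x*11 computed as x + 2*(x + 4*x), and the recurring "# kill: def $ax killed $ax killed $eax" lines, the bulk of the churn in these files, simply record that the i16 result is taken as the $ax subregister of the 32-bit $eax the arithmetic was done in. A few of the decompositions, restated as plain C++ with hypothetical helper names, not the backend's actual selection logic:

    #include <cassert>
    #include <cstdint>

    // Each helper mirrors one expansion checked below; the comments give
    // the corresponding instructions from the X64 CHECK lines.
    static uint32_t mul_by_3(uint32_t x) { return x + x * 2; } // leal (%rdi,%rdi,2)
    static uint32_t mul_by_11(uint32_t x) {
      uint32_t t = x + x * 4;  // leal (%rdi,%rdi,4)
      return x + t * 2;        // leal (%rdi,%rax,2)
    }
    static uint32_t mul_by_21(uint32_t x) {
      uint32_t t = x + x * 4;  // leal (%rdi,%rdi,4)
      return x + t * 4;        // leal (%rdi,%rax,4)
    }
    static uint32_t mul_by_23(uint32_t x) {
      uint32_t t = x + x * 2;  // leal (%rdi,%rdi,2)
      return (t << 3) - x;     // shll $3; subl
    }

    int main() {
      for (uint32_t x : {0u, 1u, 7u, 12345u}) {
        assert(mul_by_3(x) == 3 * x);
        assert(mul_by_11(x) == 11 * x);
        assert(mul_by_21(x) == 21 * x);
        assert(mul_by_23(x) == 23 * x);
      }
      return 0;
    }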
Index: test/CodeGen/X86/mul-constant-i16.ll =================================================================== --- test/CodeGen/X86/mul-constant-i16.ll +++ test/CodeGen/X86/mul-constant-i16.ll @@ -21,14 +21,14 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: addl %eax, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_2: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 2 ret i16 %mul @@ -39,14 +39,14 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_3: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 3 ret i16 %mul @@ -57,14 +57,14 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $2, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_4: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (,%rdi,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 4 ret i16 %mul @@ -75,14 +75,14 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_5: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 5 ret i16 %mul @@ -94,15 +94,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_6: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: addl %edi, %edi ; X64-NEXT: leal (%rdi,%rdi,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 6 ret i16 %mul @@ -114,15 +114,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: leal (,%ecx,8), %eax ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_7: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (,%rdi,8), %eax ; X64-NEXT: subl 
%edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 7 ret i16 %mul @@ -133,14 +133,14 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $3, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_8: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (,%rdi,8), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 8 ret i16 %mul @@ -151,14 +151,14 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,8), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_9: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,8), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 9 ret i16 %mul @@ -170,15 +170,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_10: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: addl %edi, %edi ; X64-NEXT: leal (%rdi,%rdi,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 10 ret i16 %mul @@ -190,15 +190,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_11: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax ; X64-NEXT: leal (%rdi,%rax,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 11 ret i16 %mul @@ -210,15 +210,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_12: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: shll $2, %edi ; X64-NEXT: leal (%rdi,%rdi,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 12 ret i16 %mul @@ -230,15 +230,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_13: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi 
def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,2), %eax ; X64-NEXT: leal (%rdi,%rax,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 13 ret i16 %mul @@ -251,16 +251,16 @@ ; X86-NEXT: leal (%ecx,%ecx,2), %eax ; X86-NEXT: leal (%ecx,%eax,4), %eax ; X86-NEXT: addl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_14: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,2), %eax ; X64-NEXT: leal (%rdi,%rax,4), %eax ; X64-NEXT: addl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 14 ret i16 %mul @@ -272,15 +272,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_15: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax ; X64-NEXT: leal (%rax,%rax,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 15 ret i16 %mul @@ -291,7 +291,7 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $4, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_16: @@ -310,16 +310,16 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: shll $4, %eax ; X86-NEXT: addl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_17: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movl %edi, %eax ; X64-NEXT: shll $4, %eax ; X64-NEXT: leal (%rax,%rdi), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 17 ret i16 %mul @@ -331,15 +331,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,8), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_18: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: addl %edi, %edi ; X64-NEXT: leal (%rdi,%rdi,8), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 18 ret i16 %mul @@ -352,16 +352,16 @@ ; X86-NEXT: leal (%ecx,%ecx,4), %eax ; X86-NEXT: shll $2, %eax ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_19: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax ; X64-NEXT: shll $2, %eax ; X64-NEXT: 
subl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 19 ret i16 %mul @@ -373,15 +373,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_20: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: shll $2, %edi ; X64-NEXT: leal (%rdi,%rdi,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 20 ret i16 %mul @@ -393,15 +393,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_21: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax ; X64-NEXT: leal (%rdi,%rax,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 21 ret i16 %mul @@ -414,16 +414,16 @@ ; X86-NEXT: leal (%ecx,%ecx,4), %eax ; X86-NEXT: leal (%ecx,%eax,4), %eax ; X86-NEXT: addl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_22: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax ; X64-NEXT: leal (%rdi,%rax,4), %eax ; X64-NEXT: addl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 22 ret i16 %mul @@ -436,16 +436,16 @@ ; X86-NEXT: leal (%ecx,%ecx,2), %eax ; X86-NEXT: shll $3, %eax ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_23: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,2), %eax ; X64-NEXT: shll $3, %eax ; X64-NEXT: subl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 23 ret i16 %mul @@ -457,15 +457,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $3, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_24: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: shll $3, %edi ; X64-NEXT: leal (%rdi,%rdi,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 24 ret i16 %mul @@ -477,15 +477,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # 
kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_25: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,4), %eax ; X64-NEXT: leal (%rax,%rax,4), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 25 ret i16 %mul @@ -498,16 +498,16 @@ ; X86-NEXT: leal (%ecx,%ecx,8), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_26: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,8), %eax ; X64-NEXT: leal (%rax,%rax,2), %eax ; X64-NEXT: subl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 26 ret i16 %mul @@ -519,15 +519,15 @@ ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_27: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,8), %eax ; X64-NEXT: leal (%rax,%rax,2), %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 27 ret i16 %mul @@ -540,16 +540,16 @@ ; X86-NEXT: leal (%ecx,%ecx,8), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: addl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_28: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,8), %eax ; X64-NEXT: leal (%rax,%rax,2), %eax ; X64-NEXT: addl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 28 ret i16 %mul @@ -563,17 +563,17 @@ ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: addl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_29: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rdi,8), %eax ; X64-NEXT: leal (%rax,%rax,2), %eax ; X64-NEXT: addl %edi, %eax ; X64-NEXT: addl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 29 ret i16 %mul @@ -587,7 +587,7 @@ ; X86-NEXT: shll $5, %eax ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_30: @@ -596,7 +596,7 @@ ; X64-NEXT: shll $5, %eax ; X64-NEXT: subl %edi, %eax ; X64-NEXT: subl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul 
nsw i16 %x, 30 ret i16 %mul @@ -609,7 +609,7 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: shll $5, %eax ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_31: @@ -617,7 +617,7 @@ ; X64-NEXT: movl %edi, %eax ; X64-NEXT: shll $5, %eax ; X64-NEXT: subl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 31 ret i16 %mul @@ -628,7 +628,7 @@ ; X86: # %bb.0: ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: shll $5, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_by_32: @@ -648,16 +648,16 @@ ; X86-NEXT: leal 42(%eax,%eax,8), %ecx ; X86-NEXT: leal 2(%eax,%eax,4), %eax ; X86-NEXT: imull %ecx, %eax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: test_mul_spec: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 42(%rdi,%rdi,8), %ecx ; X64-NEXT: leal 2(%rdi,%rdi,4), %eax ; X64-NEXT: imull %ecx, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %mul = mul nsw i16 %x, 9 %add = add nsw i16 %mul, 42 Index: test/CodeGen/X86/mul-constant-i32.ll =================================================================== --- test/CodeGen/X86/mul-constant-i32.ll +++ test/CodeGen/X86/mul-constant-i32.ll @@ -61,13 +61,13 @@ ; ; X64-HSW-LABEL: test_mul_by_2: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_2: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; @@ -79,25 +79,25 @@ ; ; HSW-NOOPT-LABEL: test_mul_by_2: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50] ; HSW-NOOPT-NEXT: retq # sched: [7:1.00] ; ; JAG-NOOPT-LABEL: test_mul_by_2: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: retq # sched: [4:1.00] ; ; X64-SLM-LABEL: test_mul_by_2: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: test_mul_by_2: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: retq # sched: [4:1.00] %mul = mul nsw i32 %x, 2 @@ -112,13 +112,13 @@ ; ; X64-HSW-LABEL: test_mul_by_3: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal 
(%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_3: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; @@ -129,25 +129,25 @@ ; ; HSW-NOOPT-LABEL: test_mul_by_3: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; HSW-NOOPT-NEXT: retq # sched: [7:1.00] ; ; JAG-NOOPT-LABEL: test_mul_by_3: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: retq # sched: [4:1.00] ; ; X64-SLM-LABEL: test_mul_by_3: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: test_mul_by_3: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: retq # sched: [4:1.00] %mul = mul nsw i32 %x, 3 @@ -163,13 +163,13 @@ ; ; X64-HSW-LABEL: test_mul_by_4: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_4: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; @@ -181,25 +181,25 @@ ; ; HSW-NOOPT-LABEL: test_mul_by_4: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50] ; HSW-NOOPT-NEXT: retq # sched: [7:1.00] ; ; JAG-NOOPT-LABEL: test_mul_by_4: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: retq # sched: [4:1.00] ; ; X64-SLM-LABEL: test_mul_by_4: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: test_mul_by_4: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: retq # sched: [4:1.00] %mul = mul nsw i32 %x, 4 @@ -214,13 +214,13 @@ ; ; X64-HSW-LABEL: test_mul_by_5: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_5: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi 
killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; @@ -231,25 +231,25 @@ ; ; HSW-NOOPT-LABEL: test_mul_by_5: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; HSW-NOOPT-NEXT: retq # sched: [7:1.00] ; ; JAG-NOOPT-LABEL: test_mul_by_5: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: retq # sched: [4:1.00] ; ; X64-SLM-LABEL: test_mul_by_5: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: test_mul_by_5: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: retq # sched: [4:1.00] %mul = mul nsw i32 %x, 5 @@ -266,14 +266,14 @@ ; ; X64-HSW-LABEL: test_mul_by_6: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25] ; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_6: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -295,7 +295,7 @@ ; ; X64-SLM-LABEL: test_mul_by_6: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50] ; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -318,14 +318,14 @@ ; ; X64-HSW-LABEL: test_mul_by_7: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_7: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -347,7 +347,7 @@ ; ; X64-SLM-LABEL: test_mul_by_7: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00] ; X64-SLM-NEXT: subl %edi, %eax # sched: [1:0.50] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -369,13 +369,13 @@ ; ; X64-HSW-LABEL: test_mul_by_8: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_8: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: 
def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; @@ -387,25 +387,25 @@ ; ; HSW-NOOPT-LABEL: test_mul_by_8: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50] ; HSW-NOOPT-NEXT: retq # sched: [7:1.00] ; ; JAG-NOOPT-LABEL: test_mul_by_8: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: retq # sched: [4:1.00] ; ; X64-SLM-LABEL: test_mul_by_8: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: test_mul_by_8: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: retq # sched: [4:1.00] %mul = mul nsw i32 %x, 8 @@ -420,13 +420,13 @@ ; ; X64-HSW-LABEL: test_mul_by_9: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_9: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; @@ -437,25 +437,25 @@ ; ; HSW-NOOPT-LABEL: test_mul_by_9: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; HSW-NOOPT-NEXT: retq # sched: [7:1.00] ; ; JAG-NOOPT-LABEL: test_mul_by_9: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: retq # sched: [4:1.00] ; ; X64-SLM-LABEL: test_mul_by_9: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: test_mul_by_9: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: retq # sched: [4:1.00] %mul = mul nsw i32 %x, 9 @@ -472,14 +472,14 @@ ; ; X64-HSW-LABEL: test_mul_by_10: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25] ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_10: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50] ; X64-JAG-NEXT: leal 
(%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -501,7 +501,7 @@ ; ; X64-SLM-LABEL: test_mul_by_10: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50] ; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -524,14 +524,14 @@ ; ; X64-HSW-LABEL: test_mul_by_11: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_11: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -574,14 +574,14 @@ ; ; X64-HSW-LABEL: test_mul_by_12: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_12: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -603,7 +603,7 @@ ; ; X64-SLM-LABEL: test_mul_by_12: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -626,14 +626,14 @@ ; ; X64-HSW-LABEL: test_mul_by_13: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_13: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -677,7 +677,7 @@ ; ; X64-HSW-LABEL: test_mul_by_14: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25] @@ -685,7 +685,7 @@ ; ; X64-JAG-LABEL: test_mul_by_14: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50] @@ -729,14 +729,14 @@ ; ; X64-HSW-LABEL: test_mul_by_15: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi 
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_15: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -758,7 +758,7 @@ ; ; X64-SLM-LABEL: test_mul_by_15: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -834,7 +834,7 @@ ; ; X64-HSW-LABEL: test_mul_by_17: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25] ; X64-HSW-NEXT: shll $4, %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50] @@ -842,7 +842,7 @@ ; ; X64-JAG-LABEL: test_mul_by_17: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.50] ; X64-JAG-NEXT: shll $4, %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50] @@ -865,7 +865,7 @@ ; ; X64-SLM-LABEL: test_mul_by_17: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50] ; X64-SLM-NEXT: shll $4, %eax # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rax,%rdi), %eax # sched: [1:1.00] @@ -889,14 +889,14 @@ ; ; X64-HSW-LABEL: test_mul_by_18: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25] ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_18: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -918,7 +918,7 @@ ; ; X64-SLM-LABEL: test_mul_by_18: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50] ; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -942,7 +942,7 @@ ; ; X64-HSW-LABEL: test_mul_by_19: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: shll $2, %eax # sched: [1:0.50] ; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25] @@ -950,7 +950,7 @@ ; ; X64-JAG-LABEL: test_mul_by_19: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: shll $2, %eax # sched: [1:0.50] ; X64-JAG-NEXT: subl %edi, %eax # sched: 
[1:0.50] @@ -994,14 +994,14 @@ ; ; X64-HSW-LABEL: test_mul_by_20: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_20: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -1023,7 +1023,7 @@ ; ; X64-SLM-LABEL: test_mul_by_20: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -1046,14 +1046,14 @@ ; ; X64-HSW-LABEL: test_mul_by_21: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_21: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -1097,7 +1097,7 @@ ; ; X64-HSW-LABEL: test_mul_by_22: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25] @@ -1105,7 +1105,7 @@ ; ; X64-JAG-LABEL: test_mul_by_22: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50] @@ -1150,7 +1150,7 @@ ; ; X64-HSW-LABEL: test_mul_by_23: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: shll $3, %eax # sched: [1:0.50] ; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25] @@ -1158,7 +1158,7 @@ ; ; X64-JAG-LABEL: test_mul_by_23: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: shll $3, %eax # sched: [1:0.50] ; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50] @@ -1202,14 +1202,14 @@ ; ; X64-HSW-LABEL: test_mul_by_24: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: shll $3, %edi # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_24: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; 
X64-JAG-NEXT: shll $3, %edi # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -1231,7 +1231,7 @@ ; ; X64-SLM-LABEL: test_mul_by_24: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: shll $3, %edi # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -1254,14 +1254,14 @@ ; ; X64-HSW-LABEL: test_mul_by_25: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_25: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -1283,7 +1283,7 @@ ; ; X64-SLM-LABEL: test_mul_by_25: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rax,%rax,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -1307,7 +1307,7 @@ ; ; X64-HSW-LABEL: test_mul_by_26: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25] @@ -1315,7 +1315,7 @@ ; ; X64-JAG-LABEL: test_mul_by_26: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50] @@ -1359,14 +1359,14 @@ ; ; X64-HSW-LABEL: test_mul_by_27: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: test_mul_by_27: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] @@ -1388,7 +1388,7 @@ ; ; X64-SLM-LABEL: test_mul_by_27: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00] ; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00] ; X64-SLM-NEXT: retq # sched: [4:1.00] @@ -1412,7 +1412,7 @@ ; ; X64-HSW-LABEL: test_mul_by_28: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: addl %edi, %eax # 
sched: [1:0.25] @@ -1420,7 +1420,7 @@ ; ; X64-JAG-LABEL: test_mul_by_28: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50] @@ -1466,7 +1466,7 @@ ; ; X64-HSW-LABEL: test_mul_by_29: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25] @@ -1475,7 +1475,7 @@ ; ; X64-JAG-LABEL: test_mul_by_29: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] ; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50] ; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50] @@ -1681,7 +1681,7 @@ ; ; X64-HSW-LABEL: test_mul_spec: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50] ; X64-HSW-NEXT: addl $42, %ecx # sched: [1:0.25] ; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] @@ -1691,7 +1691,7 @@ ; ; X64-JAG-LABEL: test_mul_spec: ; X64-JAG: # %bb.0: -; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi +; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50] ; X64-JAG-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50] ; X64-JAG-NEXT: imull %ecx, %eax # sched: [3:1.00] @@ -1707,7 +1707,7 @@ ; ; HSW-NOOPT-LABEL: test_mul_spec: ; HSW-NOOPT: # %bb.0: -; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50] ; HSW-NOOPT-NEXT: addl $42, %ecx # sched: [1:0.25] ; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50] @@ -1717,7 +1717,7 @@ ; ; JAG-NOOPT-LABEL: test_mul_spec: ; JAG-NOOPT: # %bb.0: -; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; JAG-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50] ; JAG-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50] ; JAG-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00] @@ -1725,7 +1725,7 @@ ; ; X64-SLM-LABEL: test_mul_spec: ; X64-SLM: # %bb.0: -; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi +; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00] ; X64-SLM-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00] ; X64-SLM-NEXT: imull %ecx, %eax # sched: [3:1.00] @@ -1733,7 +1733,7 @@ ; ; SLM-NOOPT-LABEL: test_mul_spec: ; SLM-NOOPT: # %bb.0: -; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi +; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi ; SLM-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00] ; SLM-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00] ; SLM-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00] Index: test/CodeGen/X86/mul-constant-result.ll =================================================================== --- test/CodeGen/X86/mul-constant-result.ll +++ test/CodeGen/X86/mul-constant-result.ll @@ -188,7 +188,7 @@ ; ; X64-HSW-LABEL: mult: ; X64-HSW: # %bb.0: -; X64-HSW-NEXT: # kill: 
def %edi killed %edi def %rdi +; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: cmpl $1, %esi ; X64-HSW-NEXT: movl $1, %ecx ; X64-HSW-NEXT: movl %esi, %eax @@ -202,60 +202,60 @@ ; X64-HSW-NEXT: jmpq *.LJTI0_0(,%rdi,8) ; X64-HSW-NEXT: .LBB0_2: ; X64-HSW-NEXT: addl %eax, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_36: ; X64-HSW-NEXT: xorl %eax, %eax ; X64-HSW-NEXT: .LBB0_37: -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_3: ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_4: ; X64-HSW-NEXT: shll $2, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_5: ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_6: ; X64-HSW-NEXT: addl %eax, %eax ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_7: ; X64-HSW-NEXT: leal (,%rax,8), %ecx ; X64-HSW-NEXT: jmp .LBB0_8 ; X64-HSW-NEXT: .LBB0_9: ; X64-HSW-NEXT: shll $3, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_10: ; X64-HSW-NEXT: leal (%rax,%rax,8), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_11: ; X64-HSW-NEXT: addl %eax, %eax ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_12: ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx ; X64-HSW-NEXT: leal (%rax,%rcx,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_13: ; X64-HSW-NEXT: shll $2, %eax ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_14: ; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx ; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_15: ; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx @@ -263,11 +263,11 @@ ; X64-HSW-NEXT: .LBB0_18: ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_19: ; X64-HSW-NEXT: shll $4, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_20: ; X64-HSW-NEXT: movl %eax, %ecx @@ -276,7 +276,7 @@ ; X64-HSW-NEXT: 
.LBB0_21: ; X64-HSW-NEXT: addl %eax, %eax ; X64-HSW-NEXT: leal (%rax,%rax,8), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_22: ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx @@ -285,12 +285,12 @@ ; X64-HSW-NEXT: .LBB0_23: ; X64-HSW-NEXT: shll $2, %eax ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_24: ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx ; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_25: ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx @@ -304,12 +304,12 @@ ; X64-HSW-NEXT: .LBB0_27: ; X64-HSW-NEXT: shll $3, %eax ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_28: ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_29: ; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx @@ -318,7 +318,7 @@ ; X64-HSW-NEXT: .LBB0_30: ; X64-HSW-NEXT: leal (%rax,%rax,8), %eax ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_31: ; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx @@ -331,7 +331,7 @@ ; X64-HSW-NEXT: .LBB0_17: ; X64-HSW-NEXT: addl %eax, %ecx ; X64-HSW-NEXT: movl %ecx, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_33: ; X64-HSW-NEXT: movl %eax, %ecx @@ -344,11 +344,11 @@ ; X64-HSW-NEXT: .LBB0_8: ; X64-HSW-NEXT: subl %eax, %ecx ; X64-HSW-NEXT: movl %ecx, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq ; X64-HSW-NEXT: .LBB0_35: ; X64-HSW-NEXT: shll $5, %eax -; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax +; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax ; X64-HSW-NEXT: retq %3 = icmp eq i32 %1, 0 %4 = icmp sgt i32 %1, 1 Index: test/CodeGen/X86/negate-i1.ll =================================================================== --- test/CodeGen/X86/negate-i1.ll +++ test/CodeGen/X86/negate-i1.ll @@ -49,7 +49,7 @@ ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X32-NEXT: andl $1, %eax ; X32-NEXT: negl %eax -; X32-NEXT: # kill: def %ax killed %ax killed %eax +; X32-NEXT: # kill: def $ax killed $ax killed $eax ; X32-NEXT: retl %b = sext i1 %a to i16 ret i16 %b @@ -66,7 +66,7 @@ ; X32: # %bb.0: ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X32-NEXT: negl %eax -; X32-NEXT: # kill: def %ax killed %ax killed %eax +; X32-NEXT: # kill: def $ax killed $ax killed $eax ; X32-NEXT: retl %b = sext i1 %a to i16 ret i16 %b @@ -109,7 +109,7 @@ define i64 @select_i64_neg1_or_0(i1 %a) { ; X64-LABEL: select_i64_neg1_or_0: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: andl $1, %edi ; X64-NEXT: negq %rdi ; X64-NEXT: movq %rdi, %rax Index: 
test/CodeGen/X86/non-value-mem-operand.mir =================================================================== --- test/CodeGen/X86/non-value-mem-operand.mir +++ test/CodeGen/X86/non-value-mem-operand.mir @@ -123,12 +123,12 @@ alignment: 4 tracksRegLiveness: true fixedStack: - - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '%rbx' } - - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '%r12' } - - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '%r13' } - - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '%r14' } - - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%r15' } - - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%rbp' } + - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '$rbx' } + - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '$r12' } + - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '$r13' } + - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '$r14' } + - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$r15' } + - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$rbp' } stack: - { id: 0, offset: -88, size: 8, alignment: 8 } - { id: 1, offset: -96, size: 8, alignment: 8 } @@ -143,151 +143,151 @@ body: | bb.0.bb: successors: %bb.1.bb2(0x00000800), %bb.3.bb3(0x7ffff800) - liveins: %rbp, %r15, %r14, %r13, %r12, %rbx - - frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r killed %r15, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r killed %r14, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r killed %r13, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r killed %r12, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp - %rsp = frame-setup SUB64ri8 %rsp, 56, implicit-def dead %eflags - CALL64r undef %rax, csr_64, implicit %rsp, implicit undef %rdi, implicit undef %rsi, implicit-def %rsp, implicit-def %rax - TEST64rr %rax, %rax, implicit-def %eflags - JNE_1 %bb.3.bb3, implicit killed %eflags + liveins: $rbp, $r15, $r14, $r13, $r12, $rbx + + frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r killed $r15, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r killed $r14, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r killed $r13, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r killed $r12, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp + $rsp = frame-setup SUB64ri8 $rsp, 56, implicit-def dead $eflags + CALL64r undef $rax, csr_64, implicit $rsp, implicit undef $rdi, implicit undef $rsi, implicit-def $rsp, implicit-def $rax + TEST64rr $rax, $rax, implicit-def $eflags + JNE_1 %bb.3.bb3, implicit killed $eflags bb.1.bb2: successors: %bb.2(0x40000000), %bb.13.bb59(0x40000000) - %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags - TEST8rr %bpl, %bpl, implicit-def %eflags - JE_1 %bb.13.bb59, implicit killed %eflags + $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags + TEST8rr $bpl, $bpl, implicit-def $eflags + JE_1 %bb.13.bb59, implicit killed $eflags bb.2: successors: %bb.12.bb51(0x80000000) - 
liveins: %ebp + liveins: $ebp - %xmm0 = XORPSrr undef %xmm0, undef %xmm0 - %ebx = IMPLICIT_DEF implicit-def %rbx + $xmm0 = XORPSrr undef $xmm0, undef $xmm0 + $ebx = IMPLICIT_DEF implicit-def $rbx JMP_1 %bb.12.bb51 bb.3.bb3: successors: %bb.4.bb7(0x80000000) - liveins: %rax - - MOV64mr %rsp, 1, %noreg, 32, %noreg, %rax :: (store 8 into %stack.5) - %r12 = MOV64rr killed %rax - %r12 = ADD64ri8 killed %r12, 16, implicit-def dead %eflags - %xmm0 = XORPSrr undef %xmm0, undef %xmm0 - %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags - %rax = MOV64ri %const.0 - %xmm1 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool) - MOVSDmr %rsp, 1, %noreg, 40, %noreg, killed %xmm1 :: (store 8 into %stack.4) - %eax = IMPLICIT_DEF - %ecx = XOR32rr undef %ecx, undef %ecx, implicit-def dead %eflags + liveins: $rax + + MOV64mr $rsp, 1, $noreg, 32, $noreg, $rax :: (store 8 into %stack.5) + $r12 = MOV64rr killed $rax + $r12 = ADD64ri8 killed $r12, 16, implicit-def dead $eflags + $xmm0 = XORPSrr undef $xmm0, undef $xmm0 + $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags + $rax = MOV64ri %const.0 + $xmm1 = MOVSDrm killed $rax, 1, $noreg, 0, $noreg :: (load 8 from constant-pool) + MOVSDmr $rsp, 1, $noreg, 40, $noreg, killed $xmm1 :: (store 8 into %stack.4) + $eax = IMPLICIT_DEF + $ecx = XOR32rr undef $ecx, undef $ecx, implicit-def dead $eflags bb.4.bb7: successors: %bb.6.bb26(0x40000000), %bb.5.bb15(0x40000000) - liveins: %eax, %ecx, %esi, %r12, %xmm0 + liveins: $eax, $ecx, $esi, $r12, $xmm0 - %ebp = MOV32rr killed %ecx - %ebx = MOV32rr killed %eax, implicit-def %rbx - %r14d = MOV32rr %ebx, implicit-def %r14 - TEST8rr %sil, %sil, implicit-def %eflags - JNE_1 %bb.6.bb26, implicit %eflags + $ebp = MOV32rr killed $ecx + $ebx = MOV32rr killed $eax, implicit-def $rbx + $r14d = MOV32rr $ebx, implicit-def $r14 + TEST8rr $sil, $sil, implicit-def $eflags + JNE_1 %bb.6.bb26, implicit $eflags bb.5.bb15: successors: %bb.6.bb26(0x80000000) - liveins: %ebp, %rbx, %r14, %xmm0 - - MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx :: (store 4 into %stack.0, align 8) - MOV32mr %rsp, 1, %noreg, 16, %noreg, %ebp :: (store 4 into %stack.1, align 8) - MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2) - %rax = MOV64rm %rsp, 1, %noreg, 32, %noreg :: (load 8 from %stack.5) - MOV64mr %rsp, 1, %noreg, 48, %noreg, killed %rax :: (store 8 into %stack.3) - %rax = MOV64ri @wibble - STATEPOINT 2882400000, 0, 0, killed %rax, 2, 0, 2, 0, 2, 30, 2, 1, 2, 0, 2, 99, 2, 0, 2, 12, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 10, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 6, 2, 4278124286, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 1, 8, %rsp, 48, 2, 7, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2), (volatile load 8 from %stack.3) - %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags - %r12 = IMPLICIT_DEF + liveins: $ebp, $rbx, $r14, $xmm0 + + MOV32mr $rsp, 1, $noreg, 24, $noreg, $ebx :: (store 4 into %stack.0, align 8) + MOV32mr $rsp, 1, $noreg, 16, $noreg, $ebp :: (store 4 into %stack.1, align 8) + MOVSDmr $rsp, 1, $noreg, 8, $noreg, killed $xmm0 :: (store 8 into %stack.2) + $rax = MOV64rm $rsp, 1, $noreg, 32, $noreg :: (load 8 from %stack.5) + MOV64mr $rsp, 1, $noreg, 48, $noreg, killed $rax :: (store 8 into %stack.3) + $rax = MOV64ri @wibble + STATEPOINT 2882400000, 0, 0, killed $rax, 2, 0, 2, 0, 2, 30, 2, 1, 2, 0, 2, 99, 2, 0, 2, 12, 
2, 0, 2, 10, 1, 8, $rsp, 24, 2, 10, 2, 0, 2, 10, 1, 8, $rsp, 16, 2, 10, 2, 4278124286, 2, 6, 2, 4278124286, 2, 7, 1, 8, $rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 1, 8, $rsp, 48, 2, 7, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def $rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2), (volatile load 8 from %stack.3) + $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags + $r12 = IMPLICIT_DEF bb.6.bb26: successors: %bb.8.bb37(0x40000000), %bb.7.bb35(0x40000000) - liveins: %ebp, %esi, %rbx, %r12, %r14 - - %rax = MOV64ri @global.1 - %rax = MOV64rm killed %rax, 1, %noreg, 0, %noreg :: (dereferenceable load 8 from @global.1) - TEST64rr %rax, %rax, implicit-def %eflags - %rax = CMOVE64rr undef %rax, killed %rax, implicit killed %eflags - %ecx = MOV32rm undef %rax, 1, %noreg, 0, %noreg :: (load 4 from `i32* undef`) - %rdx = MOV64rm %r12, 8, %r14, 0, %noreg :: (load 8 from %ir.tmp3) - %r15 = LEA64r %rdx, 1, %noreg, 1, _ - MOV64mr %r12, 8, %r14, 0, %noreg, %r15 :: (store 8 into %ir.tmp3) - %ecx = SUB32rr killed %ecx, %edx, implicit-def dead %eflags, implicit killed %rdx - MOV32mr undef %rax, 1, %noreg, 0, %noreg, killed %ecx :: (store 4 into `i32* undef`) - %r13 = MOV64rm killed %rax, 1, %noreg, 768, %noreg :: (load 8 from %ir.tmp33) - TEST8rr %sil, %sil, implicit-def %eflags - %rax = IMPLICIT_DEF - JNE_1 %bb.8.bb37, implicit %eflags + liveins: $ebp, $esi, $rbx, $r12, $r14 + + $rax = MOV64ri @global.1 + $rax = MOV64rm killed $rax, 1, $noreg, 0, $noreg :: (dereferenceable load 8 from @global.1) + TEST64rr $rax, $rax, implicit-def $eflags + $rax = CMOVE64rr undef $rax, killed $rax, implicit killed $eflags + $ecx = MOV32rm undef $rax, 1, $noreg, 0, $noreg :: (load 4 from `i32* undef`) + $rdx = MOV64rm $r12, 8, $r14, 0, $noreg :: (load 8 from %ir.tmp3) + $r15 = LEA64r $rdx, 1, $noreg, 1, _ + MOV64mr $r12, 8, $r14, 0, $noreg, $r15 :: (store 8 into %ir.tmp3) + $ecx = SUB32rr killed $ecx, $edx, implicit-def dead $eflags, implicit killed $rdx + MOV32mr undef $rax, 1, $noreg, 0, $noreg, killed $ecx :: (store 4 into `i32* undef`) + $r13 = MOV64rm killed $rax, 1, $noreg, 768, $noreg :: (load 8 from %ir.tmp33) + TEST8rr $sil, $sil, implicit-def $eflags + $rax = IMPLICIT_DEF + JNE_1 %bb.8.bb37, implicit $eflags bb.7.bb35: successors: %bb.8.bb37(0x80000000) - liveins: %ebp, %rbx, %r12, %r13, %r14, %r15 + liveins: $ebp, $rbx, $r12, $r13, $r14, $r15 - %rsi = MOV64ri @global - %rax = MOV64ri @ham - CALL64r killed %rax, csr_64, implicit %rsp, implicit undef %rdi, implicit %rsi, implicit-def %rsp, implicit-def %rax - %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags + $rsi = MOV64ri @global + $rax = MOV64ri @ham + CALL64r killed $rax, csr_64, implicit $rsp, implicit undef $rdi, implicit $rsi, implicit-def $rsp, implicit-def $rax + $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags bb.8.bb37: successors: %bb.9.bb37(0x40000000), %bb.10.bb37(0x40000000) - liveins: %ebp, %esi, %rax, %rbx, %r12, %r13, %r14, %r15 + liveins: $ebp, $esi, $rax, $rbx, $r12, $r13, $r14, $r15 - %rcx = MOV64rm killed %rax, 1, %noreg, 760, %noreg :: (load 8 from %ir.tmp40) - CMP64rr %r13, %rcx, implicit-def %eflags - JL_1 %bb.10.bb37, implicit %eflags + $rcx = MOV64rm killed $rax, 1, $noreg, 760, $noreg :: (load 8 from %ir.tmp40) + CMP64rr $r13, $rcx, implicit-def $eflags + JL_1 %bb.10.bb37, implicit $eflags bb.9.bb37: successors: %bb.10.bb37(0x80000000) - liveins: %ebp, %esi, %rbx, %r12, %r13, %r14, %r15 + liveins: $ebp, $esi, $rbx, 
$r12, $r13, $r14, $r15 - %cl = MOV8rr %r13b, implicit killed %r13, implicit-def %rcx + $cl = MOV8rr $r13b, implicit killed $r13, implicit-def $rcx bb.10.bb37: successors: %bb.11.bb51.loopexit(0x00000800), %bb.4.bb7(0x7ffff800) - liveins: %ebp, %esi, %rbx, %rcx, %r12, %r14, %r15 - - %cl = KILL %cl, implicit killed %rcx - %r15 = SAR64rCL killed %r15, implicit-def dead %eflags, implicit %cl - MOV64mr %r12, 8, killed %r14, 0, %noreg, killed %r15 :: (store 8 into %ir.tmp7) - MOV64mi32 undef %rax, 1, %noreg, 0, %noreg, 0 :: (store 8 into `i64* undef`) - %eax = LEA64_32r %rbx, 1, %noreg, 1, _ - %ecx = MOV32ri 6 - CMP32ri %eax, 15141, implicit-def %eflags - %xmm0 = MOVSDrm %rsp, 1, %noreg, 40, %noreg :: (load 8 from %stack.4) - JL_1 %bb.4.bb7, implicit %eflags + liveins: $ebp, $esi, $rbx, $rcx, $r12, $r14, $r15 + + $cl = KILL $cl, implicit killed $rcx + $r15 = SAR64rCL killed $r15, implicit-def dead $eflags, implicit $cl + MOV64mr $r12, 8, killed $r14, 0, $noreg, killed $r15 :: (store 8 into %ir.tmp7) + MOV64mi32 undef $rax, 1, $noreg, 0, $noreg, 0 :: (store 8 into `i64* undef`) + $eax = LEA64_32r $rbx, 1, $noreg, 1, _ + $ecx = MOV32ri 6 + CMP32ri $eax, 15141, implicit-def $eflags + $xmm0 = MOVSDrm $rsp, 1, $noreg, 40, $noreg :: (load 8 from %stack.4) + JL_1 %bb.4.bb7, implicit $eflags bb.11.bb51.loopexit: successors: %bb.12.bb51(0x80000000) - liveins: %ebp, %rbx + liveins: $ebp, $rbx - %ebp = INC32r killed %ebp, implicit-def dead %eflags - %ebx = INC32r %ebx, implicit-def dead %eflags, implicit killed %rbx, implicit-def %rbx - %rax = MOV64ri %const.0 - %xmm0 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool) + $ebp = INC32r killed $ebp, implicit-def dead $eflags + $ebx = INC32r $ebx, implicit-def dead $eflags, implicit killed $rbx, implicit-def $rbx + $rax = MOV64ri %const.0 + $xmm0 = MOVSDrm killed $rax, 1, $noreg, 0, $noreg :: (load 8 from constant-pool) bb.12.bb51: - liveins: %ebp, %rbx, %xmm0 + liveins: $ebp, $rbx, $xmm0 - MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx, implicit killed %rbx :: (store 4 into %stack.0, align 8) - MOV32mr %rsp, 1, %noreg, 16, %noreg, killed %ebp :: (store 4 into %stack.1, align 8) - MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2) - %rax = MOV64ri @wobble - %edi = MOV32ri -121 - STATEPOINT 2882400000, 0, 1, killed %rax, %edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 270, 2, 4, 2, 12, 2, 0, 2, 11, 2, 4278124286, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 6, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 99, 2, 0, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2) + MOV32mr $rsp, 1, $noreg, 24, $noreg, $ebx, implicit killed $rbx :: (store 4 into %stack.0, align 8) + MOV32mr $rsp, 1, $noreg, 16, $noreg, killed $ebp :: (store 4 into %stack.1, align 8) + MOVSDmr $rsp, 1, $noreg, 8, $noreg, killed $xmm0 :: (store 8 into %stack.2) + $rax = MOV64ri @wobble + $edi = MOV32ri -121 + STATEPOINT 2882400000, 0, 1, killed $rax, $edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 270, 2, 4, 2, 12, 2, 0, 2, 11, 2, 4278124286, 2, 99, 2, 0, 2, 10, 1, 8, $rsp, 24, 2, 6, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, 2, 10, 1, 8, $rsp, 16, 2, 10, 2, 4278124286, 2, 99, 2, 0, 2, 7, 1, 8, $rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, csr_64, implicit-def $rsp :: (volatile load 8 
from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2) bb.13.bb59: - %rax = MOV64ri @wobble - %edi = MOV32ri 8 - STATEPOINT 2882400000, 0, 1, killed %rax, %edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 123, 2, 4, 2, 12, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 13, 2, 0, 2, 10, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def %rsp + $rax = MOV64ri @wobble + $edi = MOV32ri 8 + STATEPOINT 2882400000, 0, 1, killed $rax, $edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 123, 2, 4, 2, 12, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 13, 2, 0, 2, 10, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def $rsp ... Index: test/CodeGen/X86/oddshuffles.ll =================================================================== --- test/CodeGen/X86/oddshuffles.ll +++ test/CodeGen/X86/oddshuffles.ll @@ -31,7 +31,7 @@ ; ; AVX2-LABEL: v3i64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3] ; AVX2-NEXT: vpextrq $1, %xmm0, 16(%rdi) @@ -66,7 +66,7 @@ ; ; AVX2-LABEL: v3f64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3] ; AVX2-NEXT: vmovhpd %xmm0, 16(%rdi) @@ -226,7 +226,7 @@ ; ; AVX2-LABEL: v5i32: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 ; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u> ; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1 @@ -276,7 +276,7 @@ ; ; AVX2-LABEL: v5f32: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 ; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u> ; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1 @@ -442,7 +442,7 @@ ; ; AVX2-LABEL: v7i32: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,6,3,6,1,7,4,u> ; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0 @@ -1802,7 +1802,7 @@ ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 ; AVX1-NEXT: vmovaps %ymm1, 32(%rdi) ; AVX1-NEXT: vmovaps %ymm1, (%rdi) -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -1821,7 +1821,7 @@ ; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 ; XOP-NEXT: vmovaps %ymm1, 32(%rdi) ; XOP-NEXT: vmovaps %ymm1, (%rdi) -; XOP-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; XOP-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; XOP-NEXT: vzeroupper ; XOP-NEXT: retq %shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer Index: test/CodeGen/X86/or-lea.ll =================================================================== 
--- test/CodeGen/X86/or-lea.ll +++ test/CodeGen/X86/or-lea.ll @@ -9,8 +9,8 @@ define i32 @or_shift1_and1(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift1_and1: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: leal (%rsi,%rdi,2), %eax ; CHECK-NEXT: retq @@ -24,8 +24,8 @@ define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift1_and1_swapped: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: leal (%rsi,%rdi,2), %eax ; CHECK-NEXT: retq @@ -39,8 +39,8 @@ define i32 @or_shift2_and1(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift2_and1: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: leal (%rsi,%rdi,4), %eax ; CHECK-NEXT: retq @@ -54,8 +54,8 @@ define i32 @or_shift3_and1(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift3_and1: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: leal (%rsi,%rdi,8), %eax ; CHECK-NEXT: retq @@ -69,8 +69,8 @@ define i32 @or_shift3_and7(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift3_and7: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: andl $7, %esi ; CHECK-NEXT: leal (%rsi,%rdi,8), %eax ; CHECK-NEXT: retq @@ -86,8 +86,8 @@ define i32 @or_shift4_and1(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift4_and1: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %esi killed %esi def %rsi -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: shll $4, %edi ; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: leal (%rsi,%rdi), %eax @@ -104,7 +104,7 @@ define i32 @or_shift3_and8(i32 %x, i32 %y) { ; CHECK-LABEL: or_shift3_and8: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal (,%rdi,8), %eax ; CHECK-NEXT: andl $8, %esi ; CHECK-NEXT: orl %esi, %eax Index: test/CodeGen/X86/patchpoint-verifiable.mir =================================================================== --- test/CodeGen/X86/patchpoint-verifiable.mir +++ test/CodeGen/X86/patchpoint-verifiable.mir @@ -17,8 +17,8 @@ name: small_patchpoint_codegen tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%rsi' } + - { reg: '$rdi' } + - { reg: '$rsi' } frameInfo: hasPatchPoint: true stackSize: 8 @@ -28,15 +28,15 @@ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 } body: | bb.0.entry: - liveins: %rdi, %rsi, %rbp + liveins: $rdi, $rsi, $rbp - frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbp, implicit-def 
$rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbp, -16 - %rbp = frame-setup MOV64rr %rsp - CFI_INSTRUCTION def_cfa_register %rbp - ; CHECK: PATCHPOINT 5, 5, 0, 2, 0, %rdi, %rsi, csr_64, implicit-def dead early-clobber %r11, implicit-def %rsp, implicit-def dead %rax - PATCHPOINT 5, 5, 0, 2, 0, %rdi, %rsi, csr_64, implicit-def dead early-clobber %r11, implicit-def %rsp, implicit-def dead %rax - %rbp = POP64r implicit-def %rsp, implicit %rsp + CFI_INSTRUCTION offset $rbp, -16 + $rbp = frame-setup MOV64rr $rsp + CFI_INSTRUCTION def_cfa_register $rbp + ; CHECK: PATCHPOINT 5, 5, 0, 2, 0, $rdi, $rsi, csr_64, implicit-def dead early-clobber $r11, implicit-def $rsp, implicit-def dead $rax + PATCHPOINT 5, 5, 0, 2, 0, $rdi, $rsi, csr_64, implicit-def dead early-clobber $r11, implicit-def $rsp, implicit-def dead $rax + $rbp = POP64r implicit-def $rsp, implicit $rsp RETQ ... Index: test/CodeGen/X86/peephole-recurrence.mir =================================================================== --- test/CodeGen/X86/peephole-recurrence.mir +++ test/CodeGen/X86/peephole-recurrence.mir @@ -86,14 +86,14 @@ - { id: 11, class: gr32, preferred-register: '' } - { id: 12, class: gr32, preferred-register: '' } liveins: - - { reg: '%edi', virtual-reg: '%4' } + - { reg: '$edi', virtual-reg: '%4' } body: | bb.0.bb0: successors: %bb.1(0x80000000) - liveins: %edi + liveins: $edi - %4 = COPY %edi - %5 = MOV32r0 implicit-def dead %eflags + %4 = COPY $edi + %5 = MOV32r0 implicit-def dead $eflags bb.1.bb1: successors: %bb.3(0x30000000), %bb.2(0x50000000) @@ -101,8 +101,8 @@ ; CHECK: %0:gr32 = PHI %5, %bb.0, %3, %bb.5 %0 = PHI %5, %bb.0, %3, %bb.5 %6 = MOV32ri 1 - TEST32rr %4, %4, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + TEST32rr %4, %4, implicit-def $eflags + JE_1 %bb.3, implicit $eflags JMP_1 %bb.2 bb.2.bb3: @@ -114,8 +114,8 @@ successors: %bb.5(0x30000000), %bb.4(0x50000000) %1 = PHI %6, %bb.1, %7, %bb.2 - TEST32rr %1, %1, implicit-def %eflags - JE_1 %bb.5, implicit %eflags + TEST32rr %1, %1, implicit-def $eflags + JE_1 %bb.5, implicit $eflags JMP_1 %bb.4 bb.4.bb6: @@ -127,22 +127,22 @@ successors: %bb.1(0x7c000000), %bb.6(0x04000000) %2 = PHI %6, %bb.3, %9, %bb.4 - %10 = ADD32rr %1, %0, implicit-def dead %eflags + %10 = ADD32rr %1, %0, implicit-def dead $eflags ; CHECK: %10:gr32 = ADD32rr ; CHECK-SAME: %0, ; CHECK-SAME: %1, - %3 = ADD32rr %2, killed %10, implicit-def dead %eflags + %3 = ADD32rr %2, killed %10, implicit-def dead $eflags ; CHECK: %3:gr32 = ADD32rr ; CHECK-SAME: %10, ; CHECK-SAME: %2, - %11 = SUB32ri8 %3, 10, implicit-def %eflags - JL_1 %bb.1, implicit %eflags + %11 = SUB32ri8 %3, 10, implicit-def $eflags + JL_1 %bb.1, implicit $eflags JMP_1 %bb.6 bb.6.bb8: - %12 = MOV32r0 implicit-def dead %eflags - %eax = COPY %12 - RET 0, %eax + %12 = MOV32r0 implicit-def dead $eflags + $eax = COPY %12 + RET 0, $eax ... 
--- @@ -168,16 +168,16 @@ - { id: 12, class: gr32, preferred-register: '' } - { id: 13, class: gr32, preferred-register: '' } liveins: - - { reg: '%edi', virtual-reg: '%4' } - - { reg: '%rsi', virtual-reg: '%5' } + - { reg: '$edi', virtual-reg: '%4' } + - { reg: '$rsi', virtual-reg: '%5' } body: | bb.0.bb0: successors: %bb.1(0x80000000) - liveins: %edi, %rsi + liveins: $edi, $rsi - %5 = COPY %rsi - %4 = COPY %edi - %6 = MOV32r0 implicit-def dead %eflags + %5 = COPY $rsi + %4 = COPY $edi + %6 = MOV32r0 implicit-def dead $eflags bb.1.bb1: successors: %bb.3(0x30000000), %bb.2(0x50000000) @@ -185,8 +185,8 @@ %0 = PHI %6, %bb.0, %3, %bb.5 ; CHECK: %0:gr32 = PHI %6, %bb.0, %3, %bb.5 %7 = MOV32ri 1 - TEST32rr %4, %4, implicit-def %eflags - JE_1 %bb.3, implicit %eflags + TEST32rr %4, %4, implicit-def $eflags + JE_1 %bb.3, implicit $eflags JMP_1 %bb.2 bb.2.bb3: @@ -198,8 +198,8 @@ successors: %bb.5(0x30000000), %bb.4(0x50000000) %1 = PHI %7, %bb.1, %8, %bb.2 - TEST32rr %1, %1, implicit-def %eflags - JE_1 %bb.5, implicit %eflags + TEST32rr %1, %1, implicit-def $eflags + JE_1 %bb.5, implicit $eflags JMP_1 %bb.4 bb.4.bb6: @@ -211,22 +211,22 @@ successors: %bb.1(0x7c000000), %bb.6(0x04000000) %2 = PHI %7, %bb.3, %10, %bb.4 - %11 = ADD32rr %1, %0, implicit-def dead %eflags + %11 = ADD32rr %1, %0, implicit-def dead $eflags ; CHECK: %11:gr32 = ADD32rr ; CHECK-SAME: %1, ; CHECK-SAME: %0, - MOV32mr %5, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p) - %3 = ADD32rr %2, killed %11, implicit-def dead %eflags + MOV32mr %5, 1, $noreg, 0, $noreg, %0 :: (store 4 into %ir.p) + %3 = ADD32rr %2, killed %11, implicit-def dead $eflags ; CHECK: %3:gr32 = ADD32rr ; CHECK-SAME: %2, ; CHECK-SAME: %11, - %12 = SUB32ri8 %3, 10, implicit-def %eflags - JL_1 %bb.1, implicit %eflags + %12 = SUB32ri8 %3, 10, implicit-def $eflags + JL_1 %bb.1, implicit $eflags JMP_1 %bb.6 bb.6.bb8: - %13 = MOV32r0 implicit-def dead %eflags - %eax = COPY %13 - RET 0, %eax + %13 = MOV32r0 implicit-def dead $eflags + $eax = COPY %13 + RET 0, $eax ... 
Index: test/CodeGen/X86/pmul.ll =================================================================== --- test/CodeGen/X86/pmul.ll +++ test/CodeGen/X86/pmul.ll @@ -63,7 +63,7 @@ ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 ; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq entry: @@ -206,7 +206,7 @@ ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 ; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq entry: Index: test/CodeGen/X86/popcnt-schedule.ll =================================================================== --- test/CodeGen/X86/popcnt-schedule.ll +++ test/CodeGen/X86/popcnt-schedule.ll @@ -17,7 +17,7 @@ ; GENERIC-NEXT: popcntw (%rsi), %cx # sched: [9:1.00] ; GENERIC-NEXT: popcntw %di, %ax # sched: [3:1.00] ; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33] -; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax +; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; SLM-LABEL: test_ctpop_i16: @@ -25,7 +25,7 @@ ; SLM-NEXT: popcntw (%rsi), %cx # sched: [6:1.00] ; SLM-NEXT: popcntw %di, %ax # sched: [3:1.00] ; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50] -; SLM-NEXT: # kill: def %ax killed %ax killed %eax +; SLM-NEXT: # kill: def $ax killed $ax killed $eax ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_ctpop_i16: @@ -33,7 +33,7 @@ ; SANDY-NEXT: popcntw (%rsi), %cx # sched: [9:1.00] ; SANDY-NEXT: popcntw %di, %ax # sched: [3:1.00] ; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33] -; SANDY-NEXT: # kill: def %ax killed %ax killed %eax +; SANDY-NEXT: # kill: def $ax killed $ax killed $eax ; SANDY-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_ctpop_i16: @@ -41,7 +41,7 @@ ; HASWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00] ; HASWELL-NEXT: popcntw %di, %ax # sched: [3:1.00] ; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25] -; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax +; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax ; HASWELL-NEXT: retq # sched: [7:1.00] ; ; BROADWELL-LABEL: test_ctpop_i16: @@ -49,7 +49,7 @@ ; BROADWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00] ; BROADWELL-NEXT: popcntw %di, %ax # sched: [3:1.00] ; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25] -; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax +; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax ; BROADWELL-NEXT: retq # sched: [7:1.00] ; ; SKYLAKE-LABEL: test_ctpop_i16: @@ -57,7 +57,7 @@ ; SKYLAKE-NEXT: popcntw (%rsi), %cx # sched: [8:1.00] ; SKYLAKE-NEXT: popcntw %di, %ax # sched: [3:1.00] ; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25] -; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax +; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax ; SKYLAKE-NEXT: retq # sched: [7:1.00] ; ; BTVER2-LABEL: test_ctpop_i16: @@ -65,7 +65,7 @@ ; BTVER2-NEXT: popcntw (%rsi), %cx # sched: [8:1.00] ; BTVER2-NEXT: popcntw %di, %ax # sched: [3:1.00] ; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50] -; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax +; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_ctpop_i16: @@ -73,7 +73,7 @@ ; ZNVER1-NEXT: popcntw (%rsi), %cx # sched: [10:1.00] ; 
ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00] ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax +; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i16, i16 *%a1 %2 = tail call i16 @llvm.ctpop.i16( i16 %1 ) Index: test/CodeGen/X86/popcnt.ll =================================================================== --- test/CodeGen/X86/popcnt.ll +++ test/CodeGen/X86/popcnt.ll @@ -44,14 +44,14 @@ ; X32-POPCNT: # %bb.0: ; X32-POPCNT-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X32-POPCNT-NEXT: popcntl %eax, %eax -; X32-POPCNT-NEXT: # kill: def %al killed %al killed %eax +; X32-POPCNT-NEXT: # kill: def $al killed $al killed $eax ; X32-POPCNT-NEXT: retl ; ; X64-POPCNT-LABEL: cnt8: ; X64-POPCNT: # %bb.0: ; X64-POPCNT-NEXT: movzbl %dil, %eax ; X64-POPCNT-NEXT: popcntl %eax, %eax -; X64-POPCNT-NEXT: # kill: def %al killed %al killed %eax +; X64-POPCNT-NEXT: # kill: def $al killed $al killed $eax ; X64-POPCNT-NEXT: retq %cnt = tail call i8 @llvm.ctpop.i8(i8 %x) ret i8 %cnt @@ -78,7 +78,7 @@ ; X32-NEXT: shll $8, %eax ; X32-NEXT: addl %ecx, %eax ; X32-NEXT: movzbl %ah, %eax -; X32-NEXT: # kill: def %ax killed %ax killed %eax +; X32-NEXT: # kill: def $ax killed $ax killed $eax ; X32-NEXT: retl ; ; X64-LABEL: cnt16: @@ -100,7 +100,7 @@ ; X64-NEXT: shll $8, %ecx ; X64-NEXT: addl %eax, %ecx ; X64-NEXT: movzbl %ch, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; ; X32-POPCNT-LABEL: cnt16: Index: test/CodeGen/X86/post-ra-sched-with-debug.mir =================================================================== --- test/CodeGen/X86/post-ra-sched-with-debug.mir +++ test/CodeGen/X86/post-ra-sched-with-debug.mir @@ -237,91 +237,91 @@ name: _ZN1sC2Ei tracksRegLiveness: true liveins: - - { reg: '%rdi' } - - { reg: '%esi' } + - { reg: '$rdi' } + - { reg: '$esi' } fixedStack: - - { id: 0, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '%rbx' } - - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%r14' } + - { id: 0, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '$rbx' } + - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$r14' } - { id: 2, type: spill-slot, offset: -16, size: 8, alignment: 16 } stack: - { id: 0, offset: -36, size: 4, alignment: 4 } body: | bb.0: successors: %bb.3, %bb.2 - liveins: %esi, %rdi, %r14, %rbx, %rbp + liveins: $esi, $rdi, $r14, $rbx, $rbp - ; CHECK: [[REGISTER:%r[a-z0-9]+]] = LEA64r {{%r[a-z0-9]+}}, 1, %noreg, -20, %noreg - ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]] - ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]] + ; CHECK: [[REGISTER:\$r[a-z0-9]+]] = LEA64r {{\$r[a-z0-9]+}}, 1, $noreg, -20, $noreg + ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use $noreg, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]] + ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use $noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]] - frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbp, -16 - %rbp = frame-setup MOV64rr %rsp - CFI_INSTRUCTION def_cfa_register %rbp - frame-setup 
PUSH64r killed %r14, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp - %rsp = frame-setup SUB64ri8 %rsp, 16, implicit-def dead %eflags - CFI_INSTRUCTION offset %rbx, -32 - CFI_INSTRUCTION offset %r14, -24 - %r14d = MOV32rr %esi - %rbx = MOV64rr %rdi - CALL64pcrel32 @_ZN1lC2Ei, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit-def %rsp - %rdi = LEA64r %rbx, 1, %noreg, 8, %noreg - DBG_VALUE debug-use %rdi, debug-use %noreg, !20, !17, debug-location !27 - DBG_VALUE debug-use %rdi, debug-use %noreg, !10, !17, debug-location !18 - %rax = MOV64rm %rbx, 1, %noreg, 16, %noreg :: (load 8) - MOV64mr %rbx, 1, %noreg, 8, %noreg, killed %rax :: (store 8) - MOV64mr %rbx, 1, %noreg, 24, %noreg, %rdi :: (store 8) - %eax = MOV32ri -1 - %cl = MOV8rr %r14b, implicit killed %r14d - %eax = SHL32rCL killed %eax, implicit-def dead %eflags, implicit %cl - MOV32mr %rbx, 1, %noreg, 32, %noreg, %eax :: (store 4, align 8) - MOV32mi %rbp, 1, %noreg, -20, %noreg, 0 :: (store 4) - %rcx = MOV64rm %rbx, 1, %noreg, 8, %noreg :: (load 8) - MOV64mr %rip, 1, %noreg, @n, %noreg, %rcx :: (store 8) - %edx = XOR32rr undef %edx, undef %edx, implicit-def dead %eflags, implicit-def %rdx - TEST64rr %rcx, %rcx, implicit-def %eflags - %esi = MOV32ri @o, implicit-def %rsi - %rsi = CMOVNE64rr killed %rsi, %rdx, implicit killed %eflags - %rsi = OR64rr killed %rsi, killed %rcx, implicit-def %eflags - %rcx = LEA64r %rbp, 1, %noreg, -20, %noreg - DBG_VALUE debug-use %rcx, debug-use %noreg, !46, !17, debug-location !48 - DBG_VALUE debug-use %rcx, debug-use %noreg, !39, !17, debug-location !44 - DBG_VALUE %rbp, -20, !29, !17, debug-location !36 - %rcx = CMOVNE64rr killed %rcx, killed %rdx, implicit killed %eflags - %rcx = OR64rr killed %rcx, killed %rsi, implicit-def dead %eflags - %rdx = MOVSX64rm32 %rbx, 1, %noreg, 0, %noreg :: (load 4, align 8) - TEST32mr killed %rcx, 4, killed %rdx, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4) - JNE_1 %bb.2, implicit %eflags + CFI_INSTRUCTION offset $rbp, -16 + $rbp = frame-setup MOV64rr $rsp + CFI_INSTRUCTION def_cfa_register $rbp + frame-setup PUSH64r killed $r14, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp + $rsp = frame-setup SUB64ri8 $rsp, 16, implicit-def dead $eflags + CFI_INSTRUCTION offset $rbx, -32 + CFI_INSTRUCTION offset $r14, -24 + $r14d = MOV32rr $esi + $rbx = MOV64rr $rdi + CALL64pcrel32 @_ZN1lC2Ei, csr_64, implicit $rsp, implicit $rdi, implicit $esi, implicit-def $rsp + $rdi = LEA64r $rbx, 1, $noreg, 8, $noreg + DBG_VALUE debug-use $rdi, debug-use $noreg, !20, !17, debug-location !27 + DBG_VALUE debug-use $rdi, debug-use $noreg, !10, !17, debug-location !18 + $rax = MOV64rm $rbx, 1, $noreg, 16, $noreg :: (load 8) + MOV64mr $rbx, 1, $noreg, 8, $noreg, killed $rax :: (store 8) + MOV64mr $rbx, 1, $noreg, 24, $noreg, $rdi :: (store 8) + $eax = MOV32ri -1 + $cl = MOV8rr $r14b, implicit killed $r14d + $eax = SHL32rCL killed $eax, implicit-def dead $eflags, implicit $cl + MOV32mr $rbx, 1, $noreg, 32, $noreg, $eax :: (store 4, align 8) + MOV32mi $rbp, 1, $noreg, -20, $noreg, 0 :: (store 4) + $rcx = MOV64rm $rbx, 1, $noreg, 8, $noreg :: (load 8) + MOV64mr $rip, 1, $noreg, @n, $noreg, $rcx :: (store 8) + $edx = XOR32rr undef $edx, undef $edx, implicit-def dead $eflags, implicit-def $rdx + TEST64rr $rcx, $rcx, implicit-def $eflags + $esi = MOV32ri @o, implicit-def $rsi + $rsi = CMOVNE64rr killed $rsi, $rdx, implicit killed $eflags + $rsi = OR64rr killed $rsi, killed 
$rcx, implicit-def $eflags + $rcx = LEA64r $rbp, 1, $noreg, -20, $noreg + DBG_VALUE debug-use $rcx, debug-use $noreg, !46, !17, debug-location !48 + DBG_VALUE debug-use $rcx, debug-use $noreg, !39, !17, debug-location !44 + DBG_VALUE $rbp, -20, !29, !17, debug-location !36 + $rcx = CMOVNE64rr killed $rcx, killed $rdx, implicit killed $eflags + $rcx = OR64rr killed $rcx, killed $rsi, implicit-def dead $eflags + $rdx = MOVSX64rm32 $rbx, 1, $noreg, 0, $noreg :: (load 4, align 8) + TEST32mr killed $rcx, 4, killed $rdx, 0, $noreg, killed $eax, implicit-def $eflags :: (load 4) + JNE_1 %bb.2, implicit $eflags JMP_1 %bb.3 bb.1: successors: %bb.2 - liveins: %rbx, %rbp + liveins: $rbx, $rbp - %rdi = MOV64rm %rbx, 1, %noreg, 24, %noreg :: (load 8) + $rdi = MOV64rm $rbx, 1, $noreg, 24, $noreg :: (load 8) bb.2: successors: %bb.1, %bb.3 - liveins: %rbx, %rbp, %rsp, %rdi + liveins: $rbx, $rbp, $rsp, $rdi - CALL64pcrel32 @_ZN1p2aaEv, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp, implicit-def %eax - %eax = KILL %eax, implicit-def %rax - %ecx = LEA64_32r %rax, 1, %noreg, -1, %noreg, implicit-def %rcx - %ecx = SHR32ri %ecx, 31, implicit-def dead %eflags, implicit killed %rcx, implicit-def %rcx - %eax = LEA64_32r killed %rax, 1, killed %rcx, -1, %noreg - %eax = SAR32r1 killed %eax, implicit-def dead %eflags - CMP32mr %rbx, 1, %noreg, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4, align 8), (load 4, align 8) - JG_1 %bb.1, implicit killed %eflags + CALL64pcrel32 @_ZN1p2aaEv, csr_64, implicit $rsp, implicit $rdi, implicit-def $rsp, implicit-def $eax + $eax = KILL $eax, implicit-def $rax + $ecx = LEA64_32r $rax, 1, $noreg, -1, $noreg, implicit-def $rcx + $ecx = SHR32ri $ecx, 31, implicit-def dead $eflags, implicit killed $rcx, implicit-def $rcx + $eax = LEA64_32r killed $rax, 1, killed $rcx, -1, $noreg + $eax = SAR32r1 killed $eax, implicit-def dead $eflags + CMP32mr $rbx, 1, $noreg, 0, $noreg, killed $eax, implicit-def $eflags :: (load 4, align 8), (load 4, align 8) + JG_1 %bb.1, implicit killed $eflags bb.3: - liveins: %rbp + liveins: $rbp - %rsp = ADD64ri8 %rsp, 16, implicit-def dead %eflags - %rbx = POP64r implicit-def %rsp, implicit %rsp - %r14 = POP64r implicit-def %rsp, implicit %rsp - %rbp = POP64r implicit-def %rsp, implicit %rsp + $rsp = ADD64ri8 $rsp, 16, implicit-def dead $eflags + $rbx = POP64r implicit-def $rsp, implicit $rsp + $r14 = POP64r implicit-def $rsp, implicit $rsp + $rbp = POP64r implicit-def $rsp, implicit $rsp RETQ ... 
Index: test/CodeGen/X86/pr22970.ll =================================================================== --- test/CodeGen/X86/pr22970.ll +++ test/CodeGen/X86/pr22970.ll @@ -13,7 +13,7 @@ ; ; X64-LABEL: PR22970_i32: ; X64: # %bb.0: -; X64-NEXT: # kill: def %esi killed %esi def %rsi +; X64-NEXT: # kill: def $esi killed $esi def $rsi ; X64-NEXT: andl $4095, %esi # imm = 0xFFF ; X64-NEXT: movl 32(%rdi,%rsi,4), %eax ; X64-NEXT: retq Index: test/CodeGen/X86/pr27681.mir =================================================================== --- test/CodeGen/X86/pr27681.mir +++ test/CodeGen/X86/pr27681.mir @@ -15,69 +15,69 @@ frameInfo: stackSize: 52 fixedStack: - - { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, callee-saved-register: '%esi' } - - { id: 1, type: spill-slot, offset: -16, size: 4, alignment: 4, callee-saved-register: '%edi' } - - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, callee-saved-register: '%ebx' } - - { id: 3, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%ebp' } + - { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, callee-saved-register: '$esi' } + - { id: 1, type: spill-slot, offset: -16, size: 4, alignment: 4, callee-saved-register: '$edi' } + - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, callee-saved-register: '$ebx' } + - { id: 3, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '$ebp' } stack: - { id: 0, type: spill-slot, offset: -53, size: 1, alignment: 1 } - { id: 1, type: spill-slot, offset: -48, size: 4, alignment: 4 } - { id: 2, type: spill-slot, offset: -32, size: 4, alignment: 4 } body: | bb.0: - liveins: %ebp, %ebx, %edi, %esi + liveins: $ebp, $ebx, $edi, $esi - frame-setup PUSH32r killed %ebp, implicit-def %esp, implicit %esp - frame-setup PUSH32r killed %ebx, implicit-def %esp, implicit %esp - frame-setup PUSH32r killed %edi, implicit-def %esp, implicit %esp - frame-setup PUSH32r killed %esi, implicit-def %esp, implicit %esp - %esp = frame-setup SUB32ri8 %esp, 36, implicit-def dead %eflags - %eax = MOV32ri 1 - %ebp = MOV32ri 2 - %ebx = MOV32ri 3 - %ecx = MOV32ri 4 - %edi = MOV32ri 5 - %edx = MOV32ri 6 + frame-setup PUSH32r killed $ebp, implicit-def $esp, implicit $esp + frame-setup PUSH32r killed $ebx, implicit-def $esp, implicit $esp + frame-setup PUSH32r killed $edi, implicit-def $esp, implicit $esp + frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp + $esp = frame-setup SUB32ri8 $esp, 36, implicit-def dead $eflags + $eax = MOV32ri 1 + $ebp = MOV32ri 2 + $ebx = MOV32ri 3 + $ecx = MOV32ri 4 + $edi = MOV32ri 5 + $edx = MOV32ri 6 bb.1: - liveins: %eax, %ebp, %ebx, %ecx, %edi, %edx + liveins: $eax, $ebp, $ebx, $ecx, $edi, $edx - %ebp = SHR32rCL killed %ebp, implicit-def dead %eflags, implicit %cl - %ebp = XOR32rr killed %ebp, killed %ebx, implicit-def dead %eflags - TEST32rr %edx, %edx, implicit-def %eflags - %cl = SETNEr implicit %eflags - ; This %bl def is antidependent on the above use of %ebx - %bl = MOV8rm %esp, 1, %noreg, 3, _ ; :: (load 1 from %stack.0) - %cl = OR8rr killed %cl, %bl, implicit-def dead %eflags - %esi = MOVZX32rr8 killed %cl - %esi = ADD32rr killed %esi, killed %edi, implicit-def dead %eflags - %ecx = MOV32rm %esp, 1, %noreg, 24, _ ; :: (load 4 from %stack.2) - %edx = SAR32rCL killed %edx, implicit-def dead %eflags, implicit %cl - TEST32rr killed %edx, %edx, implicit-def %eflags - %cl = SETNEr implicit %eflags - ; Verify that removal of the %bl antidependence does not use %ch + $ebp = SHR32rCL killed $ebp, 
implicit-def dead $eflags, implicit $cl + $ebp = XOR32rr killed $ebp, killed $ebx, implicit-def dead $eflags + TEST32rr $edx, $edx, implicit-def $eflags + $cl = SETNEr implicit $eflags + ; This %bl def is antidependent on the above use of $ebx + $bl = MOV8rm $esp, 1, $noreg, 3, _ ; :: (load 1 from %stack.0) + $cl = OR8rr killed $cl, $bl, implicit-def dead $eflags + $esi = MOVZX32rr8 killed $cl + $esi = ADD32rr killed $esi, killed $edi, implicit-def dead $eflags + $ecx = MOV32rm $esp, 1, $noreg, 24, _ ; :: (load 4 from %stack.2) + $edx = SAR32rCL killed $edx, implicit-def dead $eflags, implicit $cl + TEST32rr killed $edx, $edx, implicit-def $eflags + $cl = SETNEr implicit $eflags + ; Verify that removal of the $bl antidependence does not use $ch ; as a replacement register. - ; CHECK: %cl = AND8rr killed %cl, killed %b - %cl = AND8rr killed %cl, killed %bl, implicit-def dead %eflags - CMP32ri8 %ebp, -1, implicit-def %eflags - %edx = MOV32ri 0 - JE_1 %bb.3, implicit %eflags + ; CHECK: $cl = AND8rr killed $cl, killed $b + $cl = AND8rr killed $cl, killed $bl, implicit-def dead $eflags + CMP32ri8 $ebp, -1, implicit-def $eflags + $edx = MOV32ri 0 + JE_1 %bb.3, implicit $eflags bb.2: - liveins: %cl, %eax, %ebp, %esi + liveins: $cl, $eax, $ebp, $esi - OR32mr %esp, 1, %noreg, 8, %noreg, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1) - %dl = SETNEr implicit %eflags, implicit-def %edx + OR32mr $esp, 1, $noreg, 8, $noreg, killed $eax, implicit-def $eflags ; :: (store 4 into %stack.1) + $dl = SETNEr implicit $eflags, implicit-def $edx bb.3: - liveins: %cl, %ebp, %edx, %esi + liveins: $cl, $ebp, $edx, $esi - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags - %esp = ADD32ri8 %esp, 36, implicit-def dead %eflags - %esi = POP32r implicit-def %esp, implicit %esp - %edi = POP32r implicit-def %esp, implicit %esp - %ebx = POP32r implicit-def %esp, implicit %esp - %ebp = POP32r implicit-def %esp, implicit %esp - RET 0, %eax + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags + $esp = ADD32ri8 $esp, 36, implicit-def dead $eflags + $esi = POP32r implicit-def $esp, implicit $esp + $edi = POP32r implicit-def $esp, implicit $esp + $ebx = POP32r implicit-def $esp, implicit $esp + $ebp = POP32r implicit-def $esp, implicit $esp + RET 0, $eax ... 
Index: test/CodeGen/X86/pr28173.ll =================================================================== --- test/CodeGen/X86/pr28173.ll +++ test/CodeGen/X86/pr28173.ll @@ -27,7 +27,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movzbl %dil, %eax ; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq br label %bb @@ -45,7 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movzbl %dil, %eax ; CHECK-NEXT: orl $2, %eax -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq br label %bb Index: test/CodeGen/X86/pr28560.ll =================================================================== --- test/CodeGen/X86/pr28560.ll +++ test/CodeGen/X86/pr28560.ll @@ -1,6 +1,6 @@ ; RUN: llc -mtriple=i686-pc-linux -print-after=postrapseudos < %s 2>&1 | FileCheck %s -; CHECK: MOV8rr %{{[a-d]}}l, implicit killed %e[[R:[a-d]]]x, implicit-def %e[[R]]x +; CHECK: MOV8rr ${{[a-d]}}l, implicit killed $e[[R:[a-d]]]x, implicit-def $e[[R]]x define i32 @foo(i32 %i, i32 %k, i8* %p) { %f = icmp ne i32 %i, %k %s = zext i1 %f to i8 Index: test/CodeGen/X86/pr29061.ll =================================================================== --- test/CodeGen/X86/pr29061.ll +++ test/CodeGen/X86/pr29061.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: .cfi_offset %edi, -8 ; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %edi -; CHECK-NEXT: # kill: def %di killed %di killed %edi +; CHECK-NEXT: # kill: def $di killed $di killed $edi ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: popl %edi @@ -28,7 +28,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %esi -; CHECK-NEXT: # kill: def %si killed %si killed %esi +; CHECK-NEXT: # kill: def $si killed $si killed $esi ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: popl %esi Index: test/CodeGen/X86/pr30430.ll =================================================================== --- test/CodeGen/X86/pr30430.ll +++ test/CodeGen/X86/pr30430.ll @@ -73,7 +73,7 @@ ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] -; CHECK-NEXT: # implicit-def: %ymm2 +; CHECK-NEXT: # implicit-def: $ymm2 ; CHECK-NEXT: vmovaps %xmm1, %xmm2 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -90,10 +90,10 @@ ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] ; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0] -; CHECK-NEXT: # implicit-def: %ymm3 +; CHECK-NEXT: # implicit-def: $ymm3 ; CHECK-NEXT: vmovaps %xmm1, %xmm3 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm3 -; CHECK-NEXT: # implicit-def: %zmm24 +; CHECK-NEXT: # implicit-def: $zmm24 ; CHECK-NEXT: vmovaps %zmm3, %zmm24 ; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm24, %zmm24 ; CHECK-NEXT: vmovaps %zmm24, {{[0-9]+}}(%rsp) Index: test/CodeGen/X86/pr32282.ll =================================================================== --- test/CodeGen/X86/pr32282.ll +++ test/CodeGen/X86/pr32282.ll @@ -64,7 +64,7 @@ ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: divl %ecx -; X64-NEXT: # kill: def %eax killed %eax def %rax +; X64-NEXT: # kill: def $eax killed $eax def $rax ; X64-NEXT: .LBB0_3: ; X64-NEXT: testq %rax, %rax ; X64-NEXT: 
setne -{{[0-9]+}}(%rsp) Index: test/CodeGen/X86/pr32284.ll =================================================================== --- test/CodeGen/X86/pr32284.ll +++ test/CodeGen/X86/pr32284.ll @@ -308,7 +308,7 @@ define void @f2() { ; X86-O0-LABEL: f2: ; X86-O0: # %bb.0: # %entry -; X86-O0-NEXT: # implicit-def: %rax +; X86-O0-NEXT: # implicit-def: $rax ; X86-O0-NEXT: movzbl var_7, %ecx ; X86-O0-NEXT: cmpb $0, var_7 ; X86-O0-NEXT: setne %dl @@ -361,7 +361,7 @@ ; 686-O0-NEXT: .cfi_def_cfa_offset 14 ; 686-O0-NEXT: .cfi_offset %esi, -12 ; 686-O0-NEXT: .cfi_offset %edi, -8 -; 686-O0-NEXT: # implicit-def: %eax +; 686-O0-NEXT: # implicit-def: $eax ; 686-O0-NEXT: movzbl var_7, %ecx ; 686-O0-NEXT: cmpb $0, var_7 ; 686-O0-NEXT: setne %dl Index: test/CodeGen/X86/pr32329.ll =================================================================== --- test/CodeGen/X86/pr32329.ll +++ test/CodeGen/X86/pr32329.ll @@ -78,7 +78,7 @@ ; X64-NEXT: imull %esi, %ecx ; X64-NEXT: addl $-1437483407, %ecx # imm = 0xAA51BE71 ; X64-NEXT: movl $9, %edx -; X64-NEXT: # kill: def %cl killed %cl killed %ecx +; X64-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NEXT: shlq %cl, %rdx ; X64-NEXT: movq %rdx, {{.*}}(%rip) ; X64-NEXT: cmpl %eax, %esi Index: test/CodeGen/X86/pr32345.ll =================================================================== --- test/CodeGen/X86/pr32345.ll +++ test/CodeGen/X86/pr32345.ll @@ -10,7 +10,7 @@ define void @foo() { ; X640-LABEL: foo: ; X640: # %bb.0: # %bb -; X640-NEXT: # implicit-def: %rax +; X640-NEXT: # implicit-def: $rax ; X640-NEXT: movzwl var_22, %ecx ; X640-NEXT: movzwl var_27, %edx ; X640-NEXT: xorl %edx, %ecx @@ -27,8 +27,8 @@ ; X640-NEXT: movzwl var_27, %ecx ; X640-NEXT: subl $16610, %ecx # imm = 0x40E2 ; X640-NEXT: movl %ecx, %ecx -; X640-NEXT: # kill: def %rcx killed %ecx -; X640-NEXT: # kill: def %cl killed %rcx +; X640-NEXT: # kill: def $rcx killed $ecx +; X640-NEXT: # kill: def $cl killed $rcx ; X640-NEXT: sarq %cl, %rsi ; X640-NEXT: movb %sil, %cl ; X640-NEXT: movb %cl, (%rax) @@ -49,12 +49,12 @@ ; 6860-NEXT: .cfi_offset %esi, -20 ; 6860-NEXT: .cfi_offset %edi, -16 ; 6860-NEXT: .cfi_offset %ebx, -12 -; 6860-NEXT: # implicit-def: %eax +; 6860-NEXT: # implicit-def: $eax ; 6860-NEXT: movw var_22, %cx ; 6860-NEXT: movzwl var_27, %edx ; 6860-NEXT: movw %dx, %si ; 6860-NEXT: xorw %si, %cx -; 6860-NEXT: # implicit-def: %edi +; 6860-NEXT: # implicit-def: $edi ; 6860-NEXT: movw %cx, %di ; 6860-NEXT: xorl %edx, %edi ; 6860-NEXT: movw %di, %cx @@ -65,7 +65,7 @@ ; 6860-NEXT: movzwl var_27, %edx ; 6860-NEXT: movw %dx, %si ; 6860-NEXT: xorw %si, %cx -; 6860-NEXT: # implicit-def: %edi +; 6860-NEXT: # implicit-def: $edi ; 6860-NEXT: movw %cx, %di ; 6860-NEXT: xorl %edx, %edi ; 6860-NEXT: movw %di, %cx @@ -104,7 +104,7 @@ ; X64-NEXT: movzwl %ax, %eax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) ; X64-NEXT: addl $-16610, %ecx # imm = 0xBF1E -; X64-NEXT: # kill: def %cl killed %cl killed %ecx +; X64-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NEXT: shrq %cl, %rax ; X64-NEXT: movb %al, (%rax) ; X64-NEXT: retq Index: test/CodeGen/X86/pr32484.ll =================================================================== --- test/CodeGen/X86/pr32484.ll +++ test/CodeGen/X86/pr32484.ll @@ -4,10 +4,10 @@ define void @foo() { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: -; CHECK-NEXT: # implicit-def: %rax +; CHECK-NEXT: # implicit-def: $rax ; CHECK-NEXT: jmpq *%rax ; CHECK-NEXT: .LBB0_1: -; CHECK-NEXT: # implicit-def: %rax +; CHECK-NEXT: # implicit-def: $rax ; CHECK-NEXT: xorps %xmm0, %xmm0 ; CHECK-NEXT: pcmpeqd %xmm1, 
%xmm1 ; CHECK-NEXT: movdqu %xmm1, (%rax) Index: test/CodeGen/X86/pr34653.ll =================================================================== --- test/CodeGen/X86/pr34653.ll +++ test/CodeGen/X86/pr34653.ll @@ -64,7 +64,7 @@ ; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0] ; CHECK-NEXT: vpermilpd {{.*#+}} xmm11 = xmm11[1,0] ; CHECK-NEXT: vpermilpd {{.*#+}} xmm13 = xmm13[1,0] -; CHECK-NEXT: # kill: def %ymm10 killed %ymm10 killed %zmm10 +; CHECK-NEXT: # kill: def $ymm10 killed $ymm10 killed $zmm10 ; CHECK-NEXT: vextractf128 $1, %ymm10, %xmm10 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps %xmm10, %xmm0 @@ -75,7 +75,7 @@ ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; CHECK-NEXT: # kill: def %ymm9 killed %ymm9 killed %zmm9 +; CHECK-NEXT: # kill: def $ymm9 killed $ymm9 killed $zmm9 ; CHECK-NEXT: vextractf128 $1, %ymm9, %xmm9 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps %xmm9, %xmm0 @@ -88,7 +88,7 @@ ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; CHECK-NEXT: # kill: def %ymm8 killed %ymm8 killed %zmm8 +; CHECK-NEXT: # kill: def $ymm8 killed $ymm8 killed $zmm8 ; CHECK-NEXT: vextractf128 $1, %ymm8, %xmm8 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps %xmm8, %xmm0 @@ -101,7 +101,7 @@ ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; CHECK-NEXT: # kill: def %ymm7 killed %ymm7 killed %zmm7 +; CHECK-NEXT: # kill: def $ymm7 killed $ymm7 killed $zmm7 ; CHECK-NEXT: vextractf128 $1, %ymm7, %xmm7 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: vmovaps %xmm7, %xmm0 Index: test/CodeGen/X86/pr35765.ll =================================================================== --- test/CodeGen/X86/pr35765.ll +++ test/CodeGen/X86/pr35765.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: movzwl {{.*}}(%rip), %ecx ; CHECK-NEXT: addl $-1398, %ecx # imm = 0xFA8A ; CHECK-NEXT: movl $4, %eax -; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx +; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx ; CHECK-NEXT: shll %cl, %eax ; CHECK-NEXT: movzwl {{.*}}(%rip), %ecx ; CHECK-NEXT: movzwl {{.*}}(%rip), %edx Index: test/CodeGen/X86/pre-coalesce.mir =================================================================== --- test/CodeGen/X86/pre-coalesce.mir +++ test/CodeGen/X86/pre-coalesce.mir @@ -83,11 +83,11 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - %0 = MOV64rm %rip, 1, %noreg, @b, %noreg :: (dereferenceable load 8 from @b) - %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0) - TEST8rr %12, %12, implicit-def %eflags - %11 = MOV32rm %rip, 1, %noreg, @a, %noreg :: (dereferenceable load 4 from @a) - JNE_1 %bb.1, implicit killed %eflags + %0 = MOV64rm $rip, 1, $noreg, @b, $noreg :: (dereferenceable load 8 from @b) + %12 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load 1 from %ir.t0) + TEST8rr %12, %12, implicit-def $eflags + %11 = MOV32rm $rip, 1, $noreg, @a, $noreg :: (dereferenceable load 4 from @a) + JNE_1 %bb.1, implicit killed $eflags bb.4: %10 = COPY %11 @@ -98,18 +98,18 @@ bb.2.while.body: %8 = MOVSX32rr8 %12 %10 = COPY %11 - %10 = SHL32ri %10, 5, implicit-def dead %eflags - %10 = ADD32rr %10, %11, implicit-def 
dead %eflags - %10 = ADD32rr %10, %8, implicit-def dead %eflags - MOV32mr %rip, 1, %noreg, @a, %noreg, %10 :: (store 4 into @a) - %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0) - TEST8rr %12, %12, implicit-def %eflags + %10 = SHL32ri %10, 5, implicit-def dead $eflags + %10 = ADD32rr %10, %11, implicit-def dead $eflags + %10 = ADD32rr %10, %8, implicit-def dead $eflags + MOV32mr $rip, 1, $noreg, @a, $noreg, %10 :: (store 4 into @a) + %12 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load 1 from %ir.t0) + TEST8rr %12, %12, implicit-def $eflags %11 = COPY %10 - JNE_1 %bb.2, implicit killed %eflags + JNE_1 %bb.2, implicit killed $eflags JMP_1 %bb.3 bb.3.while.end: - %eax = COPY %10 - RET 0, killed %eax + $eax = COPY %10 + RET 0, killed $eax ... Index: test/CodeGen/X86/prefer-avx256-mask-extend.ll =================================================================== --- test/CodeGen/X86/prefer-avx256-mask-extend.ll +++ test/CodeGen/X86/prefer-avx256-mask-extend.ll @@ -30,7 +30,7 @@ ; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX512F-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq %in = load <8 x i32>, <8 x i32>* %p Index: test/CodeGen/X86/prefer-avx256-mask-shuffle.ll =================================================================== --- test/CodeGen/X86/prefer-avx256-mask-shuffle.ll +++ test/CodeGen/X86/prefer-avx256-mask-shuffle.ll @@ -113,7 +113,7 @@ ; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm2 ; AVX512BW-NEXT: vptestmd %zmm2, %zmm2, %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq @@ -228,7 +228,7 @@ ; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0 ; AVX512BW-NEXT: vpmovw2m %zmm0, %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq %cmp = icmp eq <32 x i8> %a, zeroinitializer %b = shufflevector <32 x i1> %cmp, <32 x i1> undef, <32 x i32> Index: test/CodeGen/X86/prefer-avx256-popcnt.ll =================================================================== --- test/CodeGen/X86/prefer-avx256-popcnt.ll +++ test/CodeGen/X86/prefer-avx256-popcnt.ll @@ -26,7 +26,7 @@ ; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512F-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq %out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in) Index: test/CodeGen/X86/prefer-avx256-shift.ll =================================================================== --- test/CodeGen/X86/prefer-avx256-shift.ll +++ test/CodeGen/X86/prefer-avx256-shift.ll @@ -83,10 +83,10 @@ ; ; AVX512BWNOVL-LABEL: var_shl_v16i16: ; AVX512BWNOVL: # %bb.0: -; AVX512BWNOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BWNOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BWNOVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 -; AVX512BWNOVL-NEXT: # kill: 
def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BWNOVL-NEXT: retq %shift = shl <16 x i16> %a, %b ret <16 x i16> %shift @@ -141,7 +141,7 @@ ; AVX512BWNOVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BWNOVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 ; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BWNOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BWNOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BWNOVL-NEXT: vzeroupper ; AVX512BWNOVL-NEXT: retq %shift = shl <16 x i8> %a, %b @@ -227,10 +227,10 @@ ; ; AVX512BWNOVL-LABEL: var_lshr_v16i16: ; AVX512BWNOVL: # %bb.0: -; AVX512BWNOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BWNOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BWNOVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 -; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BWNOVL-NEXT: retq %shift = lshr <16 x i16> %a, %b ret <16 x i16> %shift @@ -286,7 +286,7 @@ ; AVX512BWNOVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BWNOVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 ; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BWNOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BWNOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BWNOVL-NEXT: vzeroupper ; AVX512BWNOVL-NEXT: retq %shift = lshr <16 x i8> %a, %b @@ -396,10 +396,10 @@ ; ; AVX512BWNOVL-LABEL: var_ashr_v16i16: ; AVX512BWNOVL: # %bb.0: -; AVX512BWNOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BWNOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BWNOVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BWNOVL-NEXT: retq %shift = ashr <16 x i16> %a, %b ret <16 x i16> %shift @@ -467,7 +467,7 @@ ; AVX512BWNOVL-NEXT: vpmovsxbw %xmm0, %ymm0 ; AVX512BWNOVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0 ; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BWNOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BWNOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BWNOVL-NEXT: vzeroupper ; AVX512BWNOVL-NEXT: retq %shift = ashr <16 x i8> %a, %b Index: test/CodeGen/X86/prefer-avx256-trunc.ll =================================================================== --- test/CodeGen/X86/prefer-avx256-trunc.ll +++ test/CodeGen/X86/prefer-avx256-trunc.ll @@ -28,9 +28,9 @@ ; ; AVX512BW-LABEL: testv16i16_trunc_v16i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: 
retq ; Index: test/CodeGen/X86/promote-vec3.ll =================================================================== --- test/CodeGen/X86/promote-vec3.ll +++ test/CodeGen/X86/promote-vec3.ll @@ -17,9 +17,9 @@ ; SSE3-NEXT: pextrw $0, %xmm0, %eax ; SSE3-NEXT: pextrw $1, %xmm0, %edx ; SSE3-NEXT: pextrw $2, %xmm0, %ecx -; SSE3-NEXT: # kill: def %ax killed %ax killed %eax -; SSE3-NEXT: # kill: def %dx killed %dx killed %edx -; SSE3-NEXT: # kill: def %cx killed %cx killed %ecx +; SSE3-NEXT: # kill: def $ax killed $ax killed $eax +; SSE3-NEXT: # kill: def $dx killed $dx killed $edx +; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx ; SSE3-NEXT: retl ; ; SSE41-LABEL: zext_i8: @@ -31,9 +31,9 @@ ; SSE41-NEXT: movd %xmm0, %eax ; SSE41-NEXT: pextrw $2, %xmm0, %edx ; SSE41-NEXT: pextrw $4, %xmm0, %ecx -; SSE41-NEXT: # kill: def %ax killed %ax killed %eax -; SSE41-NEXT: # kill: def %dx killed %dx killed %edx -; SSE41-NEXT: # kill: def %cx killed %cx killed %ecx +; SSE41-NEXT: # kill: def $ax killed $ax killed $eax +; SSE41-NEXT: # kill: def $dx killed $dx killed $edx +; SSE41-NEXT: # kill: def $cx killed $cx killed $ecx ; SSE41-NEXT: retl ; ; AVX-32-LABEL: zext_i8: @@ -45,9 +45,9 @@ ; AVX-32-NEXT: vmovd %xmm0, %eax ; AVX-32-NEXT: vpextrw $2, %xmm0, %edx ; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx -; AVX-32-NEXT: # kill: def %ax killed %ax killed %eax -; AVX-32-NEXT: # kill: def %dx killed %dx killed %edx -; AVX-32-NEXT: # kill: def %cx killed %cx killed %ecx +; AVX-32-NEXT: # kill: def $ax killed $ax killed $eax +; AVX-32-NEXT: # kill: def $dx killed $dx killed $edx +; AVX-32-NEXT: # kill: def $cx killed $cx killed $ecx ; AVX-32-NEXT: retl ; ; AVX-64-LABEL: zext_i8: @@ -59,9 +59,9 @@ ; AVX-64-NEXT: vmovd %xmm0, %eax ; AVX-64-NEXT: vpextrw $2, %xmm0, %edx ; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx -; AVX-64-NEXT: # kill: def %ax killed %ax killed %eax -; AVX-64-NEXT: # kill: def %dx killed %dx killed %edx -; AVX-64-NEXT: # kill: def %cx killed %cx killed %ecx +; AVX-64-NEXT: # kill: def $ax killed $ax killed $eax +; AVX-64-NEXT: # kill: def $dx killed $dx killed $edx +; AVX-64-NEXT: # kill: def $cx killed $cx killed $ecx ; AVX-64-NEXT: retq %2 = zext <3 x i8> %0 to <3 x i16> ret <3 x i16> %2 @@ -83,9 +83,9 @@ ; SSE3-NEXT: movd %xmm0, %eax ; SSE3-NEXT: pextrw $2, %xmm0, %edx ; SSE3-NEXT: pextrw $4, %xmm0, %ecx -; SSE3-NEXT: # kill: def %ax killed %ax killed %eax -; SSE3-NEXT: # kill: def %dx killed %dx killed %edx -; SSE3-NEXT: # kill: def %cx killed %cx killed %ecx +; SSE3-NEXT: # kill: def $ax killed $ax killed $eax +; SSE3-NEXT: # kill: def $dx killed $dx killed $edx +; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx ; SSE3-NEXT: retl ; ; SSE41-LABEL: sext_i8: @@ -98,9 +98,9 @@ ; SSE41-NEXT: movd %xmm0, %eax ; SSE41-NEXT: pextrw $2, %xmm0, %edx ; SSE41-NEXT: pextrw $4, %xmm0, %ecx -; SSE41-NEXT: # kill: def %ax killed %ax killed %eax -; SSE41-NEXT: # kill: def %dx killed %dx killed %edx -; SSE41-NEXT: # kill: def %cx killed %cx killed %ecx +; SSE41-NEXT: # kill: def $ax killed $ax killed $eax +; SSE41-NEXT: # kill: def $dx killed $dx killed $edx +; SSE41-NEXT: # kill: def $cx killed $cx killed $ecx ; SSE41-NEXT: retl ; ; AVX-32-LABEL: sext_i8: @@ -113,9 +113,9 @@ ; AVX-32-NEXT: vmovd %xmm0, %eax ; AVX-32-NEXT: vpextrw $2, %xmm0, %edx ; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx -; AVX-32-NEXT: # kill: def %ax killed %ax killed %eax -; AVX-32-NEXT: # kill: def %dx killed %dx killed %edx -; AVX-32-NEXT: # kill: def %cx killed %cx killed %ecx +; AVX-32-NEXT: # kill: def $ax killed $ax killed $eax +; AVX-32-NEXT: # 
kill: def $dx killed $dx killed $edx +; AVX-32-NEXT: # kill: def $cx killed $cx killed $ecx ; AVX-32-NEXT: retl ; ; AVX-64-LABEL: sext_i8: @@ -128,9 +128,9 @@ ; AVX-64-NEXT: vmovd %xmm0, %eax ; AVX-64-NEXT: vpextrw $2, %xmm0, %edx ; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx -; AVX-64-NEXT: # kill: def %ax killed %ax killed %eax -; AVX-64-NEXT: # kill: def %dx killed %dx killed %edx -; AVX-64-NEXT: # kill: def %cx killed %cx killed %ecx +; AVX-64-NEXT: # kill: def $ax killed $ax killed $eax +; AVX-64-NEXT: # kill: def $dx killed $dx killed $edx +; AVX-64-NEXT: # kill: def $cx killed $cx killed $ecx ; AVX-64-NEXT: retq %2 = sext <3 x i8> %0 to <3 x i16> ret <3 x i16> %2 Index: test/CodeGen/X86/psubus.ll =================================================================== --- test/CodeGen/X86/psubus.ll +++ test/CodeGen/X86/psubus.ll @@ -1896,7 +1896,7 @@ ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -1922,7 +1922,7 @@ ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; Index: test/CodeGen/X86/rdpid-schedule.ll =================================================================== --- test/CodeGen/X86/rdpid-schedule.ll +++ test/CodeGen/X86/rdpid-schedule.ll @@ -6,13 +6,13 @@ ; GENERIC-LABEL: test_rdpid: ; GENERIC: # %bb.0: ; GENERIC-NEXT: rdpid %rax # sched: [100:0.33] -; GENERIC-NEXT: # kill: def %eax killed %eax killed %rax +; GENERIC-NEXT: # kill: def $eax killed $eax killed $rax ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; ICELAKE-LABEL: test_rdpid: ; ICELAKE: # %bb.0: ; ICELAKE-NEXT: rdpid %rax # sched: [100:0.25] -; ICELAKE-NEXT: # kill: def %eax killed %eax killed %rax +; ICELAKE-NEXT: # kill: def $eax killed $eax killed $rax ; ICELAKE-NEXT: retq # sched: [7:1.00] %1 = tail call i32 @llvm.x86.rdpid() ret i32 %1 Index: test/CodeGen/X86/rdpid.ll =================================================================== --- test/CodeGen/X86/rdpid.ll +++ test/CodeGen/X86/rdpid.ll @@ -6,7 +6,7 @@ ; X86-64-LABEL: test_builtin_rdpid: ; X86-64: # %bb.0: ; X86-64-NEXT: rdpid %rax -; X86-64-NEXT: # kill: def %eax killed %eax killed %rax +; X86-64-NEXT: # kill: def $eax killed $eax killed $rax ; X86-64-NEXT: retq ; ; X86-LABEL: test_builtin_rdpid: Index: test/CodeGen/X86/reduce-trunc-shl.ll =================================================================== --- test/CodeGen/X86/reduce-trunc-shl.ll +++ test/CodeGen/X86/reduce-trunc-shl.ll @@ -43,7 +43,7 @@ ; AVX2-NEXT: vpslld $17, %ymm0, %ymm0 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq %shl = shl <8 x i32> %a, Index: 
test/CodeGen/X86/remat-phys-dead.ll =================================================================== --- test/CodeGen/X86/remat-phys-dead.ll +++ test/CodeGen/X86/remat-phys-dead.ll @@ -9,7 +9,7 @@ define i8 @test_remat() { ret i8 0 ; CHECK: REGISTER COALESCING -; CHECK: Remat: dead %eax = MOV32r0 implicit-def dead %eflags, implicit-def %al +; CHECK: Remat: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al } ; On the other hand, if it's already the correct width, we really shouldn't be @@ -18,6 +18,6 @@ define i32 @test_remat32() { ret i32 0 ; CHECK: REGISTER COALESCING -; CHECK: Remat: %eax = MOV32r0 implicit-def dead %eflags +; CHECK: Remat: $eax = MOV32r0 implicit-def dead $eflags } Index: test/CodeGen/X86/sar_fold64.ll =================================================================== --- test/CodeGen/X86/sar_fold64.ll +++ test/CodeGen/X86/sar_fold64.ll @@ -6,7 +6,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movswq %di, %rax ; CHECK-NEXT: addl %eax, %eax -; CHECK-NEXT: # kill: def %eax killed %eax killed %rax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-NEXT: retq %1 = shl i64 %a, 48 %2 = ashr exact i64 %1, 47 @@ -19,7 +19,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movswq %di, %rax ; CHECK-NEXT: shrq %rax -; CHECK-NEXT: # kill: def %eax killed %eax killed %rax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-NEXT: retq %1 = shl i64 %a, 48 %2 = ashr exact i64 %1, 49 @@ -32,7 +32,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movsbq %dil, %rax ; CHECK-NEXT: addl %eax, %eax -; CHECK-NEXT: # kill: def %eax killed %eax killed %rax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-NEXT: retq %1 = shl i64 %a, 56 %2 = ashr exact i64 %1, 55 @@ -45,7 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movsbq %dil, %rax ; CHECK-NEXT: shrq %rax -; CHECK-NEXT: # kill: def %eax killed %eax killed %rax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax ; CHECK-NEXT: retq %1 = shl i64 %a, 56 %2 = ashr exact i64 %1, 57 Index: test/CodeGen/X86/scalar_widen_div.ll =================================================================== --- test/CodeGen/X86/scalar_widen_div.ll +++ test/CodeGen/X86/scalar_widen_div.ll @@ -81,15 +81,15 @@ ; CHECK-LABEL: test_uchar_div: ; CHECK: # %bb.0: ; CHECK-NEXT: movzbl %dil, %eax -; CHECK-NEXT: # kill: def %eax killed %eax def %ax +; CHECK-NEXT: # kill: def $eax killed $eax def $ax ; CHECK-NEXT: divb %cl ; CHECK-NEXT: movl %eax, %edi ; CHECK-NEXT: movzbl %sil, %eax -; CHECK-NEXT: # kill: def %eax killed %eax def %ax +; CHECK-NEXT: # kill: def $eax killed $eax def $ax ; CHECK-NEXT: divb %r8b ; CHECK-NEXT: movl %eax, %esi ; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: # kill: def %eax killed %eax def %ax +; CHECK-NEXT: # kill: def $eax killed $eax def $ax ; CHECK-NEXT: divb %r9b ; CHECK-NEXT: movl %eax, %ecx ; CHECK-NEXT: movl %edi, %eax @@ -105,34 +105,34 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: pextrw $4, %xmm0, %eax ; CHECK-NEXT: pextrw $4, %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %eax, %r8d ; CHECK-NEXT: pextrw $3, %xmm0, %eax ; CHECK-NEXT: pextrw $3, %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %eax, %r9d ; CHECK-NEXT: pextrw $2, %xmm0, %eax ; CHECK-NEXT: pextrw $2, %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed 
$ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %eax, %edi ; CHECK-NEXT: movd %xmm0, %eax ; CHECK-NEXT: movd %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %eax, %ecx ; CHECK-NEXT: pextrw $1, %xmm0, %eax ; CHECK-NEXT: pextrw $1, %xmm1, %esi -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %si -; CHECK-NEXT: # kill: def %ax killed %ax def %eax +; CHECK-NEXT: # kill: def $ax killed $ax def $eax ; CHECK-NEXT: movd %ecx, %xmm0 ; CHECK-NEXT: pinsrw $1, %eax, %xmm0 ; CHECK-NEXT: pinsrw $2, %edi, %xmm0 @@ -294,34 +294,34 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: pextrw $4, %xmm0, %eax ; CHECK-NEXT: pextrw $4, %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %edx, %r8d ; CHECK-NEXT: pextrw $3, %xmm0, %eax ; CHECK-NEXT: pextrw $3, %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %edx, %r9d ; CHECK-NEXT: pextrw $2, %xmm0, %eax ; CHECK-NEXT: pextrw $2, %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %edx, %edi ; CHECK-NEXT: movd %xmm0, %eax ; CHECK-NEXT: movd %xmm1, %ecx -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %cx ; CHECK-NEXT: movl %edx, %ecx ; CHECK-NEXT: pextrw $1, %xmm0, %eax ; CHECK-NEXT: pextrw $1, %xmm1, %esi -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: cwtd ; CHECK-NEXT: idivw %si -; CHECK-NEXT: # kill: def %dx killed %dx def %edx +; CHECK-NEXT: # kill: def $dx killed $dx def $edx ; CHECK-NEXT: movd %ecx, %xmm0 ; CHECK-NEXT: pinsrw $1, %edx, %xmm0 ; CHECK-NEXT: pinsrw $2, %edi, %xmm0 Index: test/CodeGen/X86/scavenger.mir =================================================================== --- test/CodeGen/X86/scavenger.mir +++ test/CodeGen/X86/scavenger.mir @@ -5,10 +5,10 @@ tracksRegLiveness: true body: | bb.0: - ; CHECK: [[REG0:%e[a-z]+]] = MOV32ri 42 - ; CHECK: %ebp = COPY killed [[REG0]] + ; CHECK: [[REG0:\$e[a-z]+]] = MOV32ri 42 + ; CHECK: $ebp = COPY killed [[REG0]] %0 : gr32 = MOV32ri 42 - %ebp = COPY %0 + $ebp = COPY %0 ... 
 ---
 # CHECK-LABEL: name: func2
@@ -16,27 +16,27 @@
 tracksRegLiveness: true
 body: |
 bb.0:
- ; CHECK-NOT: %eax = MOV32ri 42
- ; CHECK: [[REG0:%e[a-z]+]] = MOV32ri 42
- ; CHECK: %ebp = COPY killed [[REG0]]
- %eax = MOV32ri 13
+ ; CHECK-NOT: $eax = MOV32ri 42
+ ; CHECK: [[REG0:\$e[a-z]+]] = MOV32ri 42
+ ; CHECK: $ebp = COPY killed [[REG0]]
+ $eax = MOV32ri 13
 %0 : gr32 = MOV32ri 42
- %ebp = COPY %0
+ $ebp = COPY %0
- ; CHECK: [[REG1:%e[a-z]+]] = MOV32ri 23
- ; CHECK: [[REG2:%e[a-z]+]] = MOV32ri 7
- ; CHECK: [[REG1]] = ADD32ri8 [[REG1]], 5, implicit-def dead %eflags
+ ; CHECK: [[REG1:\$e[a-z]+]] = MOV32ri 23
+ ; CHECK: [[REG2:\$e[a-z]+]] = MOV32ri 7
+ ; CHECK: [[REG1]] = ADD32ri8 [[REG1]], 5, implicit-def dead $eflags
 %1 : gr32 = MOV32ri 23
 %2 : gr32 = MOV32ri 7
- %1 = ADD32ri8 %1, 5, implicit-def dead %eflags
+ %1 = ADD32ri8 %1, 5, implicit-def dead $eflags
- NOOP implicit %ebp
+ NOOP implicit $ebp
 ; CHECK: NOOP implicit killed [[REG2]]
 ; CHECK: NOOP implicit killed [[REG1]]
 NOOP implicit %2
 NOOP implicit %1
- RETQ %eax
+ RETQ $eax
 ...
 ---
 # CHECK-LABEL: name: func3
@@ -44,6 +44,6 @@
 tracksRegLiveness: true
 body: |
 bb.0:
- ; CHECK dead {{%e[a-z]+}} = MOV32ri 42
+ ; CHECK dead {{\$e[a-z]+}} = MOV32ri 42
 dead %0 : gr32 = MOV32ri 42
 ...
Index: test/CodeGen/X86/schedule-x86-64-shld.ll
===================================================================
--- test/CodeGen/X86/schedule-x86-64-shld.ll
+++ test/CodeGen/X86/schedule-x86-64-shld.ll
@@ -166,7 +166,7 @@
 ; BTVER2-NEXT: shlq %cl, %rdi # sched: [1:0.50]
 ; BTVER2-NEXT: movl $64, %ecx # sched: [1:0.50]
 ; BTVER2-NEXT: subl %edx, %ecx # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %cl killed %cl killed %ecx
+; BTVER2-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BTVER2-NEXT: shrq %cl, %rsi # sched: [1:0.50]
 ; BTVER2-NEXT: orq %rdi, %rsi # sched: [1:0.50]
 ; BTVER2-NEXT: movq %rsi, %rax # sched: [1:0.50]
@@ -178,7 +178,7 @@
 ; BDVER1-NEXT: shlq %cl, %rdi
 ; BDVER1-NEXT: movl $64, %ecx
 ; BDVER1-NEXT: subl %edx, %ecx
-; BDVER1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BDVER1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BDVER1-NEXT: shrq %cl, %rsi
 ; BDVER1-NEXT: orq %rdi, %rsi
 ; BDVER1-NEXT: movq %rsi, %rax
@@ -240,7 +240,7 @@
 ; BTVER2-NEXT: shrq %cl, %rdi # sched: [1:0.50]
 ; BTVER2-NEXT: movl $64, %ecx # sched: [1:0.50]
 ; BTVER2-NEXT: subl %edx, %ecx # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %cl killed %cl killed %ecx
+; BTVER2-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BTVER2-NEXT: shlq %cl, %rsi # sched: [1:0.50]
 ; BTVER2-NEXT: orq %rdi, %rsi # sched: [1:0.50]
 ; BTVER2-NEXT: movq %rsi, %rax # sched: [1:0.50]
@@ -252,7 +252,7 @@
 ; BDVER1-NEXT: shrq %cl, %rdi
 ; BDVER1-NEXT: movl $64, %ecx
 ; BDVER1-NEXT: subl %edx, %ecx
-; BDVER1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BDVER1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BDVER1-NEXT: shlq %cl, %rsi
 ; BDVER1-NEXT: orq %rdi, %rsi
 ; BDVER1-NEXT: movq %rsi, %rax
@@ -314,7 +314,7 @@
 ; BTVER2-NEXT: shlq %cl, %rax # sched: [1:0.50]
 ; BTVER2-NEXT: movl $64, %ecx # sched: [1:0.50]
 ; BTVER2-NEXT: subl %esi, %ecx # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %cl killed %cl killed %ecx
+; BTVER2-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BTVER2-NEXT: shrq %cl, %rdi # sched: [1:0.50]
 ; BTVER2-NEXT: orq %rax, %rdi # sched: [1:0.50]
 ; BTVER2-NEXT: movq %rdi, {{.*}}(%rip) # sched: [1:1.00]
@@ -327,7 +327,7 @@
 ; BDVER1-NEXT: shlq %cl, %rax
 ; BDVER1-NEXT: movl $64, %ecx
 ; BDVER1-NEXT: subl %esi, %ecx
-; BDVER1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BDVER1-NEXT: # kill: def $cl killed $cl killed $ecx
 ; BDVER1-NEXT: shrq %cl, %rdi
 ; BDVER1-NEXT: orq %rax, %rdi
 ; BDVER1-NEXT: movq %rdi, {{.*}}(%rip)
Index: test/CodeGen/X86/schedule-x86_64.ll
===================================================================
--- test/CodeGen/X86/schedule-x86_64.ll
+++ test/CodeGen/X86/schedule-x86_64.ll
@@ -1948,7 +1948,7 @@
 ; GENERIC-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
 ; GENERIC-NEXT: #NO_APP
 ; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsf16:
@@ -1958,7 +1958,7 @@
 ; ATOM-NEXT: bsfw (%rsi), %cx # sched: [16:8.00]
 ; ATOM-NEXT: #NO_APP
 ; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: # kill: def $ax killed $ax killed $eax
 ; ATOM-NEXT: retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsf16:
@@ -1968,7 +1968,7 @@
 ; SLM-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
 ; SLM-NEXT: #NO_APP
 ; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
 ; SLM-NEXT: retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsf16:
@@ -1978,7 +1978,7 @@
 ; SANDY-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
 ; SANDY-NEXT: #NO_APP
 ; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
 ; SANDY-NEXT: retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsf16:
@@ -1988,7 +1988,7 @@
 ; HASWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
 ; HASWELL-NEXT: #NO_APP
 ; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_bsf16:
@@ -1998,7 +1998,7 @@
 ; BROADWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
 ; BROADWELL-NEXT: #NO_APP
 ; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsf16:
@@ -2008,7 +2008,7 @@
 ; SKYLAKE-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
 ; SKYLAKE-NEXT: #NO_APP
 ; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsf16:
@@ -2018,7 +2018,7 @@
 ; SKX-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
 ; SKX-NEXT: #NO_APP
 ; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKX-NEXT: retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsf16:
@@ -2028,7 +2028,7 @@
 ; BTVER2-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
 ; BTVER2-NEXT: #NO_APP
 ; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsf16:
@@ -2038,7 +2038,7 @@
 ; ZNVER1-NEXT: bsfw (%rsi), %cx # sched: [7:0.50]
 ; ZNVER1-NEXT: #NO_APP
 ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call { i16, i16 } asm sideeffect "bsf $2, $0 \0A\09 bsf $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
 %2 = extractvalue { i16, i16 } %1, 0
@@ -2247,7 +2247,7 @@
 ; GENERIC-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
 ; GENERIC-NEXT: #NO_APP
 ; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsr16:
@@ -2257,7 +2257,7 @@
 ; ATOM-NEXT: bsrw (%rsi), %cx # sched: [16:8.00]
 ; ATOM-NEXT: #NO_APP
 ; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: # kill: def $ax killed $ax killed $eax
 ; ATOM-NEXT: retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsr16:
@@ -2267,7 +2267,7 @@
 ; SLM-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
 ; SLM-NEXT: #NO_APP
 ; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
 ; SLM-NEXT: retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsr16:
@@ -2277,7 +2277,7 @@
 ; SANDY-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
 ; SANDY-NEXT: #NO_APP
 ; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
 ; SANDY-NEXT: retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsr16:
@@ -2287,7 +2287,7 @@
 ; HASWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
 ; HASWELL-NEXT: #NO_APP
 ; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_bsr16:
@@ -2297,7 +2297,7 @@
 ; BROADWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
 ; BROADWELL-NEXT: #NO_APP
 ; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsr16:
@@ -2307,7 +2307,7 @@
 ; SKYLAKE-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
 ; SKYLAKE-NEXT: #NO_APP
 ; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsr16:
@@ -2317,7 +2317,7 @@
 ; SKX-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
 ; SKX-NEXT: #NO_APP
 ; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKX-NEXT: retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsr16:
@@ -2327,7 +2327,7 @@
 ; BTVER2-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
 ; BTVER2-NEXT: #NO_APP
 ; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsr16:
@@ -2337,7 +2337,7 @@
 ; ZNVER1-NEXT: bsrw (%rsi), %cx # sched: [7:0.50]
 ; ZNVER1-NEXT: #NO_APP
 ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call { i16, i16 } asm sideeffect "bsr $2, $0 \0A\09 bsr $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
 %2 = extractvalue { i16, i16 } %1, 0
Index: test/CodeGen/X86/select.ll
===================================================================
--- test/CodeGen/X86/select.ll
+++ test/CodeGen/X86/select.ll
@@ -145,7 +145,7 @@
 ; MCU-NEXT: fucompp
 ; MCU-NEXT: fnstsw %ax
 ; MCU-NEXT: xorl %edx, %edx
-; MCU-NEXT: # kill: def %ah killed %ah killed %ax
+; MCU-NEXT: # kill: def $ah killed $ah killed $ax
 ; MCU-NEXT: sahf
 ; MCU-NEXT: seta %dl
 ; MCU-NEXT: movb (%ecx,%edx,4), %al
@@ -798,14 +798,14 @@
 ; GENERIC: ## %bb.0: ## %entry
 ; GENERIC-NEXT: negw %di
 ; GENERIC-NEXT: sbbl %eax, %eax
-; GENERIC-NEXT: ## kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: ## kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT: retq
 ;
 ; ATOM-LABEL: test17:
 ; ATOM: ## %bb.0: ## %entry
 ; ATOM-NEXT: negw %di
 ; ATOM-NEXT: sbbl %eax, %eax
-; ATOM-NEXT: ## kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: ## kill: def $ax killed $ax killed $eax
 ; ATOM-NEXT: nop
 ; ATOM-NEXT: nop
 ; ATOM-NEXT: nop
@@ -816,7 +816,7 @@
 ; MCU: # %bb.0: # %entry
 ; MCU-NEXT: negw %ax
 ; MCU-NEXT: sbbl %eax, %eax
-; MCU-NEXT: # kill: def %ax killed %ax killed %eax
+; MCU-NEXT: # kill: def $ax killed $ax killed $eax
 ; MCU-NEXT: retl
 entry:
 %cmp = icmp ne i16 %x, 0
@@ -1027,7 +1027,7 @@
 ; MCU-NEXT: cmpl %eax, %ecx
 ; MCU-NEXT: fucom %st(0)
 ; MCU-NEXT: fnstsw %ax
-; MCU-NEXT: # kill: def %ah killed %ah killed %ax
+; MCU-NEXT: # kill: def $ah killed $ah killed $ax
 ; MCU-NEXT: sahf
 ; MCU-NEXT: jp .LBB24_4
 ; MCU-NEXT: # %bb.5: # %CF244
@@ -1073,7 +1073,7 @@
 ; MCU-NEXT: negl %edx
 ; MCU-NEXT: andl $43, %edx
 ; MCU-NEXT: xorl %edx, %eax
-; MCU-NEXT: # kill: def %ax killed %ax killed %eax
+; MCU-NEXT: # kill: def $ax killed $ax killed $eax
 ; MCU-NEXT: retl
 entry:
 %and = and i8 %cond, 1
Index: test/CodeGen/X86/select_const.ll
===================================================================
--- test/CodeGen/X86/select_const.ll
+++ test/CodeGen/X86/select_const.ll
@@ -74,7 +74,7 @@
 define i32 @select_0_or_neg1(i1 %cond) {
 ; CHECK-LABEL: select_0_or_neg1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: andl $1, %edi
 ; CHECK-NEXT: leal -1(%rdi), %eax
 ; CHECK-NEXT: retq
@@ -85,7 +85,7 @@
 define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_0_or_neg1_zeroext:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: leal -1(%rdi), %eax
 ; CHECK-NEXT: retq
 %sel = select i1 %cond, i32 0, i32 -1
@@ -139,7 +139,7 @@
 define i32 @select_Cplus1_C(i1 %cond) {
 ; CHECK-LABEL: select_Cplus1_C:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: andl $1, %edi
 ; CHECK-NEXT: leal 41(%rdi), %eax
 ; CHECK-NEXT: retq
@@ -150,7 +150,7 @@
 define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_Cplus1_C_zeroext:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT: leal 41(%rdi), %eax
 ; CHECK-NEXT: retq
 %sel = select i1 %cond, i32 42, i32 41
@@ -287,7 +287,7 @@
 ; CHECK-NEXT: cmpl $43, %edi
 ; CHECK-NEXT: setl %al
 ; CHECK-NEXT: leal -1(,%rax,4), %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %cmp = icmp sgt i32 %x, 42
 %sel = select i1 %cmp, i16 -1, i16 3
@@ -344,7 +344,7 @@
 ; CHECK-NEXT: movzbl %dil, %eax
 ; CHECK-NEXT: shll $6, %eax
 ; CHECK-NEXT: orl $7, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT: retq
 %sel = select i1 %cond, i16 7, i16 71
 ret i16 %sel
Index: test/CodeGen/X86/setcc-lowering.ll
===================================================================
--- test/CodeGen/X86/setcc-lowering.ll
+++ test/CodeGen/X86/setcc-lowering.ll
@@ -23,7 +23,7 @@
 ;
 ; KNL-32-LABEL: pr25080:
 ; KNL-32: # %bb.0: # %entry
-; KNL-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; KNL-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [8388607,8388607,8388607,8388607,8388607,8388607,8388607,8388607]
 ; KNL-32-NEXT: vptestnmd %zmm1, %zmm0, %k0
 ; KNL-32-NEXT: movb $15, %al
@@ -31,7 +31,7 @@
 ; KNL-32-NEXT: korw %k1, %k0, %k1
 ; KNL-32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; KNL-32-NEXT: retl
 entry:
 %0 = trunc <8 x i32> %a to <8 x i23>
Index: test/CodeGen/X86/sext-i1.ll
===================================================================
--- test/CodeGen/X86/sext-i1.ll
+++ test/CodeGen/X86/sext-i1.ll
@@ -124,7 +124,7 @@
 ;
 ; X64-LABEL: select_0_or_1s:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: andl $1, %edi
 ; X64-NEXT: leal -1(%rdi), %eax
 ; X64-NEXT: retq
@@ -144,7 +144,7 @@
 ;
 ; X64-LABEL: select_0_or_1s_zeroext:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: leal -1(%rdi), %eax
 ; X64-NEXT: retq
 %not = xor i1 %cond, 1
Index: test/CodeGen/X86/shift-combine.ll
===================================================================
--- test/CodeGen/X86/shift-combine.ll
+++ test/CodeGen/X86/shift-combine.ll
@@ -14,7 +14,7 @@
 ;
 ; X64-LABEL: test_lshr_and:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: shrl $2, %edi
 ; X64-NEXT: andl $3, %edi
 ; X64-NEXT: movl array(,%rdi,4), %eax
@@ -102,7 +102,7 @@
 ;
 ; X64-LABEL: test_exact4:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: subl %edi, %esi
 ; X64-NEXT: shrl $3, %esi
 ; X64-NEXT: leaq (%rdx,%rsi,4), %rax
@@ -124,7 +124,7 @@
 ;
 ; X64-LABEL: test_exact5:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: subl %edi, %esi
 ; X64-NEXT: shrl $3, %esi
 ; X64-NEXT: leaq (%rdx,%rsi,4), %rax
@@ -145,7 +145,7 @@
 ;
 ; X64-LABEL: test_exact6:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
 ; X64-NEXT: subl %edi, %esi
 ; X64-NEXT: leaq (%rsi,%rdx), %rax
 ; X64-NEXT: retq
Index: test/CodeGen/X86/shift-double.ll
===================================================================
--- test/CodeGen/X86/shift-double.ll
+++ test/CodeGen/X86/shift-double.ll
@@ -278,7 +278,7 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: def %cl killed %cl killed %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
 ; X86-NEXT: shldl %cl, %edx, %eax
 ; X86-NEXT: retl
 ;
@@ -304,7 +304,7 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: def %cl killed %cl killed %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
 ; X86-NEXT: shrdl %cl, %edx, %eax
 ; X86-NEXT: retl
 ;
Index: test/CodeGen/X86/shrink-compare.ll
===================================================================
--- test/CodeGen/X86/shrink-compare.ll
+++ test/CodeGen/X86/shrink-compare.ll
@@ -72,11 +72,11 @@
 ; CHECK-NEXT: testl %esi, %esi
 ; CHECK-NEXT: je .LBB3_1
 ; CHECK-NEXT: # %bb.2: # %lor.end
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 ; CHECK-NEXT: .LBB3_1: # %lor.rhs
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
 entry:
 %tobool = icmp ne i32 %b, 0
Index: test/CodeGen/X86/shrink_wrap_dbg_value.mir
===================================================================
--- test/CodeGen/X86/shrink_wrap_dbg_value.mir
+++ test/CodeGen/X86/shrink_wrap_dbg_value.mir
@@ -102,8 +102,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
- - { reg: '%ecx', virtual-reg: '' }
- - { reg: '%edx', virtual-reg: '' }
+ - { reg: '$ecx', virtual-reg: '' }
+ - { reg: '$edx', virtual-reg: '' }
 frameInfo:
 isFrameAddressTaken: false
 isReturnAddressTaken: false
@@ -134,49 +134,49 @@
 body: |
 bb.0.entry:
 successors: %bb.4(0x40000000), %bb.1(0x40000000)
- liveins: %ecx, %edx
+ liveins: $ecx, $edx
- DBG_VALUE debug-use %edx, debug-use %noreg, !15, !DIExpression(), debug-location !25
- DBG_VALUE debug-use %ecx, debug-use %noreg, !16, !DIExpression(), debug-location !26
- %eax = COPY %ecx
+ DBG_VALUE debug-use $edx, debug-use $noreg, !15, !DIExpression(), debug-location !25
+ DBG_VALUE debug-use $ecx, debug-use $noreg, !16, !DIExpression(), debug-location !26
+ $eax = COPY $ecx
 DBG_VALUE %fixed-stack.0, 0, !16, !DIExpression(), debug-location !26
 DBG_VALUE %fixed-stack.1, 0, !15, !DIExpression(), debug-location !25
- CMP32rr %eax, killed %edx, implicit-def %eflags, debug-location !27
- JL_1 %bb.4, implicit killed %eflags, debug-location !29
+ CMP32rr $eax, killed $edx, implicit-def $eflags, debug-location !27
+ JL_1 %bb.4, implicit killed $eflags, debug-location !29
 JMP_1 %bb.1, debug-location !29
 bb.1.for.cond.preheader:
 successors: %bb.2(0x80000000)
- %esi = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (load 4 from %fixed-stack.0)
- DBG_VALUE debug-use %esi, debug-use %noreg, !13, !DIExpression(), debug-location !19
- %edi = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (load 4 from %fixed-stack.1)
- DBG_VALUE debug-use %edi, debug-use %noreg, !14, !DIExpression(), debug-location !20
- %edi = DEC32r killed %edi, implicit-def dead %eflags, debug-location !30
- %ebx = LEA32r %fixed-stack.1, 1, %noreg, 0, %noreg
+ $esi = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (load 4 from %fixed-stack.0)
+ DBG_VALUE debug-use $esi, debug-use $noreg, !13, !DIExpression(), debug-location !19
+ $edi = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (load 4 from %fixed-stack.1)
+ DBG_VALUE debug-use $edi, debug-use $noreg, !14, !DIExpression(), debug-location !20
+ $edi = DEC32r killed $edi, implicit-def dead $eflags, debug-location !30
+ $ebx = LEA32r %fixed-stack.1, 1, $noreg, 0, $noreg
 bb.2.for.cond:
 successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- liveins: %ebx, %edi, %esi
+ liveins: $ebx, $edi, $esi
- ADJCALLSTACKDOWN32 4, 0, 4, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp, debug-location !33
+ ADJCALLSTACKDOWN32 4, 0, 4, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp, debug-location !33
 DBG_VALUE %fixed-stack.1, 0, !14, !DIExpression(), debug-location !20
- PUSH32r %ebx, implicit-def %esp, implicit %esp, debug-location !33
+ PUSH32r $ebx, implicit-def $esp, implicit $esp, debug-location !33
 CFI_INSTRUCTION adjust_cfa_offset 4, debug-location !33
- CALLpcrel32 @doSomething, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp, implicit-def %eax, debug-location !33
- ADJCALLSTACKUP32 4, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp, debug-location !33
- %edi = INC32r killed %edi, implicit-def dead %eflags, debug-location !30
- CMP32rr %edi, %esi, implicit-def %eflags, debug-location !30
- JL_1 %bb.2, implicit killed %eflags, debug-location !34
+ CALLpcrel32 @doSomething, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def $eax, debug-location !33
+ ADJCALLSTACKUP32 4, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp, debug-location !33
+ $edi = INC32r killed $edi, implicit-def dead $eflags, debug-location !30
+ CMP32rr $edi, $esi, implicit-def $eflags, debug-location !30
+ JL_1 %bb.2, implicit killed $eflags, debug-location !34
 bb.3:
 successors: %bb.4(0x80000000)
- liveins: %eax
+ liveins: $eax
 bb.4.return:
- liveins: %eax
+ liveins: $eax
- RET 8, %eax, debug-location !37
+ RET 8, $eax, debug-location !37
 ...
Index: test/CodeGen/X86/shuffle-vs-trunc-256.ll
===================================================================
--- test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -824,7 +824,7 @@
 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -834,7 +834,7 @@
 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
@@ -844,7 +844,7 @@
 ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512VL-NEXT: vzeroupper
 ; AVX512VL-NEXT: retq
 ;
@@ -854,7 +854,7 @@
 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
 ;
@@ -865,7 +865,7 @@
 ; AVX512BWVL-NEXT: kmovd %eax, %k1
 ; AVX512BWVL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
 ; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BWVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
 ;
Index: test/CodeGen/X86/simple-register-allocation-read-undef.mir
===================================================================
--- test/CodeGen/X86/simple-register-allocation-read-undef.mir
+++ test/CodeGen/X86/simple-register-allocation-read-undef.mir
@@ -3,7 +3,7 @@
 name: f
 body: |
 bb.0:
- JB_1 %bb.2, undef implicit killed %eflags
+ JB_1 %bb.2, undef implicit killed $eflags
 JMP_1 %bb.1
 bb.1:
Index: test/CodeGen/X86/sqrt-fastmath-mir.ll
===================================================================
--- test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -5,7 +5,7 @@
 define float @foo(float %f) #0 {
 ; CHECK: {{name: *foo}}
 ; CHECK: body:
-; CHECK: %0:fr32 = COPY %xmm0
+; CHECK: %0:fr32 = COPY $xmm0
 ; CHECK: %1:fr32 = VRSQRTSSr killed %2, %0
 ; CHECK: %3:fr32 = VMULSSrr %0, %1
 ; CHECK: %4:fr32 = VMOVSSrm
@@ -20,8 +20,8 @@
 ; CHECK: %14:fr32 = FsFLD0SS
 ; CHECK: %15:fr32 = VCMPSSrr %0, killed %14, 0
 ; CHECK: %17:vr128 = VANDNPSrr killed %16, killed %13
-; CHECK: %xmm0 = COPY %18
-; CHECK: RET 0, %xmm0
+; CHECK: $xmm0 = COPY %18
+; CHECK: RET 0, $xmm0
 %call = tail call float @llvm.sqrt.f32(float %f) #1
 ret float %call
 }
@@ -29,7 +29,7 @@
 define float @rfoo(float %f) #0 {
 ; CHECK: {{name: *rfoo}}
 ; CHECK: body: |
-; CHECK: %0:fr32 = COPY %xmm0
+; CHECK: %0:fr32 = COPY $xmm0
 ; CHECK: %1:fr32 = VRSQRTSSr killed %2, %0
 ; CHECK: %3:fr32 = VMULSSrr %0, %1
 ; CHECK: %4:fr32 = VMOVSSrm
@@ -41,8 +41,8 @@
 ; CHECK: %10:fr32 = VFMADD213SSr %8, killed %9, %4
 ; CHECK: %11:fr32 = VMULSSrr %8, %6
 ; CHECK: %12:fr32 = VMULSSrr killed %11, killed %10
-; CHECK: %xmm0 = COPY %12
-; CHECK: RET 0, %xmm0
+; CHECK: $xmm0 = COPY %12
+; CHECK: RET 0, $xmm0
 %sqrt = tail call float @llvm.sqrt.f32(float %f)
 %div = fdiv fast float 1.0, %sqrt
 ret float %div
Index: test/CodeGen/X86/sse2-schedule.ll
===================================================================
--- test/CodeGen/X86/sse2-schedule.ll
+++ test/CodeGen/X86/sse2-schedule.ll
@@ -5485,61 +5485,61 @@
 ; GENERIC-LABEL: test_pextrw:
 ; GENERIC: # %bb.0:
 ; GENERIC-NEXT: pextrw $6, %xmm0, %eax # sched: [3:1.00]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_pextrw:
 ; ATOM: # %bb.0:
 ; ATOM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:2.00]
-; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: # kill: def $ax killed $ax killed $eax
 ; ATOM-NEXT: retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_pextrw:
 ; SLM: # %bb.0:
 ; SLM-NEXT: pextrw $6, %xmm0, %eax # sched: [1:1.00]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
 ; SLM-NEXT: retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_pextrw:
 ; SANDY: # %bb.0:
 ; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
 ; SANDY-NEXT: retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_pextrw:
 ; HASWELL: # %bb.0:
 ; HASWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_pextrw:
 ; BROADWELL: # %bb.0:
 ; BROADWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_pextrw:
 ; SKYLAKE: # %bb.0:
 ; SKYLAKE-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_pextrw:
 ; SKX: # %bb.0:
 ; SKX-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
 ; SKX-NEXT: retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_pextrw:
 ; BTVER2: # %bb.0:
 ; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_pextrw:
 ; ZNVER1: # %bb.0:
 ; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = extractelement <8 x i16> %a0, i32 6
 ret i16 %1
Index: test/CodeGen/X86/sse42-schedule.ll
===================================================================
--- test/CodeGen/X86/sse42-schedule.ll
+++ test/CodeGen/X86/sse42-schedule.ll
@@ -370,7 +370,7 @@
 ; GENERIC-NEXT: movl $7, %eax # sched: [1:0.33]
 ; GENERIC-NEXT: movl $7, %edx # sched: [1:0.33]
 ; GENERIC-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; GENERIC-NEXT: # kill: def %ecx killed %ecx def %rcx
+; GENERIC-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; GENERIC-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
@@ -383,7 +383,7 @@
 ; SLM-NEXT: movl $7, %edx # sched: [1:0.50]
 ; SLM-NEXT: movl %ecx, %esi # sched: [1:0.50]
 ; SLM-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [21:21.00]
-; SLM-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SLM-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SLM-NEXT: leal (%rcx,%rsi), %eax # sched: [1:1.00]
 ; SLM-NEXT: retq # sched: [4:1.00]
 ;
@@ -396,7 +396,7 @@
 ; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
 ; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
 ; SANDY-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; SANDY-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SANDY-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SANDY-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; SANDY-NEXT: retq # sched: [1:1.00]
 ;
@@ -409,7 +409,7 @@
 ; HASWELL-NEXT: movl $7, %eax # sched: [1:0.25]
 ; HASWELL-NEXT: movl $7, %edx # sched: [1:0.25]
 ; HASWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; HASWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; HASWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; HASWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
@@ -422,7 +422,7 @@
 ; BROADWELL-NEXT: movl $7, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT: movl $7, %edx # sched: [1:0.25]
 ; BROADWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [23:4.00]
-; BROADWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BROADWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; BROADWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
@@ -435,7 +435,7 @@
 ; SKYLAKE-NEXT: movl $7, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT: movl $7, %edx # sched: [1:0.25]
 ; SKYLAKE-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKYLAKE-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKYLAKE-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SKYLAKE-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
@@ -448,7 +448,7 @@
 ; SKX-NEXT: movl $7, %eax # sched: [1:0.25]
 ; SKX-NEXT: movl $7, %edx # sched: [1:0.25]
 ; SKX-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKX-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SKX-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; SKX-NEXT: retq # sched: [7:1.00]
 ;
@@ -461,7 +461,7 @@
 ; BTVER2-NEXT: movl $7, %edx # sched: [1:0.50]
 ; BTVER2-NEXT: movl %ecx, %esi # sched: [1:0.50]
 ; BTVER2-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [19:10.00]
-; BTVER2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BTVER2-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; BTVER2-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
@@ -474,7 +474,7 @@
 ; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
 ; ZNVER1-NEXT: movl %ecx, %esi # sched: [1:0.25]
 ; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: def %ecx killed %ecx def %rcx
+; ZNVER1-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
@@ -588,7 +588,7 @@
 ; GENERIC-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
 ; GENERIC-NEXT: movl %ecx, %eax # sched: [1:0.33]
 ; GENERIC-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; GENERIC-NEXT: # kill: def %ecx killed %ecx def %rcx
+; GENERIC-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; GENERIC-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; GENERIC-NEXT: retq # sched: [1:1.00]
 ;
@@ -597,7 +597,7 @@
 ; SLM-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [17:17.00]
 ; SLM-NEXT: movl %ecx, %eax # sched: [1:0.50]
 ; SLM-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:17.00]
-; SLM-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SLM-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SLM-NEXT: leal (%rcx,%rax), %eax # sched: [1:1.00]
 ; SLM-NEXT: retq # sched: [4:1.00]
 ;
@@ -606,7 +606,7 @@
 ; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
 ; SANDY-NEXT: movl %ecx, %eax # sched: [1:0.33]
 ; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; SANDY-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SANDY-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SANDY-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; SANDY-NEXT: retq # sched: [1:1.00]
 ;
@@ -615,7 +615,7 @@
 ; HASWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
 ; HASWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
 ; HASWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; HASWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; HASWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; HASWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; HASWELL-NEXT: retq # sched: [7:1.00]
 ;
@@ -624,7 +624,7 @@
 ; BROADWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
 ; BROADWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; BROADWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BROADWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; BROADWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT: retq # sched: [7:1.00]
 ;
@@ -633,7 +633,7 @@
 ; SKYLAKE-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
 ; SKYLAKE-NEXT: movl %ecx, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKYLAKE-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKYLAKE-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SKYLAKE-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT: retq # sched: [7:1.00]
 ;
@@ -642,7 +642,7 @@
 ; SKX-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
 ; SKX-NEXT: movl %ecx, %eax # sched: [1:0.25]
 ; SKX-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKX-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; SKX-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; SKX-NEXT: retq # sched: [7:1.00]
 ;
@@ -651,7 +651,7 @@
 ; BTVER2-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [7:2.00]
 ; BTVER2-NEXT: movl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [12:2.00]
-; BTVER2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BTVER2-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; BTVER2-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
@@ -660,7 +660,7 @@
 ; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: def %ecx killed %ecx def %rcx
+; ZNVER1-NEXT: # kill: def $ecx killed $ecx def $rcx
 ; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
Index: test/CodeGen/X86/subvector-broadcast.ll
===================================================================
--- test/CodeGen/X86/subvector-broadcast.ll
+++ test/CodeGen/X86/subvector-broadcast.ll
@@ -1093,13 +1093,13 @@
 define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
 ; X32-LABEL: reg_broadcast_2f64_4f64:
 ; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: reg_broadcast_2f64_4f64:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32>
@@ -1109,28 +1109,28 @@
 define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
 ; X32-AVX-LABEL: reg_broadcast_2f64_8f64:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX-NEXT: retl
 ;
 ; X32-AVX512-LABEL: reg_broadcast_2f64_8f64:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
 ; X64-AVX-LABEL: reg_broadcast_2f64_8f64:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX-NEXT: retq
 ;
 ; X64-AVX512-LABEL: reg_broadcast_2f64_8f64:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
@@ -1146,7 +1146,7 @@
 ;
 ; X32-AVX512-LABEL: reg_broadcast_4f64_8f64:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
@@ -1157,7 +1157,7 @@
 ;
 ; X64-AVX512-LABEL: reg_broadcast_4f64_8f64:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <4 x double> %a0, <4 x double> undef, <8 x i32>
@@ -1167,13 +1167,13 @@
 define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
 ; X32-LABEL: reg_broadcast_2i64_4i64:
 ; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: reg_broadcast_2i64_4i64:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32>
@@ -1183,28 +1183,28 @@
 define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
 ; X32-AVX-LABEL: reg_broadcast_2i64_8i64:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX-NEXT: retl
 ;
 ; X32-AVX512-LABEL: reg_broadcast_2i64_8i64:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
 ; X64-AVX-LABEL: reg_broadcast_2i64_8i64:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX-NEXT: retq
 ;
 ; X64-AVX512-LABEL: reg_broadcast_2i64_8i64:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
@@ -1220,7 +1220,7 @@
 ;
 ; X32-AVX512-LABEL: reg_broadcast_4i64_8i64:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
@@ -1231,7 +1231,7 @@
 ;
 ; X64-AVX512-LABEL: reg_broadcast_4i64_8i64:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <8 x i32>
@@ -1241,13 +1241,13 @@
 define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
 ; X32-LABEL: reg_broadcast_4f32_8f32:
 ; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: reg_broadcast_4f32_8f32:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32>
@@ -1257,28 +1257,28 @@
 define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
 ; X32-AVX-LABEL: reg_broadcast_4f32_16f32:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX-NEXT: retl
 ;
 ; X32-AVX512-LABEL: reg_broadcast_4f32_16f32:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
 ; X64-AVX-LABEL: reg_broadcast_4f32_16f32:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX-NEXT: retq
 ;
 ; X64-AVX512-LABEL: reg_broadcast_4f32_16f32:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
@@ -1294,7 +1294,7 @@
 ;
 ; X32-AVX512-LABEL: reg_broadcast_8f32_16f32:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
@@ -1305,7 +1305,7 @@
 ;
 ; X64-AVX512-LABEL: reg_broadcast_8f32_16f32:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <8 x float> %a0, <8 x float> undef, <16 x i32>
@@ -1315,13 +1315,13 @@
 define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
 ; X32-LABEL: reg_broadcast_4i32_8i32:
 ; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: reg_broadcast_4i32_8i32:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <8 x i32>
@@ -1331,28 +1331,28 @@
 define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
 ; X32-AVX-LABEL: reg_broadcast_4i32_16i32:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX-NEXT: retl
 ;
 ; X32-AVX512-LABEL: reg_broadcast_4i32_16i32:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
 ; X64-AVX-LABEL: reg_broadcast_4i32_16i32:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX-NEXT: retq
 ;
 ; X64-AVX512-LABEL: reg_broadcast_4i32_16i32:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
@@ -1368,7 +1368,7 @@
 ;
 ; X32-AVX512-LABEL: reg_broadcast_8i32_16i32:
 ; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT: retl
 ;
@@ -1379,7 +1379,7 @@
 ;
 ; X64-AVX512-LABEL: reg_broadcast_8i32_16i32:
 ; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT: retq
 %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <16 x i32>
@@ -1389,13 +1389,13 @@
 define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
 ; X32-LABEL: reg_broadcast_8i16_16i16:
 ; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: reg_broadcast_8i16_16i16:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <16 x i32>
@@ -1405,56 +1405,56 @@
 define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
 ; X32-AVX-LABEL: reg_broadcast_8i16_32i16:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX-NEXT: retl
 ;
 ; X32-AVX512F-LABEL: reg_broadcast_8i16_32i16:
 ; X32-AVX512F: # %bb.0:
-; X32-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX512F-NEXT: retl
 ;
 ; X32-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
 ; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512BW-NEXT: retl
 ;
 ; X32-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
 ; X32-AVX512DQ: # %bb.0:
-; X32-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX512DQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: reg_broadcast_8i16_32i16:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX-NEXT: retq
 ;
 ; X64-AVX512F-LABEL: reg_broadcast_8i16_32i16:
 ; X64-AVX512F: # %bb.0:
-; X64-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX512F-NEXT: retq
 ;
 ; X64-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
 ; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BW-NEXT: retq
 ;
 ; X64-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
 ; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX512DQ-NEXT: retq
@@ -1475,7 +1475,7 @@
 ;
 ; X32-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
 ; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512BW-NEXT: retl
 ;
@@ -1496,7 +1496,7 @@
 ;
 ; X64-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
 ; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BW-NEXT: retq
 ;
@@ -1511,13 +1511,13 @@
 define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
 ; X32-LABEL: reg_broadcast_16i8_32i8:
 ; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: reg_broadcast_16i8_32i8:
 ; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <32 x i32>
@@ -1527,56 +1527,56 @@
 define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
 ; X32-AVX-LABEL: reg_broadcast_16i8_64i8:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX-NEXT: retl
 ;
 ; X32-AVX512F-LABEL: reg_broadcast_16i8_64i8:
 ; X32-AVX512F: # %bb.0:
-; X32-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX512F-NEXT: retl
 ;
 ; X32-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
 ; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512BW-NEXT: retl
 ;
 ; X32-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
 ; X32-AVX512DQ: # %bb.0:
-; X32-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
 ; X32-AVX512DQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: reg_broadcast_16i8_64i8:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX-NEXT: retq
 ;
 ; X64-AVX512F-LABEL: reg_broadcast_16i8_64i8:
 ; X64-AVX512F: # %bb.0:
-; X64-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX512F-NEXT: retq
 ;
 ; X64-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
 ; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BW-NEXT: retq
 ;
 ; X64-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
 ; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
 ; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
 ; X64-AVX512DQ-NEXT: retq
@@ -1597,7 +1597,7 @@
 ;
 ; X32-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
 ; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X32-AVX512BW-NEXT: retl
 ;
@@ -1618,7 +1618,7 @@
 ;
 ; X64-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
 ; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
 ; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BW-NEXT: retq
 ;
Index: test/CodeGen/X86/switch-lower-peel-top-case.ll
===================================================================
--- test/CodeGen/X86/switch-lower-peel-top-case.ll
+++ test/CodeGen/X86/switch-lower-peel-top-case.ll
@@ -11,34 +11,34 @@
 ], !prof !2
 ; CHECK: successors: %[[PEELED_CASE_LABEL:.*]](0x5999999a), %[[PEELED_SWITCH_LABEL:.*]](0x26666666)
-; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY %edi
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18568, implicit-def %eflags
-; CHECK: JE_1 %[[PEELED_CASE_LABEL]], implicit %eflags
+; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY $edi
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18568, implicit-def $eflags
+; CHECK: JE_1 %[[PEELED_CASE_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
 ; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[BB1_LABEL:.*]](0x0206d3a0), %[[BB2_LABEL:.*]](0x7df92c60)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18311, implicit-def %eflags
-; CHECK: JG_1 %[[BB2_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18311, implicit-def $eflags
+; CHECK: JG_1 %[[BB2_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB1_LABEL]]
 ; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE2_LABEL:.*]](0x35e50d5b), %[[BB3_LABEL:.*]](0x4a1af2a5)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], -8826, implicit-def %eflags
-; CHECK: JE_1 %[[CASE2_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], -8826, implicit-def $eflags
+; CHECK: JE_1 %[[CASE2_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB3_LABEL]]
 ; CHECK: [[BB3_LABEL]]
 ; CHECK: successors: %[[CASE5_LABEL:.*]](0x45d173c8), %[[BB4_LABEL:.*]](0x3a2e8c38)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 129, implicit-def %eflags
-; CHECK: JE_1 %[[CASE5_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 129, implicit-def $eflags
+; CHECK: JE_1 %[[CASE5_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB4_LABEL]]
 ; CHECK: [[BB4_LABEL:.*]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 8, implicit-def %eflags
-; CHECK: JE_1 %[[CASE1_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 8, implicit-def $eflags
+; CHECK: JE_1 %[[CASE1_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
 ; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE3_LABEL:.*]](0x7fe44107), %[[DEFAULT_BB_LABEL]](0x001bbef9)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18312, implicit-def %eflags
-; CHECK: JE_1 %[[CASE3_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18312, implicit-def $eflags
+; CHECK: JE_1 %[[CASE3_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
 bb1:
@@ -73,40 +73,40 @@
 ], !prof !3
 ; CHECK: successors: %[[PEELED_CASE_LABEL:.*]](0x59999999), %[[PEELED_SWITCH_LABEL:.*]](0x26666667)
-; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY %edi
-; CHECK: %{{[0-9]+}}:gr32 = ADD32ri8 %{{[0-9]+}}, -85, implicit-def dead %eflags
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %{{[0-9]+}}, 2, implicit-def %eflags
-; CHECK: JB_1 %[[PEELED_CASE_LABEL]], implicit %eflags
+; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY $edi
+; CHECK: %{{[0-9]+}}:gr32 = ADD32ri8 %{{[0-9]+}}, -85, implicit-def dead $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %{{[0-9]+}}, 2, implicit-def $eflags
+; CHECK: JB_1 %[[PEELED_CASE_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
 ; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[BB1_LABEL:.*]](0x0088888a), %[[BB2_LABEL:.*]](0x7f777776)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 4, implicit-def %eflags
-; CHECK: JG_1 %[[BB2_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 4, implicit-def $eflags
+; CHECK: JG_1 %[[BB2_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB1_LABEL]]
 ; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE4_LABEL:.*]](0x7f775a4f), %[[BB3_LABEL:.*]](0x0088a5b1)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 1, implicit-def %eflags
-; CHECK: JE_1 %[[CASE4_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 1, implicit-def $eflags
+; CHECK: JE_1 %[[CASE4_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB3_LABEL]]
 ; CHECK: [[BB3_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], -40, implicit-def %eflags
-; CHECK: JE_1 %[[CASE1_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], -40, implicit-def $eflags
+; CHECK: JE_1 %[[CASE1_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
 ; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE5_LABEL:.*]](0x00000000), %[[BB4_LABEL:.*]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 5, implicit-def %eflags
-; CHECK: JE_1 %[[CASE5_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 5, implicit-def $eflags
+; CHECK: JE_1 %[[CASE5_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB4_LABEL]]
 ; CHECK: [[BB4_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE6_LABEL:.*]](0x00000000), %[[BB5_LABEL:.*]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 7, implicit-def %eflags
-; CHECK: JE_1 %[[CASE6_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 7, implicit-def $eflags
+; CHECK: JE_1 %[[CASE6_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[BB5_LABEL]]
 ; CHECK: [[BB5_LABEL]].{{[a-zA-Z0-9.]+}}:
 ; CHECK: successors: %[[CASE7_LABEL:.*]](0x00000000), %[[DEFAULT_BB_LABEL]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 49, implicit-def %eflags
-; CHECK: JE_1 %[[CASE7_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 49, implicit-def $eflags
+; CHECK: JE_1 %[[CASE7_LABEL]], implicit $eflags
 ; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
Index: test/CodeGen/X86/tail-call-conditional.mir
===================================================================
--- test/CodeGen/X86/tail-call-conditional.mir
+++ test/CodeGen/X86/tail-call-conditional.mir
@@ -29,57 +29,57 @@
 name: test
 tracksRegLiveness: true
 liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
 body: |
 bb.0:
 successors: %bb.1, %bb.4
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rax = COPY %rdi
- CMP64ri8 %rax, 99, implicit-def %eflags
- JA_1 %bb.4, implicit %eflags
+ $rax = COPY $rdi
+ CMP64ri8 $rax, 99, implicit-def $eflags
+ JA_1 %bb.4, implicit $eflags
 JMP_1 %bb.1
 ; CHECK: bb.1:
 ; CHECK-NEXT: successors: %bb.2({{[^)]+}}){{$}}
- ; CHECK-NEXT: liveins: %rax, %rsi
+ ; CHECK-NEXT: liveins: $rax, $rsi
 ; CHECK-NEXT: {{^ $}}
- ; CHECK-NEXT: %rdi = COPY %rsi
- ; CHECK-NEXT: %rsi = COPY %rax
- ; CHECK-NEXT: CMP64ri8 %rax, 9, implicit-def %eflags
- ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit %rsp, implicit %eflags, implicit %ssp, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rax, implicit-def %rax, implicit %sil, implicit-def %sil, implicit %si, implicit-def %si, implicit %esi, implicit-def %esi, implicit %rsi, implicit-def %rsi, implicit %dil, implicit-def %dil, implicit %di, implicit-def %di, implicit %edi, implicit-def %edi, implicit %rdi, implicit-def %rdi, implicit %ah, implicit-def %ah, implicit %al, implicit-def %al, implicit %ax, implicit-def %ax, implicit %eax, implicit-def %eax
+ ; CHECK-NEXT: $rdi = COPY $rsi
+ ; CHECK-NEXT: $rsi = COPY $rax
+ ; CHECK-NEXT: CMP64ri8 $rax, 9, implicit-def $eflags
+ ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit $rsp, implicit $eflags, implicit $ssp, implicit $rsp, implicit $rdi, implicit $rsi, implicit $rax, implicit-def $rax, implicit $sil, implicit-def $sil, implicit $si, implicit-def $si, implicit $esi, implicit-def $esi, implicit $rsi, implicit-def $rsi, implicit $dil, implicit-def $dil, implicit $di, implicit-def $di, implicit $edi, implicit-def $edi, implicit $rdi, implicit-def $rdi, implicit $ah, implicit-def $ah, implicit $al, implicit-def $al, implicit $ax, implicit-def $ax, implicit $eax, implicit-def $eax
 bb.1:
 successors: %bb.2, %bb.3
- liveins: %rax, %rsi
+ liveins: $rax, $rsi
- CMP64ri8 %rax, 9, implicit-def %eflags
- JA_1 %bb.3, implicit %eflags
+ CMP64ri8 $rax, 9, implicit-def $eflags
+ JA_1 %bb.3, implicit $eflags
 JMP_1 %bb.2
 bb.2:
- liveins: %rax, %rsi
+ liveins: $rax, $rsi
- %rdi = COPY %rsi
- %rsi = COPY %rax
+ $rdi = COPY $rsi
+ $rsi = COPY $rax
- TCRETURNdi64 @f1, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+ TCRETURNdi64 @f1, 0, csr_64, implicit $rsp, implicit $rdi, implicit $rsi
 ; CHECK: bb.2:
- ; CHECK-NEXT: liveins: %rax, %rdi, %rsi
+ ; CHECK-NEXT: liveins: $rax, $rdi, $rsi
 ; CHECK-NEXT: {{^ $}}
- ; CHECK-NEXT: TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+ ; CHECK-NEXT: TCRETURNdi64 @f2, 0, csr_64, implicit $rsp, implicit $rdi, implicit $rsi
 bb.3:
- liveins: %rax, %rsi
+ liveins: $rax, $rsi
- %rdi = COPY %rsi
- %rsi = COPY %rax
- TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+ $rdi = COPY $rsi
+ $rsi = COPY $rax
+ TCRETURNdi64 @f2, 0, csr_64, implicit $rsp, implicit $rdi, implicit $rsi
 bb.4:
- dead %eax = MOV32ri64 123, implicit-def %rax
- RET 0, %rax
+ dead $eax = MOV32ri64 123, implicit-def $rax
+ RET 0, $rax
 ...
Index: test/CodeGen/X86/tail-dup-debugloc.ll
===================================================================
--- test/CodeGen/X86/tail-dup-debugloc.ll
+++ test/CodeGen/X86/tail-dup-debugloc.ll
@@ -4,7 +4,7 @@
 ; 'while.cond1.preheader.lr.ph' survives after tailduplication pass.
 ;
 ; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 9, column: 5, scope: !{{[0-9]+}})
-; CHECK: [[VREG:%[^ ]+]]:gr64 = COPY %rdi
+; CHECK: [[VREG:%[^ ]+]]:gr64 = COPY $rdi
 ; CHECK: TEST64rr [[VREG]], [[VREG]]
 ; CHECK-NEXT: JE_1 {{.+}}, debug-location [[DLOC]]
 ; CHECK-NEXT: JMP_1 {{.+}}, debug-location [[DLOC]]
Index: test/CodeGen/X86/tail-merge-after-mbp.mir
===================================================================
--- test/CodeGen/X86/tail-merge-after-mbp.mir
+++ test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -5,26 +5,26 @@
 # check loop bb.9 is not merged with bb.12
 # CHECK: bb.2:
 # CHECK-NEXT: successors: %bb.3(0x30000000), %bb.4(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
-# CHECK-NEXT: TEST64rr %rax, %rax
+# CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
+# CHECK-NEXT: TEST64rr $rax, $rax
 # CHECK-NEXT: JE_1 %bb.3
 # CHECK: bb.4:
 # CHECK-NEXT: successors: %bb.5(0x30000000), %bb.10(0x50000000)
-# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
+# CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0
 # CHECK-NEXT: JNE_1 %bb.10
 # CHECK: bb.5:
 # CHECK-NEXT: successors: %bb.6(0x30000000), %bb.7(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
-# CHECK-NEXT: TEST64rr %rax, %rax
+# CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
+# CHECK-NEXT: TEST64rr $rax, $rax
 # CHECK-NEXT: JE_1 %bb.6
 # CHECK: bb.7
 # CHECK-NEXT: successors: %bb.8(0x71555555), %bb.10(0x0eaaaaab)
-# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
+# CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0
 # CHECK-NEXT: JNE_1 %bb.10
 # CHECK: bb.8:
 # CHECK-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
-# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
-# CHECK-NEXT: TEST64rr %rax, %rax
+# CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
+# CHECK-NEXT: TEST64rr $rax, $rax
 # CHECK-NEXT: JNE_1 %bb.7
 name: foo
@@ -32,74 +32,74 @@
 bb.0:
 successors: %bb.1(0x40000000), %bb.7(0x40000000)
- TEST8ri %dl, 1, implicit-def %eflags, implicit killed %edx
- JE_1 %bb.7, implicit %eflags
+ TEST8ri $dl, 1, implicit-def $eflags, implicit killed $edx
+ JE_1 %bb.7, implicit $eflags
 bb.1:
 successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
 JMP_1 %bb.16
 bb.7:
 successors: %bb.8(0x30000000), %bb.9(0x50000000)
- %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.9, implicit killed %eflags
+ $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.9, implicit killed $eflags
 bb.8:
 successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
 JMP_1 %bb.16
 bb.9:
 successors: %bb.10(0x30000000), %bb.15(0x50000000)
- CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8)
- JNE_1 %bb.15, implicit %eflags
+ CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8)
+ JNE_1 %bb.15, implicit $eflags
 bb.10:
 successors: %bb.11(0x30000000), %bb.12(0x50000000)
- %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.12, implicit %eflags
+ $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.12, implicit $eflags
 bb.11:
 successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
 JMP_1 %bb.16
 bb.12:
 successors: %bb.13(0x71555555), %bb.15(0x0eaaaaab)
- CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8), (load 8)
- JNE_1 %bb.15, implicit %eflags
+ CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
+ JNE_1 %bb.15, implicit $eflags
 bb.13:
 successors: %bb.14(0x04000000), %bb.12(0x7c000000)
- %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.12, implicit %eflags
+ $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.12, implicit $eflags
 bb.14:
 successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
 JMP_1 %bb.16
 bb.15:
 successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
- dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
+ dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
 bb.16:
- RETQ %eax
+ RETQ $eax
 ...
Index: test/CodeGen/X86/tail-merge-debugloc.ll
===================================================================
--- test/CodeGen/X86/tail-merge-debugloc.ll
+++ test/CodeGen/X86/tail-merge-debugloc.ll
@@ -6,7 +6,7 @@
 ; location info.
; ; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 2, column: 2, scope: !{{[0-9]+}}) -; CHECK: TEST64rr{{.*}}%rsi, renamable %rsi, implicit-def %eflags +; CHECK: TEST64rr{{.*}}$rsi, renamable $rsi, implicit-def $eflags ; CHECK-NEXT: JNE_1{{.*}}, debug-location [[DLOC]] target triple = "x86_64-unknown-linux-gnu" Index: test/CodeGen/X86/tbm-intrinsics-fast-isel.ll =================================================================== --- test/CodeGen/X86/tbm-intrinsics-fast-isel.ll +++ test/CodeGen/X86/tbm-intrinsics-fast-isel.ll @@ -28,7 +28,7 @@ ; ; X64-LABEL: test__blcfill_u32: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 1(%rdi), %eax ; X64-NEXT: andl %edi, %eax ; X64-NEXT: retq @@ -48,7 +48,7 @@ ; ; X64-LABEL: test__blci_u32: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 1(%rdi), %eax ; X64-NEXT: xorl $-1, %eax ; X64-NEXT: orl %edi, %eax @@ -93,7 +93,7 @@ ; ; X64-LABEL: test__blcmsk_u32: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 1(%rdi), %eax ; X64-NEXT: xorl %edi, %eax ; X64-NEXT: retq @@ -112,7 +112,7 @@ ; ; X64-LABEL: test__blcs_u32: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal 1(%rdi), %eax ; X64-NEXT: orl %edi, %eax ; X64-NEXT: retq Index: test/CodeGen/X86/tbm_patterns.ll =================================================================== --- test/CodeGen/X86/tbm_patterns.ll +++ test/CodeGen/X86/tbm_patterns.ll @@ -151,7 +151,7 @@ define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind { ; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal 1(%rdi), %eax ; CHECK-NEXT: testl %edi, %eax ; CHECK-NEXT: cmovnel %edx, %esi @@ -230,7 +230,7 @@ define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind { ; CHECK-LABEL: test_x86_tbm_blci_u32_z2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal 1(%rdi), %eax ; CHECK-NEXT: notl %eax ; CHECK-NEXT: orl %edi, %eax @@ -419,7 +419,7 @@ define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind { ; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal 1(%rdi), %eax ; CHECK-NEXT: xorl %edi, %eax ; CHECK-NEXT: cmovnel %edx, %esi @@ -496,7 +496,7 @@ define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind { ; CHECK-LABEL: test_x86_tbm_blcs_u32_z2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal 1(%rdi), %eax ; CHECK-NEXT: orl %edi, %eax ; CHECK-NEXT: cmovnel %edx, %esi @@ -573,7 +573,7 @@ define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind { ; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: leal -1(%rdi), %eax ; CHECK-NEXT: orl %edi, %eax ; CHECK-NEXT: cmovnel %edx, %esi Index: test/CodeGen/X86/trunc-subvector.ll 
=================================================================== --- test/CodeGen/X86/trunc-subvector.ll +++ test/CodeGen/X86/trunc-subvector.ll @@ -11,7 +11,7 @@ ; ; AVX-LABEL: test1: ; AVX: # %bb.0: -; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %x = sext <8 x i32> %v to <8 x i64> @@ -50,7 +50,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -77,14 +77,14 @@ ; AVX2-LABEL: test4: ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: test4: ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %x = sext <8 x i32> %v to <8 x i64> @@ -113,7 +113,7 @@ ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 ; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -138,7 +138,7 @@ ; ; AVX-LABEL: test6: ; AVX: # %bb.0: -; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %x = zext <8 x i32> %v to <8 x i64> @@ -176,7 +176,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -202,14 +202,14 @@ ; AVX2-LABEL: test9: ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: test9: ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %x = zext <8 x i32> %v to <8 x i64> @@ -234,7 +234,7 @@ ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; Index: test/CodeGen/X86/umul-with-overflow.ll =================================================================== --- test/CodeGen/X86/umul-with-overflow.ll +++ 
test/CodeGen/X86/umul-with-overflow.ll @@ -35,7 +35,7 @@ ; ; X64-LABEL: test2: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: addl %esi, %edi ; X64-NEXT: leal (%rdi,%rdi), %eax ; X64-NEXT: retq @@ -57,8 +57,8 @@ ; ; X64-LABEL: test3: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %esi killed %esi def %rsi -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $esi killed $esi def $rsi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: leal (%rdi,%rsi), %eax ; X64-NEXT: movl $4, %ecx ; X64-NEXT: mull %ecx Index: test/CodeGen/X86/unreachable-mbb-undef-phi.mir =================================================================== --- test/CodeGen/X86/unreachable-mbb-undef-phi.mir +++ test/CodeGen/X86/unreachable-mbb-undef-phi.mir @@ -13,7 +13,7 @@ bb.1: %1 = PHI %0, %bb.0, %2, %bb.2 - %2 = ADD32ri8 killed %1, 1, implicit-def %eflags + %2 = ADD32ri8 killed %1, 1, implicit-def $eflags JMP_1 %bb.3 bb.2: Index: test/CodeGen/X86/update-terminator-debugloc.ll =================================================================== --- test/CodeGen/X86/update-terminator-debugloc.ll +++ test/CodeGen/X86/update-terminator-debugloc.ll @@ -21,8 +21,8 @@ ; these debug locations are propagated correctly to lowered instructions. ; ; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 6 -; CHECK-DAG: [[VREG1:%[^ ]+]]:gr64 = COPY %rsi -; CHECK-DAG: [[VREG2:%[^ ]+]]:gr64 = COPY %rdi +; CHECK-DAG: [[VREG1:%[^ ]+]]:gr64 = COPY $rsi +; CHECK-DAG: [[VREG2:%[^ ]+]]:gr64 = COPY $rdi ; CHECK: SUB64rr [[VREG2]], [[VREG1]] ; CHECK-NEXT: JNE_1 {{.*}}, debug-location [[DLOC]]{{$}} ; CHECK: [[VREG3:%[^ ]+]]:gr64 = PHI [[VREG2]] Index: test/CodeGen/X86/update-terminator.mir =================================================================== --- test/CodeGen/X86/update-terminator.mir +++ test/CodeGen/X86/update-terminator.mir @@ -48,29 +48,29 @@ bb.0 (%ir-block.0): successors: %bb.1(50), %bb.3(50) - JNE_1 %bb.1, implicit %eflags + JNE_1 %bb.1, implicit $eflags JMP_1 %bb.3 bb.1: successors: %bb.2(100) - CALL64pcrel32 @dummy1, csr_64, implicit %rsp, implicit-def %rsp - CALL64pcrel32 @dummy1, csr_64, implicit %rsp, implicit-def %rsp - CALL64pcrel32 @dummy1, csr_64, implicit %rsp, implicit-def %rsp - JNE_1 %bb.2, implicit %eflags + CALL64pcrel32 @dummy1, csr_64, implicit $rsp, implicit-def $rsp + CALL64pcrel32 @dummy1, csr_64, implicit $rsp, implicit-def $rsp + CALL64pcrel32 @dummy1, csr_64, implicit $rsp, implicit-def $rsp + JNE_1 %bb.2, implicit $eflags bb.2: successors: %bb.4(100) - CALL64pcrel32 @dummy2, csr_64, implicit %rsp, implicit-def %rsp - CALL64pcrel32 @dummy2, csr_64, implicit %rsp, implicit-def %rsp - CALL64pcrel32 @dummy2, csr_64, implicit %rsp, implicit-def %rsp + CALL64pcrel32 @dummy2, csr_64, implicit $rsp, implicit-def $rsp + CALL64pcrel32 @dummy2, csr_64, implicit $rsp, implicit-def $rsp + CALL64pcrel32 @dummy2, csr_64, implicit $rsp, implicit-def $rsp JMP_1 %bb.4 bb.3: successors: %bb.2(100) - CALL64pcrel32 @dummy3, csr_64, implicit %rsp, implicit-def %rsp - CALL64pcrel32 @dummy3, csr_64, implicit %rsp, implicit-def %rsp - CALL64pcrel32 @dummy3, csr_64, implicit %rsp, implicit-def %rsp + CALL64pcrel32 @dummy3, csr_64, implicit $rsp, implicit-def $rsp + CALL64pcrel32 @dummy3, csr_64, implicit $rsp, implicit-def $rsp + CALL64pcrel32 @dummy3, csr_64, implicit $rsp, implicit-def $rsp JMP_1 %bb.2 bb.4: Index: test/CodeGen/X86/urem-i8-constant.ll
=================================================================== --- test/CodeGen/X86/urem-i8-constant.ll +++ test/CodeGen/X86/urem-i8-constant.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: imull $111, %ecx, %eax ; CHECK-NEXT: shrl $12, %eax ; CHECK-NEXT: movb $37, %dl -; CHECK-NEXT: # kill: def %al killed %al killed %eax +; CHECK-NEXT: # kill: def $al killed $al killed $eax ; CHECK-NEXT: mulb %dl ; CHECK-NEXT: subb %al, %cl ; CHECK-NEXT: movl %ecx, %eax Index: test/CodeGen/X86/urem-power-of-two.ll =================================================================== --- test/CodeGen/X86/urem-power-of-two.ll +++ test/CodeGen/X86/urem-power-of-two.ll @@ -56,7 +56,7 @@ ; X86-NEXT: shrl %cl, %eax ; X86-NEXT: decl %eax ; X86-NEXT: andw {{[0-9]+}}(%esp), %ax -; X86-NEXT: # kill: def %ax killed %ax killed %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: shift_right_pow_2: @@ -66,7 +66,7 @@ ; X64-NEXT: shrl %cl, %eax ; X64-NEXT: decl %eax ; X64-NEXT: andl %edi, %eax -; X64-NEXT: # kill: def %ax killed %ax killed %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %shr = lshr i16 -32768, %y %urem = urem i16 %x, %shr @@ -81,20 +81,20 @@ ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl ; X86-NEXT: andb $4, %cl ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; X86-NEXT: # kill: def %eax killed %eax def %ax +; X86-NEXT: # kill: def $eax killed $eax def $ax ; X86-NEXT: divb %cl ; X86-NEXT: movzbl %ah, %eax -; X86-NEXT: # kill: def %al killed %al killed %eax +; X86-NEXT: # kill: def $al killed $al killed $eax ; X86-NEXT: retl ; ; X64-LABEL: and_pow_2: ; X64: # %bb.0: ; X64-NEXT: andb $4, %sil ; X64-NEXT: movzbl %dil, %eax -; X64-NEXT: # kill: def %eax killed %eax def %ax +; X64-NEXT: # kill: def $eax killed $eax def $ax ; X64-NEXT: divb %sil ; X64-NEXT: movzbl %ah, %eax -; X64-NEXT: # kill: def %al killed %al killed %eax +; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq %and = and i8 %y, 4 %urem = urem i8 %x, %and Index: test/CodeGen/X86/var-permute-256.ll =================================================================== --- test/CodeGen/X86/var-permute-256.ll +++ test/CodeGen/X86/var-permute-256.ll @@ -1340,13 +1340,13 @@ ; ; AVX512VL-LABEL: var_shuffle_v4i64_from_v2i64: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0 ; AVX512VL-NEXT: retq ; ; AVX512VLBW-LABEL: var_shuffle_v4i64_from_v2i64: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0 ; AVX512VLBW-NEXT: retq %index0 = extractelement <4 x i64> %indices, i32 0 @@ -1398,7 +1398,7 @@ ; ; INT256-LABEL: var_shuffle_v8i32_from_v4i32: ; INT256: # %bb.0: # %entry -; INT256-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; INT256-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0 ; INT256-NEXT: retq entry: @@ -1660,7 +1660,7 @@ ; ; AVX512VLBW-LABEL: var_shuffle_v16i16_from_v8i16: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VLBW-NEXT: vpermw %ymm0, %ymm1, %ymm0 ; AVX512VLBW-NEXT: retq %index0 = extractelement <16 x i16> %indices, i32 0 @@ -2201,7 +2201,7 @@ ; ; VBMI-LABEL: var_shuffle_v32i8_from_v16i8: ; VBMI: # %bb.0: -; VBMI-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; 
VBMI-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; VBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0 ; VBMI-NEXT: retq %index0 = extractelement <32 x i8> %indices, i32 0 @@ -2363,13 +2363,13 @@ ; ; AVX512VL-LABEL: var_shuffle_v4f64_from_v2f64: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0 ; AVX512VL-NEXT: retq ; ; AVX512VLBW-LABEL: var_shuffle_v4f64_from_v2f64: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0 ; AVX512VLBW-NEXT: retq %index0 = extractelement <4 x i64> %indices, i32 0 @@ -2421,7 +2421,7 @@ ; ; INT256-LABEL: var_shuffle_v8f32_from_v4f32: ; INT256: # %bb.0: # %entry -; INT256-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; INT256-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0 ; INT256-NEXT: retq entry: Index: test/CodeGen/X86/vec_cmp_uint-128.ll =================================================================== --- test/CodeGen/X86/vec_cmp_uint-128.ll +++ test/CodeGen/X86/vec_cmp_uint-128.ll @@ -357,8 +357,8 @@ ; ; AVX512-LABEL: ge_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1 ; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -693,8 +693,8 @@ ; ; AVX512-LABEL: le_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm1 ; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper Index: test/CodeGen/X86/vec_fp_to_int.ll =================================================================== --- test/CodeGen/X86/vec_fp_to_int.ll +++ test/CodeGen/X86/vec_fp_to_int.ll @@ -60,9 +60,9 @@ ; ; AVX512DQ-LABEL: fptosi_2f64_to_2i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -115,7 +115,7 @@ ; ; AVX-LABEL: fptosi_4f64_to_2i32: ; AVX: # %bb.0: -; AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq @@ -217,9 +217,9 @@ ; ; AVX512DQ-LABEL: fptosi_4f64_to_4i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptosi_4f64_to_4i64: @@ -321,9 +321,9 @@ ; ; AVX512DQ-LABEL: fptoui_2f64_to_2i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: 
vcvttpd2uqq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -388,7 +388,7 @@ ; ; AVX512F-LABEL: fptoui_2f64_to_4i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero ; AVX512F-NEXT: vzeroupper @@ -401,7 +401,7 @@ ; ; AVX512DQ-LABEL: fptoui_2f64_to_4i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0 ; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero ; AVX512DQ-NEXT: vzeroupper @@ -467,9 +467,9 @@ ; ; AVX512F-LABEL: fptoui_2f64_to_2i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -480,9 +480,9 @@ ; ; AVX512DQ-LABEL: fptoui_2f64_to_2i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -542,30 +542,30 @@ ; ; AVX512F-LABEL: fptoui_4f64_to_2i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: fptoui_4f64_to_2i32: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0 ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512DQ-LABEL: fptoui_4f64_to_2i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32: ; AVX512VLDQ: # %bb.0: -; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0 ; AVX512VLDQ-NEXT: vzeroupper ; AVX512VLDQ-NEXT: retq @@ -736,9 +736,9 @@ ; ; AVX512DQ-LABEL: fptoui_4f64_to_4i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptoui_4f64_to_4i64: @@ -812,9 +812,9 @@ ; ; AVX512F-LABEL: fptoui_4f64_to_4i32: ; AVX512F: # %bb.0: -; 
AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -826,9 +826,9 @@ ; ; AVX512DQ-LABEL: fptoui_4f64_to_4i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -919,9 +919,9 @@ ; ; AVX512DQ-LABEL: fptosi_2f32_to_2i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -978,16 +978,16 @@ ; ; AVX512DQ-LABEL: fptosi_4f32_to_2i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64: ; AVX512VLDQ: # %bb.0: ; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0 -; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512VLDQ-NEXT: vzeroupper ; AVX512VLDQ-NEXT: retq %cvt = fptosi <4 x float> %a to <4 x i64> @@ -1106,7 +1106,7 @@ ; AVX512DQ-LABEL: fptosi_4f32_to_4i64: ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptosi_4f32_to_4i64: @@ -1214,13 +1214,13 @@ ; AVX512DQ-LABEL: fptosi_8f32_to_4i64: ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptosi_8f32_to_4i64: ; AVX512VLDQ: # %bb.0: ; AVX512VLDQ-NEXT: vcvttps2qq %ymm0, %zmm0 -; AVX512VLDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VLDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VLDQ-NEXT: retq %cvt = fptosi <8 x float> %a to <8 x i64> %shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> @@ -1281,7 +1281,7 @@ ; ; AVX512F-LABEL: fptoui_2f32_to_2i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0 ; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero ; AVX512F-NEXT: vzeroupper @@ -1295,7 +1295,7 @@ ; ; AVX512DQ-LABEL: fptoui_2f32_to_2i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0 ; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero ; AVX512DQ-NEXT: vzeroupper @@ -1349,9 
+1349,9 @@ ; ; AVX512F-LABEL: fptoui_4f32_to_4i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1362,9 +1362,9 @@ ; ; AVX512DQ-LABEL: fptoui_4f32_to_4i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -1446,9 +1446,9 @@ ; ; AVX512DQ-LABEL: fptoui_2f32_to_2i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -1531,16 +1531,16 @@ ; ; AVX512DQ-LABEL: fptoui_4f32_to_2i64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64: ; AVX512VLDQ: # %bb.0: ; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0 -; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512VLDQ-NEXT: vzeroupper ; AVX512VLDQ-NEXT: retq %cvt = fptoui <4 x float> %a to <4 x i64> @@ -1644,9 +1644,9 @@ ; ; AVX512F-LABEL: fptoui_8f32_to_8i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0 -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: fptoui_8f32_to_8i32: @@ -1656,9 +1656,9 @@ ; ; AVX512DQ-LABEL: fptoui_8f32_to_8i32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptoui_8f32_to_8i32: @@ -1835,7 +1835,7 @@ ; AVX512DQ-LABEL: fptoui_4f32_to_4i64: ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptoui_4f32_to_4i64: @@ -2013,13 +2013,13 @@ ; AVX512DQ-LABEL: fptoui_8f32_to_4i64: ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: fptoui_8f32_to_4i64: ; AVX512VLDQ: # %bb.0: ; AVX512VLDQ-NEXT: vcvttps2uqq %ymm0, %zmm0 -; 
AVX512VLDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VLDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VLDQ-NEXT: retq %cvt = fptoui <8 x float> %a to <8 x i64> %shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> Index: test/CodeGen/X86/vec_ins_extract-1.ll =================================================================== --- test/CodeGen/X86/vec_ins_extract-1.ll +++ test/CodeGen/X86/vec_ins_extract-1.ll @@ -22,7 +22,7 @@ ; ; X64-LABEL: t0: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: andl $3, %edi ; X64-NEXT: movl $76, -24(%rsp,%rdi,4) @@ -51,7 +51,7 @@ ; ; X64-LABEL: t1: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movl $76, %eax ; X64-NEXT: pinsrd $0, %eax, %xmm0 ; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) @@ -79,7 +79,7 @@ ; ; X64-LABEL: t2: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: andl $3, %edi ; X64-NEXT: pinsrd $0, -24(%rsp,%rdi,4), %xmm0 @@ -106,7 +106,7 @@ ; ; X64-LABEL: t3: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: andl $3, %edi ; X64-NEXT: movss %xmm0, -24(%rsp,%rdi,4) Index: test/CodeGen/X86/vec_insert-4.ll =================================================================== --- test/CodeGen/X86/vec_insert-4.ll +++ test/CodeGen/X86/vec_insert-4.ll @@ -26,7 +26,7 @@ ; X64-NEXT: movq %rsp, %rbp ; X64-NEXT: andq $-32, %rsp ; X64-NEXT: subq $64, %rsp -; X64-NEXT: ## kill: def %edi killed %edi def %rdi +; X64-NEXT: ## kill: def $edi killed $edi def $rdi ; X64-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) ; X64-NEXT: movaps %xmm0, (%rsp) ; X64-NEXT: andl $7, %edi Index: test/CodeGen/X86/vec_insert-5.ll =================================================================== --- test/CodeGen/X86/vec_insert-5.ll +++ test/CodeGen/X86/vec_insert-5.ll @@ -17,7 +17,7 @@ ; ; X64-LABEL: t1: ; X64: # %bb.0: -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: shll $12, %edi ; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] Index: test/CodeGen/X86/vec_insert-8.ll =================================================================== --- test/CodeGen/X86/vec_insert-8.ll +++ test/CodeGen/X86/vec_insert-8.ll @@ -23,7 +23,7 @@ ; ; X64-LABEL: var_insert: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %esi killed %esi def %rsi +; X64-NEXT: # kill: def $esi killed $esi def $rsi ; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: andl $3, %esi ; X64-NEXT: movl %edi, -24(%rsp,%rsi,4) @@ -51,7 +51,7 @@ ; ; X64-LABEL: var_extract: ; X64: # %bb.0: # %entry -; X64-NEXT: # kill: def %edi killed %edi def %rdi +; X64-NEXT: # kill: def $edi killed $edi def $rdi ; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: andl $3, %edi ; X64-NEXT: movl -24(%rsp,%rdi,4), %eax Index: test/CodeGen/X86/vec_insert-mmx.ll =================================================================== --- test/CodeGen/X86/vec_insert-mmx.ll +++ test/CodeGen/X86/vec_insert-mmx.ll @@ -16,7 +16,7 @@ ; ; X64-LABEL: t0: ; X64: ## %bb.0: -; X64-NEXT: ## kill: def %edi killed %edi def %rdi +; X64-NEXT: ## 
kill: def $edi killed $edi def $rdi ; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] Index: test/CodeGen/X86/vec_int_to_fp.ll =================================================================== --- test/CodeGen/X86/vec_int_to_fp.ll +++ test/CodeGen/X86/vec_int_to_fp.ll @@ -58,9 +58,9 @@ ; ; AVX512DQ-LABEL: sitofp_2i64_to_2f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -96,7 +96,7 @@ ; AVX-LABEL: sitofp_4i32_to_2f64: ; AVX: # %bb.0: ; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %cvt = sitofp <4 x i32> %a to <4 x double> @@ -134,7 +134,7 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -142,7 +142,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -150,7 +150,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %cvt = sitofp <8 x i16> %a to <8 x double> @@ -190,7 +190,7 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -198,7 +198,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0 ; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -206,7 +206,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0 ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %cvt = sitofp <16 x i8> %a to <16 x double> @@ -301,9 +301,9 @@ ; ; AVX512DQ-LABEL: sitofp_4i64_to_4f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: sitofp_4i64_to_4f64: @@ -377,7 +377,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq %cvt = sitofp <8 x i16> %a to <8 x 
double> %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> @@ -432,7 +432,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0 ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq %cvt = sitofp <16 x i8> %a to <16 x double> %shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> @@ -492,9 +492,9 @@ ; ; AVX512DQ-LABEL: uitofp_2i64_to_2f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -531,9 +531,9 @@ ; ; AVX512F-LABEL: uitofp_2i32_to_2f64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -544,9 +544,9 @@ ; ; AVX512DQ-LABEL: uitofp_2i32_to_2f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -580,7 +580,7 @@ ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 ; AVX1-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0 ; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -594,37 +594,37 @@ ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7] ; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 ; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: uitofp_4i32_to_2f64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: uitofp_4i32_to_2f64: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0 -; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512DQ-LABEL: uitofp_4i32_to_2f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64: ; AVX512VLDQ: # %bb.0: ; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0 -; AVX512VLDQ-NEXT: # kill: def 
%xmm0 killed %xmm0 killed %ymm0 +; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512VLDQ-NEXT: vzeroupper ; AVX512VLDQ-NEXT: retq %cvt = uitofp <4 x i32> %a to <4 x double> @@ -662,7 +662,7 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -670,7 +670,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -678,7 +678,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %cvt = uitofp <8 x i16> %a to <8 x double> @@ -718,7 +718,7 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -726,7 +726,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -734,7 +734,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %cvt = uitofp <16 x i8> %a to <16 x double> @@ -823,9 +823,9 @@ ; ; AVX512DQ-LABEL: uitofp_4i64_to_4f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: uitofp_4i64_to_4f64: @@ -883,9 +883,9 @@ ; ; AVX512F-LABEL: uitofp_4i32_to_4f64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0 -; AVX512F-NEXT: # kill: def %ymm0 killed 
%ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: uitofp_4i32_to_4f64: @@ -895,9 +895,9 @@ ; ; AVX512DQ-LABEL: uitofp_4i32_to_4f64: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 -; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: uitofp_4i32_to_4f64: @@ -956,7 +956,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq %cvt = uitofp <8 x i16> %a to <8 x double> %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> @@ -1013,7 +1013,7 @@ ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq %cvt = uitofp <16 x i8> %a to <16 x double> %shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> @@ -1072,9 +1072,9 @@ ; ; AVX512DQ-LABEL: sitofp_2i64_to_4f32: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; @@ -1131,7 +1131,7 @@ ; ; AVX512DQ-LABEL: sitofp_2i64_to_4f32_zero: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0 ; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero ; AVX512DQ-NEXT: vzeroupper @@ -1197,15 +1197,15 @@ ; ; AVX512DQ-LABEL: sitofp_4i64_to_4f32_undef: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef: ; AVX512VLDQ: # %bb.0: -; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0 ; AVX512VLDQ-NEXT: vzeroupper ; AVX512VLDQ-NEXT: retq @@ -1261,7 +1261,7 @@ ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: 
retq
;
@@ -1269,7 +1269,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1277,7 +1277,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x float>
@@ -1320,7 +1320,7 @@
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1328,7 +1328,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1336,7 +1336,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
@@ -1436,9 +1436,9 @@
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1576,7 +1576,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32>
@@ -1691,9 +1691,9 @@
;
; AVX512DQ-LABEL: uitofp_2i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1800,7 +1800,7 @@
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1927,15 +1927,15 @@
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1979,9 +1979,9 @@
;
; AVX512F-LABEL: uitofp_4i32_to_4f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1992,9 +1992,9 @@
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2039,7 +2039,7 @@
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2047,7 +2047,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2055,7 +2055,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x float>
@@ -2098,7 +2098,7 @@
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2106,7 +2106,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2114,7 +2114,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
@@ -2361,9 +2361,9 @@
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2425,9 +2425,9 @@
;
; AVX512F-LABEL: uitofp_8i32_to_8f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_8i32_to_8f32:
@@ -2437,9 +2437,9 @@
;
; AVX512DQ-LABEL: uitofp_8i32_to_8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_8i32_to_8f32:
@@ -2556,7 +2556,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32>
@@ -2614,7 +2614,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2778,7 +2778,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f64:
@@ -2910,7 +2910,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2952,7 +2952,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2965,7 +2965,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3108,7 +3108,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f64:
@@ -3172,7 +3172,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f64:
@@ -3184,7 +3184,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f64:
@@ -3342,7 +3342,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3933,7 +3933,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3986,7 +3986,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -3999,7 +3999,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -4575,7 +4575,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i32_to_8f32:
@@ -4587,7 +4587,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i32_to_8f32:
Index: test/CodeGen/X86/vec_minmax_sint.ll
===================================================================
--- test/CodeGen/X86/vec_minmax_sint.ll
+++ test/CodeGen/X86/vec_minmax_sint.ll
@@ -72,10 +72,10 @@
;
; AVX512-LABEL: max_gt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <2 x i64> %a, %b
@@ -183,10 +183,10 @@
;
; AVX512-LABEL: max_gt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -479,10 +479,10 @@
;
; AVX512-LABEL: max_ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <2 x i64> %a, %b
@@ -608,10 +608,10 @@
;
; AVX512-LABEL: max_ge_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp sge <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -905,10 +905,10 @@
;
; AVX512-LABEL: min_lt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp slt <2 x i64> %a, %b
@@ -1017,10 +1017,10 @@
;
; AVX512-LABEL: min_lt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp slt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -1306,10 +1306,10 @@
;
; AVX512-LABEL: min_le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <2 x i64> %a, %b
@@ -1434,10 +1434,10 @@
;
; AVX512-LABEL: min_le_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp sle <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
Index: test/CodeGen/X86/vec_minmax_uint.ll
===================================================================
--- test/CodeGen/X86/vec_minmax_uint.ll
+++ test/CodeGen/X86/vec_minmax_uint.ll
@@ -82,10 +82,10 @@
;
; AVX512-LABEL: max_gt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ugt <2 x i64> %a, %b
@@ -208,10 +208,10 @@
;
; AVX512-LABEL: max_gt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp ugt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -526,10 +526,10 @@
;
; AVX512-LABEL: max_ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp uge <2 x i64> %a, %b
@@ -669,10 +669,10 @@
;
; AVX512-LABEL: max_ge_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp uge <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -980,10 +980,10 @@
;
; AVX512-LABEL: min_lt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ult <2 x i64> %a, %b
@@ -1106,10 +1106,10 @@
;
; AVX512-LABEL: min_lt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp ult <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -1423,10 +1423,10 @@
;
; AVX512-LABEL: min_le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ule <2 x i64> %a, %b
@@ -1566,10 +1566,10 @@
;
; AVX512-LABEL: min_le_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp ule <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
Index: test/CodeGen/X86/vec_ss_load_fold.ll
===================================================================
--- test/CodeGen/X86/vec_ss_load_fold.ll
+++ test/CodeGen/X86/vec_ss_load_fold.ll
@@ -17,7 +17,7 @@
; X32-NEXT: minss LCPI0_2, %xmm0
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
@@ -29,7 +29,7 @@
; X64-NEXT: minss {{.*}}(%rip), %xmm0
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test1:
@@ -42,7 +42,7 @@
; X32_AVX1-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32_AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test1:
@@ -54,7 +54,7 @@
; X64_AVX1-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64_AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test1:
@@ -67,7 +67,7 @@
; X32_AVX512-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32_AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test1:
@@ -79,7 +79,7 @@
; X64_AVX512-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64_AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64_AVX512-NEXT: retq
%tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
%tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
@@ -104,7 +104,7 @@
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test2:
@@ -115,7 +115,7 @@
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32_AVX-LABEL: test2:
@@ -127,7 +127,7 @@
; X32_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32_AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test2:
@@ -138,7 +138,7 @@
; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64_AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X64_AVX-NEXT: retq
%tmp28 = fsub float %f, 1.000000e+00 ; [#uses=1]
%tmp37 = fmul float %tmp28, 5.000000e-01 ; [#uses=1]
Index: test/CodeGen/X86/vector-bitreverse.ll
===================================================================
--- test/CodeGen/X86/vector-bitreverse.ll
+++ test/CodeGen/X86/vector-bitreverse.ll
@@ -53,7 +53,7 @@
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
-; XOP-NEXT: # kill: def %al killed %al killed %eax
+; XOP-NEXT: # kill: def $al killed $al killed $eax
; XOP-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %b
@@ -62,7 +62,7 @@
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -80,12 +80,12 @@
; SSE-NEXT: andl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: shrl %eax
; SSE-NEXT: leal (%rax,%rcx,2), %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -103,7 +103,7 @@
; AVX-NEXT: andl $43690, %eax # imm = 0xAAAA
; AVX-NEXT: shrl %eax
; AVX-NEXT: leal (%rax,%rcx,2), %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
@@ -111,7 +111,7 @@
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
-; XOP-NEXT: # kill: def %ax killed %ax killed %eax
+; XOP-NEXT: # kill: def $ax killed $ax killed $eax
; XOP-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
@@ -120,7 +120,7 @@
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -142,7 +142,7 @@
;
; AVX-LABEL: test_bitreverse_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
Index: test/CodeGen/X86/vector-compare-all_of.ll
===================================================================
--- test/CodeGen/X86/vector-compare-all_of.ll
+++ test/CodeGen/X86/vector-compare-all_of.ll
@@ -608,7 +608,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
@@ -619,7 +619,7 @@
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
@@ -632,7 +632,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
@@ -657,7 +657,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
@@ -675,7 +675,7 @@
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -686,7 +686,7 @@
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -702,7 +702,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -730,7 +730,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
@@ -745,7 +745,7 @@
; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX1-NEXT: movl $-1, %eax
; AVX1-NEXT: cmovnel %ecx, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -759,7 +759,7 @@
; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX2-NEXT: movl $-1, %eax
; AVX2-NEXT: cmovnel %ecx, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -777,7 +777,7 @@
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -804,7 +804,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
@@ -815,7 +815,7 @@
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
@@ -830,7 +830,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
@@ -857,7 +857,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
@@ -877,7 +877,7 @@
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -888,7 +888,7 @@
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -906,7 +906,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
Index: test/CodeGen/X86/vector-compare-any_of.ll
===================================================================
--- test/CodeGen/X86/vector-compare-any_of.ll
+++ test/CodeGen/X86/vector-compare-any_of.ll
@@ -562,7 +562,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
@@ -571,7 +571,7 @@
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
@@ -584,7 +584,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
@@ -607,7 +607,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
@@ -625,7 +625,7 @@
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -635,7 +635,7 @@
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -651,7 +651,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -677,7 +677,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
@@ -690,7 +690,7 @@
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: negl %eax
; AVX1-NEXT: sbbl %eax, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -702,7 +702,7 @@
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -720,7 +720,7 @@
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -745,7 +745,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
@@ -754,7 +754,7 @@
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
@@ -769,7 +769,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
@@ -794,7 +794,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
@@ -814,7 +814,7 @@
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -824,7 +824,7 @@
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -842,7 +842,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
Index: test/CodeGen/X86/vector-compare-results.ll
===================================================================
--- test/CodeGen/X86/vector-compare-results.ll
+++ test/CodeGen/X86/vector-compare-results.ll
@@ -145,7 +145,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <4 x double> %a0, %a1
@@ -181,7 +181,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <8 x float> %a0, %a1
@@ -244,7 +244,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a0, %a1
@@ -281,7 +281,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a0, %a1
@@ -334,7 +334,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
@@ -661,7 +661,7 @@
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -670,7 +670,7 @@
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -678,7 +678,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
@@ -741,7 +741,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
@@ -840,7 +840,7 @@
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -849,7 +849,7 @@
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -857,7 +857,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
@@ -923,7 +923,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
@@ -1238,7 +1238,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
@@ -2261,9 +2261,9 @@
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm2
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vmovdqa %xmm4, %xmm1
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 killed %ymm2
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2273,9 +2273,9 @@
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX512DQ-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm2
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vmovdqa %xmm4, %xmm1
-; AVX512DQ-NEXT: # kill: def %xmm2 killed %xmm2 killed %ymm2
+; AVX512DQ-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2384,7 +2384,7 @@
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
@@ -2739,7 +2739,7 @@
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x float> %a0, %a1
ret <32 x i1> %1
@@ -2935,7 +2935,7 @@
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
@@ -3282,7 +3282,7 @@
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i32> %a0, %a1
ret <32 x i1> %1
@@ -6934,7 +6934,7 @@
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x double> %a0, %a1
ret <32 x i1> %1
@@ -7577,7 +7577,7 @@
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i64> %a0, %a1
ret <32 x i1> %1
Index: test/CodeGen/X86/vector-extend-inreg.ll
===================================================================
--- test/CodeGen/X86/vector-extend-inreg.ll
+++ test/CodeGen/X86/vector-extend-inreg.ll
@@ -47,7 +47,7 @@
; X64-SSE-NEXT: movq %rsp, %rbp
; X64-SSE-NEXT: andq $-128, %rsp
; X64-SSE-NEXT: subq $256, %rsp # imm = 0x100
-; X64-SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SSE-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X64-SSE-NEXT: xorps %xmm0, %xmm0
; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
@@ -99,7 +99,7 @@
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-128, %rsp
; X64-AVX-NEXT: subq $256, %rsp # imm = 0x100
-; X64-AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-AVX-NEXT: # kill: def $edi killed $edi def $rdi
; X64-AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[3,1,2,3]
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
Index: test/CodeGen/X86/vector-half-conversions.ll
===================================================================
--- test/CodeGen/X86/vector-half-conversions.ll
+++ test/CodeGen/X86/vector-half-conversions.ll
@@ -29,7 +29,7 @@
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -56,7 +56,7 @@
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -83,7 +83,7 @@
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -110,7 +110,7 @@
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -141,7 +141,7 @@
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -167,7 +167,7 @@
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -193,7 +193,7 @@
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -221,7 +221,7 @@
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -253,7 +253,7 @@
; ALL-NEXT: movq %rdx, %r8
; ALL-NEXT: movq %rdx, %r10
; ALL-NEXT: movswl %dx, %r9d
-; ALL-NEXT: # kill: def %edx killed %edx killed %rdx
+; ALL-NEXT: # kill: def $edx killed $edx killed $rdx
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: shrq $32, %r8
; ALL-NEXT: shrq $48, %r10
@@ -261,7 +261,7 @@
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: movq %rdi, %rsi
; ALL-NEXT: movswl %di, %ecx
-; ALL-NEXT: # kill: def %edi killed %edi killed %rdi
+; ALL-NEXT: # kill: def $edi killed $edi killed $rdi
; ALL-NEXT: shrl $16, %edi
; ALL-NEXT: shrq $32, %rax
; ALL-NEXT: shrq $48, %rsi
@@ -314,7 +314,7 @@
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm9
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm10
@@ -329,7 +329,7 @@
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm13
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm14
@@ -344,7 +344,7 @@
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm3
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm4
@@ -409,7 +409,7 @@
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm9
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm10
@@ -424,7 +424,7 @@
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm13
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm14
@@ -439,7 +439,7 @@
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm3
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm4
@@ -504,7 +504,7 @@
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm9
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm11
@@ -519,7 +519,7 @@
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm14
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm15
@@ -534,7 +534,7 @@
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm1
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm4
@@ -600,7 +600,7 @@
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm9
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm11
@@ -615,7 +615,7 @@
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm14
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm15
@@ -630,7 +630,7 @@
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm18
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm19
@@ -736,7 +736,7 @@
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -762,7 +762,7 @@
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -788,7 +788,7 @@
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -816,7 +816,7 @@
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -2078,7 +2078,7 @@
; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: # kill: def %ax killed %ax killed %eax
+; ALL-NEXT: # kill: def $ax killed $ax killed $eax
; ALL-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
@@ -2995,7 +2995,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3032,7 +3032,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3069,7 +3069,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
@@ -3111,7 +3111,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3149,7 +3149,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3187,7 +3187,7 @@
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
@@ -3225,7 +3225,7 @@
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
@@ -3269,7 +3269,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3307,7 +3307,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3345,7 +3345,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
@@ -3391,7 +3391,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
@@ -3416,7 +3416,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
@@ -3458,7 +3458,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
@@ -3483,7 +3483,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
@@ -3524,7 +3524,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
@@ -3552,7 +3552,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
@@ -3650,7 +3650,7 @@
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
@@ -3688,7 +3688,7 @@
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
@@ -3726,7 +3726,7 @@
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
@@ -3763,7 +3763,7 @@
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
@@ -3805,7 +3805,7 @@
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
@@ -3847,7 +3847,7 @@
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
@@ -3889,7 +3889,7 @@
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
@@ -3938,7 +3938,7 @@
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
@@ -3980,7 +3980,7 @@
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
@@ -4022,7 +4022,7 @@
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %ebx
@@ -4092,7 +4092,7 @@
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r13d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
@@ -4100,7 +4100,7 @@
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
@@ -4160,7 +4160,7 @@
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r13d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
@@ -4168,7 +4168,7 @@
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
@@ -4230,7 +4230,7 @@
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r13d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
@@ -4238,7 +4238,7 @@
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
Index: test/CodeGen/X86/vector-lzcnt-128.ll
===================================================================
--- test/CodeGen/X86/vector-lzcnt-128.ll
+++ test/CodeGen/X86/vector-lzcnt-128.ll
@@ -233,9 +233,9 @@
;
; AVX512CD-LABEL: testv2i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -499,9 +499,9 @@
;
; AVX512CD-LABEL: testv2i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -747,9 +747,9 @@
;
; AVX512CD-LABEL: testv4i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -989,9 +989,9 @@
;
; AVX512CD-LABEL: testv4i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
Index: test/CodeGen/X86/vector-lzcnt-256.ll
===================================================================
--- test/CodeGen/X86/vector-lzcnt-256.ll
+++ test/CodeGen/X86/vector-lzcnt-256.ll
@@ -162,9 +162,9 @@
;
; AVX512CD-LABEL: testv4i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
@@ -354,9 +354,9 @@
;
; AVX512CD-LABEL: testv4i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
@@ -521,9 +521,9 @@
;
; AVX512CD-LABEL: testv8i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
@@ -683,9 +683,9 @@
;
; AVX512CD-LABEL: testv8i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
Index: test/CodeGen/X86/vector-popcnt-128.ll
===================================================================
--- test/CodeGen/X86/vector-popcnt-128.ll
+++ test/CodeGen/X86/vector-popcnt-128.ll
@@ -115,9 +115,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv2i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -290,9 +290,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv4i32:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -456,7 +456,7 @@
; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -470,9 +470,9 @@
;
; BITALG_NOVLX-LABEL: testv8i16:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -595,9 +595,9 @@
;
; BITALG_NOVLX-LABEL: testv16i8:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
Index: test/CodeGen/X86/vector-popcnt-256.ll
===================================================================
--- test/CodeGen/X86/vector-popcnt-256.ll
+++ test/CodeGen/X86/vector-popcnt-256.ll
@@ -46,9 +46,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv4i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq
%zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: retq ; ; AVX512VPOPCNTDQVL-LABEL: testv4i64: @@ -139,9 +139,9 @@ ; ; AVX512VPOPCNTDQ-LABEL: testv8i32: ; AVX512VPOPCNTDQ: # %bb.0: -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: retq ; ; AVX512VPOPCNTDQVL-LABEL: testv8i32: @@ -246,9 +246,9 @@ ; ; BITALG_NOVLX-LABEL: testv16i16: ; BITALG_NOVLX: # %bb.0: -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; BITALG_NOVLX-NEXT: retq ; ; BITALG-LABEL: testv16i16: @@ -318,9 +318,9 @@ ; ; BITALG_NOVLX-LABEL: testv32i8: ; BITALG_NOVLX: # %bb.0: -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; BITALG_NOVLX-NEXT: retq ; ; BITALG-LABEL: testv32i8: Index: test/CodeGen/X86/vector-rotate-128.ll =================================================================== --- test/CodeGen/X86/vector-rotate-128.ll +++ test/CodeGen/X86/vector-rotate-128.ll @@ -78,10 +78,10 @@ ; ; AVX512BW-LABEL: var_rotate_v2i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -219,10 +219,10 @@ ; ; AVX512BW-LABEL: var_rotate_v4i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -462,8 +462,8 @@ ; ; AVX512BW-LABEL: var_rotate_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16] ; AVX512BW-NEXT: vpsubw %xmm1, %xmm2, %xmm2 ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1 @@ -853,10 +853,10 @@ ; ; AVX512BW-LABEL: constant_rotate_v2i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = 
[4,14] ; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -955,10 +955,10 @@ ; ; AVX512BW-LABEL: constant_rotate_v4i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7] ; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1082,7 +1082,7 @@ ; ; AVX512BW-LABEL: constant_rotate_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7] ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9] @@ -1378,9 +1378,9 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v2i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1425,9 +1425,9 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v4i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1563,7 +1563,7 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX512BW-NEXT: vzeroupper @@ -1615,7 +1615,7 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX512BW-NEXT: vzeroupper Index: test/CodeGen/X86/vector-rotate-256.ll =================================================================== --- test/CodeGen/X86/vector-rotate-256.ll +++ test/CodeGen/X86/vector-rotate-256.ll @@ -50,10 +50,10 @@ ; ; AVX512BW-LABEL: var_rotate_v4i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VL-LABEL: var_rotate_v4i64: @@ -141,10 +141,10 @@ ; ; AVX512BW-LABEL: var_rotate_v8i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; 
AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VL-LABEL: var_rotate_v8i32: @@ -271,8 +271,8 @@ ; ; AVX512BW-LABEL: var_rotate_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2 ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1 @@ -479,10 +479,10 @@ ; ; AVX512BW-LABEL: constant_rotate_v4i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,50,60] ; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VL-LABEL: constant_rotate_v4i64: @@ -545,10 +545,10 @@ ; ; AVX512BW-LABEL: constant_rotate_v8i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11] ; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VL-LABEL: constant_rotate_v8i32: @@ -623,7 +623,7 @@ ; ; AVX512BW-LABEL: constant_rotate_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1] @@ -800,9 +800,9 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v4i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_rotate_v4i64: @@ -853,9 +853,9 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_v8i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_rotate_v8i32: @@ -1012,7 +1012,7 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v4i64: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: retq @@ -1074,7 +1074,7 @@ ; ; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32: ; AVX512BW: # %bb.0: -; 
AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: retq Index: test/CodeGen/X86/vector-sext.ll =================================================================== --- test/CodeGen/X86/vector-sext.ll +++ test/CodeGen/X86/vector-sext.ll @@ -1241,7 +1241,7 @@ ; AVX512F-NEXT: movzbl (%rdi), %eax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1250,7 +1250,7 @@ ; AVX512BW-NEXT: movzbl (%rdi), %eax ; AVX512BW-NEXT: kmovd %eax, %k1 ; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1431,7 +1431,7 @@ ; AVX512F-NEXT: movzbl (%rdi), %eax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1440,7 +1440,7 @@ ; AVX512BW-NEXT: movzbl (%rdi), %eax ; AVX512BW-NEXT: kmovd %eax, %k1 ; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1636,7 +1636,7 @@ ; AVX512F-NEXT: movzbl (%rdi), %eax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: load_sext_4i1_to_4i64: @@ -1644,7 +1644,7 @@ ; AVX512BW-NEXT: movzbl (%rdi), %eax ; AVX512BW-NEXT: kmovd %eax, %k1 ; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; X32-SSE41-LABEL: load_sext_4i1_to_4i64: @@ -1994,7 +1994,7 @@ ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -2003,7 +2003,7 @@ ; AVX512BW-NEXT: movzbl (%rdi), %eax ; AVX512BW-NEXT: kmovd %eax, %k0 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -2394,7 +2394,7 @@ ; AVX512F-NEXT: movzbl (%rdi), %eax ; AVX512F-NEXT: kmovw %eax, %k1 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: load_sext_8i1_to_8i32: @@ -2402,7 +2402,7 @@ ; AVX512BW-NEXT: movzbl (%rdi), %eax ; AVX512BW-NEXT: kmovd %eax, %k1 ; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} -; AVX512BW-NEXT: # 
kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; X32-SSE41-LABEL: load_sext_8i1_to_8i32: @@ -2912,7 +2912,7 @@ ; AVX512BW: # %bb.0: # %entry ; AVX512BW-NEXT: kmovw (%rdi), %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -3430,7 +3430,7 @@ ; AVX512BW: # %bb.0: # %entry ; AVX512BW-NEXT: kmovw (%rdi), %k0 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; X32-SSE41-LABEL: load_sext_16i1_to_16i16: @@ -4285,7 +4285,7 @@ ; AVX512BW: # %bb.0: # %entry ; AVX512BW-NEXT: kmovd (%rdi), %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; X32-SSE41-LABEL: load_sext_32i1_to_32i8: @@ -5018,7 +5018,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; X32-SSE41-LABEL: sext_32xi1_to_32xi8: Index: test/CodeGen/X86/vector-shift-ashr-128.ll =================================================================== --- test/CodeGen/X86/vector-shift-ashr-128.ll +++ test/CodeGen/X86/vector-shift-ashr-128.ll @@ -82,10 +82,10 @@ ; ; AVX512-LABEL: var_shift_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -336,16 +336,16 @@ ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: var_shift_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -546,7 +546,7 @@ ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -670,9 +670,9 @@ ; ; AVX512-LABEL: splatvar_shift_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def 
%xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -984,7 +984,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1127,10 +1127,10 @@ ; ; AVX512-LABEL: constant_shift_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7] ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; @@ -1305,16 +1305,16 @@ ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: constant_shift_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7] ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1495,7 +1495,7 @@ ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1622,9 +1622,9 @@ ; ; AVX512-LABEL: splatconstant_shift_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq ; Index: test/CodeGen/X86/vector-shift-ashr-256.ll =================================================================== --- test/CodeGen/X86/vector-shift-ashr-256.ll +++ test/CodeGen/X86/vector-shift-ashr-256.ll @@ -75,10 +75,10 @@ ; ; AVX512-LABEL: var_shift_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: var_shift_v4i64: @@ -309,10 +309,10 @@ ; ; AVX512BW-LABEL: var_shift_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 
killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: var_shift_v16i16: @@ -696,9 +696,9 @@ ; ; AVX512-LABEL: splatvar_shift_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatvar_shift_v4i64: @@ -1170,10 +1170,10 @@ ; ; AVX512-LABEL: constant_shift_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62] ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: constant_shift_v4i64: @@ -1360,10 +1360,10 @@ ; ; AVX512BW-LABEL: constant_shift_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: constant_shift_v16i16: @@ -1702,9 +1702,9 @@ ; ; AVX512-LABEL: splatconstant_shift_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_shift_v4i64: Index: test/CodeGen/X86/vector-shift-lshr-128.ll =================================================================== --- test/CodeGen/X86/vector-shift-lshr-128.ll +++ test/CodeGen/X86/vector-shift-lshr-128.ll @@ -290,7 +290,7 @@ ; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -307,16 +307,16 @@ ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: var_shift_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; 
AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -466,7 +466,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -789,7 +789,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1050,7 +1050,7 @@ ; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1066,16 +1066,16 @@ ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: constant_shift_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7] ; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1202,7 +1202,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; Index: test/CodeGen/X86/vector-shift-lshr-256.ll =================================================================== --- test/CodeGen/X86/vector-shift-lshr-256.ll +++ test/CodeGen/X86/vector-shift-lshr-256.ll @@ -272,10 +272,10 @@ ; ; AVX512BW-LABEL: var_shift_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; 
AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: var_shift_v16i16: @@ -1091,10 +1091,10 @@ ; ; AVX512BW-LABEL: constant_shift_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] ; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: constant_shift_v16i16: Index: test/CodeGen/X86/vector-shift-shl-128.ll =================================================================== --- test/CodeGen/X86/vector-shift-shl-128.ll +++ test/CodeGen/X86/vector-shift-shl-128.ll @@ -247,7 +247,7 @@ ; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -262,16 +262,16 @@ ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512DQ-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512DQ-NEXT: vzeroupper ; AVX512DQ-NEXT: retq ; ; AVX512BW-LABEL: var_shift_v8i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -416,7 +416,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -731,7 +731,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -942,10 +942,10 @@ ; ; AVX512BW-LABEL: constant_shift_v8i16: 
; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7] ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1052,7 +1052,7 @@ ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; Index: test/CodeGen/X86/vector-shift-shl-256.ll =================================================================== --- test/CodeGen/X86/vector-shift-shl-256.ll +++ test/CodeGen/X86/vector-shift-shl-256.ll @@ -232,10 +232,10 @@ ; ; AVX512BW-LABEL: var_shift_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: var_shift_v16i16: @@ -966,10 +966,10 @@ ; ; AVX512BW-LABEL: constant_shift_v16i16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] ; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512BW-NEXT: retq ; ; AVX512DQVL-LABEL: constant_shift_v16i16: Index: test/CodeGen/X86/vector-shuffle-256-v4.ll =================================================================== --- test/CodeGen/X86/vector-shuffle-256-v4.ll +++ test/CodeGen/X86/vector-shuffle-256-v4.ll @@ -1338,21 +1338,21 @@ define <4 x double> @insert_reg_and_zero_v4f64(double %a) { ; AVX1-LABEL: insert_reg_and_zero_v4f64: ; AVX1: # %bb.0: -; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] ; AVX1-NEXT: retq ; ; AVX2-LABEL: insert_reg_and_zero_v4f64: ; AVX2: # %bb.0: -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] ; AVX2-NEXT: retq ; ; AVX512VL-LABEL: insert_reg_and_zero_v4f64: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1] ; AVX512VL-NEXT: retq Index: test/CodeGen/X86/vector-shuffle-512-v16.ll =================================================================== --- 
test/CodeGen/X86/vector-shuffle-512-v16.ll +++ test/CodeGen/X86/vector-shuffle-512-v16.ll @@ -689,7 +689,7 @@ define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x i32> %a) { ; ALL-LABEL: mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03: ; ALL: # %bb.0: -; ALL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 ; ALL-NEXT: retq @@ -700,7 +700,7 @@ define <16 x float> @mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x float> %a) { ; ALL-LABEL: mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03: ; ALL: # %bb.0: -; ALL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 ; ALL-NEXT: retq Index: test/CodeGen/X86/vector-shuffle-512-v8.ll =================================================================== --- test/CodeGen/X86/vector-shuffle-512-v8.ll +++ test/CodeGen/X86/vector-shuffle-512-v8.ll @@ -2644,14 +2644,14 @@ define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) { ; AVX512F-LABEL: shuffle_v2i64_v8i64_01010101: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512F-32-LABEL: shuffle_v2i64_v8i64_01010101: ; AVX512F-32: # %bb.0: -; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 ; AVX512F-32-NEXT: retl @@ -2662,14 +2662,14 @@ define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) { ; AVX512F-LABEL: shuffle_v2f64_v8f64_01010101: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512F-32-LABEL: shuffle_v2f64_v8f64_01010101: ; AVX512F-32: # %bb.0: -; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 ; AVX512F-32-NEXT: retl @@ -2746,7 +2746,7 @@ ; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] ; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -2755,7 +2755,7 @@ ; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512F-32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] ; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> Index: test/CodeGen/X86/vector-shuffle-avx512.ll 
=================================================================== --- test/CodeGen/X86/vector-shuffle-avx512.ll +++ test/CodeGen/X86/vector-shuffle-avx512.ll @@ -8,7 +8,7 @@ define <8 x float> @expand(<4 x float> %a) { ; SKX64-LABEL: expand: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX64-NEXT: movb $5, %al ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z} @@ -23,7 +23,7 @@ ; ; SKX32-LABEL: expand: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX32-NEXT: movb $5, %al ; SKX32-NEXT: kmovd %eax, %k1 ; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z} @@ -42,7 +42,7 @@ define <8 x float> @expand1(<4 x float> %a ) { ; SKX64-LABEL: expand1: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX64-NEXT: movb $-86, %al ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z} @@ -50,7 +50,7 @@ ; ; KNL64-LABEL: expand1: ; KNL64: # %bb.0: -; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = ; KNL64-NEXT: vpermps %ymm0, %ymm1, %ymm0 ; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1 @@ -59,7 +59,7 @@ ; ; SKX32-LABEL: expand1: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX32-NEXT: movb $-86, %al ; SKX32-NEXT: kmovd %eax, %k1 ; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z} @@ -67,7 +67,7 @@ ; ; KNL32-LABEL: expand1: ; KNL32: # %bb.0: -; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = ; KNL32-NEXT: vpermps %ymm0, %ymm1, %ymm0 ; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1 @@ -81,7 +81,7 @@ define <4 x double> @expand2(<2 x double> %a) { ; SKX64-LABEL: expand2: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX64-NEXT: movb $9, %al ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z} @@ -89,7 +89,7 @@ ; ; KNL64-LABEL: expand2: ; KNL64: # %bb.0: -; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] ; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7] @@ -97,7 +97,7 @@ ; ; SKX32-LABEL: expand2: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX32-NEXT: movb $9, %al ; SKX32-NEXT: kmovd %eax, %k1 ; SKX32-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z} @@ -105,7 +105,7 @@ ; ; KNL32-LABEL: expand2: ; KNL32: # %bb.0: -; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] ; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7] @@ -118,7 +118,7 @@ define <8 x i32> @expand3(<4 x i32> %a ) { ; SKX64-LABEL: expand3: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX64-NEXT: movb $-127, %al ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vpexpandd %ymm0, %ymm0 {%k1} 
{z} @@ -133,7 +133,7 @@ ; ; SKX32-LABEL: expand3: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX32-NEXT: movb $-127, %al ; SKX32-NEXT: kmovd %eax, %k1 ; SKX32-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z} @@ -153,7 +153,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) { ; SKX64-LABEL: expand4: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX64-NEXT: movb $9, %al ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z} @@ -161,7 +161,7 @@ ; ; KNL64-LABEL: expand4: ; KNL64: # %bb.0: -; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] ; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7] @@ -169,7 +169,7 @@ ; ; SKX32-LABEL: expand4: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; SKX32-NEXT: movb $9, %al ; SKX32-NEXT: kmovd %eax, %k1 ; SKX32-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z} @@ -177,7 +177,7 @@ ; ; KNL32-LABEL: expand4: ; KNL32: # %bb.0: -; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0 +; KNL32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] ; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7] @@ -251,7 +251,7 @@ define <16 x float> @expand7(<8 x float> %a) { ; SKX64-LABEL: expand7: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; SKX64-NEXT: movw $1285, %ax # imm = 0x505 ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z} @@ -259,7 +259,7 @@ ; ; KNL64-LABEL: expand7: ; KNL64: # %bb.0: -; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL64-NEXT: movw $1285, %ax # imm = 0x505 ; KNL64-NEXT: kmovw %eax, %k1 ; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z} @@ -267,7 +267,7 @@ ; ; SKX32-LABEL: expand7: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; SKX32-NEXT: movw $1285, %ax # imm = 0x505 ; SKX32-NEXT: kmovd %eax, %k1 ; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z} @@ -275,7 +275,7 @@ ; ; KNL32-LABEL: expand7: ; KNL32: # %bb.0: -; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL32-NEXT: movw $1285, %ax # imm = 0x505 ; KNL32-NEXT: kmovw %eax, %k1 ; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z} @@ -287,7 +287,7 @@ define <16 x float> @expand8(<8 x float> %a ) { ; SKX64-LABEL: expand8: ; SKX64: # %bb.0: -; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA ; SKX64-NEXT: kmovd %eax, %k1 ; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z} @@ -295,7 +295,7 @@ ; ; KNL64-LABEL: expand8: ; KNL64: # %bb.0: -; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA ; KNL64-NEXT: kmovw %eax, %k1 ; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z} @@ -303,7 +303,7 @@ ; ; SKX32-LABEL: expand8: ; SKX32: # %bb.0: -; SKX32-NEXT: # kill: def %ymm0 killed 
%ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -311,7 +311,7 @@
;
; KNL32-LABEL: expand8:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -324,7 +324,7 @@
define <8 x double> @expand9(<4 x double> %a) {
; SKX64-LABEL: expand9:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -332,7 +332,7 @@
;
; KNL64-LABEL: expand9:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -340,7 +340,7 @@
;
; SKX32-LABEL: expand9:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -348,7 +348,7 @@
;
; KNL32-LABEL: expand9:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -360,7 +360,7 @@
define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX64-LABEL: expand10:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -368,7 +368,7 @@
;
; KNL64-LABEL: expand10:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -376,7 +376,7 @@
;
; SKX32-LABEL: expand10:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -384,7 +384,7 @@
;
; KNL32-LABEL: expand10:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -396,7 +396,7 @@
define <8 x i64> @expand11(<4 x i64> %a) {
; SKX64-LABEL: expand11:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -404,7 +404,7 @@
;
; KNL64-LABEL: expand11:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -412,7 +412,7 @@
;
; SKX32-LABEL: expand11:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -420,7 +420,7 @@
;
; KNL32-LABEL: expand11:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -433,7 +433,7 @@
define <16 x float> @expand12(<8 x float> %a) {
; SKX64-LABEL: expand12:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -442,7 +442,7 @@
;
; KNL64-LABEL: expand12:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -451,7 +451,7 @@
;
; SKX32-LABEL: expand12:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -460,7 +460,7 @@
;
; KNL32-LABEL: expand12:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -503,7 +503,7 @@
define <8 x float> @expand14(<4 x float> %a) {
; SKX64-LABEL: expand14:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $20, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -520,7 +520,7 @@
;
; SKX32-LABEL: expand14:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $20, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
Index: test/CodeGen/X86/vector-shuffle-combining-avx2.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -196,13 +196,13 @@
define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32>
@@ -231,13 +231,13 @@
define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32>
@@ -269,14 +269,14 @@
define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastd %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -303,14 +303,14 @@
define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastq256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastq %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastq256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -339,13 +339,13 @@
define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastss256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastss256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32>
@@ -356,13 +356,13 @@
define <4 x double> @combine_permps_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastsd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastsd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32>
Index: test/CodeGen/X86/vector-shuffle-v1.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-v1.ll
+++ test/CodeGen/X86/vector-shuffle-v1.ll
@@ -12,7 +12,7 @@
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -51,7 +51,7 @@
; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -93,7 +93,7 @@
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -131,7 +131,7 @@
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -487,7 +487,7 @@
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -531,7 +531,7 @@
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -547,7 +547,7 @@
; AVX512VL-NEXT: vpslld $31, %ymm2, %ymm0
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -561,7 +561,7 @@
; VL_BW_DQ-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
; VL_BW_DQ-NEXT: vpmovd2m %ymm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -579,7 +579,7 @@
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -592,7 +592,7 @@
; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -603,7 +603,7 @@
; VL_BW_DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -622,7 +622,7 @@
; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -636,7 +636,7 @@
; AVX512VL-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vptestmd %ymm2, %ymm2, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -649,7 +649,7 @@
; VL_BW_DQ-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
; VL_BW_DQ-NEXT: vpmovd2m %ymm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -668,7 +668,7 @@
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -682,7 +682,7 @@
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5,6,7]
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -695,7 +695,7 @@
; VL_BW_DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5,6,7]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -714,7 +714,7 @@
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -727,7 +727,7 @@
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6,7]
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -739,7 +739,7 @@
; VL_BW_DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6,7]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -760,7 +760,7 @@
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -775,7 +775,7 @@
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -789,7 +789,7 @@
; VL_BW_DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%c = shufflevector <8 x i1> , <8 x i1> %a, <8 x i32>
@@ -806,7 +806,7 @@
; AVX512F-NEXT: vpbroadcastd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -817,7 +817,7 @@
; AVX512VL-NEXT: vpbroadcastd %xmm0, %zmm0
; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -828,7 +828,7 @@
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %ax killed %ax killed %eax
+; VL_BW_DQ-NEXT: # kill: def $ax killed $ax killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i16 %a to <16 x i1>
Index: test/CodeGen/X86/vector-shuffle-variable-128.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -37,8 +37,8 @@
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
@@ -49,8 +49,8 @@
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $1, %esi
@@ -68,10 +68,10 @@
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
@@ -88,10 +88,10 @@
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
@@ -108,10 +108,10 @@
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
@@ -125,10 +125,10 @@
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
@@ -153,10 +153,10 @@
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
@@ -173,10 +173,10 @@
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
@@ -193,10 +193,10 @@
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
@@ -210,10 +210,10 @@
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
@@ -238,12 +238,12 @@
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
@@ -282,12 +282,12 @@
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
@@ -326,12 +326,12 @@
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
@@ -356,12 +356,12 @@
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
@@ -405,12 +405,12 @@
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
@@ -489,12 +489,12 @@
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
@@ -573,12 +573,12 @@
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $15, %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movzbl -24(%rsp,%rdi), %eax
@@ -627,12 +627,12 @@
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movzbl -24(%rsp,%rdi), %eax
@@ -1160,9 +1160,9 @@
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %edx
@@ -1177,9 +1177,9 @@
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %edx
@@ -1205,12 +1205,12 @@
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
@@ -1242,12 +1242,12 @@
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
@@ -1279,12 +1279,12 @@
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
@@ -1304,12 +1304,12 @@
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
Index: test/CodeGen/X86/vector-shuffle-variable-256.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -185,12 +185,12 @@
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
-; ALL-NEXT: # kill: def %r9d killed %r9d def %r9
-; ALL-NEXT: # kill: def %r8d killed %r8d def %r8
-; ALL-NEXT: # kill: def %ecx killed %ecx def %rcx
-; ALL-NEXT: # kill: def %edx killed %edx def %rdx
-; ALL-NEXT: # kill: def %esi killed %esi def %rsi
-; ALL-NEXT: # kill: def %edi killed %edi def %rdi
+; ALL-NEXT: # kill: def $r9d killed $r9d def $r9
+; ALL-NEXT: # kill: def $r8d killed $r8d def $r8
+; ALL-NEXT: # kill: def $ecx killed $ecx def $rcx
+; ALL-NEXT: # kill: def $edx killed $edx def $rdx
+; ALL-NEXT: # kill: def $esi killed $esi def $rsi
+; ALL-NEXT: # kill: def $edi killed $edi def $rdi
; ALL-NEXT: andl $7, %edi
; ALL-NEXT: andl $7, %esi
; ALL-NEXT: andl $7, %edx
@@ -236,12 +236,12 @@
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %r9d killed %r9d def %r9
-; ALL-NEXT: # kill: def %r8d killed %r8d def %r8
-; ALL-NEXT: # kill: def %ecx killed %ecx def %rcx
-; ALL-NEXT: # kill: def %edx killed %edx def %rdx
-; ALL-NEXT: # kill: def %esi killed %esi def %rsi
-; ALL-NEXT: # kill: def %edi killed %edi def %rdi
+; ALL-NEXT: # kill: def $r9d killed $r9d def $r9
+; ALL-NEXT: # kill: def $r8d killed $r8d def $r8
+; ALL-NEXT: # kill: def $ecx killed $ecx def $rcx
+; ALL-NEXT: # kill: def $edx killed $edx def $rdx
+; ALL-NEXT: # kill: def $esi killed $esi def $rsi
+; ALL-NEXT: # kill: def $edi killed $edi def $rdi
; ALL-NEXT: andl $3, %edi
; ALL-NEXT: andl $3, %esi
; ALL-NEXT: andl $3, %edx
@@ -289,12 +289,12 @@
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX1-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX1-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX1-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX1-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX1-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX1-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX1-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX1-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: andl $15, %edi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -351,12 +351,12 @@
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX2-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX2-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX2-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: andl $15, %edi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -444,12 +444,12 @@
define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX1-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX1-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX1-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX1-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX1-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX1-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX1-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX1-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: andl $7, %edi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -500,12 +500,12 @@
;
; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX2-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX2-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX2-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: andl $7, %edi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
Index: test/CodeGen/X86/vector-trunc-math.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-math.ll
+++ test/CodeGen/X86/vector-trunc-math.ll
@@ -34,7 +34,7 @@
; AVX2-SLOW-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -43,7 +43,7 @@
; AVX2-FAST-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -51,7 +51,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, %a1
@@ -111,7 +111,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -125,7 +125,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -170,7 +170,7 @@
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -178,7 +178,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, %a1
@@ -432,7 +432,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -484,7 +484,7 @@
;
; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -534,7 +534,7 @@
;
; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -645,7 +645,7 @@
;
; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -862,7 +862,7 @@
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -907,7 +907,7 @@
; AVX2-SLOW-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -916,7 +916,7 @@
; AVX2-FAST-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -924,7 +924,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, %a1
@@ -984,7 +984,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -998,7 +998,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1043,7 +1043,7 @@
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1051,7 +1051,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, %a1
@@ -1305,7 +1305,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1353,7 +1353,7 @@
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1362,7 +1362,7 @@
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1370,7 +1370,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0,
@@ -1435,7 +1435,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1449,7 +1449,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1493,7 +1493,7 @@
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1501,7 +1501,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0,
@@ -1754,7 +1754,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1831,8 +1831,8 @@
;
; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1841,8 +1841,8 @@
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1851,11 +1851,11 @@
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <4 x i64> %a0, %a1
@@ -2024,7 +2024,7 @@
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2032,7 +2032,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, %a1
@@ -2484,7 +2484,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2536,7 +2536,7 @@
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
@@ -2601,7 +2601,7 @@
;
; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2712,7 +2712,7 @@
;
; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3081,7 +3081,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3122,7 +3122,7 @@
; AVX2-SLOW-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -3131,7 +3131,7 @@
; AVX2-FAST-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -3139,7 +3139,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, %a1
@@ -3195,7 +3195,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -3209,7 +3209,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -3252,7 +3252,7 @@
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3260,7 +3260,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, %a1
@@ -3500,7 +3500,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3553,7 +3553,7 @@
;
; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3664,7 +3664,7 @@
;
; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3881,7 +3881,7 @@
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -3924,7 +3924,7 @@
; AVX2-SLOW-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -3933,7 +3933,7 @@
; AVX2-FAST-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -3941,7 +3941,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, %a1
@@ -3997,7 +3997,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -4011,7 +4011,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -4054,7 +4054,7 @@
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4062,7 +4062,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, %a1
@@ -4302,7 +4302,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -4355,7 +4355,7 @@
;
; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4466,7 +4466,7 @@
;
; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4683,7 +4683,7 @@
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -4726,7 +4726,7 @@
; AVX2-SLOW-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -4735,7 +4735,7 @@
; AVX2-FAST-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -4743,7 +4743,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, %a1
@@ -4799,7 +4799,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -4813,7 +4813,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -4856,7 +4856,7 @@
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4864,7 +4864,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, %a1
@@ -5104,7 +5104,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -5157,7 +5157,7 @@
;
; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -5268,7 +5268,7 @@
;
; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -5485,7 +5485,7 @@
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
Index: test/CodeGen/X86/vector-trunc-packus.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-packus.ll
+++ test/CodeGen/X86/vector-trunc-packus.ll
@@ -210,7 +210,7 @@
; AVX2-SLOW-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -224,19 +224,19 @@
; AVX2-FAST-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_packus_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -251,13 +251,13 @@
;
; AVX512BW-LABEL: trunc_packus_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1072,7 +1072,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1094,7 +1094,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1206,7 +1206,7 @@
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1217,7 +1217,7 @@
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1237,7 +1237,7 @@
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1826,7 +1826,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1848,7 +1848,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -2809,7 +2809,7 @@
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2820,7 +2820,7 @@
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2840,7 +2840,7 @@
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3131,7 +3131,7 @@
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
Index: test/CodeGen/X86/vector-trunc-ssat.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-ssat.ll
+++ test/CodeGen/X86/vector-trunc-ssat.ll
@@ -226,7 +226,7 @@
; AVX2-SLOW-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -240,19 +240,19 @@
; AVX2-FAST-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7] ; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX512F-LABEL: trunc_ssat_v4i64_v4i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647] ; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0 ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968] ; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0 ; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -264,13 +264,13 @@ ; ; AVX512BW-LABEL: trunc_ssat_v4i64_v4i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647] ; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968] ; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1138,7 +1138,7 @@ ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -1160,7 +1160,7 @@ ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; @@ -1277,7 +1277,7 @@ ; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1288,7 +1288,7 @@ ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528] ; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1305,7 +1305,7 @@ ; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = 
[4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528] ; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1936,7 +1936,7 @@ ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -1958,7 +1958,7 @@ ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; @@ -2990,7 +2990,7 @@ ; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -3001,7 +3001,7 @@ ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168] ; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -3020,7 +3020,7 @@ ; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168] ; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -3322,7 +3322,7 @@ ; AVX512BW-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: vpmaxsw {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; Index: test/CodeGen/X86/vector-trunc-usat.ll =================================================================== --- test/CodeGen/X86/vector-trunc-usat.ll +++ test/CodeGen/X86/vector-trunc-usat.ll @@ -139,7 +139,7 @@ ; AVX2-SLOW-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -153,19 +153,19 @@ ; AVX2-FAST-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0 ; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7] ; AVX2-FAST-NEXT: vpermps 
%ymm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX512F-LABEL: trunc_usat_v4i64_v4i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295] ; AVX512F-NEXT: vpcmpltuq %zmm1, %zmm0, %k1 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729] ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} ; AVX512F-NEXT: vpmovqd %zmm1, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -180,13 +180,13 @@ ; ; AVX512BW-LABEL: trunc_usat_v4i64_v4i32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295] ; AVX512BW-NEXT: vpcmpltuq %zmm1, %zmm0, %k1 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729] ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} ; AVX512BW-NEXT: vpmovqd %zmm1, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -718,7 +718,7 @@ ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -739,7 +739,7 @@ ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; @@ -834,7 +834,7 @@ ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535] ; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -849,7 +849,7 @@ ; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535] ; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -1257,7 +1257,7 @@ ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 
killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -1278,7 +1278,7 @@ ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; @@ -1927,7 +1927,7 @@ ; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255] ; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1943,7 +1943,7 @@ ; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255] ; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -2184,7 +2184,7 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; Index: test/CodeGen/X86/vector-trunc.ll =================================================================== --- test/CodeGen/X86/vector-trunc.ll +++ test/CodeGen/X86/vector-trunc.ll @@ -264,7 +264,7 @@ ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -276,7 +276,7 @@ ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; @@ -400,15 +400,15 @@ ; AVX2: # %bb.0: # %entry ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] -; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: trunc8i32_8i16: ; AVX512F: # %bb.0: # %entry -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -420,9 +420,9 @@ ; ; AVX512BW-LABEL: trunc8i32_8i16: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # 
kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -465,7 +465,7 @@ ; AVX512F: # %bb.0: # %entry ; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -480,7 +480,7 @@ ; AVX512BW: # %bb.0: # %entry ; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -544,7 +544,7 @@ ; AVX512F: # %bb.0: # %entry ; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -559,7 +559,7 @@ ; AVX512BW: # %bb.0: # %entry ; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; @@ -626,7 +626,7 @@ ; ; AVX512F-LABEL: trunc8i32_8i8: ; AVX512F: # %bb.0: # %entry -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] ; AVX512F-NEXT: vmovq %xmm0, (%rax) @@ -641,7 +641,7 @@ ; ; AVX512BW-LABEL: trunc8i32_8i8: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] ; AVX512BW-NEXT: vmovq %xmm0, (%rax) @@ -1139,7 +1139,7 @@ ; ; AVX512BW-LABEL: trunc16i16_16i8: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512BW-NEXT: vmovdqu %xmm0, (%rax) ; AVX512BW-NEXT: vzeroupper @@ -1433,8 +1433,8 @@ ; ; AVX512F-LABEL: trunc2x4i64_8i32: ; AVX512F: # %bb.0: # %entry -; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 ; AVX512F-NEXT: vpmovqd %zmm1, %ymm1 ; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 @@ -1449,8 +1449,8 @@ ; ; AVX512BW-LABEL: trunc2x4i64_8i32: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 ; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1 ; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 @@ -1555,8 +1555,8 @@ ; ; AVX512F-LABEL: trunc2x4i64_8i16: ; AVX512F: # %bb.0: # %entry -; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512F-NEXT: # 
kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 ; AVX512F-NEXT: vpmovqd %zmm1, %ymm1 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] @@ -1579,8 +1579,8 @@ ; ; AVX512BW-LABEL: trunc2x4i64_8i16: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 ; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] @@ -1967,7 +1967,7 @@ ; ; AVX512F-LABEL: PR32160: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,2,3,4,5,6,7] ; AVX512F-NEXT: vpbroadcastd %xmm0, %xmm0 @@ -1983,7 +1983,7 @@ ; ; AVX512BW-LABEL: PR32160: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5] ; AVX512BW-NEXT: vzeroupper Index: test/CodeGen/X86/vector-tzcnt-128.ll =================================================================== --- test/CodeGen/X86/vector-tzcnt-128.ll +++ test/CodeGen/X86/vector-tzcnt-128.ll @@ -134,7 +134,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper ; AVX512VPOPCNTDQ-NEXT: retq ; @@ -365,7 +365,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper ; AVX512VPOPCNTDQ-NEXT: retq ; @@ -646,7 +646,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper ; AVX512VPOPCNTDQ-NEXT: retq ; @@ -917,7 +917,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper ; AVX512VPOPCNTDQ-NEXT: retq ; @@ -1134,7 +1134,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 
; AVX512VPOPCNTDQ-NEXT: vzeroupper ; AVX512VPOPCNTDQ-NEXT: retq ; @@ -1159,7 +1159,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; BITALG_NOVLX-NEXT: vzeroupper ; BITALG_NOVLX-NEXT: retq ; @@ -1330,7 +1330,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper ; AVX512VPOPCNTDQ-NEXT: retq ; @@ -1355,7 +1355,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; BITALG_NOVLX-NEXT: vzeroupper ; BITALG_NOVLX-NEXT: retq ; @@ -1531,7 +1531,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; BITALG_NOVLX-NEXT: vzeroupper ; BITALG_NOVLX-NEXT: retq ; @@ -1703,7 +1703,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; BITALG_NOVLX-NEXT: vzeroupper ; BITALG_NOVLX-NEXT: retq ; Index: test/CodeGen/X86/vector-tzcnt-256.ll =================================================================== --- test/CodeGen/X86/vector-tzcnt-256.ll +++ test/CodeGen/X86/vector-tzcnt-256.ll @@ -104,7 +104,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: retq ; ; AVX512VPOPCNTDQVL-LABEL: testv4i64: @@ -250,7 +250,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: retq ; ; AVX512VPOPCNTDQVL-LABEL: testv4i64u: @@ -432,7 +432,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: retq ; ; AVX512VPOPCNTDQVL-LABEL: testv8i32: @@ -603,7 +603,7 @@ ; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512VPOPCNTDQ-NEXT: # kill: 
def $ymm0 killed $ymm0 killed $zmm0 ; AVX512VPOPCNTDQ-NEXT: retq ; ; AVX512VPOPCNTDQVL-LABEL: testv8i32u: @@ -812,7 +812,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; BITALG_NOVLX-NEXT: retq ; ; BITALG-LABEL: testv16i16: @@ -975,7 +975,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; BITALG_NOVLX-NEXT: retq ; ; BITALG-LABEL: testv16i16u: @@ -1133,7 +1133,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; BITALG_NOVLX-NEXT: retq ; ; BITALG-LABEL: testv32i8: @@ -1288,7 +1288,7 @@ ; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 ; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0 -; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; BITALG_NOVLX-NEXT: retq ; ; BITALG-LABEL: testv32i8u: Index: test/CodeGen/X86/verifier-phi-fail0.mir =================================================================== --- test/CodeGen/X86/verifier-phi-fail0.mir +++ test/CodeGen/X86/verifier-phi-fail0.mir @@ -15,7 +15,7 @@ tracksRegLiveness: true body: | bb.0: - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags JMP_1 %bb.2 bb.1: Index: test/CodeGen/X86/verifier-phi.mir =================================================================== --- test/CodeGen/X86/verifier-phi.mir +++ test/CodeGen/X86/verifier-phi.mir @@ -7,7 +7,7 @@ tracksRegLiveness: true body: | bb.0: - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags JMP_1 %bb.2 bb.1: @@ -23,7 +23,7 @@ body: | bb.0: %0 : gr32 = IMPLICIT_DEF - JE_1 %bb.1, implicit undef %eflags + JE_1 %bb.1, implicit undef $eflags JMP_1 %bb.2 bb.1: Index: test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll =================================================================== --- test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll +++ test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll @@ -11,9 +11,9 @@ } ; PRE-RA: liveins: -; PRE-RA-NEXT: - { reg: '%edi', virtual-reg: '%0' } -; PRE-RA-NEXT: - { reg: '%esi', virtual-reg: '%1' } +; PRE-RA-NEXT: - { reg: '$edi', virtual-reg: '%0' } +; PRE-RA-NEXT: - { reg: '$esi', virtual-reg: '%1' } ; POST-RA: liveins: -; POST-RA-NEXT: - { reg: '%edi', virtual-reg: '' } -; POST-RA-NEXT: - { reg: '%esi', virtual-reg: '' } +; POST-RA-NEXT: - { reg: '$edi', virtual-reg: '' } +; POST-RA-NEXT: - { reg: '$esi', virtual-reg: '' } Index: test/CodeGen/X86/vpshufbitqbm-intrinsics.ll =================================================================== --- test/CodeGen/X86/vpshufbitqbm-intrinsics.ll +++ test/CodeGen/X86/vpshufbitqbm-intrinsics.ll @@ -8,7 +8,7 @@ ; CHECK-NEXT: kmovd %edi, %k1 ; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k0 {%k1} ; CHECK-NEXT: kmovd %k0, %eax -; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax +; CHECK-NEXT: ## kill: def $ax 
killed $ax killed $eax ; CHECK-NEXT: retq %res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask) ret i16 %res Index: test/CodeGen/X86/vselect-pcmp.ll =================================================================== --- test/CodeGen/X86/vselect-pcmp.ll +++ test/CodeGen/X86/vselect-pcmp.ll @@ -50,13 +50,13 @@ ; ; AVX512F-LABEL: signbit_sel_v4i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -79,13 +79,13 @@ ; ; AVX512F-LABEL: signbit_sel_v2i64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -108,13 +108,13 @@ ; ; AVX512F-LABEL: signbit_sel_v4f32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -137,13 +137,13 @@ ; ; AVX512F-LABEL: signbit_sel_v2f64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -230,13 +230,13 @@ ; ; AVX512F-LABEL: signbit_sel_v8i32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: 
def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: signbit_sel_v8i32: @@ -258,13 +258,13 @@ ; ; AVX512F-LABEL: signbit_sel_v4i64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: signbit_sel_v4i64: @@ -286,13 +286,13 @@ ; ; AVX512F-LABEL: signbit_sel_v4f64: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2 -; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: signbit_sel_v4f64: @@ -326,13 +326,13 @@ ; ; AVX512F-LABEL: signbit_sel_v4f64_small_mask: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2 -; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1 ; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: signbit_sel_v4f64_small_mask: @@ -380,12 +380,12 @@ ; ; AVX512F-LABEL: signbit_sel_v4f32_fcmp: ; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; AVX512F-NEXT: vcmpltps %zmm2, %zmm0, %k1 ; AVX512F-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1} -; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; Index: test/CodeGen/X86/widen_bitops-0.ll =================================================================== --- test/CodeGen/X86/widen_bitops-0.ll +++ test/CodeGen/X86/widen_bitops-0.ll @@ 
-141,9 +141,9 @@ ; X32-SSE-NEXT: pextrb $0, %xmm1, %eax ; X32-SSE-NEXT: pextrb $4, %xmm1, %edx ; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx -; X32-SSE-NEXT: # kill: def %al killed %al killed %eax -; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx -; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx +; X32-SSE-NEXT: # kill: def $al killed $al killed $eax +; X32-SSE-NEXT: # kill: def $dl killed $dl killed $edx +; X32-SSE-NEXT: # kill: def $cl killed $cl killed $ecx ; X32-SSE-NEXT: retl ; ; X64-SSE-LABEL: and_v3i8_as_i24: @@ -158,9 +158,9 @@ ; X64-SSE-NEXT: pextrb $0, %xmm1, %eax ; X64-SSE-NEXT: pextrb $4, %xmm1, %edx ; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx -; X64-SSE-NEXT: # kill: def %al killed %al killed %eax -; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx -; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx +; X64-SSE-NEXT: # kill: def $al killed $al killed $eax +; X64-SSE-NEXT: # kill: def $dl killed $dl killed $edx +; X64-SSE-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-SSE-NEXT: retq %1 = bitcast <3 x i8> %a to i24 %2 = bitcast <3 x i8> %b to i24 @@ -182,9 +182,9 @@ ; X32-SSE-NEXT: pextrb $0, %xmm1, %eax ; X32-SSE-NEXT: pextrb $4, %xmm1, %edx ; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx -; X32-SSE-NEXT: # kill: def %al killed %al killed %eax -; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx -; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx +; X32-SSE-NEXT: # kill: def $al killed $al killed $eax +; X32-SSE-NEXT: # kill: def $dl killed $dl killed $edx +; X32-SSE-NEXT: # kill: def $cl killed $cl killed $ecx ; X32-SSE-NEXT: retl ; ; X64-SSE-LABEL: xor_v3i8_as_i24: @@ -199,9 +199,9 @@ ; X64-SSE-NEXT: pextrb $0, %xmm1, %eax ; X64-SSE-NEXT: pextrb $4, %xmm1, %edx ; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx -; X64-SSE-NEXT: # kill: def %al killed %al killed %eax -; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx -; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx +; X64-SSE-NEXT: # kill: def $al killed $al killed $eax +; X64-SSE-NEXT: # kill: def $dl killed $dl killed $edx +; X64-SSE-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-SSE-NEXT: retq %1 = bitcast <3 x i8> %a to i24 %2 = bitcast <3 x i8> %b to i24 @@ -223,9 +223,9 @@ ; X32-SSE-NEXT: pextrb $0, %xmm1, %eax ; X32-SSE-NEXT: pextrb $4, %xmm1, %edx ; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx -; X32-SSE-NEXT: # kill: def %al killed %al killed %eax -; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx -; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx +; X32-SSE-NEXT: # kill: def $al killed $al killed $eax +; X32-SSE-NEXT: # kill: def $dl killed $dl killed $edx +; X32-SSE-NEXT: # kill: def $cl killed $cl killed $ecx ; X32-SSE-NEXT: retl ; ; X64-SSE-LABEL: or_v3i8_as_i24: @@ -240,9 +240,9 @@ ; X64-SSE-NEXT: pextrb $0, %xmm1, %eax ; X64-SSE-NEXT: pextrb $4, %xmm1, %edx ; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx -; X64-SSE-NEXT: # kill: def %al killed %al killed %eax -; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx -; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx +; X64-SSE-NEXT: # kill: def $al killed $al killed $eax +; X64-SSE-NEXT: # kill: def $dl killed $dl killed $edx +; X64-SSE-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-SSE-NEXT: retq %1 = bitcast <3 x i8> %a to i24 %2 = bitcast <3 x i8> %b to i24 Index: test/CodeGen/X86/x86-64-baseptr.ll =================================================================== --- test/CodeGen/X86/x86-64-baseptr.ll +++ test/CodeGen/X86/x86-64-baseptr.ll @@ -43,7 +43,7 @@ ; X32ABI-NEXT: subl $32, %esp ; X32ABI-NEXT: movl %esp, %ebx ; X32ABI-NEXT: callq helper 
-; X32ABI-NEXT: # kill: def %eax killed %eax def %rax +; X32ABI-NEXT: # kill: def $eax killed $eax def $rax ; X32ABI-NEXT: movl %esp, %ecx ; X32ABI-NEXT: leal 31(,%rax,4), %eax ; X32ABI-NEXT: andl $-32, %eax Index: test/CodeGen/X86/x86-interleaved-access.ll =================================================================== --- test/CodeGen/X86/x86-interleaved-access.ll +++ test/CodeGen/X86/x86-interleaved-access.ll @@ -643,7 +643,7 @@ ; AVX512-NEXT: vpcmpeqb %zmm0, %zmm3, %k1 ; AVX512-NEXT: kxnorw %k1, %k0, %k0 ; AVX512-NEXT: vpmovm2b %k0, %zmm0 -; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <64 x i8>, <64 x i8>* %ptr @@ -946,7 +946,7 @@ ; AVX512-NEXT: vpcmpeqb %zmm0, %zmm2, %k1 ; AVX512-NEXT: kxnord %k1, %k0, %k0 ; AVX512-NEXT: vpmovm2b %k0, %zmm0 -; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0 ; AVX512-NEXT: retq %wide.vec = load <128 x i8>, <128 x i8>* %ptr %v1 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> Index: test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll =================================================================== --- test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll +++ test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll @@ -8,7 +8,7 @@ define <4 x i64> @broadcast128(<2 x i64> %src) { ; CHECK-LABEL: broadcast128: ; CHECK: ## %bb.0: -; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0 +; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0 ; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; CHECK-NEXT: retq Index: test/CodeGen/X86/xor-combine-debugloc.ll =================================================================== --- test/CodeGen/X86/xor-combine-debugloc.ll +++ test/CodeGen/X86/xor-combine-debugloc.ll @@ -1,16 +1,16 @@ ; RUN: llc -stop-after=expand-isel-pseudos < %s | FileCheck %s ; ; Make sure that when the entry block of IR below is lowered, an instruction -; that implictly defines %eflags has a same debug location with the icmp +; that implicitly defines $eflags has a same debug location with the icmp ; instruction, and the branch instructions have a same debug location with the ; br instruction. ; ; CHECK: [[DLOC1:![0-9]+]] = !DILocation(line: 5, column: 9, scope: !{{[0-9]+}}) ; CHECK: [[DLOC2:![0-9]+]] = !DILocation(line: 5, column: 7, scope: !{{[0-9]+}}) -; CHECK-DAG: [[VREG1:%[^ ]+]]:gr32 = COPY %esi -; CHECK-DAG: [[VREG2:%[^ ]+]]:gr32 = COPY %edi -; CHECK: SUB32rr [[VREG2]], [[VREG1]], implicit-def %eflags, debug-location [[DLOC1]] -; CHECK-NEXT: JE_1{{.*}} implicit %eflags, debug-location [[DLOC2]] +; CHECK-DAG: [[VREG1:%[^ ]+]]:gr32 = COPY $esi +; CHECK-DAG: [[VREG2:%[^ ]+]]:gr32 = COPY $edi +; CHECK: SUB32rr [[VREG2]], [[VREG1]], implicit-def $eflags, debug-location [[DLOC1]] +; CHECK-NEXT: JE_1{{.*}} implicit $eflags, debug-location [[DLOC2]] ; CHECK-NEXT: JMP_1{{.*}} debug-location [[DLOC2]] target triple = "x86_64-unknown-linux-gnu" Index: test/CodeGen/X86/xray-empty-firstmbb.mir =================================================================== --- test/CodeGen/X86/xray-empty-firstmbb.mir +++ test/CodeGen/X86/xray-empty-firstmbb.mir @@ -15,9 +15,9 @@ name: foo tracksRegLiveness: true liveins: - - { reg: '%edi'} + - { reg: '$edi'} body: | bb.0.entry: - liveins: %edi + liveins: $edi ; CHECK-NOT: PATCHABLE_FUNCTION_ENTER ...
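Note on the convention the updated tests above check: physical registers now print and parse with a '$' prefix ($eax, $eflags, $noreg), virtual registers keep the '%' prefix (%0, the [[VREG1]] patterns), and other '%' entities are untouched, including %bb.N block references, %stack.N frame objects, and target assembly operands such as "movl %esp, %ecx". A minimal hand-written MIR sketch of the resulting syntax, illustrative only and not taken from any test in this patch:

    bb.0.entry:
      liveins: $edi
      %0:gr32 = COPY $edi    ; virtual register: '%' prefix, as before
      $eax = COPY %0         ; physical register: new '$' prefix
      RETQ $eax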
Index: test/CodeGen/X86/xray-empty-function.mir =================================================================== --- test/CodeGen/X86/xray-empty-function.mir +++ test/CodeGen/X86/xray-empty-function.mir @@ -5,7 +5,7 @@ name: empty tracksRegLiveness: true liveins: - - { reg: '%edi'} + - { reg: '$edi'} body: | bb.0: ; CHECK-NOT: PATCHABLE_FUNCTION_ENTER Index: test/CodeGen/X86/xray-multiplerets-in-blocks.mir =================================================================== --- test/CodeGen/X86/xray-multiplerets-in-blocks.mir +++ test/CodeGen/X86/xray-multiplerets-in-blocks.mir @@ -16,10 +16,10 @@ name: foo tracksRegLiveness: true liveins: - - { reg: '%edi'} + - { reg: '$edi'} body: | bb.0: - liveins: %edi + liveins: $edi ; CHECK: PATCHABLE_FUNCTION_ENTER RETQ ; CHECK-NEXT: PATCHABLE_RET Index: test/CodeGen/X86/zext-demanded.ll =================================================================== --- test/CodeGen/X86/zext-demanded.ll +++ test/CodeGen/X86/zext-demanded.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movzwl %di, %eax ; CHECK-NEXT: shrl %eax -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq %y = lshr i16 %x, 1 ret i16 %y @@ -43,7 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movzbl %dil, %eax ; CHECK-NEXT: shrl %eax -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq %y = and i16 %x, 255 %z = lshr i16 %y, 1 @@ -55,7 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movzwl %di, %eax ; CHECK-NEXT: shrl $9, %eax -; CHECK-NEXT: # kill: def %ax killed %ax killed %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq %y = lshr i16 %x, 9 ret i16 %y @@ -76,7 +76,7 @@ define i32 @test7(i32 %x) { ; CHECK-LABEL: test7: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def %edi killed %edi def %rdi +; CHECK-NEXT: # kill: def $edi killed $edi def $rdi ; CHECK-NEXT: andl $65534, %edi # imm = 0xFFFE ; CHECK-NEXT: leal 1(%rdi), %eax ; CHECK-NEXT: retq Index: test/DebugInfo/ARM/PR16736.ll =================================================================== --- test/DebugInfo/ARM/PR16736.ll +++ test/DebugInfo/ARM/PR16736.ll @@ -2,7 +2,7 @@ ; RUN: llc -filetype=obj < %s \ ; RUN: | llvm-dwarfdump -debug-info - | FileCheck %s --check-prefix=DWARF ; -; CHECK: @DEBUG_VALUE: h:x <- [DW_OP_plus_uconst {{.*}}] [%r{{.*}}+0] +; CHECK: @DEBUG_VALUE: h:x <- [DW_OP_plus_uconst {{.*}}] [$r{{.*}}+0] ; DWARF: DW_TAG_formal_parameter ; DWARF: DW_AT_location ; DWARF-NEXT: DW_OP_reg0 R0 Index: test/DebugInfo/ARM/sdag-split-arg.ll =================================================================== --- test/DebugInfo/ARM/sdag-split-arg.ll +++ test/DebugInfo/ARM/sdag-split-arg.ll @@ -19,8 +19,8 @@ ; Function Attrs: optsize ssp define i64 @_Z3foox(i64 returned) local_unnamed_addr #0 !dbg !13 { tail call void @llvm.dbg.value(metadata i64 %0, metadata !17, metadata !DIExpression()), !dbg !18 - ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 0 32] %r5 - ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 32 32] %r4 + ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 0 32] $r5 + ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 32 32] $r4 %2 = load i64, i64* @g, align 8, !dbg !19, !tbaa !21 %3 = icmp eq i64 %2, %0, !dbg !19 Index: test/DebugInfo/ARM/sdag-split-arg1.ll =================================================================== --- test/DebugInfo/ARM/sdag-split-arg1.ll +++ test/DebugInfo/ARM/sdag-split-arg1.ll @@ -7,7 +7,7 @@ %0 
= bitcast double %a to i64 %extract.t84 = trunc i64 %0 to i32 tail call void @llvm.dbg.value(metadata i32 %extract.t84, metadata !8, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32)), !dbg !12 - ; CHECK: DBG_VALUE debug-use %r0, debug-use %noreg, !6, !DIExpression(DW_OP_LLVM_fragment, 0, 32) + ; CHECK: DBG_VALUE debug-use $r0, debug-use $noreg, !6, !DIExpression(DW_OP_LLVM_fragment, 0, 32) %r.sroa.0.0.insert.ext35 = zext i32 %extract.t84 to i64 ret i64 %r.sroa.0.0.insert.ext35 } Index: test/DebugInfo/COFF/fpo-csrs.ll =================================================================== --- test/DebugInfo/COFF/fpo-csrs.ll +++ test/DebugInfo/COFF/fpo-csrs.ll @@ -65,7 +65,7 @@ ; ASM: pushl %esi ; ASM: .cv_fpo_pushreg %esi ; ASM: .cv_fpo_endprologue -; ASM: #DEBUG_VALUE: csr1:a <- %esi +; ASM: #DEBUG_VALUE: csr1:a <- $esi ; ASM: retl ; ASM: .cv_fpo_endproc @@ -122,8 +122,8 @@ ; ASM: pushl %esi ; ASM: .cv_fpo_pushreg %esi ; ASM: .cv_fpo_endprologue -; ASM: #DEBUG_VALUE: csr2:a <- %esi -; ASM: #DEBUG_VALUE: csr2:b <- %edi +; ASM: #DEBUG_VALUE: csr2:a <- $esi +; ASM: #DEBUG_VALUE: csr2:b <- $edi ; ASM: retl ; ASM: .cv_fpo_endproc @@ -192,9 +192,9 @@ ; ASM: pushl %esi ; ASM: .cv_fpo_pushreg %esi ; ASM: .cv_fpo_endprologue -; ASM: #DEBUG_VALUE: csr3:a <- %esi -; ASM: #DEBUG_VALUE: csr3:b <- %edi -; ASM: #DEBUG_VALUE: csr3:c <- %ebx +; ASM: #DEBUG_VALUE: csr3:a <- $esi +; ASM: #DEBUG_VALUE: csr3:b <- $edi +; ASM: #DEBUG_VALUE: csr3:c <- $ebx ; ASM: retl ; ASM: .cv_fpo_endproc @@ -279,10 +279,10 @@ ; ASM: pushl %esi ; ASM: .cv_fpo_pushreg %esi ; ASM: .cv_fpo_endprologue -; ASM: #DEBUG_VALUE: csr4:a <- %esi -; ASM: #DEBUG_VALUE: csr4:b <- %edi -; ASM: #DEBUG_VALUE: csr4:c <- %ebx -; ASM: #DEBUG_VALUE: csr4:d <- %ebp +; ASM: #DEBUG_VALUE: csr4:a <- $esi +; ASM: #DEBUG_VALUE: csr4:b <- $edi +; ASM: #DEBUG_VALUE: csr4:c <- $ebx +; ASM: #DEBUG_VALUE: csr4:d <- $ebp ; ASM: retl ; ASM: .cv_fpo_endproc Index: test/DebugInfo/COFF/local-variable-gap.ll =================================================================== --- test/DebugInfo/COFF/local-variable-gap.ll +++ test/DebugInfo/COFF/local-variable-gap.ll @@ -33,13 +33,13 @@ ; ASM: callq vardef ; ASM: movl %eax, %esi ; ASM: [[p_b1:\.Ltmp[0-9]+]]: -; ASM: #DEBUG_VALUE: p <- %esi +; ASM: #DEBUG_VALUE: p <- $esi ; ASM: callq barrier ; ASM: movl %esi, %ecx ; ASM: testl %eax, %eax ; ASM: jne .LBB0_5 ; ASM: # %bb.2: # %if.end -; ASM: #DEBUG_VALUE: p <- %esi +; ASM: #DEBUG_VALUE: p <- $esi ; ASM: callq use ; ASM: jmp .LBB0_4 ; ASM: [[p_e1:\.Ltmp[0-9]+]]: @@ -52,7 +52,7 @@ ; ASM: retq ; ASM: .LBB0_5: # %if.then4 ; ASM: [[p_b2:\.Ltmp[0-9]+]]: -; ASM: #DEBUG_VALUE: p <- %esi +; ASM: #DEBUG_VALUE: p <- $esi ; ASM: callq call_noreturn ; ASM: ud2 ; ASM: .Lfunc_end0: Index: test/DebugInfo/COFF/pieces.ll =================================================================== --- test/DebugInfo/COFF/pieces.ll +++ test/DebugInfo/COFF/pieces.ll @@ -43,19 +43,19 @@ ; ASM: .p2align 4, 0x90 ; ASM: .LBB0_3: # %for.body ; ASM: [[ox_start:\.Ltmp[0-9]+]]: -; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] %edi +; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] $edi ; ASM: .cv_loc 0 1 13 11 # t.c:13:11 ; ASM: movl %edi, %ecx ; ASM: callq g ; ASM: movl %eax, %edi ; ASM: [[oy_start:\.Ltmp[0-9]+]]: -; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] %edi -; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] %esi +; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] $edi +; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] 
$esi
 ; ASM: .cv_loc 0 1 14 11 # t.c:14:11
 ; ASM: movl %esi, %ecx
 ; ASM: callq g
 ; ASM: movl %eax, %esi
-; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] %esi
+; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] $esi
 ; ASM: cmpl n(%rip), %eax
 ; ASM: jl .LBB0_3
 ; ASM: [[oy_end:\.Ltmp[0-9]+]]:
@@ -64,23 +64,23 @@
 ; ASM-LABEL: pad_right: # @pad_right
-; ASM: #DEBUG_VALUE: pad_right:o <- [DW_OP_LLVM_fragment 32 32] %ecx
+; ASM: #DEBUG_VALUE: pad_right:o <- [DW_OP_LLVM_fragment 32 32] $ecx
 ; ASM: movl %ecx, %eax
 ; ASM: retq
 ; ASM-LABEL: pad_left: # @pad_left
-; ASM: #DEBUG_VALUE: pad_left:o <- [DW_OP_LLVM_fragment 0 32] %ecx
+; ASM: #DEBUG_VALUE: pad_left:o <- [DW_OP_LLVM_fragment 0 32] $ecx
 ; ASM: .cv_loc 2 1 24 3 # t.c:24:3
 ; ASM: movl %ecx, %eax
 ; ASM: retq
 ; ASM-LABEL: nested: # @nested
-; ASM: #DEBUG_VALUE: nested:o <- [DW_OP_deref] [%rcx+0]
+; ASM: #DEBUG_VALUE: nested:o <- [DW_OP_deref] [$rcx+0]
 ; ASM: movl 12(%rcx), %eax
 ; ASM: [[p_start:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: nested:p <- [DW_OP_LLVM_fragment 32 32] %eax
+; ASM: #DEBUG_VALUE: nested:p <- [DW_OP_LLVM_fragment 32 32] $eax
 ; ASM: retq
 ; ASM-LABEL: bitpiece_spill: # @bitpiece_spill
@@ -89,7 +89,7 @@
 ; ASM: callq g
 ; ASM: movl %eax, [[offset_o_x:[0-9]+]](%rsp) # 4-byte Spill
 ; ASM: [[spill_o_x_start:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offset_o_x]], DW_OP_LLVM_fragment 32 32] [%rsp+0]
+; ASM: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offset_o_x]], DW_OP_LLVM_fragment 32 32] [$rsp+0]
 ; ASM: #APP
 ; ASM: #NO_APP
 ; ASM: movl [[offset_o_x]](%rsp), %eax # 4-byte Reload
Index: test/DebugInfo/COFF/register-variables.ll
===================================================================
--- test/DebugInfo/COFF/register-variables.ll
+++ test/DebugInfo/COFF/register-variables.ll
@@ -26,30 +26,30 @@
 ; ASM: # %bb.0: # %entry
 ; ASM: pushq %rsi
 ; ASM: subq $32, %rsp
-; ASM: #DEBUG_VALUE: f:p <- %ecx
+; ASM: #DEBUG_VALUE: f:p <- $ecx
 ; ASM: movl %ecx, %esi
 ; ASM: [[p_ecx_esi:\.Ltmp.*]]:
-; ASM: #DEBUG_VALUE: f:p <- %esi
+; ASM: #DEBUG_VALUE: f:p <- $esi
 ; ASM: callq getint
 ; ASM: [[after_getint:\.Ltmp.*]]:
-; ASM: #DEBUG_VALUE: a <- %eax
-; ASM: #DEBUG_VALUE: inlineinc:a <- %eax
-; ASM: #DEBUG_VALUE: c <- %eax
+; ASM: #DEBUG_VALUE: a <- $eax
+; ASM: #DEBUG_VALUE: inlineinc:a <- $eax
+; ASM: #DEBUG_VALUE: c <- $eax
 ; ASM: testl %esi, %esi
 ; ASM: je .LBB0_2
 ; ASM: [[after_je:\.Ltmp.*]]:
 ; ASM: # %bb.1: # %if.then
-; ASM-DAG: #DEBUG_VALUE: inlineinc:a <- %eax
-; ASM-DAG: #DEBUG_VALUE: a <- %eax
-; ASM-DAG: #DEBUG_VALUE: f:p <- %esi
+; ASM-DAG: #DEBUG_VALUE: inlineinc:a <- $eax
+; ASM-DAG: #DEBUG_VALUE: a <- $eax
+; ASM-DAG: #DEBUG_VALUE: f:p <- $esi
 ; ASM: addl $1, %eax
 ; ASM: [[after_inc_eax:\.Ltmp.*]]:
-; ASM: #DEBUG_VALUE: inlineinc:b <- %eax
-; ASM: #DEBUG_VALUE: b <- %eax
+; ASM: #DEBUG_VALUE: inlineinc:b <- $eax
+; ASM: #DEBUG_VALUE: b <- $eax
 ; ASM: addl $1, x(%rip)
 ; ASM: [[after_if:\.Ltmp.*]]:
 ; ASM: .LBB0_2: # %if.else
-; ASM: #DEBUG_VALUE: f:p <- %esi
+; ASM: #DEBUG_VALUE: f:p <- $esi
 ; ASM: movl %eax, %ecx
 ; ASM: addq $32, %rsp
 ; ASM: popq %rsi
Index: test/DebugInfo/MIR/AArch64/clobber-sp.mir
===================================================================
--- test/DebugInfo/MIR/AArch64/clobber-sp.mir
+++ test/DebugInfo/MIR/AArch64/clobber-sp.mir
@@ -112,12 +112,12 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%w0' }
-  - { reg: '%w1' }
-  - { reg: '%d0' }
-  - { reg: '%d1' }
-  - { reg: '%d2' }
-  - { reg: '%d3' }
+  - { reg: '$w0' }
+  - { reg: '$w1' }
+  - { reg: '$d0' }
+  - { reg: '$d1' }
+  - { reg: '$d2' }
+  - { reg: '$d3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -135,39 +135,39 @@
 stack:
   - { id: 0, name: x.addr, offset: -20, size: 4, alignment: 4, local-offset: -4 }
   - { id: 1, type: spill-slot, offset: -24, size: 4, alignment: 4 }
-  - { id: 2, type: spill-slot, offset: -8, size: 8, alignment: 8, callee-saved-register: '%lr' }
-  - { id: 3, type: spill-slot, offset: -16, size: 8, alignment: 8, callee-saved-register: '%fp' }
+  - { id: 2, type: spill-slot, offset: -8, size: 8, alignment: 8, callee-saved-register: '$lr' }
+  - { id: 3, type: spill-slot, offset: -16, size: 8, alignment: 8, callee-saved-register: '$fp' }
 body: |
   bb.0.entry:
     successors: %bb.2.if.end(0x40000000), %bb.1.if.then(0x40000000)
-    liveins: %w0, %w1, %d0, %d1, %d2, %d3, %lr
+    liveins: $w0, $w1, $d0, $d1, $d2, $d3, $lr
 
-    %sp = frame-setup SUBXri %sp, 32, 0
-    frame-setup STPXi killed %fp, killed %lr, %sp, 2 :: (store 8 into %stack.3), (store 8 into %stack.2)
-    %fp = frame-setup ADDXri %sp, 16, 0
-    DBG_VALUE debug-use %w0, debug-use _, !19, !22, debug-location !23
-    STURWi killed %w0, %fp, -4 :: (store 4 into %stack.0.x.addr)
-    DBG_VALUE debug-use %w1, debug-use _, !20, !22, debug-location !28
-    STRWui killed %w1, %sp, 2, debug-location !30 :: (store 4 into %stack.1)
-    DBG_VALUE %sp, 0, !20, !36, debug-location !28
-    BL @g, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %d0, implicit killed %d1, implicit killed %d2, implicit killed %d3, implicit-def %sp, debug-location !30
-    %w0 = LDRWui %sp, 2, debug-location !33 :: (load 4 from %stack.1)
-    CBZW killed %w0, %bb.2.if.end, debug-location !33
+    $sp = frame-setup SUBXri $sp, 32, 0
+    frame-setup STPXi killed $fp, killed $lr, $sp, 2 :: (store 8 into %stack.3), (store 8 into %stack.2)
+    $fp = frame-setup ADDXri $sp, 16, 0
+    DBG_VALUE debug-use $w0, debug-use _, !19, !22, debug-location !23
+    STURWi killed $w0, $fp, -4 :: (store 4 into %stack.0.x.addr)
+    DBG_VALUE debug-use $w1, debug-use _, !20, !22, debug-location !28
+    STRWui killed $w1, $sp, 2, debug-location !30 :: (store 4 into %stack.1)
+    DBG_VALUE $sp, 0, !20, !36, debug-location !28
+    BL @g, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $d0, implicit killed $d1, implicit killed $d2, implicit killed $d3, implicit-def $sp, debug-location !30
+    $w0 = LDRWui $sp, 2, debug-location !33 :: (load 4 from %stack.1)
+    CBZW killed $w0, %bb.2.if.end, debug-location !33
 
   bb.1.if.then:
     successors: %bb.2.if.end(0x80000000)
-    DBG_VALUE debug-use %sp, 0, !20, !36, debug-location !28
-    %x0 = SUBXri %fp, 4, 0
-    DBG_VALUE debug-use %x0, debug-use _, !19, !22, debug-location !23
-    BL @h, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %x0, debug-location !34
+    DBG_VALUE debug-use $sp, 0, !20, !36, debug-location !28
+    $x0 = SUBXri $fp, 4, 0
+    DBG_VALUE debug-use $x0, debug-use _, !19, !22, debug-location !23
+    BL @h, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $x0, debug-location !34
 
   bb.2.if.end:
-    DBG_VALUE debug-use %sp, 0, !20, !36, debug-location !28
-    %w8 = MOVZWi 0, 0
-    %x0 = ORRXrs %xzr, undef %x8, 0, implicit killed %w8, debug-location !35
-    %fp, %lr = LDPXi %sp, 2, debug-location !35 :: (load 8 from %stack.3), (load 8 from %stack.2)
-    %sp = ADDXri %sp, 32, 0, debug-location !35
-    RET undef %lr, implicit killed %w0, debug-location !35
+    DBG_VALUE debug-use $sp, 0, !20, !36, debug-location !28
+    $w8 = MOVZWi 0, 0
+    $x0 = ORRXrs $xzr, undef $x8, 0, implicit killed $w8, debug-location !35
+    $fp, $lr = LDPXi $sp, 2, debug-location !35 :: (load 8 from %stack.3), (load 8 from %stack.2)
+    $sp = ADDXri $sp, 32, 0, debug-location !35
+    RET undef $lr, implicit killed $w0, debug-location !35
 ...
Index: test/DebugInfo/MIR/AArch64/implicit-def-dead-scope.mir
===================================================================
--- test/DebugInfo/MIR/AArch64/implicit-def-dead-scope.mir
+++ test/DebugInfo/MIR/AArch64/implicit-def-dead-scope.mir
@@ -152,7 +152,7 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%x0', virtual-reg: '' }
+  - { reg: '$x0', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -179,71 +179,71 @@
       callee-saved-register: '', local-offset: -32, di-variable: '', di-expression: '',
       di-location: '' }
   - { id: 2, name: '', type: spill-slot, offset: -16, size: 8, alignment: 16,
-      callee-saved-register: '%lr', di-variable: '', di-expression: '',
+      callee-saved-register: '$lr', di-variable: '', di-expression: '',
       di-location: '' }
 constants:
 body: |
   bb.0.entry:
     successors: %bb.3.sw.bb2(0x2aaaaaab), %bb.1.entry(0x55555555)
-    liveins: %x0, %lr
+    liveins: $x0, $lr
 
-    %sp = frame-setup SUBXri %sp, 48, 0
-    frame-setup STRXui killed %lr, %sp, 4 :: (store 8 into %stack.2)
+    $sp = frame-setup SUBXri $sp, 48, 0
+    frame-setup STRXui killed $lr, $sp, 4 :: (store 8 into %stack.2)
     frame-setup CFI_INSTRUCTION def_cfa_offset 48
-    frame-setup CFI_INSTRUCTION offset %w30, -16
-    %w8 = LDRSBWui %x0, 4 :: (load 1 from %ir.ap, align 4)
-    CBZW %w8, %bb.3.sw.bb2
+    frame-setup CFI_INSTRUCTION offset $w30, -16
+    $w8 = LDRSBWui $x0, 4 :: (load 1 from %ir.ap, align 4)
+    CBZW $w8, %bb.3.sw.bb2
 
   bb.1.entry:
     successors: %bb.2._ZN1jILi6EN1a1fEE1mEj.exit(0x40000001), %bb.4(0x3fffffff)
-    liveins: %w8, %x0
+    liveins: $w8, $x0
 
-    dead %wzr = SUBSWri killed %w8, 1, 0, implicit-def %nzcv
-    Bcc 1, %bb.4, implicit %nzcv
+    dead $wzr = SUBSWri killed $w8, 1, 0, implicit-def $nzcv
+    Bcc 1, %bb.4, implicit $nzcv
 
   bb.2._ZN1jILi6EN1a1fEE1mEj.exit:
     successors: %bb.5.sw.epilog(0x80000000)
-    liveins: %x0
+    liveins: $x0
 
-    %w2 = ORRWri %wzr, 0, implicit-def %x2
-    %x3 = IMPLICIT_DEF debug-location !32
-    %x1 = IMPLICIT_DEF
-    STRXui %x2, %sp, 2 :: (store 8 into %ir.2)
+    $w2 = ORRWri $wzr, 0, implicit-def $x2
+    $x3 = IMPLICIT_DEF debug-location !32
+    $x1 = IMPLICIT_DEF
+    STRXui $x2, $sp, 2 :: (store 8 into %ir.2)
     DBG_VALUE 0, 0, !18, !23, debug-location !24
-    STRXui %x2, %sp, 0, debug-location !27 :: (store 8 into %ir.1)
-    %w4 = ORRWri %wzr, 0, implicit-def %x4
+    STRXui $x2, $sp, 0, debug-location !27 :: (store 8 into %ir.1)
+    $w4 = ORRWri $wzr, 0, implicit-def $x4
     B %bb.5.sw.epilog
 
   bb.3.sw.bb2:
     successors: %bb.5.sw.epilog(0x80000000)
-    liveins: %x0
+    liveins: $x0
 
-    %x4, %x3 = LDPXi %sp, 0 :: (dereferenceable load 8 from %ir.3), (dereferenceable load 8 from %ir..phi.trans.insert26)
-    %x2, %x1 = LDPXi %sp, 2 :: (dereferenceable load 8 from %ir..phi.trans.insert), (dereferenceable load 8 from %ir.4)
+    $x4, $x3 = LDPXi $sp, 0 :: (dereferenceable load 8 from %ir.3), (dereferenceable load 8 from %ir..phi.trans.insert26)
+    $x2, $x1 = LDPXi $sp, 2 :: (dereferenceable load 8 from %ir..phi.trans.insert), (dereferenceable load 8 from %ir.4)
     B %bb.5.sw.epilog
 
   bb.4:
    successors: %bb.5.sw.epilog(0x80000000)
-    liveins: %x0
+    liveins: $x0
 
-    %x2 = ORRXrs %xzr, %xzr, 0
-    %x4 = ORRXrs %xzr, %xzr, 0
-    %x3 = IMPLICIT_DEF debug-location !32
-    %x1 = IMPLICIT_DEF
+    $x2 = ORRXrs $xzr, $xzr, 0
+    $x4 = ORRXrs $xzr, $xzr, 0
+    $x3 = IMPLICIT_DEF debug-location !32
+    $x1 = IMPLICIT_DEF
 
   bb.5.sw.epilog:
-    liveins: %x0, %x1, %x2, %x3, %x4
-
-    %w8 = LDRBBui %x0, 5 :: (load 1 from %ir.bx)
-    %w0 = LDRWui killed %x0, 0 :: (load 4 from %ir.bw1)
-    %x7 = ADRP target-flags(aarch64-page) @bt
-    %x7 = ADDXri killed %x7, target-flags(aarch64-pageoff, aarch64-nc) @bt, 0
-    dead %wzr = SUBSWri killed %w8, 0, 0, implicit-def %nzcv
-    %w5 = CSINCWr %wzr, %wzr, 0, implicit killed %nzcv
-    %w6 = ORRWrs %wzr, %wzr, 0
-    BL @_Z2byi1LS_bbPi, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %w0, implicit killed %x1, implicit killed %x2, implicit killed %x3, implicit killed %x4, implicit killed %w5, implicit killed %w6, implicit killed %x7, implicit-def %sp
-    %lr = LDRXui %sp, 4 :: (load 8 from %stack.2)
-    %sp = ADDXri %sp, 48, 0
-    RET undef %lr
+    liveins: $x0, $x1, $x2, $x3, $x4
+
+    $w8 = LDRBBui $x0, 5 :: (load 1 from %ir.bx)
+    $w0 = LDRWui killed $x0, 0 :: (load 4 from %ir.bw1)
+    $x7 = ADRP target-flags(aarch64-page) @bt
+    $x7 = ADDXri killed $x7, target-flags(aarch64-pageoff, aarch64-nc) @bt, 0
+    dead $wzr = SUBSWri killed $w8, 0, 0, implicit-def $nzcv
+    $w5 = CSINCWr $wzr, $wzr, 0, implicit killed $nzcv
+    $w6 = ORRWrs $wzr, $wzr, 0
+    BL @_Z2byi1LS_bbPi, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $w0, implicit killed $x1, implicit killed $x2, implicit killed $x3, implicit killed $x4, implicit killed $w5, implicit killed $w6, implicit killed $x7, implicit-def $sp
+    $lr = LDRXui $sp, 4 :: (load 8 from %stack.2)
+    $sp = ADDXri $sp, 48, 0
+    RET undef $lr
 ...
Index: test/DebugInfo/MIR/ARM/split-superreg-complex.mir
===================================================================
--- test/DebugInfo/MIR/ARM/split-superreg-complex.mir
+++ test/DebugInfo/MIR/ARM/split-superreg-complex.mir
@@ -74,19 +74,19 @@
 regBankSelected: false
 selected: false
 tracksRegLiveness: true
-calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
-                        '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
-                        '%r5', '%r6', '%r7', '%r8', '%r10', '%r11', '%s16',
-                        '%s17', '%s18', '%s19', '%s20', '%s21', '%s22',
-                        '%s23', '%s24', '%s25', '%s26', '%s27', '%s28',
-                        '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', '%d10_d12',
-                        '%d11_d13', '%d12_d14', '%d13_d15', '%q4_q5', '%q5_q6',
-                        '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', '%r6_r7', '%r10_r11',
-                        '%d8_d9_d10', '%d9_d10_d11', '%d10_d11_d12', '%d11_d12_d13',
-                        '%d12_d13_d14', '%d13_d14_d15', '%d8_d10_d12',
-                        '%d9_d11_d13', '%d10_d12_d14', '%d11_d13_d15',
-                        '%d8_d10_d12_d14', '%d9_d11_d13_d15', '%d9_d10',
-                        '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', '%d11_d12_d13_d14' ]
+calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13',
+                        '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4',
+                        '$r5', '$r6', '$r7', '$r8', '$r10', '$r11', '$s16',
+                        '$s17', '$s18', '$s19', '$s20', '$s21', '$s22',
+                        '$s23', '$s24', '$s25', '$s26', '$s27', '$s28',
+                        '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11', '$d10_d12',
+                        '$d11_d13', '$d12_d14', '$d13_d15', '$q4_q5', '$q5_q6',
+                        '$q6_q7', '$q4_q5_q6_q7', '$r4_r5', '$r6_r7', '$r10_r11',
+                        '$d8_d9_d10', '$d9_d10_d11', '$d10_d11_d12', '$d11_d12_d13',
+                        '$d12_d13_d14', '$d13_d14_d15', '$d8_d10_d12',
+                        '$d9_d11_d13', '$d10_d12_d14', '$d11_d13_d15',
+                        '$d8_d10_d12_d14', '$d9_d11_d13_d15', '$d9_d10',
+                        '$d11_d12', '$d13_d14', '$d9_d10_d11_d12', '$d11_d12_d13_d14' ]
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -102,22 +102,22 @@
   hasVAStart: false
   hasMustTailInVarArgFunc: false
 stack:
-  - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr' }
+  - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr' }
 body: |
   bb.0.entry:
-    liveins: %lr
+    liveins: $lr
 
-    early-clobber %sp = frame-setup t2STR_PRE killed undef %lr, %sp, -4, 14, _
+    early-clobber $sp = frame-setup t2STR_PRE killed undef $lr, $sp, -4, 14, _
     frame-setup CFI_INSTRUCTION def_cfa_offset 4
-    frame-setup CFI_INSTRUCTION offset %lr, -4
-    tBL 14, _, @v, csr_ios, implicit-def dead %lr, implicit %sp, implicit-def %sp, implicit-def %r0, implicit-def %r1, implicit-def %r2, implicit-def %r3, debug-location !19
-    %d1 = VMOVDRR killed %r2, killed %r3, 14, _, implicit-def %q0, debug-location !19
-    %d0 = VMOVDRR killed %r0, killed %r1, 14, _, implicit killed %q0, implicit-def %q0, debug-location !19
-    DBG_VALUE debug-use %q0, debug-use _, !14, !20, debug-location !21
-    %s4 = VMOVS %s1, 14, _, implicit-def %d2, debug-location !24
-    %d0 = VADDfd %d0, killed %d2, 14, _, implicit killed %q0, debug-location !24
-    %r0 = VMOVRS %s0, 14, _, implicit killed %d0, debug-location !25
-    %lr, %sp = t2LDR_POST %sp, 4, 14, _, debug-location !25
-    tBX_RET 14, _, implicit %r0, debug-location !25
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    tBL 14, _, @v, csr_ios, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $r0, implicit-def $r1, implicit-def $r2, implicit-def $r3, debug-location !19
+    $d1 = VMOVDRR killed $r2, killed $r3, 14, _, implicit-def $q0, debug-location !19
+    $d0 = VMOVDRR killed $r0, killed $r1, 14, _, implicit killed $q0, implicit-def $q0, debug-location !19
+    DBG_VALUE debug-use $q0, debug-use _, !14, !20, debug-location !21
+    $s4 = VMOVS $s1, 14, _, implicit-def $d2, debug-location !24
+    $d0 = VADDfd $d0, killed $d2, 14, _, implicit killed $q0, debug-location !24
+    $r0 = VMOVRS $s0, 14, _, implicit killed $d0, debug-location !25
+    $lr, $sp = t2LDR_POST $sp, 4, 14, _, debug-location !25
+    tBX_RET 14, _, implicit $r0, debug-location !25
 ...
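
The ARM, Mips, and X86 test updates that follow are the same mechanical rename as the hunks above: physical registers flip from '%name' to '$name', while virtual registers (%0, %1, ...) and the non-register '%' entities (%bb.N, %stack.N, %ir.X, %fixed-stack.N) keep their sigil. For .mir tests an update of this shape could in principle be scripted; the Python sketch below is illustrative only. The KEEP set is my assumption and is likely incomplete, and AT&T-syntax assembly in .ll CHECK lines (e.g. "movl %esi, %ecx" above) must keep '%', so those tests would still need per-line review.

    import re
    import sys

    # '%' followed by a digit is a virtual register and keeps its sigil; '%'
    # followed by an identifier is a physical register unless it names one of
    # the non-register MIR entities below (hypothetical list, likely incomplete).
    PHYSREG = re.compile(r"%([a-zA-Z_][a-zA-Z0-9_]*)")
    KEEP = {"bb", "stack", "fixed", "ir", "subreg", "const"}

    def flip_sigils(line):
        def repl(m):
            name = m.group(1)
            return m.group(0) if name in KEEP else "$" + name
        return PHYSREG.sub(repl, line)

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            with open(path) as f:
                rewritten = [flip_sigils(line) for line in f]
            with open(path, "w") as f:
                f.writelines(rewritten)
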
Index: test/DebugInfo/MIR/ARM/split-superreg-piece.mir
===================================================================
--- test/DebugInfo/MIR/ARM/split-superreg-piece.mir
+++ test/DebugInfo/MIR/ARM/split-superreg-piece.mir
@@ -74,19 +74,19 @@
 regBankSelected: false
 selected: false
 tracksRegLiveness: true
-calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
-                        '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
-                        '%r5', '%r6', '%r7', '%r8', '%r10', '%r11', '%s16',
-                        '%s17', '%s18', '%s19', '%s20', '%s21', '%s22',
-                        '%s23', '%s24', '%s25', '%s26', '%s27', '%s28',
-                        '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', '%d10_d12',
-                        '%d11_d13', '%d12_d14', '%d13_d15', '%q4_q5', '%q5_q6',
-                        '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', '%r6_r7', '%r10_r11',
-                        '%d8_d9_d10', '%d9_d10_d11', '%d10_d11_d12', '%d11_d12_d13',
-                        '%d12_d13_d14', '%d13_d14_d15', '%d8_d10_d12',
-                        '%d9_d11_d13', '%d10_d12_d14', '%d11_d13_d15',
-                        '%d8_d10_d12_d14', '%d9_d11_d13_d15', '%d9_d10',
-                        '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', '%d11_d12_d13_d14' ]
+calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13',
+                        '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4',
+                        '$r5', '$r6', '$r7', '$r8', '$r10', '$r11', '$s16',
+                        '$s17', '$s18', '$s19', '$s20', '$s21', '$s22',
+                        '$s23', '$s24', '$s25', '$s26', '$s27', '$s28',
+                        '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11', '$d10_d12',
+                        '$d11_d13', '$d12_d14', '$d13_d15', '$q4_q5', '$q5_q6',
+                        '$q6_q7', '$q4_q5_q6_q7', '$r4_r5', '$r6_r7', '$r10_r11',
+                        '$d8_d9_d10', '$d9_d10_d11', '$d10_d11_d12', '$d11_d12_d13',
+                        '$d12_d13_d14', '$d13_d14_d15', '$d8_d10_d12',
+                        '$d9_d11_d13', '$d10_d12_d14', '$d11_d13_d15',
+                        '$d8_d10_d12_d14', '$d9_d11_d13_d15', '$d9_d10',
+                        '$d11_d12', '$d13_d14', '$d9_d10_d11_d12', '$d11_d12_d13_d14' ]
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -102,22 +102,22 @@
   hasVAStart: false
   hasMustTailInVarArgFunc: false
 stack:
-  - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr' }
+  - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr' }
 body: |
   bb.0.entry:
-    liveins: %lr
+    liveins: $lr
 
-    early-clobber %sp = frame-setup t2STR_PRE killed undef %lr, %sp, -4, 14, _
+    early-clobber $sp = frame-setup t2STR_PRE killed undef $lr, $sp, -4, 14, _
     frame-setup CFI_INSTRUCTION def_cfa_offset 4
-    frame-setup CFI_INSTRUCTION offset %lr, -4
-    tBL 14, _, @v, csr_ios, implicit-def dead %lr, implicit %sp, implicit-def %sp, implicit-def %r0, implicit-def %r1, implicit-def %r2, implicit-def %r3, debug-location !19
-    %d1 = VMOVDRR killed %r2, killed %r3, 14, _, implicit-def %q0, debug-location !19
-    %d0 = VMOVDRR killed %r0, killed %r1, 14, _, implicit killed %q0, implicit-def %q0, debug-location !19
-    DBG_VALUE debug-use %q0, debug-use _, !14, !20, debug-location !21
-    %s4 = VMOVS %s1, 14, _, implicit-def %d2, debug-location !24
-    %d0 = VADDfd %d0, killed %d2, 14, _, implicit killed %q0, debug-location !24
-    %r0 = VMOVRS %s0, 14, _, implicit killed %d0, debug-location !25
-    %lr, %sp = t2LDR_POST %sp, 4, 14, _, debug-location !25
-    tBX_RET 14, _, implicit %r0, debug-location !25
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    tBL 14, _, @v, csr_ios, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $r0, implicit-def $r1, implicit-def $r2, implicit-def $r3, debug-location !19
+    $d1 = VMOVDRR killed $r2, killed $r3, 14, _, implicit-def $q0, debug-location !19
+    $d0 = VMOVDRR killed $r0, killed $r1, 14, _, implicit killed $q0, implicit-def $q0, debug-location !19
+    DBG_VALUE debug-use $q0, debug-use _, !14, !20, debug-location !21
+    $s4 = VMOVS $s1, 14, _, implicit-def $d2, debug-location !24
+    $d0 = VADDfd $d0, killed $d2, 14, _, implicit killed $q0, debug-location !24
+    $r0 = VMOVRS $s0, 14, _, implicit killed $d0, debug-location !25
+    $lr, $sp = t2LDR_POST $sp, 4, 14, _, debug-location !25
+    tBX_RET 14, _, implicit $r0, debug-location !25
 ...
Index: test/DebugInfo/MIR/ARM/split-superreg.mir
===================================================================
--- test/DebugInfo/MIR/ARM/split-superreg.mir
+++ test/DebugInfo/MIR/ARM/split-superreg.mir
@@ -74,19 +74,19 @@
 regBankSelected: false
 selected: false
 tracksRegLiveness: true
-calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
-                        '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
-                        '%r5', '%r6', '%r7', '%r8', '%r10', '%r11', '%s16',
-                        '%s17', '%s18', '%s19', '%s20', '%s21', '%s22',
-                        '%s23', '%s24', '%s25', '%s26', '%s27', '%s28',
-                        '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', '%d10_d12',
-                        '%d11_d13', '%d12_d14', '%d13_d15', '%q4_q5', '%q5_q6',
-                        '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', '%r6_r7', '%r10_r11',
-                        '%d8_d9_d10', '%d9_d10_d11', '%d10_d11_d12', '%d11_d12_d13',
-                        '%d12_d13_d14', '%d13_d14_d15', '%d8_d10_d12',
-                        '%d9_d11_d13', '%d10_d12_d14', '%d11_d13_d15',
-                        '%d8_d10_d12_d14', '%d9_d11_d13_d15', '%d9_d10',
-                        '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', '%d11_d12_d13_d14' ]
+calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13',
+                        '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4',
+                        '$r5', '$r6', '$r7', '$r8', '$r10', '$r11', '$s16',
+                        '$s17', '$s18', '$s19', '$s20', '$s21', '$s22',
+                        '$s23', '$s24', '$s25', '$s26', '$s27', '$s28',
+                        '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11', '$d10_d12',
+                        '$d11_d13', '$d12_d14', '$d13_d15', '$q4_q5', '$q5_q6',
+                        '$q6_q7', '$q4_q5_q6_q7', '$r4_r5', '$r6_r7', '$r10_r11',
+                        '$d8_d9_d10', '$d9_d10_d11', '$d10_d11_d12', '$d11_d12_d13',
+                        '$d12_d13_d14', '$d13_d14_d15', '$d8_d10_d12',
+                        '$d9_d11_d13', '$d10_d12_d14', '$d11_d13_d15',
+                        '$d8_d10_d12_d14', '$d9_d11_d13_d15', '$d9_d10',
+                        '$d11_d12', '$d13_d14', '$d9_d10_d11_d12', '$d11_d12_d13_d14' ]
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -102,22 +102,22 @@
   hasVAStart: false
   hasMustTailInVarArgFunc: false
 stack:
-  - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr' }
+  - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr' }
 body: |
   bb.0.entry:
-    liveins: %lr
+    liveins: $lr
 
-    early-clobber %sp = frame-setup t2STR_PRE killed undef %lr, %sp, -4, 14, _
+    early-clobber $sp = frame-setup t2STR_PRE killed undef $lr, $sp, -4, 14, _
     frame-setup CFI_INSTRUCTION def_cfa_offset 4
-    frame-setup CFI_INSTRUCTION offset %lr, -4
-    tBL 14, _, @v, csr_ios, implicit-def dead %lr, implicit %sp, implicit-def %sp, implicit-def %r0, implicit-def %r1, implicit-def %r2, implicit-def %r3, debug-location !19
-    %d1 = VMOVDRR killed %r2, killed %r3, 14, _, implicit-def %q0, debug-location !19
-    %d0 = VMOVDRR killed %r0, killed %r1, 14, _, implicit killed %q0, implicit-def %q0, debug-location !19
-    DBG_VALUE debug-use %q0, debug-use _, !14, !20, debug-location !21
-    %s4 = VMOVS %s1, 14, _, implicit-def %d2, debug-location !24
-    %d0 = VADDfd %d0, killed %d2, 14, _, implicit killed %q0, debug-location !24
-    %r0 = VMOVRS %s0, 14, _, implicit killed %d0, debug-location !25
-    %lr, %sp = t2LDR_POST %sp, 4, 14, _, debug-location !25
-    tBX_RET 14, _, implicit %r0, debug-location !25
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    tBL 14, _, @v, csr_ios, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $r0, implicit-def $r1, implicit-def $r2, implicit-def $r3, debug-location !19
+    $d1 = VMOVDRR killed $r2, killed $r3, 14, _, implicit-def $q0, debug-location !19
+    $d0 = VMOVDRR killed $r0, killed $r1, 14, _, implicit killed $q0, implicit-def $q0, debug-location !19
+    DBG_VALUE debug-use $q0, debug-use _, !14, !20, debug-location !21
+    $s4 = VMOVS $s1, 14, _, implicit-def $d2, debug-location !24
+    $d0 = VADDfd $d0, killed $d2, 14, _, implicit killed $q0, debug-location !24
+    $r0 = VMOVRS $s0, 14, _, implicit killed $d0, debug-location !25
+    $lr, $sp = t2LDR_POST $sp, 4, 14, _, debug-location !25
+    tBX_RET 14, _, implicit $r0, debug-location !25
 ...
Index: test/DebugInfo/MIR/Mips/last-inst-bundled.mir
===================================================================
--- test/DebugInfo/MIR/Mips/last-inst-bundled.mir
+++ test/DebugInfo/MIR/Mips/last-inst-bundled.mir
@@ -21,7 +21,7 @@
 #
 # Check that last bundled instruction of block gets recognized as end of basic block.
 # CHECK: bb.2.if.end
-# CHECK-NEXT: DBG_VALUE debug-use %s0, debug-use %noreg, !12, !DIExpression(), debug-location !17
+# CHECK-NEXT: DBG_VALUE debug-use $s0, debug-use $noreg, !12, !DIExpression(), debug-location !17
 
 --- |
   ; ModuleID = ''
@@ -118,7 +118,7 @@
 tracksRegLiveness: false
 registers:
 liveins:
-  - { reg: '%a0', virtual-reg: '' }
+  - { reg: '$a0', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -142,47 +142,47 @@
     stack-id: 0, callee-saved-register: '', callee-saved-restored: true,
     di-variable: '', di-expression: '', di-location: '' }
   - { id: 1, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
-    stack-id: 0, callee-saved-register: '%ra', callee-saved-restored: true,
+    stack-id: 0, callee-saved-register: '$ra', callee-saved-restored: true,
     di-variable: '', di-expression: '', di-location: '' }
   - { id: 2, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
-    stack-id: 0, callee-saved-register: '%s0', callee-saved-restored: true,
+    stack-id: 0, callee-saved-register: '$s0', callee-saved-restored: true,
     di-variable: '', di-expression: '', di-location: '' }
 constants:
 body: |
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
 
-    %sp = ADDiu %sp, -32
+    $sp = ADDiu $sp, -32
     CFI_INSTRUCTION def_cfa_offset 32
-    SW killed %ra, %sp, 28 :: (store 4 into %stack.1)
-    SW killed %s0, %sp, 24 :: (store 4 into %stack.2)
-    CFI_INSTRUCTION offset %ra_64, -4
-    CFI_INSTRUCTION offset %s0_64, -8
-    DBG_VALUE debug-use %a0, debug-use %noreg, !12, !DIExpression(), debug-location !17
-    %s0 = OR %a0, %zero
-    DBG_VALUE debug-use %s0, debug-use %noreg, !12, !DIExpression(), debug-location !17
-    DBG_VALUE %sp, 0, !13, !DIExpression(DW_OP_plus_uconst, 20), debug-location !19
-    JAL @set_cond, csr_o32, implicit-def dead %ra, implicit %a0, implicit %a1, implicit-def %sp, debug-location !20 {
-      renamable %a1 = LEA_ADDiu %sp, 20
+    SW killed $ra, $sp, 28 :: (store 4 into %stack.1)
+    SW killed $s0, $sp, 24 :: (store 4 into %stack.2)
+    CFI_INSTRUCTION offset $ra_64, -4
+    CFI_INSTRUCTION offset $s0_64, -8
+    DBG_VALUE debug-use $a0, debug-use $noreg, !12, !DIExpression(), debug-location !17
+    $s0 = OR $a0, $zero
+    DBG_VALUE debug-use $s0, debug-use $noreg, !12, !DIExpression(), debug-location !17
+    DBG_VALUE $sp, 0, !13, !DIExpression(DW_OP_plus_uconst, 20), debug-location !19
+    JAL @set_cond, csr_o32, implicit-def dead $ra, implicit $a0, implicit $a1, implicit-def $sp, debug-location !20 {
+      renamable $a1 = LEA_ADDiu $sp, 20
     }
-    renamable %at = LW %sp, 20, debug-location !21 :: (dereferenceable load 4 from %ir.condition, !tbaa !23)
-    DBG_VALUE debug-use %at, debug-use %noreg, !13, !DIExpression(), debug-location !19
-    BEQ killed renamable %at, %zero, %bb.2, implicit-def %at, debug-location !27 {
+    renamable $at = LW $sp, 20, debug-location !21 :: (dereferenceable load 4 from %ir.condition, !tbaa !23)
+    DBG_VALUE debug-use $at, debug-use $noreg, !13, !DIExpression(), debug-location !19
+    BEQ killed renamable $at, $zero, %bb.2, implicit-def $at, debug-location !27 {
      NOP debug-location !27
     }
 
   bb.1.if.then:
     successors: %bb.2(0x80000000)
 
-    JAL @do_something, csr_o32, implicit-def dead %ra, implicit undef %a0, implicit %a1, implicit-def %sp, debug-location !28 {
-      %a1 = OR killed %s0, %zero, debug-location !28
+    JAL @do_something, csr_o32, implicit-def dead $ra, implicit undef $a0, implicit $a1, implicit-def $sp, debug-location !28 {
+      $a1 = OR killed $s0, $zero, debug-location !28
     }
 
   bb.2.if.end:
-    %s0 = LW %sp, 24, debug-location !29 :: (load 4 from %stack.2)
-    %ra = LW %sp, 28, debug-location !29 :: (load 4 from %stack.1)
-    PseudoReturn undef %ra, debug-location !29 {
-      %sp = ADDiu %sp, 32
+    $s0 = LW $sp, 24, debug-location !29 :: (load 4 from %stack.2)
+    $ra = LW $sp, 28, debug-location !29 :: (load 4 from %stack.1)
+    PseudoReturn undef $ra, debug-location !29 {
+      $sp = ADDiu $sp, 32
    }
 ...
Index: test/DebugInfo/MIR/X86/bit-piece-dh.mir
===================================================================
--- test/DebugInfo/MIR/X86/bit-piece-dh.mir
+++ test/DebugInfo/MIR/X86/bit-piece-dh.mir
@@ -62,7 +62,7 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
+  - { reg: '$edi' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -81,17 +81,17 @@
   - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 }
 body: |
   bb.0.entry:
-    liveins: %edi, %rbp
+    liveins: $edi, $rbp
 
-    frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+    frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
     CFI_INSTRUCTION def_cfa_offset 16
-    CFI_INSTRUCTION offset %rbp, -16
-    %rbp = frame-setup MOV64rr %rsp
-    CFI_INSTRUCTION def_cfa_register %rbp
-    DBG_VALUE debug-use %dh, debug-use _, !14, !15, debug-location !16
-    %edi = SHR32ri killed %edi, 8, implicit-def dead %eflags, debug-location !17
-    %eax = MOVSX32rr8 %dil, implicit killed %edi, debug-location !20
-    %rbp = POP64r implicit-def %rsp, implicit %rsp, debug-location !20
-    RETQ %eax, debug-location !20
+    CFI_INSTRUCTION offset $rbp, -16
+    $rbp = frame-setup MOV64rr $rsp
+    CFI_INSTRUCTION def_cfa_register $rbp
+    DBG_VALUE debug-use $dh, debug-use _, !14, !15, debug-location !16
+    $edi = SHR32ri killed $edi, 8, implicit-def dead $eflags, debug-location !17
+    $eax = MOVSX32rr8 $dil, implicit killed $edi, debug-location !20
+    $rbp = POP64r implicit-def $rsp, implicit $rsp, debug-location !20
+    RETQ $eax, debug-location !20
 ...
Index: test/DebugInfo/MIR/X86/empty-inline.mir
===================================================================
--- test/DebugInfo/MIR/X86/empty-inline.mir
+++ test/DebugInfo/MIR/X86/empty-inline.mir
@@ -78,7 +78,7 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%rdi' }
+  - { reg: '$rdi' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -98,24 +98,24 @@
 body: |
   bb.0 (%ir-block.1):
     successors: %bb.1(0x30000000), %bb.2(0x50000000)
-    liveins: %rdi
+    liveins: $rdi
 
-    CMP64mi8 %rip, 1, _, @a, _, 0, implicit-def %eflags :: (dereferenceable load 8 from @a, align 4)
-    JE_1 %bb.1, implicit %eflags
+    CMP64mi8 $rip, 1, _, @a, _, 0, implicit-def $eflags :: (dereferenceable load 8 from @a, align 4)
+    JE_1 %bb.1, implicit $eflags
 
   bb.2 (%ir-block.5):
-    liveins: %rdi
+    liveins: $rdi
 
-    %rax = MOV64rm %rdi, 1, _, 0, _ :: (load 8 from %ir.6, align 4)
-    %al = MOV8rm killed %rax, 1, _, 0, _ :: (load 1 from %ir.8)
-    MOV8mr killed %rdi, 1, _, 8, _, killed %al, debug-location !14 :: (store 1 into %ir.12)
-    RETQ undef %eax
+    $rax = MOV64rm $rdi, 1, _, 0, _ :: (load 8 from %ir.6, align 4)
+    $al = MOV8rm killed $rax, 1, _, 0, _ :: (load 1 from %ir.8)
+    MOV8mr killed $rdi, 1, _, 8, _, killed $al, debug-location !14 :: (store 1 into %ir.12)
+    RETQ undef $eax
 
   bb.1:
-    liveins: %rdi
+    liveins: $rdi
 
-    %al = IMPLICIT_DEF debug-location !10
-    MOV8mr killed %rdi, 1, _, 8, _, killed %al, debug-location !14 :: (store 1 into %ir.12)
-    RETQ undef %eax
+    $al = IMPLICIT_DEF debug-location !10
+    MOV8mr killed $rdi, 1, _, 8, _, killed $al, debug-location !14 :: (store 1 into %ir.12)
+    RETQ undef $eax
 ...
Index: test/DebugInfo/MIR/X86/kill-after-spill.mir
===================================================================
--- test/DebugInfo/MIR/X86/kill-after-spill.mir
+++ test/DebugInfo/MIR/X86/kill-after-spill.mir
@@ -14,8 +14,8 @@
 # ...
 #
 # CHECK: bb.1.if.end:
-# CHECK: DBG_VALUE debug-use %rbp, 0, !37, !DIExpression(DW_OP_constu, 44, DW_OP_minus), debug-location !58
-# CHECK-NOT: DBG_VALUE debug-use %rbp, 0, !36, !DIExpression(DW_OP_constu, 48, DW_OP_minus), debug-location !57
+# CHECK: DBG_VALUE debug-use $rbp, 0, !37, !DIExpression(DW_OP_constu, 44, DW_OP_minus), debug-location !58
+# CHECK-NOT: DBG_VALUE debug-use $rbp, 0, !36, !DIExpression(DW_OP_constu, 48, DW_OP_minus), debug-location !57
 
 --- |
   ; ModuleID = ''
@@ -211,8 +211,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%edi', virtual-reg: '' }
-  - { reg: '%esi', virtual-reg: '' }
+  - { reg: '$edi', virtual-reg: '' }
+  - { reg: '$esi', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -232,15 +232,15 @@
   restorePoint: ''
 fixedStack:
   - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, stack-id: 0,
-    callee-saved-register: '%rbx', callee-saved-restored: true }
+    callee-saved-register: '$rbx', callee-saved-restored: true }
   - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, stack-id: 0,
-    callee-saved-register: '%r12', callee-saved-restored: true }
+    callee-saved-register: '$r12', callee-saved-restored: true }
   - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, stack-id: 0,
-    callee-saved-register: '%r13', callee-saved-restored: true }
+    callee-saved-register: '$r13', callee-saved-restored: true }
   - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, stack-id: 0,
-    callee-saved-register: '%r14', callee-saved-restored: true }
+    callee-saved-register: '$r14', callee-saved-restored: true }
   - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, stack-id: 0,
-    callee-saved-register: '%r15', callee-saved-restored: true }
+    callee-saved-register: '$r15', callee-saved-restored: true }
   - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16, stack-id: 0,
     callee-saved-register: '', callee-saved-restored: true }
 stack:
@@ -254,135 +254,135 @@
 body: |
   bb.0.entry:
     successors: %bb.9(0x30000000), %bb.1(0x50000000)
-    liveins: %edi, %esi, %r15, %r14, %r13, %r12, %rbx
+    liveins: $edi, $esi, $r15, $r14, $r13, $r12, $rbx
 
-    frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+    frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
     CFI_INSTRUCTION def_cfa_offset 16
-    CFI_INSTRUCTION offset %rbp, -16
-    %rbp = frame-setup MOV64rr %rsp
-    CFI_INSTRUCTION def_cfa_register %rbp
-    frame-setup PUSH64r killed %r15, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %r14, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %r13, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %r12, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp
-    CFI_INSTRUCTION offset %rbx, -56
-    CFI_INSTRUCTION offset %r12, -48
-    CFI_INSTRUCTION offset %r13, -40
-    CFI_INSTRUCTION offset %r14, -32
-    CFI_INSTRUCTION offset %r15, -24
-    DBG_VALUE debug-use %edi, debug-use %noreg, !36, !DIExpression(), debug-location !57
-    DBG_VALUE debug-use %esi, debug-use %noreg, !37, !DIExpression(), debug-location !58
-    %ebx = MOV32rr %esi
-    DBG_VALUE %ebx, debug-use %noreg, !37, !DIExpression(), debug-location !58
-    %r15d = MOV32rr %edi
-    DBG_VALUE %r15d, debug-use %noreg, !36, !DIExpression(), debug-location !57
-    renamable %r14 = MOV64ri -9223372036854775808
-    %edi = MOV32rr %ebx
-    CALL64pcrel32 @func1, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit-def %rsp, implicit-def %ssp, implicit-def %rax
-    %r13 = MOV64rr %rax
-    renamable %ecx = XOR32rr undef %ecx, undef %ecx, implicit-def dead %eflags
-    renamable %r13 = AND64rr killed renamable %r13, renamable %r14, implicit-def %eflags
-    JE_1 %bb.9, implicit %eflags
+    CFI_INSTRUCTION offset $rbp, -16
+    $rbp = frame-setup MOV64rr $rsp
+    CFI_INSTRUCTION def_cfa_register $rbp
+    frame-setup PUSH64r killed $r15, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $r14, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $r13, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $r12, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+    CFI_INSTRUCTION offset $rbx, -56
+    CFI_INSTRUCTION offset $r12, -48
+    CFI_INSTRUCTION offset $r13, -40
+    CFI_INSTRUCTION offset $r14, -32
+    CFI_INSTRUCTION offset $r15, -24
+    DBG_VALUE debug-use $edi, debug-use $noreg, !36, !DIExpression(), debug-location !57
+    DBG_VALUE debug-use $esi, debug-use $noreg, !37, !DIExpression(), debug-location !58
+    $ebx = MOV32rr $esi
+    DBG_VALUE $ebx, debug-use $noreg, !37, !DIExpression(), debug-location !58
+    $r15d = MOV32rr $edi
+    DBG_VALUE $r15d, debug-use $noreg, !36, !DIExpression(), debug-location !57
+    renamable $r14 = MOV64ri -9223372036854775808
+    $edi = MOV32rr $ebx
+    CALL64pcrel32 @func1, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+    $r13 = MOV64rr $rax
+    renamable $ecx = XOR32rr undef $ecx, undef $ecx, implicit-def dead $eflags
+    renamable $r13 = AND64rr killed renamable $r13, renamable $r14, implicit-def $eflags
+    JE_1 %bb.9, implicit $eflags
 
   bb.1.if.end:
     successors: %bb.2(0x30000000), %bb.3(0x50000000)
-    liveins: %ebx, %r13, %r14, %r15d
+    liveins: $ebx, $r13, $r14, $r15d
 
     ; The instruction below is inserted additionally in order to test part of the code.
-    %r12d = MOV32rr %r15d
-    MOV32mr %rbp, 1, %noreg, -48, %noreg, renamable %r15d :: (store 4 into %stack.0)
-    ; The instruction below is altered (%r15d -> %r12d) in order to test part of the code.
-    ; The original instruction "%edi = MOV32rr killed %r15d
-    %edi = MOV32rr killed %r12d
-    MOV32mr %rbp, 1, %noreg, -44, %noreg, renamable %ebx :: (store 4 into %stack.1)
-    %esi = MOV32rr killed %ebx
-    CALL64pcrel32 @func2, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi, implicit-def %rsp, implicit-def %ssp, implicit-def %rax
-    %r12 = MOV64rr %rax
-    %r15 = MOV64rr %r12
-    renamable %r15 = AND64ri8 killed renamable %r15, -123, implicit-def %eflags
-    JE_1 %bb.2, implicit %eflags
+    $r12d = MOV32rr $r15d
+    MOV32mr $rbp, 1, $noreg, -48, $noreg, renamable $r15d :: (store 4 into %stack.0)
+    ; The instruction below is altered ($r15d -> $r12d) in order to test part of the code.
+    ; The original instruction was "$edi = MOV32rr killed $r15d".
+    $edi = MOV32rr killed $r12d
+    MOV32mr $rbp, 1, $noreg, -44, $noreg, renamable $ebx :: (store 4 into %stack.1)
+    $esi = MOV32rr killed $ebx
+    CALL64pcrel32 @func2, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+    $r12 = MOV64rr $rax
+    $r15 = MOV64rr $r12
+    renamable $r15 = AND64ri8 killed renamable $r15, -123, implicit-def $eflags
+    JE_1 %bb.2, implicit $eflags
 
   bb.3.private.exit:
     successors: %bb.9(0x30000000), %bb.4(0x50000000)
-    liveins: %r12, %r13, %r14, %r15
-
-    renamable %r14 = OR64rr killed renamable %r14, renamable %r12, implicit-def dead %eflags
-    %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
-    dead %edx = MOV32ri 16, implicit-def %rdx
-    %rdi = MOV64rr killed %r14
-    CALL64pcrel32 @memset, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit %rdx, implicit-def %rsp, implicit-def %ssp, implicit-def dead %rax
-    %rdi = MOV64rr %r12
-    CALL64pcrel32 @func4, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
-    renamable %ecx = MOV32ri 1
-    TEST32rr killed renamable %eax, renamable %eax, implicit-def %eflags
-    JE_1 %bb.9, implicit %eflags
+    liveins: $r12, $r13, $r14, $r15
+
+    renamable $r14 = OR64rr killed renamable $r14, renamable $r12, implicit-def dead $eflags
+    $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags
+    dead $edx = MOV32ri 16, implicit-def $rdx
+    $rdi = MOV64rr killed $r14
+    CALL64pcrel32 @memset, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+    $rdi = MOV64rr $r12
+    CALL64pcrel32 @func4, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def $eax
+    renamable $ecx = MOV32ri 1
+    TEST32rr killed renamable $eax, renamable $eax, implicit-def $eflags
+    JE_1 %bb.9, implicit $eflags
 
   bb.4.if.then8:
     successors: %bb.8(0x30000000), %bb.5(0x50000000)
-    liveins: %r12, %r13, %r15
+    liveins: $r12, $r13, $r15
 
-    %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
-    %rdi = MOV64rr %r12
-    CALL64pcrel32 @func5, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit-def %rsp, implicit-def %ssp
-    renamable %rax = MOV64rm killed renamable %r13, 1, %noreg, 8, %noreg :: (load 8 from %ir.13)
-    TEST64rr renamable %rax, renamable %rax, implicit-def %eflags
-    JE_1 %bb.8, implicit %eflags
+    $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags
+    $rdi = MOV64rr $r12
+    CALL64pcrel32 @func5, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit-def $rsp, implicit-def $ssp
+    renamable $rax = MOV64rm killed renamable $r13, 1, $noreg, 8, $noreg :: (load 8 from %ir.13)
+    TEST64rr renamable $rax, renamable $rax, implicit-def $eflags
+    JE_1 %bb.8, implicit $eflags
 
   bb.5.land.lhs.true:
     successors: %bb.6(0x30000000), %bb.7(0x50000000)
-    liveins: %rax, %r12, %r15
+    liveins: $rax, $r12, $r15
 
-    CMP32mi8 renamable %r15, 1, %noreg, 0, %noreg, 0, implicit-def %eflags :: (load 4 from %ir.tot_perf2, align 8)
-    JNE_1 %bb.7, implicit %eflags
+    CMP32mi8 renamable $r15, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load 4 from %ir.tot_perf2, align 8)
+    JNE_1 %bb.7, implicit $eflags
 
   bb.6.lor.lhs.false:
     successors: %bb.8(0x30000000), %bb.7(0x50000000)
-    liveins: %rax, %r12, %r15
+    liveins: $rax, $r12, $r15
 
-    CMP32mi8 killed renamable %r15, 1, %noreg, 4, %noreg, 0, implicit-def %eflags :: (load 4 from %ir.tot_bw)
-    JE_1 %bb.8, implicit %eflags
+    CMP32mi8 killed renamable $r15, 1, $noreg, 4, $noreg, 0, implicit-def $eflags :: (load 4 from %ir.tot_bw)
+    JE_1 %bb.8, implicit $eflags
 
   bb.7.if.then14:
     successors: %bb.8(0x80000000)
-    liveins: %rax, %r12
+    liveins: $rax, $r12
 
-    renamable %rdx = MOV64rm killed renamable %rax, 1, %noreg, 8, %noreg :: (load 8 from %ir.20)
-    %rdi = MOV64rr killed %r12
-    %esi = MOV32rm %rbp, 1, %noreg, -44, %noreg :: (load 4 from %stack.1)
-    CALL64pcrel32 @func6, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit %rdx, implicit-def %rsp, implicit-def %ssp
+    renamable $rdx = MOV64rm killed renamable $rax, 1, $noreg, 8, $noreg :: (load 8 from %ir.20)
+    $rdi = MOV64rr killed $r12
+    $esi = MOV32rm $rbp, 1, $noreg, -44, $noreg :: (load 4 from %stack.1)
+    CALL64pcrel32 @func6, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $rdx, implicit-def $rsp, implicit-def $ssp
 
   bb.8.cleanup:
     successors: %bb.9(0x80000000)
 
-    renamable %ecx = MOV32ri 1
+    renamable $ecx = MOV32ri 1
     JMP_1 %bb.9
 
   bb.2.if.then3:
     successors: %bb.9(0x80000000)
-    liveins: %r13
+    liveins: $r13
 
-    renamable %edi = MOV32rm renamable %r13, 1, %noreg, 0, %noreg :: (load 4 from %ir.variableLocal11, align 8)
-    renamable %esi = MOVZX32rm8 killed renamable %r13, 1, %noreg, 4, %noreg :: (load 1 from %ir.variableLocal2, align 4)
-    renamable %ecx = MOV32ri @.str, implicit-def %rcx
-    renamable %rcx = OR64ri32 killed renamable %rcx, -92238, implicit-def dead %eflags
-    %edx = MOV32ri 5
-    %r8d = MOV32rm %rbp, 1, %noreg, -48, %noreg :: (load 4 from %stack.0)
-    CALL64pcrel32 @func3, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi, implicit %edx, implicit %rcx, implicit %r8d, implicit-def %rsp, implicit-def %ssp
-    renamable %ecx = XOR32rr undef %ecx, undef %ecx, implicit-def dead %eflags
+    renamable $edi = MOV32rm renamable $r13, 1, $noreg, 0, $noreg :: (load 4 from %ir.variableLocal11, align 8)
+    renamable $esi = MOVZX32rm8 killed renamable $r13, 1, $noreg, 4, $noreg :: (load 1 from %ir.variableLocal2, align 4)
+    renamable $ecx = MOV32ri @.str, implicit-def $rcx
+    renamable $rcx = OR64ri32 killed renamable $rcx, -92238, implicit-def dead $eflags
+    $edx = MOV32ri 5
+    $r8d = MOV32rm $rbp, 1, $noreg, -48, $noreg :: (load 4 from %stack.0)
+    CALL64pcrel32 @func3, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit $edx, implicit $rcx, implicit $r8d, implicit-def $rsp, implicit-def $ssp
+    renamable $ecx = XOR32rr undef $ecx, undef $ecx, implicit-def dead $eflags
 
   bb.9.cleanup:
-    liveins: %ecx
-
-    %eax = MOV32rr killed %ecx
-    %rsp = ADD64ri8 %rsp, 8, implicit-def dead %eflags
-    %rbx = POP64r implicit-def %rsp, implicit %rsp
-    %r12 = POP64r implicit-def %rsp, implicit %rsp
-    %r13 = POP64r implicit-def %rsp, implicit %rsp
-    %r14 = POP64r implicit-def %rsp, implicit %rsp
-    %r15 = POP64r implicit-def %rsp, implicit %rsp
-    %rbp = POP64r implicit-def %rsp, implicit %rsp
-    RETQ %eax
+    liveins: $ecx
+
+    $eax = MOV32rr killed $ecx
+    $rsp = ADD64ri8 $rsp, 8, implicit-def dead $eflags
+    $rbx = POP64r implicit-def $rsp, implicit $rsp
+    $r12 = POP64r implicit-def $rsp, implicit $rsp
+    $r13 = POP64r implicit-def $rsp, implicit $rsp
+    $r14 = POP64r implicit-def $rsp, implicit $rsp
+    $r15 = POP64r implicit-def $rsp, implicit $rsp
+    $rbp = POP64r implicit-def $rsp, implicit $rsp
+    RETQ $eax
 ...
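
The checks in kill-after-spill.mir above pin spilled variables to frame-pointer-relative DWARF expressions: "DBG_VALUE debug-use $rbp, 0, !37, !DIExpression(DW_OP_constu, 44, DW_OP_minus)" says the variable's value sits at rbp - 44. How such an expression is evaluated is easiest to see as a stack machine; the toy Python evaluator below covers only the two opcodes these tests use and is not a real DWARF consumer.

    def eval_dwarf_expr(ops, base_reg_value):
        # The DBG_VALUE register operand seeds the expression stack.
        stack = [base_reg_value]
        ops = iter(ops)
        for op in ops:
            if op == "DW_OP_constu":
                stack.append(next(ops))          # push the literal that follows
            elif op == "DW_OP_minus":
                top = stack.pop()                # pop two entries, push
                stack.append(stack.pop() - top)  # second-from-top minus top
            else:
                raise NotImplementedError(op)
        return stack[-1]                         # the variable's address

    # With $rbp = 0x7ffd1000, (DW_OP_constu, 44, DW_OP_minus) names rbp - 44.
    assert eval_dwarf_expr(["DW_OP_constu", 44, "DW_OP_minus"], 0x7ffd1000) == 0x7ffd0fd4
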
Index: test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
===================================================================
--- test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
+++ test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
@@ -31,9 +31,9 @@
 # DBG_VALUE for variables "x", "y" and "z" are extended into %bb.9 from its
 # predecessors %bb.0, %bb.2 and %bb.8.
 # CHECK: bb.9.for.end:
-# CHECK-DAG: DBG_VALUE debug-use %edi, debug-use %noreg, ![[X_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
-# CHECK-DAG: DBG_VALUE debug-use %esi, debug-use %noreg, ![[Y_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
-# CHECK-DAG: DBG_VALUE debug-use %edx, debug-use %noreg, ![[Z_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
+# CHECK-DAG: DBG_VALUE debug-use $edi, debug-use $noreg, ![[X_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
+# CHECK-DAG: DBG_VALUE debug-use $esi, debug-use $noreg, ![[Y_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
+# CHECK-DAG: DBG_VALUE debug-use $edx, debug-use $noreg, ![[Z_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
 # CHECK: RET
 
 --- |
@@ -163,10 +163,10 @@
 exposesReturnsTwice: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
-  - { reg: '%esi' }
-  - { reg: '%edx' }
-  - { reg: '%ecx' }
+  - { reg: '$edi' }
+  - { reg: '$esi' }
+  - { reg: '$edx' }
+  - { reg: '$ecx' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -184,116 +184,116 @@
 body: |
   bb.0.entry:
     successors: %bb.1.for.body.preheader(20), %bb.9.for.end(12)
-    liveins: %ecx, %edi, %edx, %esi
+    liveins: $ecx, $edi, $edx, $esi
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %r8d = MOV32rr %esi, debug-location !26
-    %r8d = IMUL32rr killed %r8d, %edi, implicit-def dead %eflags, debug-location !26
-    TEST32rr %r8d, %r8d, implicit-def %eflags, debug-location !31
-    JLE_1 %bb.9.for.end, implicit %eflags
+    $r8d = MOV32rr $esi, debug-location !26
+    $r8d = IMUL32rr killed $r8d, $edi, implicit-def dead $eflags, debug-location !26
+    TEST32rr $r8d, $r8d, implicit-def $eflags, debug-location !31
+    JLE_1 %bb.9.for.end, implicit $eflags
 
   bb.1.for.body.preheader:
     successors: %bb.3.for.body(0)
-    liveins: %ecx, %edi, %edx, %esi, %r8d
+    liveins: $ecx, $edi, $edx, $esi, $r8d
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+    $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
 
   bb.3.for.body (align 4):
     successors: %bb.4.if.then(4), %bb.5.if.end(124)
-    liveins: %eax, %ecx, %edi, %edx, %esi, %r8d
+    liveins: $eax, $ecx, $edi, $edx, $esi, $r8d
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    TEST32rr %edi, %edi, implicit-def %eflags, debug-location !35
-    JG_1 %bb.4.if.then, implicit %eflags
+    TEST32rr $edi, $edi, implicit-def $eflags, debug-location !35
+    JG_1 %bb.4.if.then, implicit $eflags
 
   bb.5.if.end:
     successors: %bb.6.if.then.4(4), %bb.7.if.end.6(124)
-    liveins: %eax, %ecx, %edi, %edx, %esi, %r8d
+    liveins: $eax, $ecx, $edi, $edx, $esi, $r8d
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    TEST32rr %esi, %esi, implicit-def %eflags, debug-location !39
-    JG_1 %bb.6.if.then.4, implicit %eflags
+    TEST32rr $esi, $esi, implicit-def $eflags, debug-location !39
+    JG_1 %bb.6.if.then.4, implicit $eflags
 
   bb.7.if.end.6:
     successors: %bb.8.if.then.8(4), %bb.2.for.cond(124)
-    liveins: %eax, %ecx, %edi, %edx, %esi, %r8d
+    liveins: $eax, $ecx, $edi, $edx, $esi, $r8d
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    TEST32rr %edx, %edx, implicit-def %eflags, debug-location !45
-    JG_1 %bb.8.if.then.8, implicit %eflags
+    TEST32rr $edx, $edx, implicit-def $eflags, debug-location !45
+    JG_1 %bb.8.if.then.8, implicit $eflags
 
   bb.2.for.cond:
     successors: %bb.3.for.body(124), %bb.9.for.end(4)
-    liveins: %eax, %ecx, %edi, %edx, %esi, %r8d
+    liveins: $eax, $ecx, $edi, $edx, $esi, $r8d
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %eax = INC32r killed %eax, implicit-def dead %eflags, debug-location !44
-    DBG_VALUE debug-use %eax, debug-use _, !13, !17, debug-location !25
-    CMP32rr %eax, %r8d, implicit-def %eflags, debug-location !31
-    JL_1 %bb.3.for.body, implicit %eflags
+    $eax = INC32r killed $eax, implicit-def dead $eflags, debug-location !44
+    DBG_VALUE debug-use $eax, debug-use _, !13, !17, debug-location !25
+    CMP32rr $eax, $r8d, implicit-def $eflags, debug-location !31
+    JL_1 %bb.3.for.body, implicit $eflags
     JMP_1 %bb.9.for.end
 
   bb.4.if.then:
-    liveins: %ecx, %edi
+    liveins: $ecx, $edi
 
-    DBG_VALUE debug-use %edi, debug-use _, !9, !17, debug-location !18
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edi, debug-use _, !9, !17, debug-location !18
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %ecx = IMUL32rr killed %ecx, killed %edi, implicit-def dead %eflags, debug-location !36
+    $ecx = IMUL32rr killed $ecx, killed $edi, implicit-def dead $eflags, debug-location !36
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %eax = MOV32rr killed %ecx, debug-location !50
-    RETQ %eax, debug-location !50
+    $eax = MOV32rr killed $ecx, debug-location !50
+    RETQ $eax, debug-location !50
 
   bb.6.if.then.4:
-    liveins: %ecx, %esi
+    liveins: $ecx, $esi
 
-    DBG_VALUE debug-use %esi, debug-use _, !10, !17, debug-location !19
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $esi, debug-use _, !10, !17, debug-location !19
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %ecx = IMUL32rr killed %ecx, killed %esi, implicit-def dead %eflags, debug-location !40
+    $ecx = IMUL32rr killed $ecx, killed $esi, implicit-def dead $eflags, debug-location !40
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %eax = MOV32rr killed %ecx, debug-location !50
-    RETQ %eax, debug-location !50
+    $eax = MOV32rr killed $ecx, debug-location !50
+    RETQ $eax, debug-location !50
 
   bb.8.if.then.8:
     successors: %bb.9.for.end(0)
-    liveins: %ecx, %edx
+    liveins: $ecx, $edx
 
-    DBG_VALUE debug-use %edx, debug-use _, !11, !17, debug-location !21
-    DBG_VALUE debug-use %ecx, debug-use _, !12, !17, debug-location !23
+    DBG_VALUE debug-use $edx, debug-use _, !11, !17, debug-location !21
+    DBG_VALUE debug-use $ecx, debug-use _, !12, !17, debug-location !23
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %ecx = IMUL32rr killed %ecx, killed %edx, implicit-def dead %eflags, debug-location !46
+    $ecx = IMUL32rr killed $ecx, killed $edx, implicit-def dead $eflags, debug-location !46
 
   bb.9.for.end:
-    liveins: %ecx
+    liveins: $ecx
 
     DBG_VALUE 0, 0, !13, !17, debug-location !25
-    %eax = MOV32rr killed %ecx, debug-location !50
-    RETQ %eax, debug-location !50
+    $eax = MOV32rr killed $ecx, debug-location !50
+    RETQ $eax, debug-location !50
 ...
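
The join that live-debug-values-3preds.mir exercises can be modeled as set intersection: a variable/location pair survives into bb.9 only if every one of the three predecessors exits with that same pair. The snippet below is a deliberate simplification (the actual LiveDebugValues pass iterates this transfer to a fixed point and also tracks spill slots); the variable "t" is made up here to show a binding the predecessors disagree on.

    def join_locations(pred_out_locs):
        # Entry set of a block = intersection of its predecessors' exit sets.
        joined = set(pred_out_locs[0]) if pred_out_locs else set()
        for out_locs in pred_out_locs[1:]:
            joined &= out_locs
        return joined

    # Exit sets of bb.9's predecessors as (variable, location) pairs.
    bb0 = {("x", "$edi"), ("y", "$esi"), ("z", "$edx"), ("t", "$ecx")}
    bb2 = {("x", "$edi"), ("y", "$esi"), ("z", "$edx")}
    bb8 = {("x", "$edi"), ("y", "$esi"), ("z", "$edx"), ("t", "$r8d")}

    # Only the bindings common to all three predecessors flow into bb.9.
    assert join_locations([bb0, bb2, bb8]) == {("x", "$edi"), ("y", "$esi"), ("z", "$edx")}
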
Index: test/DebugInfo/MIR/X86/live-debug-values-spill.mir
===================================================================
--- test/DebugInfo/MIR/X86/live-debug-values-spill.mir
+++ test/DebugInfo/MIR/X86/live-debug-values-spill.mir
@@ -52,36 +52,36 @@
 # GENERATE: ![[INTD:[0-9]+]] = !DILocalVariable(name: "intd",{{.*}})
 #
 # GENERATE: bb.1.if.end:
-# GENERATE: MOV32mr %rbp, 1, %noreg, -48, %noreg, killed %edx :: (store 4 into %stack.5)
-# GENERATE-NEXT: DBG_VALUE debug-use %rbp, 0, ![[INT0]], !DIExpression(DW_OP_constu, 48, DW_OP_minus)
-# GENERATE: MOV32mr %rbp, 1, %noreg, -52, %noreg, killed %r8d :: (store 4 into %stack.4)
-# GENERATE-NEXT: DBG_VALUE debug-use %rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
-# GENERATE: MOV32mr %rbp, 1, %noreg, -56, %noreg, killed %esi :: (store 4 into %stack.3)
-# GENERATE-NEXT: DBG_VALUE debug-use %rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
+# GENERATE: MOV32mr $rbp, 1, $noreg, -48, $noreg, killed $edx :: (store 4 into %stack.5)
+# GENERATE-NEXT: DBG_VALUE debug-use $rbp, 0, ![[INT0]], !DIExpression(DW_OP_constu, 48, DW_OP_minus)
+# GENERATE: MOV32mr $rbp, 1, $noreg, -52, $noreg, killed $r8d :: (store 4 into %stack.4)
+# GENERATE-NEXT: DBG_VALUE debug-use $rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
+# GENERATE: MOV32mr $rbp, 1, $noreg, -56, $noreg, killed $esi :: (store 4 into %stack.3)
+# GENERATE-NEXT: DBG_VALUE debug-use $rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
 #
 # Check that the spill locations that are valid at the end of bb.1.if.end are
 # propagated to subsequent BBs.
 #
 # GENERATE: bb.2.if.then4:
 # GENERATE-NOT: bb.3:
-# GENERATE-DAG: DBG_VALUE debug-use %rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
-# GENERATE-DAG: DBG_VALUE debug-use %rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
+# GENERATE-DAG: DBG_VALUE debug-use $rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
+# GENERATE-DAG: DBG_VALUE debug-use $rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
 #
 # GENERATE: bb.3:
 # GENERATE-NOT: bb.4.if.end13:
-# GENERATE-DAG: DBG_VALUE debug-use %rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
-# GENERATE-DAG: DBG_VALUE debug-use %rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
+# GENERATE-DAG: DBG_VALUE debug-use $rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
+# GENERATE-DAG: DBG_VALUE debug-use $rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
 #
 # GENERATE: bb.4.if.end13:
 # GENERATE-NOT: bb.5.cleanup:
-# GENERATE-DAG: DBG_VALUE debug-use %rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
-# GENERATE-DAG: DBG_VALUE debug-use %rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
+# GENERATE-DAG: DBG_VALUE debug-use $rbp, 0, ![[INTD]], !DIExpression(DW_OP_constu, 56, DW_OP_minus)
+# GENERATE-DAG: DBG_VALUE debug-use $rbp, 0, ![[INTB]], !DIExpression(DW_OP_constu, 52, DW_OP_minus)
 #
 # Check that the spill location rbp-48 (the variable int0) is not propagated
 # because int0 is redefined within the same basic block.
 #
 # TERMINATE: bb.2.if.then4:
-# TERMINATE-NOT: DBG_VALUE debug-use %rbp, -48,
+# TERMINATE-NOT: DBG_VALUE debug-use $rbp, -48,
 
 --- |
   ; ModuleID = ''
   source_filename = "spill1.c"
@@ -309,16 +309,16 @@
 selected: false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%edi' }
-  - { reg: '%esi' }
-  - { reg: '%edx' }
-  - { reg: '%ecx' }
-  - { reg: '%r8d' }
-  - { reg: '%r9d' }
-calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
-                        '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
-                        '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
-                        '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
+  - { reg: '$edi' }
+  - { reg: '$esi' }
+  - { reg: '$edx' }
+  - { reg: '$ecx' }
+  - { reg: '$r8d' }
+  - { reg: '$r9d' }
+calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx',
+                        '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15',
+                        '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d',
+                        '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ]
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -334,11 +334,11 @@
   hasVAStart: false
   hasMustTailInVarArgFunc: false
 fixedStack:
-  - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '%rbx' }
-  - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '%r12' }
-  - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '%r13' }
-  - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '%r14' }
-  - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%r15' }
+  - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '$rbx' }
+  - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '$r12' }
+  - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '$r13' }
+  - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '$r14' }
+  - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$r15' }
   - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16 }
   - { id: 6, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
 stack:
@@ -351,122 +351,122 @@
 body: |
   bb.0.entry:
    successors: %bb.1.if.end(0x30000000), %bb.5.cleanup(0x50000000)
-    liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d, %r15, %r14, %r13, %r12, %rbx, %rbp
+    liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d, $r15, $r14, $r13, $r12, $rbx, $rbp
 
-    frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+    frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
     CFI_INSTRUCTION def_cfa_offset 16
-    CFI_INSTRUCTION offset %rbp, -16
-    %rbp = frame-setup MOV64rr %rsp
-    CFI_INSTRUCTION def_cfa_register %rbp
-    frame-setup PUSH64r killed %r15, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %r14, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %r13, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %r12, implicit-def %rsp, implicit %rsp
-    frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
-    %rsp = frame-setup SUB64ri8 %rsp, 24, implicit-def dead %eflags
-    CFI_INSTRUCTION offset %rbx, -56
-    CFI_INSTRUCTION offset %r12, -48
-    CFI_INSTRUCTION offset %r13, -40
-    CFI_INSTRUCTION offset %r14, -32
-    CFI_INSTRUCTION offset %r15, -24
-    DBG_VALUE debug-use %edi, debug-use _, !24, !38, debug-location !39
-    DBG_VALUE debug-use %esi, debug-use _, !25, !38, debug-location !40
-    DBG_VALUE debug-use %edx, debug-use _, !26, !38, debug-location !41
-    DBG_VALUE debug-use %ecx, debug-use _, !27, !38, debug-location !42
-    DBG_VALUE debug-use %r8d, debug-use _, !28, !38, debug-location !43
-    DBG_VALUE debug-use %r9d, debug-use _, !29, !38, debug-location !44
-    %r14d = MOV32rr %r8d
-    DBG_VALUE debug-use %r14d, debug-use _, !28, !38, debug-location !43
-    %r12d = MOV32rr %esi
-    DBG_VALUE debug-use %r12d, debug-use _, !25, !38, debug-location !40
-    %eax = MOV32rr %edi
-    DBG_VALUE debug-use %eax, debug-use _, !24, !38, debug-location !39
-    %r13d = MOV32rm %rip, 1, _, @glob0, _, debug-location !46 :: (dereferenceable load 4 from @glob0, !tbaa !47)
-    DBG_VALUE debug-use %r13d, debug-use _, !31, !38, debug-location !51
-    %r8d = MOV32rm %rip, 1, _, @glob1, _, debug-location !52 :: (dereferenceable load 4 from @glob1, !tbaa !47)
-    DBG_VALUE debug-use %r8d, debug-use _, !32, !38, debug-location !53
-    %r15d = MOV32rm %rip, 1, _, @glob2, _, debug-location !54 :: (dereferenceable load 4 from @glob2, !tbaa !47)
-    DBG_VALUE debug-use %r15d, debug-use _, !33, !38, debug-location !55
-    %esi = MOV32rm %rip, 1, _, @glob3, _, debug-location !56 :: (dereferenceable load 4 from @glob3, !tbaa !47)
-    DBG_VALUE debug-use %esi, debug-use _, !34, !38, debug-location !57
-    %ebx = MOV32rm %rip, 1, _, @glob4, _, debug-location !59 :: (dereferenceable load 4 from @glob4, !tbaa !47)
-    DBG_VALUE debug-use %ebx, debug-use _, !35, !38, debug-location !60
-    MOV32mr %rbp, 1, _, -44, _, %ebx, debug-location !60 :: (store 4 into %ir.inte, !tbaa !47)
-    %edi = MOV32rm %rip, 1, _, @glob5, _, debug-location !62 :: (dereferenceable load 4 from @glob5, !tbaa !47)
-    DBG_VALUE debug-use %edi, debug-use _, !36, !38, debug-location !63
-    MOV32mr %rbp, 1, _, -60, _, %edi, debug-location !63 :: (store 4 into %ir.intf, !tbaa !47)
-    TEST32rr killed %eax, %eax, implicit-def %eflags, debug-location !67
-    JNE_1 %bb.5.cleanup, implicit %eflags
+    CFI_INSTRUCTION offset $rbp, -16
+    $rbp = frame-setup MOV64rr $rsp
+    CFI_INSTRUCTION def_cfa_register $rbp
+    frame-setup PUSH64r killed $r15, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $r14, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $r13, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $r12, implicit-def $rsp, implicit $rsp
+    frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+    $rsp = frame-setup SUB64ri8 $rsp, 24, implicit-def dead $eflags
+    CFI_INSTRUCTION offset $rbx, -56
+    CFI_INSTRUCTION offset $r12, -48
+    CFI_INSTRUCTION offset $r13, -40
+    CFI_INSTRUCTION offset $r14, -32
+    CFI_INSTRUCTION offset $r15, -24
+    DBG_VALUE debug-use $edi, debug-use _, !24, !38, debug-location !39
+    DBG_VALUE debug-use $esi, debug-use _, !25, !38, debug-location !40
+    DBG_VALUE debug-use $edx, debug-use _, !26, !38, debug-location !41
+    DBG_VALUE debug-use $ecx, debug-use _, !27, !38, debug-location !42
+    DBG_VALUE debug-use $r8d, debug-use _, !28, !38, debug-location !43
+    DBG_VALUE debug-use $r9d, debug-use _, !29, !38, debug-location !44
+    $r14d = MOV32rr $r8d
+    DBG_VALUE debug-use $r14d, debug-use _, !28, !38, debug-location !43
+    $r12d = MOV32rr $esi
+    DBG_VALUE debug-use $r12d, debug-use _, !25, !38, debug-location !40
+    $eax = MOV32rr $edi
+    DBG_VALUE debug-use $eax, debug-use _, !24, !38, debug-location !39
+    $r13d = MOV32rm $rip, 1, _, @glob0, _, debug-location !46 :: (dereferenceable load 4 from @glob0, !tbaa !47)
+    DBG_VALUE debug-use $r13d, debug-use _, !31, !38, debug-location !51
+    $r8d = MOV32rm $rip, 1, _, @glob1, _, debug-location !52 :: (dereferenceable load 4 from @glob1, !tbaa !47)
+    DBG_VALUE debug-use $r8d, debug-use _, !32, !38, debug-location !53
+    $r15d = MOV32rm $rip, 1, _, @glob2, _, debug-location !54 :: (dereferenceable load 4 from @glob2, !tbaa !47)
+    DBG_VALUE debug-use $r15d, debug-use _, !33, !38, debug-location !55
+    $esi = MOV32rm $rip, 1, _, @glob3, _, debug-location !56 :: (dereferenceable load 4 from @glob3, !tbaa !47)
+    DBG_VALUE debug-use $esi, debug-use _, !34, !38, debug-location !57
+    $ebx = MOV32rm $rip, 1, _, @glob4, _, debug-location !59 :: (dereferenceable load 4 from @glob4, !tbaa !47)
+    DBG_VALUE debug-use $ebx, debug-use _, !35, !38, debug-location !60
+    MOV32mr $rbp, 1, _, -44, _, $ebx, debug-location !60 :: (store 4 into %ir.inte, !tbaa !47)
+    $edi = MOV32rm $rip, 1, _, @glob5, _, debug-location !62 :: (dereferenceable load 4 from @glob5, !tbaa !47)
+    DBG_VALUE debug-use $edi, debug-use _, !36, !38, debug-location !63
+    MOV32mr $rbp, 1, _, -60, _, $edi, debug-location !63 :: (store 4 into %ir.intf, !tbaa !47)
+    TEST32rr killed $eax, $eax, implicit-def $eflags, debug-location !67
+    JNE_1 %bb.5.cleanup, implicit $eflags
 
   bb.1.if.end:
     successors: %bb.2(0x30000000), %bb.3.if.then4(0x50000000)
-    liveins: %ebx, %ecx, %edi, %edx, %esi, %r8d, %r9d, %r12d, %r13d, %r14d, %r15d, %rbp
+    liveins: $ebx, $ecx, $edi, $edx, $esi, $r8d, $r9d, $r12d, $r13d, $r14d, $r15d, $rbp
 
-    MOV32mr %rbp, 1, _, -48, _, killed %edx :: (store 4 into %stack.5)
-    MOV32mr %rbp, 1, _, -52, _, killed %r8d :: (store 4 into %stack.4)
-    MOV32mr %rbp, 1, _, -56, _, killed %esi :: (store 4 into %stack.3)
+    MOV32mr $rbp, 1, _, -48, _, killed $edx :: (store 4 into %stack.5)
+    MOV32mr $rbp, 1, _, -52, _, killed $r8d :: (store 4 into %stack.4)
+    MOV32mr $rbp, 1, _, -56, _, killed $esi :: (store 4 into %stack.3)
     DBG_VALUE debug-use _, debug-use _, !30, !38, debug-location !45
-    %r14d = ADD32rr killed %r14d, killed %ecx, implicit-def dead %eflags, debug-location !68
-    %r14d = ADD32rr killed %r14d, killed %r9d, implicit-def dead %eflags, debug-location !69
-    %r14d = IMUL32rm killed %r14d, %rbp, 1, _, 16, _, implicit-def dead %eflags, debug-location !70 :: (load 4 from %fixed-stack.6, align 16)
-    DBG_VALUE debug-use %r14d, debug-use _, !26, !38, debug-location !41
-    CALL64pcrel32 @use, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !72
-    %edi = MOV32rr killed %ebx, debug-location !73
-    CALL64pcrel32 @use, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !73
-    TEST32rr killed %r12d, %r12d, implicit-def %eflags, debug-location !74
-    JE_1 %bb.2, implicit %eflags
+    $r14d = ADD32rr killed $r14d, killed $ecx, implicit-def dead $eflags, debug-location !68
+    $r14d = ADD32rr killed $r14d, killed $r9d, implicit-def dead $eflags, debug-location !69
+    $r14d = IMUL32rm killed $r14d, $rbp, 1, _, 16, _, implicit-def dead $eflags, debug-location !70 :: (load 4 from %fixed-stack.6, align 16)
+    DBG_VALUE debug-use $r14d, debug-use _, !26, !38, debug-location !41
+    CALL64pcrel32 @use, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, debug-location !72
+    $edi = MOV32rr killed $ebx, debug-location !73
+    CALL64pcrel32 @use, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, debug-location !73
+    TEST32rr killed $r12d, $r12d, implicit-def $eflags, debug-location !74
+    JE_1 %bb.2, implicit $eflags
 
   bb.3.if.then4:
     successors: %bb.4.if.end13(0x80000000)
-    liveins: %r14d, %r15d, %rbp
+    liveins: $r14d, $r15d, $rbp
 
-    %rdi = LEA64r %rbp, 1, _, -44, _
-    DBG_VALUE %rbp, -44, !35, !38,
debug-location !60 - %rsi = LEA64r %rbp, 1, _, -60, _ - DBG_VALUE %rbp, -60, !36, !38, debug-location !63 - %rdx = LEA64r %rbp, 1, _, -64, _ - DBG_VALUE %rbp, -64, !37, !38, debug-location !78 - CALL64pcrel32 @set, csr_64, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rdx, implicit-def %rsp, debug-location !79 - %eax = MOV32rm %rbp, 1, _, -44, _, debug-location !81 :: (dereferenceable load 4 from %ir.inte, !tbaa !47) - DBG_VALUE debug-use %eax, debug-use _, !35, !38, debug-location !60 - %r15d = ADD32rm killed %r15d, %rbp, 1, _, -52, _, implicit-def dead %eflags, debug-location !82 :: (load 4 from %stack.4) - %r15d = IMUL32rr killed %r15d, %eax, implicit-def dead %eflags, debug-location !82 - %r15d = ADD32rm killed %r15d, %rbp, 1, _, -56, _, implicit-def dead %eflags, debug-location !83 :: (load 4 from %stack.3) - %r15d = IMUL32rr killed %r15d, killed %eax, implicit-def dead %eflags, debug-location !84 - DBG_VALUE debug-use %r15d, debug-use _, !31, !38, debug-location !51 - %r13d = MOV32rr killed %r15d - DBG_VALUE debug-use %r13d, debug-use _, !31, !38, debug-location !51 + $rdi = LEA64r $rbp, 1, _, -44, _ + DBG_VALUE $rbp, -44, !35, !38, debug-location !60 + $rsi = LEA64r $rbp, 1, _, -60, _ + DBG_VALUE $rbp, -60, !36, !38, debug-location !63 + $rdx = LEA64r $rbp, 1, _, -64, _ + DBG_VALUE $rbp, -64, !37, !38, debug-location !78 + CALL64pcrel32 @set, csr_64, implicit $rsp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, debug-location !79 + $eax = MOV32rm $rbp, 1, _, -44, _, debug-location !81 :: (dereferenceable load 4 from %ir.inte, !tbaa !47) + DBG_VALUE debug-use $eax, debug-use _, !35, !38, debug-location !60 + $r15d = ADD32rm killed $r15d, $rbp, 1, _, -52, _, implicit-def dead $eflags, debug-location !82 :: (load 4 from %stack.4) + $r15d = IMUL32rr killed $r15d, $eax, implicit-def dead $eflags, debug-location !82 + $r15d = ADD32rm killed $r15d, $rbp, 1, _, -56, _, implicit-def dead $eflags, debug-location !83 :: (load 4 from %stack.3) + $r15d = IMUL32rr killed $r15d, killed $eax, implicit-def dead $eflags, debug-location !84 + DBG_VALUE debug-use $r15d, debug-use _, !31, !38, debug-location !51 + $r13d = MOV32rr killed $r15d + DBG_VALUE debug-use $r13d, debug-use _, !31, !38, debug-location !51 JMP_1 %bb.4.if.end13 bb.2: successors: %bb.4.if.end13(0x80000000) - liveins: %r13d, %r14d, %rbp + liveins: $r13d, $r14d, $rbp - %r14d = ADD32rm killed %r14d, %rbp, 1, _, -48, _, implicit-def dead %eflags, debug-location !71 :: (load 4 from %stack.5) - DBG_VALUE debug-use %r14d, debug-use _, !26, !38, debug-location !41 + $r14d = ADD32rm killed $r14d, $rbp, 1, _, -48, _, implicit-def dead $eflags, debug-location !71 :: (load 4 from %stack.5) + DBG_VALUE debug-use $r14d, debug-use _, !26, !38, debug-location !41 bb.4.if.end13: successors: %bb.5.cleanup(0x80000000) - liveins: %r13d, %r14d, %rbp + liveins: $r13d, $r14d, $rbp - DBG_VALUE debug-use %r14d, debug-use _, !26, !38, debug-location !41 - DBG_VALUE debug-use %r13d, debug-use _, !31, !38, debug-location !51 - %r13d = IMUL32rm killed %r13d, %rbp, 1, _, 16, _, implicit-def dead %eflags, debug-location !86 :: (load 4 from %fixed-stack.6, align 16) - %r13d = ADD32rr killed %r13d, killed %r14d, implicit-def dead %eflags, debug-location !87 - DBG_VALUE debug-use %r13d, debug-use _, !26, !38, debug-location !41 - %edi = MOV32rr killed %r13d, debug-location !88 - CALL64pcrel32 @use, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !88 + DBG_VALUE debug-use $r14d, debug-use _, !26, !38, debug-location 
!41 + DBG_VALUE debug-use $r13d, debug-use _, !31, !38, debug-location !51 + $r13d = IMUL32rm killed $r13d, $rbp, 1, _, 16, _, implicit-def dead $eflags, debug-location !86 :: (load 4 from %fixed-stack.6, align 16) + $r13d = ADD32rr killed $r13d, killed $r14d, implicit-def dead $eflags, debug-location !87 + DBG_VALUE debug-use $r13d, debug-use _, !26, !38, debug-location !41 + $edi = MOV32rr killed $r13d, debug-location !88 + CALL64pcrel32 @use, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, debug-location !88 bb.5.cleanup: - liveins: %rbp + liveins: $rbp - %rsp = ADD64ri8 %rsp, 24, implicit-def dead %eflags, debug-location !90 - %rbx = POP64r implicit-def %rsp, implicit %rsp, debug-location !90 - %r12 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90 - %r13 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90 - %r14 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90 - %r15 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90 - %rbp = POP64r implicit-def %rsp, implicit %rsp, debug-location !90 + $rsp = ADD64ri8 $rsp, 24, implicit-def dead $eflags, debug-location !90 + $rbx = POP64r implicit-def $rsp, implicit $rsp, debug-location !90 + $r12 = POP64r implicit-def $rsp, implicit $rsp, debug-location !90 + $r13 = POP64r implicit-def $rsp, implicit $rsp, debug-location !90 + $r14 = POP64r implicit-def $rsp, implicit $rsp, debug-location !90 + $r15 = POP64r implicit-def $rsp, implicit $rsp, debug-location !90 + $rbp = POP64r implicit-def $rsp, implicit $rsp, debug-location !90 RETQ debug-location !90 ... Index: test/DebugInfo/MIR/X86/live-debug-values.mir =================================================================== --- test/DebugInfo/MIR/X86/live-debug-values.mir +++ test/DebugInfo/MIR/X86/live-debug-values.mir @@ -35,7 +35,7 @@ # CHECK: ![[N_VAR:[0-9]+]] = !DILocalVariable(name: "n",{{.*}}) # # CHECK: bb.5.if.end.7: -# CHECK: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[N_VAR]], !DIExpression(), debug-location !{{[0-9]+}} +# CHECK: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[N_VAR]], !DIExpression(), debug-location !{{[0-9]+}} --- | @@ -163,12 +163,12 @@ exposesReturnsTwice: false tracksRegLiveness: true liveins: - - { reg: '%edi' } - - { reg: '%rsi' } -calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', - '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15', - '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d', - '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] + - { reg: '$edi' } + - { reg: '$rsi' } +calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx', + '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15', + '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d', + '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -184,74 +184,74 @@ hasVAStart: false hasMustTailInVarArgFunc: false fixedStack: - - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%rbx' } + - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$rbx' } body: | bb.0.entry: successors: %bb.1.if.else(16), %bb.2.if.end(16) - liveins: %edi, %rsi, %rbx + liveins: $edi, $rsi, $rbx - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbx, -16 - DBG_VALUE debug-use %edi, debug-use _, !12, !20, debug-location !21 - DBG_VALUE debug-use %rsi, 
debug-use _, !13, !20, debug-location !22 - %eax = MOV32rr %edi - DBG_VALUE debug-use %eax, debug-use _, !12, !20, debug-location !21 - %edi = MOV32ri 2 - CMP32ri8 killed %eax, 2, implicit-def %eflags, debug-location !26 - JNE_1 %bb.2.if.end, implicit %eflags + CFI_INSTRUCTION offset $rbx, -16 + DBG_VALUE debug-use $edi, debug-use _, !12, !20, debug-location !21 + DBG_VALUE debug-use $rsi, debug-use _, !13, !20, debug-location !22 + $eax = MOV32rr $edi + DBG_VALUE debug-use $eax, debug-use _, !12, !20, debug-location !21 + $edi = MOV32ri 2 + CMP32ri8 killed $eax, 2, implicit-def $eflags, debug-location !26 + JNE_1 %bb.2.if.end, implicit $eflags bb.1.if.else: successors: %bb.2.if.end(0) - liveins: %rsi + liveins: $rsi - DBG_VALUE debug-use %rsi, debug-use _, !13, !20, debug-location !22 - %rdi = MOV64rm killed %rsi, 1, _, 8, _, debug-location !27 :: (load 8 from %ir.arrayidx, !tbaa !28) - dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al, debug-location !32 - CALL64pcrel32 @atoi, csr_64, implicit %rsp, implicit %rdi, implicit %al, implicit-def %rsp, implicit-def %eax, debug-location !32 - %edi = MOV32rr %eax, debug-location !32 - DBG_VALUE debug-use %edi, debug-use _, !14, !20, debug-location !33 + DBG_VALUE debug-use $rsi, debug-use _, !13, !20, debug-location !22 + $rdi = MOV64rm killed $rsi, 1, _, 8, _, debug-location !27 :: (load 8 from %ir.arrayidx, !tbaa !28) + dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al, debug-location !32 + CALL64pcrel32 @atoi, csr_64, implicit $rsp, implicit $rdi, implicit $al, implicit-def $rsp, implicit-def $eax, debug-location !32 + $edi = MOV32rr $eax, debug-location !32 + DBG_VALUE debug-use $edi, debug-use _, !14, !20, debug-location !33 bb.2.if.end: successors: %bb.3.if.then.3(16), %bb.4.if.else.5(16) - liveins: %edi + liveins: $edi - CALL64pcrel32 @change, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax, debug-location !34 - %ebx = MOV32rr %eax, debug-location !34 - DBG_VALUE debug-use %ebx, debug-use _, !14, !20, debug-location !33 - CMP32ri8 %ebx, 11, implicit-def %eflags, debug-location !37 - JL_1 %bb.4.if.else.5, implicit killed %eflags, debug-location !37 + CALL64pcrel32 @change, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax, debug-location !34 + $ebx = MOV32rr $eax, debug-location !34 + DBG_VALUE debug-use $ebx, debug-use _, !14, !20, debug-location !33 + CMP32ri8 $ebx, 11, implicit-def $eflags, debug-location !37 + JL_1 %bb.4.if.else.5, implicit killed $eflags, debug-location !37 bb.3.if.then.3: successors: %bb.5.if.end.7(0) - liveins: %ebx + liveins: $ebx - DBG_VALUE debug-use %ebx, debug-use _, !14, !20, debug-location !33 - %edi = MOV32rr %ebx, debug-location !38 - CALL64pcrel32 @modify, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax, debug-location !38 - %ecx = MOV32rr %eax, debug-location !38 - %ecx = ADD32rr killed %ecx, killed %ebx, implicit-def dead %eflags, debug-location !40 + DBG_VALUE debug-use $ebx, debug-use _, !14, !20, debug-location !33 + $edi = MOV32rr $ebx, debug-location !38 + CALL64pcrel32 @modify, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax, debug-location !38 + $ecx = MOV32rr $eax, debug-location !38 + $ecx = ADD32rr killed $ecx, killed $ebx, implicit-def dead $eflags, debug-location !40 JMP_1 %bb.5.if.end.7 bb.4.if.else.5: successors: %bb.5.if.end.7(0) - liveins: %ebx + liveins: $ebx - DBG_VALUE debug-use %ebx, debug-use _, !14, !20, 
debug-location !33 - %edi = MOV32rr killed %ebx, debug-location !42 - CALL64pcrel32 @inc, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax, debug-location !42 - %ecx = MOV32rr %eax, debug-location !42 + DBG_VALUE debug-use $ebx, debug-use _, !14, !20, debug-location !33 + $edi = MOV32rr killed $ebx, debug-location !42 + CALL64pcrel32 @inc, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, implicit-def $eax, debug-location !42 + $ecx = MOV32rr $eax, debug-location !42 bb.5.if.end.7: - liveins: %ecx + liveins: $ecx - MOV32mr %rip, 1, _, @m, _, %ecx, debug-location !43 :: (store 4 into @m, !tbaa !44) - dead undef %edi = MOV32ri64 @.str, implicit-def %rdi, debug-location !46 - dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al, debug-location !47 - %esi = MOV32rr killed %ecx, debug-location !46 - CALL64pcrel32 @printf, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit %al, implicit-def %rsp, implicit-def dead %eax, debug-location !46 - %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, debug-location !47 - %rbx = POP64r implicit-def %rsp, implicit %rsp, debug-location !47 - RETQ %eax, debug-location !47 + MOV32mr $rip, 1, _, @m, _, $ecx, debug-location !43 :: (store 4 into @m, !tbaa !44) + dead undef $edi = MOV32ri64 @.str, implicit-def $rdi, debug-location !46 + dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al, debug-location !47 + $esi = MOV32rr killed $ecx, debug-location !46 + CALL64pcrel32 @printf, csr_64, implicit $rsp, implicit $rdi, implicit $esi, implicit $al, implicit-def $rsp, implicit-def dead $eax, debug-location !46 + $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, debug-location !47 + $rbx = POP64r implicit-def $rsp, implicit $rsp, debug-location !47 + RETQ $eax, debug-location !47 ... 
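For orientation: the hunks above follow the single rule applied throughout this patch: only physical-register operands switch their sigil from '%' to '$'. Virtual registers, basic-block references, and stack/fixed-stack/IR-value names keep '%'. As a standalone illustration (a hypothetical x86-64 MIR function, not taken from any test in this patch), the new syntax reads:

  name: sigil_demo
  body: |
    bb.0:
      liveins: $edi
      ; physical registers now use '$' ($edi, $eax);
      ; virtual registers such as %0 keep '%'
      %0:gr32 = COPY $edi
      $eax = COPY %0
      RETQ $eax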
Index: test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir =================================================================== --- test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir +++ test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir @@ -130,32 +130,32 @@ constants: body: | bb.0.entry: - DBG_VALUE debug-use %edi, debug-use _, !21, !DIExpression(), debug-location !25 - DBG_VALUE debug-use %rsi, debug-use _, !22, !DIExpression(), debug-location !26 - %2 = MOV32rm %rip, 1, _, @bar, _, debug-location !27 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 0)`, !tbaa !28) + DBG_VALUE debug-use $edi, debug-use _, !21, !DIExpression(), debug-location !25 + DBG_VALUE debug-use $rsi, debug-use _, !22, !DIExpression(), debug-location !26 + %2 = MOV32rm $rip, 1, _, @bar, _, debug-location !27 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 0)`, !tbaa !28) DBG_VALUE debug-use %2, debug-use _, !23, !DIExpression(), debug-location !32 - %3 = MOV32rm %rip, 1, _, @bar + 4, _, debug-location !33 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 1)`, !tbaa !28) + %3 = MOV32rm $rip, 1, _, @bar + 4, _, debug-location !33 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 1)`, !tbaa !28) DBG_VALUE debug-use %3, debug-use _, !24, !DIExpression(), debug-location !34 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35 - %edi = COPY %2, debug-location !35 - %esi = COPY %3, debug-location !35 - CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit killed %edi, implicit killed %esi, implicit-def %rsp, debug-location !35 - ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35 - %eax = MOV32r0 implicit-def dead %eflags, debug-location !36 - RET 0, killed %eax, debug-location !36 + ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !35 + $edi = COPY %2, debug-location !35 + $esi = COPY %3, debug-location !35 + CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $ssp, implicit killed $edi, implicit killed $esi, implicit-def $rsp, debug-location !35 + ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !35 + $eax = MOV32r0 implicit-def dead $eflags, debug-location !36 + RET 0, killed $eax, debug-location !36 ... # Let's verify that the slot index ranges for the unused variables argc/argv, -# connected to physical regs %edi and %rsi, does not overlap with the ranges +# connected to physical regs $edi and $rsi, do not overlap with the ranges # for %2 and %3. The register allocator is actually allocating the -# virtual registers # to %edi and %esi, so the ranges for argc/argv should +# virtual registers to $edi and $esi, so the ranges for argc/argv should # not cover the whole BB.
# # CHECKDBG-LABEL: ********** EMITTING LIVE DEBUG VARIABLES ********** -# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=debug-use %edi +# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=debug-use $edi # CHECKDBG-NEXT: [0B;0e):0 %bb.0-160B -# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=debug-use %rsi +# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=debug-use $rsi # CHECKDBG-NEXT: [0B;0e):0 %bb.0-160B # CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=debug-use %2 # CHECKDBG-NEXT: [16r;64r):0 %bb.0-160B Index: test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir =================================================================== --- test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir +++ test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir @@ -128,19 +128,19 @@ constants: body: | bb.0.entry: - DBG_VALUE debug-use %edi, debug-use _, !21, !DIExpression(), debug-location !25 - DBG_VALUE debug-use %rsi, debug-use _, !22, !DIExpression(), debug-location !26 - %2 = MOV32rm %rip, 1, _, @bar, _, debug-location !27 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 0)`, !tbaa !28) + DBG_VALUE debug-use $edi, debug-use _, !21, !DIExpression(), debug-location !25 + DBG_VALUE debug-use $rsi, debug-use _, !22, !DIExpression(), debug-location !26 + %2 = MOV32rm $rip, 1, _, @bar, _, debug-location !27 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 0)`, !tbaa !28) DBG_VALUE debug-use %2, debug-use _, !23, !DIExpression(), debug-location !32 - %3 = MOV32rm %rip, 1, _, @bar + 4, _, debug-location !33 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 1)`, !tbaa !28) + %3 = MOV32rm $rip, 1, _, @bar + 4, _, debug-location !33 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 1)`, !tbaa !28) DBG_VALUE debug-use %3, debug-use _, !24, !DIExpression(), debug-location !34 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35 - %edi = COPY %2, debug-location !35 - %esi = COPY %3, debug-location !35 - CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit killed %edi, implicit killed %esi, implicit-def %rsp, debug-location !35 - ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35 - %eax = MOV32r0 implicit-def dead %eflags, debug-location !36 - RET 0, killed %eax, debug-location !36 + ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !35 + $edi = COPY %2, debug-location !35 + $esi = COPY %3, debug-location !35 + CALL64pcrel32 @foo, csr_64, implicit $rsp, implicit $ssp, implicit killed $edi, implicit killed $esi, implicit-def $rsp, debug-location !35 + ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !35 + $eax = MOV32r0 implicit-def dead $eflags, debug-location !36 + RET 0, killed $eax, debug-location !36 ... 
@@ -150,9 +150,9 @@ # CHECKMIR: ![[ARGV:[0-9]+]] = !DILocalVariable(name: "argv", arg: 2 # CHECKMIR: name: main # CHECKMIR: body: -# CHECKMIR: DBG_VALUE debug-use %edi, debug-use %noreg, ![[ARGC]] -# CHECKMIR-NOT: DBG_VALUE debug-use %{{.*}}, debug-use %noreg, ![[ARGC]] -# CHECKMIR: DBG_VALUE debug-use %rsi, debug-use %noreg, ![[ARGV]] -# CHECKMIR-NOT: DBG_VALUE debug-use %{{.*}}, debug-use %noreg, ![[ARGC]] -# CHECKMIR-NOT: DBG_VALUE debug-use %{{.*}}, debug-use %noreg, ![[ARGV]] +# CHECKMIR: DBG_VALUE debug-use $edi, debug-use $noreg, ![[ARGC]] +# CHECKMIR-NOT: DBG_VALUE debug-use %{{.*}}, debug-use $noreg, ![[ARGC]] +# CHECKMIR: DBG_VALUE debug-use $rsi, debug-use $noreg, ![[ARGV]] +# CHECKMIR-NOT: DBG_VALUE debug-use %{{.*}}, debug-use $noreg, ![[ARGC]] +# CHECKMIR-NOT: DBG_VALUE debug-use %{{.*}}, debug-use $noreg, ![[ARGV]] Index: test/DebugInfo/MIR/X86/livedebugvalues-limit.mir =================================================================== --- test/DebugInfo/MIR/X86/livedebugvalues-limit.mir +++ test/DebugInfo/MIR/X86/livedebugvalues-limit.mir @@ -25,13 +25,13 @@ ; CHECK: ![[CS3]] = distinct !DILocation(line: 8, column: 3, scope: !{{[0-9]+}}) ; ; CHECK: bb.1.if.then: - ; CHECK: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]] - ; CHECK-NOT: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[A_VAR]], !DIExpression(), debug-location - ; CHECK: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[A_VAR]], !DIExpression(), debug-location ![[INLCS2]] + ; CHECK: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]] + ; CHECK-NOT: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[A_VAR]], !DIExpression(), debug-location + ; CHECK: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[A_VAR]], !DIExpression(), debug-location ![[INLCS2]] ; CHECK: bb.2.if.end: - ; CHECK: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]] - ; CHECK-NOT: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[A_VAR]], !DIExpression(), debug-location - ; CHECK: DBG_VALUE debug-use %ebx, debug-use %noreg, ![[A_VAR]], !DIExpression(), debug-location ![[INLCS3]] + ; CHECK: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]] + ; CHECK-NOT: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[A_VAR]], !DIExpression(), debug-location + ; CHECK: DBG_VALUE debug-use $ebx, debug-use $noreg, ![[A_VAR]], !DIExpression(), debug-location ![[INLCS3]] ; ; ModuleID = 'livedebugvalues-limit.ll' source_filename = "livedebugvalues-limit.c" @@ -129,11 +129,11 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%edi' } -calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', - '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15', - '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d', - '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] + - { reg: '$edi' } +calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx', + '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15', + '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d', + '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -152,16 +152,16 @@ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 } body: | bb.0.entry: - liveins: %edi, %rbp + liveins: $edi, $rbp - frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp 
CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbp, -16 - %rbp = frame-setup MOV64rr %rsp - CFI_INSTRUCTION def_cfa_register %rbp - DBG_VALUE debug-use %edi, debug-use _, !12, !13, debug-location !14 - %rbp = POP64r implicit-def %rsp, implicit %rsp, debug-location !15 - TAILJMPd64 @sink, csr_64, implicit %rsp, implicit %rsp, implicit %edi, debug-location !15 + CFI_INSTRUCTION offset $rbp, -16 + $rbp = frame-setup MOV64rr $rsp + CFI_INSTRUCTION def_cfa_register $rbp + DBG_VALUE debug-use $edi, debug-use _, !12, !13, debug-location !14 + $rbp = POP64r implicit-def $rsp, implicit $rsp, debug-location !15 + TAILJMPd64 @sink, csr_64, implicit $rsp, implicit $rsp, implicit $edi, debug-location !15 ... --- @@ -173,11 +173,11 @@ selected: false tracksRegLiveness: true liveins: - - { reg: '%edi' } -calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', - '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15', - '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d', - '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] + - { reg: '$edi' } +calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx', + '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15', + '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d', + '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -193,47 +193,47 @@ hasVAStart: false hasMustTailInVarArgFunc: false fixedStack: - - { id: 0, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%rbx' } + - { id: 0, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$rbx' } - { id: 1, type: spill-slot, offset: -16, size: 8, alignment: 16 } body: | bb.0.entry: successors: %bb.2.if.end, %bb.1.if.then - liveins: %edi, %rbx, %rbp + liveins: $edi, $rbx, $rbp - frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp + frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp CFI_INSTRUCTION def_cfa_offset 16 - CFI_INSTRUCTION offset %rbp, -16 - %rbp = frame-setup MOV64rr %rsp - CFI_INSTRUCTION def_cfa_register %rbp - frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp - frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp - CFI_INSTRUCTION offset %rbx, -24 - DBG_VALUE debug-use %edi, debug-use _, !19, !13, debug-location !20 - %ebx = MOV32rr %edi - DBG_VALUE debug-use %ebx, debug-use _, !12, !13, debug-location !21 - DBG_VALUE debug-use %ebx, debug-use _, !19, !13, debug-location !20 - CALL64pcrel32 @sink, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !23 - TEST32rr %ebx, %ebx, implicit-def %eflags, debug-location !24 - JE_1 %bb.2.if.end, implicit %eflags + CFI_INSTRUCTION offset $rbp, -16 + $rbp = frame-setup MOV64rr $rsp + CFI_INSTRUCTION def_cfa_register $rbp + frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp + CFI_INSTRUCTION offset $rbx, -24 + DBG_VALUE debug-use $edi, debug-use _, !19, !13, debug-location !20 + $ebx = MOV32rr $edi + DBG_VALUE debug-use $ebx, debug-use _, !12, !13, debug-location !21 + DBG_VALUE debug-use $ebx, debug-use _, !19, !13, debug-location !20 + CALL64pcrel32 @sink, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, debug-location !23 + TEST32rr $ebx, $ebx, implicit-def $eflags, debug-location !24 + JE_1 %bb.2.if.end, implicit $eflags bb.1.if.then: successors: %bb.2.if.end - liveins: %ebx, %rbp + liveins: $ebx, $rbp - DBG_VALUE debug-use %ebx, 
debug-use _, !19, !13, debug-location !20 - DBG_VALUE debug-use %ebx, debug-use _, !12, !13, debug-location !27 - %edi = MOV32rr %ebx, debug-location !29 - CALL64pcrel32 @sink, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !29 + DBG_VALUE debug-use $ebx, debug-use _, !19, !13, debug-location !20 + DBG_VALUE debug-use $ebx, debug-use _, !12, !13, debug-location !27 + $edi = MOV32rr $ebx, debug-location !29 + CALL64pcrel32 @sink, csr_64, implicit $rsp, implicit $edi, implicit-def $rsp, debug-location !29 bb.2.if.end: - liveins: %ebx, %rbp - - DBG_VALUE debug-use %ebx, debug-use _, !19, !13, debug-location !20 - %edi = MOV32rr killed %ebx, debug-location !33 - %rsp = ADD64ri8 %rsp, 8, implicit-def dead %eflags, debug-location !33 - DBG_VALUE debug-use %ebx, debug-use _, !12, !13, debug-location !31 - %rbx = POP64r implicit-def %rsp, implicit %rsp, debug-location !33 - %rbp = POP64r implicit-def %rsp, implicit %rsp, debug-location !33 - TAILJMPd64 @sink, csr_64, implicit %rsp, implicit %rsp, implicit %edi, debug-location !33 + liveins: $ebx, $rbp + + DBG_VALUE debug-use $ebx, debug-use _, !19, !13, debug-location !20 + $edi = MOV32rr killed $ebx, debug-location !33 + $rsp = ADD64ri8 $rsp, 8, implicit-def dead $eflags, debug-location !33 + DBG_VALUE debug-use $ebx, debug-use _, !12, !13, debug-location !31 + $rbx = POP64r implicit-def $rsp, implicit $rsp, debug-location !33 + $rbp = POP64r implicit-def $rsp, implicit $rsp, debug-location !33 + TAILJMPd64 @sink, csr_64, implicit $rsp, implicit $rsp, implicit $edi, debug-location !33 ... Index: test/DebugInfo/MIR/X86/mlicm-hoist.mir =================================================================== --- test/DebugInfo/MIR/X86/mlicm-hoist.mir +++ test/DebugInfo/MIR/X86/mlicm-hoist.mir @@ -18,7 +18,7 @@ ; out of a loop the debug information is not retained. 
; ; CHECK-LABEL: bb.0.entry: - ; CHECK: MOV64rm %rip, 1, %noreg, target-flags(x86-gotpcrel) @x, %noreg :: (load 8 from got) + ; CHECK: MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @x, $noreg :: (load 8 from got) ; CHECK-LABEL: bb.1.while.body: ; ; ModuleID = 'tx.ll' @@ -102,7 +102,7 @@ - { id: 3, class: gr32 } - { id: 4, class: gr64 } liveins: - - { reg: '%rdi', virtual-reg: '%2' } + - { reg: '$rdi', virtual-reg: '%2' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -120,10 +120,10 @@ body: | bb.0.entry: successors: %bb.1.while.body(0x80000000) - liveins: %rdi + liveins: $rdi - DBG_VALUE debug-use %rdi, debug-use _, !16, !17, debug-location !18 - %2 = COPY %rdi + DBG_VALUE debug-use $rdi, debug-use _, !16, !17, debug-location !18 + %2 = COPY $rdi DBG_VALUE debug-use %2, debug-use _, !16, !17, debug-location !18 bb.1.while.body: @@ -131,10 +131,10 @@ %0 = PHI %2, %bb.0.entry, %1, %bb.1.while.body DBG_VALUE debug-use %0, debug-use _, !16, !17, debug-location !18 - %1 = ADD64ri8 %0, 4, implicit-def dead %eflags, debug-location !20 + %1 = ADD64ri8 %0, 4, implicit-def dead $eflags, debug-location !20 DBG_VALUE debug-use %1, debug-use _, !16, !17, debug-location !18 %3 = MOV32rm %0, 1, _, 0, _, debug-location !21 :: (load 4 from %ir.p.addr.0, !tbaa !22) - %4 = MOV64rm %rip, 1, _, target-flags(x86-gotpcrel) @x, _, debug-location !26 :: (load 8 from got) + %4 = MOV64rm $rip, 1, _, target-flags(x86-gotpcrel) @x, _, debug-location !26 :: (load 8 from got) MOV32mr killed %4, 1, _, 0, _, killed %3, debug-location !26 :: (store 4 into @x, !tbaa !22) JMP_1 %bb.1.while.body, debug-location !27 Index: test/DebugInfo/MIR/X86/no-cfi-loc.mir =================================================================== --- test/DebugInfo/MIR/X86/no-cfi-loc.mir +++ test/DebugInfo/MIR/X86/no-cfi-loc.mir @@ -48,10 +48,10 @@ regBankSelected: false selected: false tracksRegLiveness: true -calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', - '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15', - '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d', - '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] +calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx', + '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15', + '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d', + '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ] frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -68,10 +68,10 @@ hasMustTailInVarArgFunc: false body: | bb.0.entry: - frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp, debug-location !8 + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp, debug-location !8 CFI_INSTRUCTION def_cfa_offset 16 - CALL64pcrel32 @bar, csr_64, implicit %rsp, implicit-def %rsp, debug-location !8 - %rax = POP64r implicit-def %rsp, implicit %rsp, debug-location !9 + CALL64pcrel32 @bar, csr_64, implicit $rsp, implicit-def $rsp, debug-location !8 + $rax = POP64r implicit-def $rsp, implicit $rsp, debug-location !9 RET 0, debug-location !9 ... 
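For reference: most of the DebugInfo tests in this patch pivot on DBG_VALUE operands. In the MIR bodies the second operand is written with the parser shorthand '_' (no register), while printed output and the CHECK lines spell it out; with this patch the printed spelling becomes '$noreg' instead of '%noreg'. A hypothetical pair of DBG_VALUE lines (the !12/!13/!14 metadata ids are placeholders, not from any test here) illustrates both cases:

    DBG_VALUE debug-use $edi, debug-use $noreg, !12, !DIExpression(), debug-location !14 ; variable held in a physical register
    DBG_VALUE debug-use %0, debug-use $noreg, !13, !DIExpression(), debug-location !14 ; variable held in a virtual register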
Index: test/DebugInfo/MIR/X86/regcoalescer.mir =================================================================== --- test/DebugInfo/MIR/X86/regcoalescer.mir +++ test/DebugInfo/MIR/X86/regcoalescer.mir @@ -39,12 +39,12 @@ - { id: 0, class: gr32, preferred-register: '' } body: | bb.0.entry: - %0 = MOV32r0 implicit-def dead %eflags, debug-location !19 + %0 = MOV32r0 implicit-def dead $eflags, debug-location !19 DBG_VALUE debug-use %0, debug-use _, !18, !DIExpression(), debug-location !20 - %eax = COPY killed %0, debug-location !21 - RET 0, killed %eax, debug-location !21 + $eax = COPY killed %0, debug-location !21 + RET 0, killed $eax, debug-location !21 ... -# CHECK: %eax = MOV32r0 -# CHECK-NEXT: DBG_VALUE debug-use %eax +# CHECK: $eax = MOV32r0 +# CHECK-NEXT: DBG_VALUE debug-use $eax Index: test/DebugInfo/MSP430/sdagsplit-1.ll =================================================================== --- test/DebugInfo/MSP430/sdagsplit-1.ll +++ test/DebugInfo/MSP430/sdagsplit-1.ll @@ -13,10 +13,10 @@ ; return 0; ; } ; -; CHECK-DAG: DBG_VALUE debug-use %r{{[0-9]+}}, debug-use %noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 32, 16), debug-location !{{[0-9]+}} -; CHECK-DAG: DBG_VALUE debug-use %r{{[0-9]+}}, debug-use %noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 48, 16), debug-location !{{[0-9]+}} -; CHECK-DAG: DBG_VALUE debug-use %r{{[0-9]+}}, debug-use %noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 0, 16), debug-location !{{[0-9]+}} -; CHECK-DAG: DBG_VALUE debug-use %r{{[0-9]+}}, debug-use %noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 16, 16), debug-location !{{[0-9]+}} +; CHECK-DAG: DBG_VALUE debug-use $r{{[0-9]+}}, debug-use $noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 32, 16), debug-location !{{[0-9]+}} +; CHECK-DAG: DBG_VALUE debug-use $r{{[0-9]+}}, debug-use $noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 48, 16), debug-location !{{[0-9]+}} +; CHECK-DAG: DBG_VALUE debug-use $r{{[0-9]+}}, debug-use $noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 0, 16), debug-location !{{[0-9]+}} +; CHECK-DAG: DBG_VALUE debug-use $r{{[0-9]+}}, debug-use $noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 16, 16), debug-location !{{[0-9]+}} ; ModuleID = 'sdagsplit-1.c' target datalayout = "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16" Index: test/DebugInfo/X86/bbjoin.ll =================================================================== --- test/DebugInfo/X86/bbjoin.ll +++ test/DebugInfo/X86/bbjoin.ll @@ -11,13 +11,13 @@ ; } ; CHECK: ![[X:.*]] = !DILocalVariable(name: "x", ; CHECK: bb.0.entry: -; CHECK: DBG_VALUE 23, debug-use %noreg, ![[X]], -; CHECK: DBG_VALUE %rsp, 0, ![[X]], !DIExpression(DW_OP_plus_uconst, 4, DW_OP_deref), +; CHECK: DBG_VALUE 23, debug-use $noreg, ![[X]], +; CHECK: DBG_VALUE $rsp, 0, ![[X]], !DIExpression(DW_OP_plus_uconst, 4, DW_OP_deref), ; CHECK: bb.1.if.then: -; CHECK: DBG_VALUE 43, debug-use %noreg, ![[X]], +; CHECK: DBG_VALUE 43, debug-use $noreg, ![[X]], ; CHECK: bb.2.if.end: -; CHECK-NOT: DBG_VALUE 23, debug-use %noreg, ![[X]], -; CHECK: RETQ %eax +; CHECK-NOT: DBG_VALUE 23, debug-use $noreg, ![[X]], +; CHECK: RETQ $eax target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.11.0" Index: test/DebugInfo/X86/dbg-addr-dse.ll =================================================================== --- test/DebugInfo/X86/dbg-addr-dse.ll +++ test/DebugInfo/X86/dbg-addr-dse.ll @@ -47,12 +47,12 @@ ; ASM-LABEL: f: # @f ; ASM: movl %ecx, [[OFF_X:[0-9]+]](%rsp) -; ASM: 
#DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0] +; ASM: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [$rsp+0] ; ASM: callq escape ; ASM: #DEBUG_VALUE: f:x <- 1 ; ASM: movl $1, global(%rip) ; FIXME: Needs a fix to LiveDebugVariables -; ASMX: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0] +; ASMX: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [$rsp+0] ; ASM: movl $2, [[OFF_X]](%rsp) ; ASM: callq escape ; ASM: retq Index: test/DebugInfo/X86/dbg-addr.ll =================================================================== --- test/DebugInfo/X86/dbg-addr.ll +++ test/DebugInfo/X86/dbg-addr.ll @@ -7,7 +7,7 @@ ; is control-dependent. ; CHECK-LABEL: use_dbg_addr: -; CHECK: #DEBUG_VALUE: use_dbg_addr:o <- [%rsp+0] +; CHECK: #DEBUG_VALUE: use_dbg_addr:o <- [$rsp+0] ; FIXME: Avoid the use of a single-location location list and use ; DW_AT_start_offset instead. Index: test/DebugInfo/X86/dbg-value-dag-combine.ll =================================================================== --- test/DebugInfo/X86/dbg-value-dag-combine.ll +++ test/DebugInfo/X86/dbg-value-dag-combine.ll @@ -8,8 +8,8 @@ ; CHECK-LABEL: __OpenCL_test_kernel: ; CHECK-DAG: ##DEBUG_VALUE: __OpenCL_test_kernel:ip <- ; CHECK-DAG: ##DEBUG_VALUE: xxx <- 0 -; CHECK-DAG: ##DEBUG_VALUE: gid <- %e{{..$}} -; CHECK-DAG: ##DEBUG_VALUE: idx <- %e{{..$}} +; CHECK-DAG: ##DEBUG_VALUE: gid <- $e{{..$}} +; CHECK-DAG: ##DEBUG_VALUE: idx <- $e{{..$}} ; CHECK-NOT: ##DEBUG_VALUE: declare <4 x i32> @__amdil_get_global_id_int() Index: test/DebugInfo/X86/dbg-value-frame-index.ll =================================================================== --- test/DebugInfo/X86/dbg-value-frame-index.ll +++ test/DebugInfo/X86/dbg-value-frame-index.ll @@ -20,7 +20,7 @@ } ; CHECK-LABEL: test -; CHECK: #DEBUG_VALUE: test:w <- [DW_OP_plus_uconst 8] [%rsp+0] +; CHECK: #DEBUG_VALUE: test:w <- [DW_OP_plus_uconst 8] [$rsp+0] ; DWARF: DW_AT_location [DW_FORM_sec_offset] ( ; DWARF-NEXT: [{{.*}}, {{.*}}): DW_OP_breg7 RSP+8) Index: test/DebugInfo/X86/dbg-value-regmask-clobber.ll =================================================================== --- test/DebugInfo/X86/dbg-value-regmask-clobber.ll +++ test/DebugInfo/X86/dbg-value-regmask-clobber.ll @@ -5,7 +5,7 @@ ; of individual register def operands. ; ASM: main: # @main -; ASM: #DEBUG_VALUE: main:argc <- %ecx +; ASM: #DEBUG_VALUE: main:argc <- $ecx ; ASM: movl $1, x(%rip) ; ASM: callq clobber ; ASM-NEXT: [[argc_range_end:.Ltmp[0-9]+]]: Index: test/DebugInfo/X86/dbg-value-transfer-order.ll =================================================================== --- test/DebugInfo/X86/dbg-value-transfer-order.ll +++ test/DebugInfo/X86/dbg-value-transfer-order.ll @@ -33,7 +33,7 @@ ; CHECK: movl %eax, %ecx ; CHECK: .LBB0_3: # %if.end ; Check that this DEBUG_VALUE comes before the left shift. -; CHECK: #DEBUG_VALUE: bit_offset <- %ecx +; CHECK: #DEBUG_VALUE: bit_offset <- $ecx ; CHECK: .cv_loc 0 1 8 28 # t.c:8:28 ; CHECK: movl $1, %[[reg:[^ ]*]] ; CHECK: shll %cl, %[[reg]] Index: test/DebugInfo/X86/debug-loc-asan.ll =================================================================== --- test/DebugInfo/X86/debug-loc-asan.ll +++ test/DebugInfo/X86/debug-loc-asan.ll @@ -14,7 +14,7 @@ ; The address of the (potentially now malloc'ed) alloca ends up ; in rdi, after which it is spilled to the stack. We record the ; spill OFFSET on the stack for checking the debug info below. 
-; CHECK: #DEBUG_VALUE: bar:y <- [DW_OP_deref] [%rdi+0] +; CHECK: #DEBUG_VALUE: bar:y <- [DW_OP_deref] [$rdi+0] ; CHECK: movq %rdi, [[OFFSET:[0-9]+]](%rsp) ; CHECK-NEXT: [[START_LABEL:.Ltmp[0-9]+]] ; CHECK-NEXT: #DEBUG_VALUE: bar:y <- [DW_OP_plus_uconst [[OFFSET]], DW_OP_deref, DW_OP_deref] Index: test/DebugInfo/X86/live-debug-values.ll =================================================================== --- test/DebugInfo/X86/live-debug-values.ll +++ test/DebugInfo/X86/live-debug-values.ll @@ -30,7 +30,7 @@ ; DBG_VALUE for variable "n" is extended into %bb.5 from its predecessors %bb.3 ; and %bb.4. ; CHECK: .LBB0_5: -; CHECK-NEXT: #DEBUG_VALUE: main:n <- %ebx +; CHECK-NEXT: #DEBUG_VALUE: main:n <- $ebx ; Other register values have been clobbered. ; CHECK-NOT: #DEBUG_VALUE: ; CHECK: movl %ecx, m(%rip) Index: test/DebugInfo/X86/live-debug-vars-dse.mir =================================================================== --- test/DebugInfo/X86/live-debug-vars-dse.mir +++ test/DebugInfo/X86/live-debug-vars-dse.mir @@ -13,12 +13,12 @@ # CHECK-LABEL: f: # @f # CHECK: movl %ecx, [[OFF_X:[0-9]+]](%rsp) -# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0] +# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [$rsp+0] # CHECK: leaq [[OFF_X]](%rsp), %rsi # CHECK: callq escape # CHECK: #DEBUG_VALUE: f:x <- 1 # CHECK: movl $1, global(%rip) -# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0] +# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [$rsp+0] # CHECK: movl $2, [[OFF_X]](%rsp) # CHECK: callq escape # CHECK: retq @@ -98,7 +98,7 @@ - { id: 0, class: gr32, preferred-register: '' } - { id: 1, class: gr64, preferred-register: '' } liveins: - - { reg: '%ecx', virtual-reg: '%0' } + - { reg: '$ecx', virtual-reg: '%0' } frameInfo: isFrameAddressTaken: false isReturnAddressTaken: false @@ -124,24 +124,24 @@ constants: body: | bb.0.entry: - liveins: %ecx + liveins: $ecx - %0 = COPY %ecx + %0 = COPY $ecx MOV32mr %stack.0.x.addr, 1, _, 0, _, %0 :: (store 4 into %ir.x.addr) DBG_VALUE %stack.0.x.addr, 0, !13, !DIExpression(), debug-location !14 - ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !15 + ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !15 %1 = LEA64r %stack.0.x.addr, 1, _, 0, _ - %rcx = COPY %1, debug-location !15 - CALL64pcrel32 @escape, csr_win64, implicit %rsp, implicit %ssp, implicit %rcx, implicit-def %rsp, implicit-def %ssp, debug-location !15 - ADJCALLSTACKUP64 32, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !15 + $rcx = COPY %1, debug-location !15 + CALL64pcrel32 @escape, csr_win64, implicit $rsp, implicit $ssp, implicit $rcx, implicit-def $rsp, implicit-def $ssp, debug-location !15 + ADJCALLSTACKUP64 32, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !15 DBG_VALUE 1, debug-use _, !13, !DIExpression(), debug-location !16 - MOV32mi %rip, 1, _, @global, _, 1, debug-location !17 :: (store 4 into @global) + MOV32mi $rip, 1, _, @global, _, 1, debug-location !17 :: (store 4 into @global) DBG_VALUE %stack.0.x.addr, 0, !13, !DIExpression(), debug-location !18 MOV32mi %stack.0.x.addr, 1, _, 0, _, 2, debug-location !18 :: (store 4 into %ir.x.addr) - ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead %rsp, 
implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !19 - %rcx = COPY %1, debug-location !19 - CALL64pcrel32 @escape, csr_win64, implicit %rsp, implicit %ssp, implicit %rcx, implicit-def %rsp, implicit-def %ssp, debug-location !19 - ADJCALLSTACKUP64 32, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !19 + ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !19 + $rcx = COPY %1, debug-location !19 + CALL64pcrel32 @escape, csr_win64, implicit $rsp, implicit $ssp, implicit $rcx, implicit-def $rsp, implicit-def $ssp, debug-location !19 + ADJCALLSTACKUP64 32, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !19 RET 0, debug-location !20 ... Index: test/DebugInfo/X86/op_deref.ll =================================================================== --- test/DebugInfo/X86/op_deref.ll +++ test/DebugInfo/X86/op_deref.ll @@ -17,7 +17,7 @@ ; Check the DEBUG_VALUE comments for good measure. ; RUN: llc -O0 -mtriple=x86_64-apple-darwin %s -o - -filetype=asm | FileCheck %s -check-prefix=ASM-CHECK ; vla should have a register-indirect address at one point. -; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [%rcx+0] +; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [$rcx+0] ; ASM-CHECK: DW_OP_breg2 ; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s --check-prefix=PRETTY-PRINT Index: test/DebugInfo/X86/pieces-4.ll =================================================================== --- test/DebugInfo/X86/pieces-4.ll +++ test/DebugInfo/X86/pieces-4.ll @@ -17,7 +17,7 @@ ; CHECK: callq g ; CHECK: movl %eax, [[offs:[0-9]+]](%rsp) # 4-byte Spill ; CHECK: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_LLVM_fragment 32 32] 0 -; CHECK: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offs]], DW_OP_LLVM_fragment 0 32] [%rsp+0] +; CHECK: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offs]], DW_OP_LLVM_fragment 0 32] [$rsp+0] ; CHECK: #APP ; CHECK: #NO_APP ; CHECK: movl [[offs]](%rsp), %eax # 4-byte Reload Index: test/DebugInfo/X86/pr34545.ll =================================================================== --- test/DebugInfo/X86/pr34545.ll +++ test/DebugInfo/X86/pr34545.ll @@ -1,14 +1,14 @@ ; RUN: llc -O1 -filetype=asm -mtriple x86_64-unknown-linux-gnu -mcpu=x86-64 -o - %s -stop-after=livedebugvars | FileCheck %s -; CHECK: %eax = MOV32rm -; CHECK: DBG_VALUE %eax -; CHECK: %eax = SHL32rCL killed renamable %eax -; CHECK: DBG_VALUE %eax -; CHECK: DBG_VALUE %rsp, 0, !{{[0-9]+}}, !DIExpression(DW_OP_constu, 4, DW_OP_minus) -; CHECK: DBG_VALUE %eax -; CHECK: %eax = SHL32rCL killed renamable %eax -; CHECK: DBG_VALUE %eax -; CHECK: RETQ %eax +; CHECK: $eax = MOV32rm +; CHECK: DBG_VALUE $eax +; CHECK: $eax = SHL32rCL killed renamable $eax +; CHECK: DBG_VALUE $eax +; CHECK: DBG_VALUE $rsp, 0, !{{[0-9]+}}, !DIExpression(DW_OP_constu, 4, DW_OP_minus) +; CHECK: DBG_VALUE $eax +; CHECK: $eax = SHL32rCL killed renamable $eax +; CHECK: DBG_VALUE $eax +; CHECK: RETQ $eax target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" Index: test/DebugInfo/X86/sdag-combine.ll =================================================================== --- test/DebugInfo/X86/sdag-combine.ll +++ test/DebugInfo/X86/sdag-combine.ll @@ -15,7 +15,7 @@ entry: %0 = alloca %TSb, align 1 %1 = call swiftcc i1 @f(), !dbg !7 - ; 
CHECK: DBG_VALUE debug-use %rax, debug-use %noreg, !8, !DIExpression(), debug-location !7 + ; CHECK: DBG_VALUE debug-use $rax, debug-use $noreg, !8, !DIExpression(), debug-location !7 call void @llvm.dbg.value(metadata i1 %1, metadata !8, metadata !DIExpression()), !dbg !7 %2 = getelementptr inbounds %TSb, %TSb* %0, i32 0, i32 0, !dbg !7 store i1 %1, i1* %2, align 1, !dbg !7 Index: test/DebugInfo/X86/sdag-salvage-add.ll =================================================================== --- test/DebugInfo/X86/sdag-salvage-add.ll +++ test/DebugInfo/X86/sdag-salvage-add.ll @@ -24,11 +24,11 @@ ; ; CHECK: ![[S4:.*]] = !DILocalVariable(name: "s4", ; CHECK: ![[MYVAR:.*]] = !DILocalVariable(name: "myVar", -; CHECK: DBG_VALUE debug-use %rax, debug-use %noreg, ![[MYVAR]], +; CHECK: DBG_VALUE debug-use $rax, debug-use $noreg, ![[MYVAR]], ; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, 4096, DW_OP_stack_value) -; CHECK-NEXT: DBG_VALUE debug-use %rax, debug-use %noreg, ![[S4]], +; CHECK-NEXT: DBG_VALUE debug-use $rax, debug-use $noreg, ![[S4]], ; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, 4096, DW_OP_stack_value) -; CHECK-NEXT: %rdi = MOV64rm killed renamable %rax, 1, %noreg, 4096, %noreg, +; CHECK-NEXT: $rdi = MOV64rm killed renamable $rax, 1, $noreg, 4096, $noreg, source_filename = "test.c" target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" Index: test/DebugInfo/X86/sdag-split-arg.ll =================================================================== --- test/DebugInfo/X86/sdag-split-arg.ll +++ test/DebugInfo/X86/sdag-split-arg.ll @@ -1,10 +1,10 @@ ; RUN: llc -O0 -filetype=asm %s -o - | FileCheck %s ; Test large integral function arguments passed in multiple registers. -; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 64 16] %ax -; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 48 16] %r9w -; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 32 16] %r10w -; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 16 16] %r11w -; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 0 16] %bx +; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 64 16] $ax +; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 48 16] $r9w +; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 32 16] $r10w +; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 16 16] $r11w +; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 0 16] $bx target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-unknown" Index: test/DebugInfo/X86/sdagsplit-1.ll =================================================================== --- test/DebugInfo/X86/sdagsplit-1.ll +++ test/DebugInfo/X86/sdagsplit-1.ll @@ -13,8 +13,8 @@ ; return 0; ; } ; -; CHECK-DAG: DBG_VALUE debug-use %{{[a-z]+}}, debug-use %noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 0, 32), debug-location !{{[0-9]+}} -; CHECK-DAG: DBG_VALUE debug-use %{{[a-z]+}}, debug-use %noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 32, 32), debug-location !{{[0-9]+}} +; CHECK-DAG: DBG_VALUE debug-use ${{[a-z]+}}, debug-use $noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 0, 32), debug-location !{{[0-9]+}} +; CHECK-DAG: DBG_VALUE debug-use ${{[a-z]+}}, debug-use $noreg, !{{[0-9]+}}, !DIExpression(DW_OP_LLVM_fragment, 32, 32), debug-location !{{[0-9]+}} ; ModuleID = 'sdagsplit-1.c' target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" Index: test/DebugInfo/X86/spill-indirect-nrvo.ll =================================================================== --- test/DebugInfo/X86/spill-indirect-nrvo.ll +++ 
test/DebugInfo/X86/spill-indirect-nrvo.ll @@ -21,9 +21,9 @@ ; } ; CHECK-LABEL: _Z10get_stringv: -; CHECK: #DEBUG_VALUE: get_string:result <- [%rdi+0] +; CHECK: #DEBUG_VALUE: get_string:result <- [$rdi+0] ; CHECK: movq %rdi, [[OFFS:[0-9]+]](%rsp) # 8-byte Spill -; CHECK: #DEBUG_VALUE: get_string:result <- [DW_OP_plus_uconst [[OFFS]], DW_OP_deref] [%rsp+0] +; CHECK: #DEBUG_VALUE: get_string:result <- [DW_OP_plus_uconst [[OFFS]], DW_OP_deref] [$rsp+0] ; CHECK: callq _ZN6stringC1Ei ; CHECK: #APP ; CHECK: #NO_APP Index: test/DebugInfo/X86/spill-nontrivial-param.ll =================================================================== --- test/DebugInfo/X86/spill-nontrivial-param.ll +++ test/DebugInfo/X86/spill-nontrivial-param.ll @@ -20,9 +20,9 @@ ; } ; CHECK-LABEL: _Z3foo10NonTrivial: -; CHECK: #DEBUG_VALUE: foo:nt <- [%rdi+0] +; CHECK: #DEBUG_VALUE: foo:nt <- [$rdi+0] ; CHECK: movq %rdi, -8(%rsp) # 8-byte Spill -; CHECK: #DEBUG_VALUE: foo:nt <- [DW_OP_constu 8, DW_OP_minus, DW_OP_deref] [%rsp+0] +; CHECK: #DEBUG_VALUE: foo:nt <- [DW_OP_constu 8, DW_OP_minus, DW_OP_deref] [$rsp+0] ; CHECK: #APP ; CHECK: #NO_APP ; CHECK: movq -8(%rsp), %rax # 8-byte Reload Index: test/DebugInfo/X86/spill-nospill.ll =================================================================== --- test/DebugInfo/X86/spill-nospill.ll +++ test/DebugInfo/X86/spill-nospill.ll @@ -24,12 +24,12 @@ ; CHECK-LABEL: f: # @f ; CHECK: callq g ; CHECK: movl %eax, [[X_OFFS:[0-9]+]](%rsp) # 4-byte Spill -; CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[X_OFFS]]] [%rsp+0] +; CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[X_OFFS]]] [$rsp+0] ; CHECK: #APP ; CHECK: #NO_APP ; CHECK: callq g ; CHECK: movl %eax, %[[CSR:[^ ]*]] -; CHECK: #DEBUG_VALUE: f:y <- %esi +; CHECK: #DEBUG_VALUE: f:y <- $esi ; CHECK: movl %[[CSR]], %ecx ; CHECK: callq g ; CHECK: movl %[[CSR]], %ecx @@ -37,7 +37,7 @@ ; CHECK: movl %[[CSR]], %ecx ; CHECK: callq g ; CHECK: movl [[X_OFFS]](%rsp), %eax # 4-byte Reload -; CHECK: #DEBUG_VALUE: f:x <- %eax +; CHECK: #DEBUG_VALUE: f:x <- $eax ; CHECK: addl %[[CSR]], %eax ; DWARF: DW_TAG_variable Index: test/DebugInfo/X86/vla.ll =================================================================== --- test/DebugInfo/X86/vla.ll +++ test/DebugInfo/X86/vla.ll @@ -1,6 +1,6 @@ ; RUN: llc -O0 -mtriple=x86_64-apple-darwin -filetype=asm %s -o - | FileCheck %s ; Ensure that we generate an indirect location for the variable length array a. -; CHECK: ##DEBUG_VALUE: vla:a <- [DW_OP_deref] [%rcx+0] +; CHECK: ##DEBUG_VALUE: vla:a <- [DW_OP_deref] [$rcx+0] ; CHECK: DW_OP_breg2 ; rdar://problem/13658587 ; Index: test/Verifier/test_g_phi.mir =================================================================== --- test/Verifier/test_g_phi.mir +++ test/Verifier/test_g_phi.mir @@ -46,11 +46,11 @@ body: | bb.1.entry: successors: %bb.2.case1(0x40000000), %bb.3.case2(0x40000000) - liveins: %w0 + liveins: $w0 ; This test makes sure that the Verifier catches G_PHI with mismatching types. ; CHECK: Bad machine code: Generic Instruction G_PHI has operands with incompatible/missing types - %0(s32) = COPY %w0 + %0(s32) = COPY $w0 %1(s32) = G_CONSTANT i32 0 %3(s32) = G_CONSTANT i32 1 %5(s32) = G_CONSTANT i32 2 @@ -73,7 +73,7 @@ bb.4.return: %7(s32) = G_PHI %9(s16), %bb.2.case1, %6(s32), %bb.3.case2 - %w0 = COPY %7(s32) - RET_ReallyLR implicit %w0 + $w0 = COPY %7(s32) + RET_ReallyLR implicit $w0 ... 
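For reference: FileCheck matches text outside {{...}} and [[...]] literally, so '$' needs no escaping in the updated CHECK lines; only regexes that previously matched '%'-prefixed physical names (e.g. the %r{{[0-9]+}} patterns above) change, while capture patterns for numbered virtual registers stay on '%'. A hypothetical skeleton of a test updated this way (not from this patch):

  # CHECK-LABEL: name: demo
  # CHECK: %[[TMP:[0-9]+]]:gr32 = COPY $edi
  # CHECK: $eax = COPY %[[TMP]]
  name: demo
  body: |
    bb.0:
      liveins: $edi
      %0:gr32 = COPY $edi
      $eax = COPY %0
      RETQ $eax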
Index: unittests/CodeGen/GlobalISel/PatternMatchTest.cpp =================================================================== --- unittests/CodeGen/GlobalISel/PatternMatchTest.cpp +++ unittests/CodeGen/GlobalISel/PatternMatchTest.cpp @@ -91,9 +91,9 @@ - { id: 3, class: _ } body: | bb.1: - %0(s64) = COPY %x0 - %1(s64) = COPY %x1 - %2(s64) = COPY %x2 + %0(s64) = COPY $x0 + %1(s64) = COPY $x1 + %2(s64) = COPY $x2 )MIR") + Twine(MIRFunc) + Twine("...\n")) .toNullTerminatedStringRef(S); std::unique_ptr<MIRParser> MIR; Index: unittests/CodeGen/MachineInstrTest.cpp =================================================================== --- unittests/CodeGen/MachineInstrTest.cpp +++ unittests/CodeGen/MachineInstrTest.cpp @@ -264,7 +264,7 @@ raw_string_ostream OS(str); MI->print(OS); ASSERT_TRUE( - StringRef(OS.str()).startswith("%noreg = UNKNOWN debug-location ")); + StringRef(OS.str()).startswith("$noreg = UNKNOWN debug-location ")); } } // end namespace Index: unittests/CodeGen/MachineOperandTest.cpp =================================================================== --- unittests/CodeGen/MachineOperandTest.cpp +++ unittests/CodeGen/MachineOperandTest.cpp @@ -80,7 +80,7 @@ std::string str; raw_string_ostream OS(str); MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr); - ASSERT_TRUE(OS.str() == "%physreg1.subreg5"); + ASSERT_TRUE(OS.str() == "$physreg1.subreg5"); } TEST(MachineOperandTest, PrintCImm) { Index: unittests/MI/LiveIntervalTest.cpp =================================================================== --- unittests/MI/LiveIntervalTest.cpp +++ unittests/MI/LiveIntervalTest.cpp @@ -313,7 +313,7 @@ liveIntervalTest(R"MIR( successors: %bb.1, %bb.2 %0 = IMPLICIT_DEF - S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc S_BRANCH %bb.1 bb.2: S_NOP 0, implicit %0 @@ -343,10 +343,10 @@ TEST(LiveIntervalTest, MoveOverUndefUse1) { // findLastUseBefore() used by handleMoveUp() must ignore undef operands. liveIntervalTest(R"MIR( - %sgpr0 = IMPLICIT_DEF + $sgpr0 = IMPLICIT_DEF S_NOP 0 - S_NOP 0, implicit undef %sgpr0 - %sgpr0 = IMPLICIT_DEF implicit %sgpr0(tied-def 0) + S_NOP 0, implicit undef $sgpr0 + $sgpr0 = IMPLICIT_DEF implicit $sgpr0(tied-def 0) )MIR", [](MachineFunction &MF, LiveIntervals &LIS) { testHandleMove(MF, LIS, 3, 1); }); @@ -358,7 +358,7 @@ liveIntervalTest(R"MIR( successors: %bb.1, %bb.2 %0 = IMPLICIT_DEF - S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc S_BRANCH %bb.1 bb.2: successors: %bb.1 @@ -384,7 +384,7 @@ successors: %bb.1, %bb.2 undef %0.sub0 = IMPLICIT_DEF %0.sub1 = IMPLICIT_DEF - S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc S_BRANCH %bb.1 bb.1: S_NOP 0, implicit %0.sub1