Index: lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- lib/Target/AArch64/AArch64InstrFormats.td
+++ lib/Target/AArch64/AArch64InstrFormats.td
@@ -496,7 +496,7 @@
   return ((uint32_t)Imm) < 65536;
 }]> {
   let ParserMatchClass = Imm0_65535Operand;
-  let PrintMethod = "printHexImm";
+  let PrintMethod = "printImmHex";
 }
 
 // imm0_255 predicate - True if the immediate is in the range [0,255].
@@ -505,7 +505,7 @@
   return ((uint32_t)Imm) < 256;
 }]> {
   let ParserMatchClass = Imm0_255Operand;
-  let PrintMethod = "printHexImm";
+  let PrintMethod = "printImm";
 }
 
 // imm0_127 predicate - True if the immediate is in the range [0,127]
@@ -514,7 +514,7 @@
   return ((uint32_t)Imm) < 128;
 }]> {
   let ParserMatchClass = Imm0_127Operand;
-  let PrintMethod = "printHexImm";
+  let PrintMethod = "printImm";
 }
 
 // NOTE: These imm0_N operands have to be of type i64 because i64 is the size
@@ -1549,7 +1549,7 @@
 def movimm32_imm : Operand<i32> {
   let ParserMatchClass = Imm0_65535Operand;
   let EncoderMethod = "getMoveWideImmOpValue";
-  let PrintMethod = "printHexImm";
+  let PrintMethod = "printImm";
 }
 def movimm32_shift : Operand<i32> {
   let PrintMethod = "printShifter";
Index: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
===================================================================
--- lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
+++ lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
@@ -49,7 +49,9 @@
   // Operand printers
   void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
                     raw_ostream &O);
-  void printHexImm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+  void printImm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+                raw_ostream &O);
+  void printImmHex(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
                    raw_ostream &O);
   void printPostIncOperand(const MCInst *MI, unsigned OpNo, unsigned Imm,
                            raw_ostream &O);
@@ -94,8 +96,6 @@
                          const MCSubtargetInfo &STI, raw_ostream &O);
   void printUImm12Offset(const MCInst *MI, unsigned OpNum, unsigned Scale,
                          raw_ostream &O);
-  void printAMIndexedWB(const MCInst *MI, unsigned OpNum, unsigned Scale,
-                        raw_ostream &O);
 
   template <int Scale>
   void printUImm12Offset(const MCInst *MI, unsigned OpNum,
@@ -103,15 +103,6 @@
     printUImm12Offset(MI, OpNum, Scale, O);
   }
 
-  template <int BitWidth>
-  void printAMIndexedWB(const MCInst *MI, unsigned OpNum,
-                        const MCSubtargetInfo &STI, raw_ostream &O) {
-    printAMIndexedWB(MI, OpNum, BitWidth / 8, O);
-  }
-
-  void printAMNoIndex(const MCInst *MI, unsigned OpNum,
-                      const MCSubtargetInfo &STI, raw_ostream &O);
-
   template <int Scale>
   void printImmScale(const MCInst *MI, unsigned OpNum,
                      const MCSubtargetInfo &STI, raw_ostream &O);
Index: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
===================================================================
--- lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -928,14 +928,21 @@
     unsigned Reg = Op.getReg();
     O << getRegisterName(Reg);
   } else if (Op.isImm()) {
-    O << '#' << Op.getImm();
+    printImm(MI, OpNo, STI, O);
   } else {
     assert(Op.isExpr() && "unknown operand kind in printOperand");
     Op.getExpr()->print(O, &MAI);
   }
 }
 
-void AArch64InstPrinter::printHexImm(const MCInst *MI, unsigned OpNo,
+void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
+                                  const MCSubtargetInfo &STI,
+                                  raw_ostream &O) {
+  const MCOperand &Op = MI->getOperand(OpNo);
+  O << "#" << formatImm(Op.getImm());
+}
+
+void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
   const MCOperand &Op = MI->getOperand(OpNo);
@@ -981,12 +988,12 @@
     assert(Val == MO.getImm() && "Add/sub immediate out of range!");
     unsigned Shift =
         AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
-    O << '#' << Val;
+    O << '#' << formatImm(Val);
     if (Shift != 0)
       printShifter(MI, OpNum + 1, STI, O);
 
     if (CommentStream)
-      *CommentStream << '=' << (Val << Shift) << '\n';
+      *CommentStream << '=' << formatImm(Val << Shift) << '\n';
   } else {
     assert(MO.isExpr() && "Unexpected operand type!");
     MO.getExpr()->print(O, &MAI);
   }
 }
@@ -1094,44 +1101,24 @@
   O << AArch64CC::getCondCodeName(AArch64CC::getInvertedCondCode(CC));
 }
 
-void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
-                                        const MCSubtargetInfo &STI,
-                                        raw_ostream &O) {
-  O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ']';
-}
-
 template <int Scale>
 void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
-  O << '#' << Scale * MI->getOperand(OpNum).getImm();
+  O << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
 }
 
 void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
                                            unsigned Scale, raw_ostream &O) {
   const MCOperand MO = MI->getOperand(OpNum);
   if (MO.isImm()) {
-    O << "#" << (MO.getImm() * Scale);
+    O << "#" << formatImm(MO.getImm() * Scale);
   } else {
     assert(MO.isExpr() && "Unexpected operand type!");
     MO.getExpr()->print(O, &MAI);
   }
 }
 
-void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
-                                          unsigned Scale, raw_ostream &O) {
-  const MCOperand MO1 = MI->getOperand(OpNum + 1);
-  O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
-  if (MO1.isImm()) {
-    O << ", #" << (MO1.getImm() * Scale);
-  } else {
-    assert(MO1.isExpr() && "Unexpected operand type!");
-    O << ", ";
-    MO1.getExpr()->print(O, &MAI);
-  }
-  O << ']';
-}
-
 void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
@@ -1142,7 +1129,7 @@
   if (Valid)
     O << Name;
   else
-    O << '#' << prfop;
+    O << '#' << formatImm(prfop);
 }
 
 void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
@@ -1155,7 +1142,7 @@
   if (Valid)
     O << Name;
   else
-    O << '#' << psbhintop;
+    O << '#' << formatImm(psbhintop);
 }
 
 void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
@@ -1310,7 +1297,7 @@
   // If the label has already been resolved to an immediate offset (say, when
   // we're running the disassembler), just print the immediate.
   if (Op.isImm()) {
-    O << "#" << (Op.getImm() * 4);
+    O << "#" << formatImm(Op.getImm() * 4);
     return;
   }
 
@@ -1335,7 +1322,7 @@
   // If the label has already been resolved to an immediate offset (say, when
   // we're running the disassembler), just print the immediate.
if (Op.isImm()) { - O << "#" << (Op.getImm() * (1 << 12)); + O << "#" << formatImm(Op.getImm() * (1 << 12)); return; } @@ -1396,7 +1383,7 @@ if (Valid) O << Name.upper(); else - O << "#" << Val; + O << "#" << formatImm(Val); } void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo, Index: test/CodeGen/AArch64/aarch64-be-bv.ll =================================================================== --- test/CodeGen/AArch64/aarch64-be-bv.ll +++ test/CodeGen/AArch64/aarch64-be-bv.ll @@ -5,7 +5,7 @@ ; CHECK-LABEL: movi_modimm_t1: define i16 @movi_modimm_t1() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -17,7 +17,7 @@ ; CHECK-LABEL: movi_modimm_t2: define i16 @movi_modimm_t2() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #8 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -29,7 +29,7 @@ ; CHECK-LABEL: movi_modimm_t3: define i16 @movi_modimm_t3() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #16 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #1, lsl #16 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -41,7 +41,7 @@ ; CHECK-LABEL: movi_modimm_t4: define i16 @movi_modimm_t4() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #24 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #1, lsl #24 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -53,7 +53,7 @@ ; CHECK-LABEL: movi_modimm_t5: define i16 @movi_modimm_t5() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #0x1 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -65,7 +65,7 @@ ; CHECK-LABEL: movi_modimm_t6: define i16 @movi_modimm_t6() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #0x1, lsl #8 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -77,7 +77,7 @@ ; CHECK-LABEL: movi_modimm_t7: define i16 @movi_modimm_t7() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, msl #8 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #1, msl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -89,7 +89,7 @@ ; CHECK-LABEL: movi_modimm_t8: define i16 @movi_modimm_t8() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, msl #16 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #1, msl #16 ; CHECK-NEXT: add v[[REG1]].8h, 
v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -101,7 +101,7 @@ ; CHECK-LABEL: movi_modimm_t9: define i16 @movi_modimm_t9() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: movi v[[REG2:[0-9]+]].16b, #0x1 + ; CHECK-NEXT: movi v[[REG2:[0-9]+]].16b, #1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -149,7 +149,7 @@ ; CHECK-LABEL: mvni_modimm_t1: define i16 @mvni_modimm_t1() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -161,7 +161,7 @@ ; CHECK-LABEL: mvni_modimm_t2: define i16 @mvni_modimm_t2() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #8 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -173,7 +173,7 @@ ; CHECK-LABEL: mvni_modimm_t3: define i16 @mvni_modimm_t3() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #16 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #1, lsl #16 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -185,7 +185,7 @@ ; CHECK-LABEL: mvni_modimm_t4: define i16 @mvni_modimm_t4() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #24 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #1, lsl #24 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -197,7 +197,7 @@ ; CHECK-LABEL: mvni_modimm_t5: define i16 @mvni_modimm_t5() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #0x1 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -209,7 +209,7 @@ ; CHECK-LABEL: mvni_modimm_t6: define i16 @mvni_modimm_t6() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #0x1, lsl #8 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -221,7 +221,7 @@ ; CHECK-LABEL: mvni_modimm_t7: define i16 @mvni_modimm_t7() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, msl #8 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #1, msl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -233,7 +233,7 @@ ; CHECK-LABEL: mvni_modimm_t8: define i16 @mvni_modimm_t8() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, msl #16 + ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #1, msl #16 ; CHECK-NEXT: add 
v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 @@ -245,7 +245,7 @@ ; CHECK-LABEL: bic_modimm_t1: define i16 @bic_modimm_t1() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1 + ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, @@ -256,7 +256,7 @@ ; CHECK-LABEL: bic_modimm_t2: define i16 @bic_modimm_t2() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #8 + ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, @@ -267,7 +267,7 @@ ; CHECK-LABEL: bic_modimm_t3: define i16 @bic_modimm_t3() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #16 + ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #1, lsl #16 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, @@ -278,7 +278,7 @@ ; CHECK-LABEL: bic_modimm_t4: define i16 @bic_modimm_t4() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #24 + ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #1, lsl #24 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, @@ -289,7 +289,7 @@ ; CHECK-LABEL: bic_modimm_t5: define i16 @bic_modimm_t5() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #0x1 + ; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, @@ -300,7 +300,7 @@ ; CHECK-LABEL: bic_modimm_t6: define i16 @bic_modimm_t6() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #0x1, lsl #8 + ; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, @@ -311,7 +311,7 @@ ; CHECK-LABEL: orr_modimm_t1: define i16 @orr_modimm_t1() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1 + ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, @@ -322,7 +322,7 @@ ; CHECK-LABEL: orr_modimm_t2: define i16 @orr_modimm_t2() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #8 + ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, @@ -333,7 +333,7 @@ ; CHECK-LABEL: orr_modimm_t3: define i16 @orr_modimm_t3() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #16 + ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #1, lsl #16 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, @@ -344,7 +344,7 @@ ; CHECK-LABEL: orr_modimm_t4: define i16 @orr_modimm_t4() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #24 + ; 
CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #1, lsl #24 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, @@ -355,7 +355,7 @@ ; CHECK-LABEL: orr_modimm_t5: define i16 @orr_modimm_t5() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #0x1 + ; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, @@ -366,7 +366,7 @@ ; CHECK-LABEL: orr_modimm_t6: define i16 @orr_modimm_t6() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] - ; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #0x1, lsl #8 + ; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, @@ -385,37 +385,37 @@ ; CHECK-LABEL: modimm_t1_call: define void @modimm_t1_call() { - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x7 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #7 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x6 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #6 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.2s, #0x5 + ; CHECK: movi v{{[0-9]+}}.2s, #5 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x5 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #5 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x4 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #4 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x3 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #3 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].4s, #0x2 + ; CHECK: movi v[[REG:[0-9]+]].4s, #2 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -425,37 +425,37 @@ ; CHECK-LABEL: modimm_t2_call: define void @modimm_t2_call() { - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x8, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #8, lsl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x7, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #7, lsl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x6, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #6, lsl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.2s, #0x5, lsl #8 + ; CHECK: movi v{{[0-9]+}}.2s, #5, lsl #8 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x5, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #5, lsl #8 ; CHECK-NEXT: 
rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x4, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #4, lsl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x3, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #3, lsl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].4s, #0x2, lsl #8 + ; CHECK: movi v[[REG:[0-9]+]].4s, #2, lsl #8 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -465,37 +465,37 @@ ; CHECK-LABEL: modimm_t3_call: define void @modimm_t3_call() { - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x8, lsl #16 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #8, lsl #16 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x7, lsl #16 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #7, lsl #16 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x6, lsl #16 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #6, lsl #16 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.2s, #0x5, lsl #16 + ; CHECK: movi v{{[0-9]+}}.2s, #5, lsl #16 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x5, lsl #16 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #5, lsl #16 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x4, lsl #16 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #4, lsl #16 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x3, lsl #16 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #3, lsl #16 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].4s, #0x2, lsl #16 + ; CHECK: movi v[[REG:[0-9]+]].4s, #2, lsl #16 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -505,37 +505,37 @@ ; CHECK-LABEL: modimm_t4_call: define void @modimm_t4_call() { - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x8, lsl #24 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #8, lsl #24 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x7, lsl #24 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #7, lsl #24 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x6, lsl #24 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #6, lsl #24 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.2s, #0x5, lsl #24 + ; 
CHECK: movi v{{[0-9]+}}.2s, #5, lsl #24 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x5, lsl #24 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #5, lsl #24 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x4, lsl #24 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #4, lsl #24 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x3, lsl #24 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #3, lsl #24 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].4s, #0x2, lsl #24 + ; CHECK: movi v[[REG:[0-9]+]].4s, #2, lsl #24 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -545,37 +545,37 @@ ; CHECK-LABEL: modimm_t5_call: define void @modimm_t5_call() { - ; CHECK: movi v[[REG1:[0-9]+]].4h, #0x8 + ; CHECK: movi v[[REG1:[0-9]+]].4h, #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4h, #0x7 + ; CHECK: movi v[[REG1:[0-9]+]].4h, #7 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4h, #0x6 + ; CHECK: movi v[[REG1:[0-9]+]].4h, #6 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.4h, #0x5 + ; CHECK: movi v{{[0-9]+}}.4h, #5 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].8h, #0x5 + ; CHECK: movi v[[REG1:[0-9]+]].8h, #5 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].8h, #0x4 + ; CHECK: movi v[[REG1:[0-9]+]].8h, #4 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].8h, #0x3 + ; CHECK: movi v[[REG1:[0-9]+]].8h, #3 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].8h, #0x2 + ; CHECK: movi v[[REG:[0-9]+]].8h, #2 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -585,37 +585,37 @@ ; CHECK-LABEL: modimm_t6_call: define void @modimm_t6_call() { - ; CHECK: movi v[[REG1:[0-9]+]].4h, #0x8, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4h, #8, lsl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4h, #0x7, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4h, #7, lsl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4h, #0x6, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4h, #6, lsl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 
@f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.4h, #0x5, lsl #8 + ; CHECK: movi v{{[0-9]+}}.4h, #5, lsl #8 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].8h, #0x5, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].8h, #5, lsl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].8h, #0x4, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].8h, #4, lsl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].8h, #0x3, lsl #8 + ; CHECK: movi v[[REG1:[0-9]+]].8h, #3, lsl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].8h, #0x2, lsl #8 + ; CHECK: movi v[[REG:[0-9]+]].8h, #2, lsl #8 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -625,37 +625,37 @@ ; CHECK-LABEL: modimm_t7_call: define void @modimm_t7_call() { - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x8, msl #8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #8, msl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x7, msl #8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #7, msl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x6, msl #8 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #6, msl #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.2s, #0x5, msl #8 + ; CHECK: movi v{{[0-9]+}}.2s, #5, msl #8 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x5, msl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #5, msl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x4, msl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #4, msl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x3, msl #8 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #3, msl #8 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].4s, #0x2, msl #8 + ; CHECK: movi v[[REG:[0-9]+]].4s, #2, msl #8 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -665,37 +665,37 @@ ; CHECK-LABEL: modimm_t8_call: define void @modimm_t8_call() { - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x8, msl #16 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #8, msl #16 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x7, msl #16 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #7, msl #16 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> 
) - ; CHECK: movi v[[REG1:[0-9]+]].2s, #0x6, msl #16 + ; CHECK: movi v[[REG1:[0-9]+]].2s, #6, msl #16 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v{{[0-9]+}}.2s, #0x5, msl #16 + ; CHECK: movi v{{[0-9]+}}.2s, #5, msl #16 ; CHECK-NEXT: bl f_v1i64 call i64 @f_v1i64(<1 x i64> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x5, msl #16 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #5, msl #16 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x4, msl #16 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #4, msl #16 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].4s, #0x3, msl #16 + ; CHECK: movi v[[REG1:[0-9]+]].4s, #3, msl #16 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 call i32 @f_v4i32(<4 x i32> ) - ; CHECK: movi v[[REG:[0-9]+]].4s, #0x2, msl #16 + ; CHECK: movi v[[REG:[0-9]+]].4s, #2, msl #16 ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v2i64 call i64 @f_v2i64(<2 x i64> ) @@ -705,29 +705,29 @@ ; CHECK-LABEL: modimm_t9_call: define void @modimm_t9_call() { - ; CHECK: movi v[[REG1:[0-9]+]].8b, #0x8 + ; CHECK: movi v[[REG1:[0-9]+]].8b, #8 ; CHECK-NEXT: rev64 v{{[0-9]+}}.8b, v[[REG1]].8b ; CHECK-NEXT: bl f_v8i8 call i8 @f_v8i8(<8 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].8b, #0x7 + ; CHECK: movi v[[REG1:[0-9]+]].8b, #7 ; CHECK-NEXT: rev64 v{{[0-9]+}}.4h, v[[REG1]].4h ; CHECK-NEXT: bl f_v4i16 call i16 @f_v4i16(<4 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].8b, #0x6 + ; CHECK: movi v[[REG1:[0-9]+]].8b, #6 ; CHECK-NEXT: rev64 v{{[0-9]+}}.2s, v[[REG1]].2s ; CHECK-NEXT: bl f_v2i32 call i32 @f_v2i32(<2 x i32> ) - ; CHECK: movi v[[REG1:[0-9]+]].16b, #0x5 + ; CHECK: movi v[[REG1:[0-9]+]].16b, #5 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].16b, v[[REG1]].16b ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v16i8 call i8 @f_v16i8(<16 x i8> ) - ; CHECK: movi v[[REG1:[0-9]+]].16b, #0x4 + ; CHECK: movi v[[REG1:[0-9]+]].16b, #4 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].8h, v[[REG1]].8h ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v8i16 call i16 @f_v8i16(<8 x i16> ) - ; CHECK: movi v[[REG1:[0-9]+]].16b, #0x3 + ; CHECK: movi v[[REG1:[0-9]+]].16b, #3 ; CHECK-NEXT: rev64 v[[REG2:[0-9]+]].4s, v[[REG1]].4s ; CHECK-NEXT: ext v[[REG2]].16b, v[[REG2]].16b, v[[REG2]].16b, #8 ; CHECK-NEXT: bl f_v4i32 Index: test/CodeGen/AArch64/arm64-abi_align.ll =================================================================== --- test/CodeGen/AArch64/arm64-abi_align.ll +++ test/CodeGen/AArch64/arm64-abi_align.ll @@ -74,7 +74,7 @@ entry: ; CHECK-LABEL: caller38_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #0x9 +; CHECK: movz w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 @@ -128,7 +128,7 @@ ; CHECK-LABEL: caller39_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #32] ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] -; CHECK: movz w[[C:[0-9]+]], #0x9 +; CHECK: movz w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], 
[sp] %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 @@ -184,7 +184,7 @@ ; CHECK-LABEL: caller40_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #24] ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #0x9 +; CHECK: movz w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 @@ -238,7 +238,7 @@ ; CHECK-LABEL: caller41_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #32] ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] -; CHECK: movz w[[C:[0-9]+]], #0x9 +; CHECK: movz w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 @@ -330,7 +330,7 @@ ; CHECK: sub x[[A:[0-9]+]], x29, #32 ; Address of s1 is passed on stack at sp+8 ; CHECK: str x[[A]], [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #0x9 +; CHECK: movz w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] ; FAST-LABEL: caller42_stack @@ -442,7 +442,7 @@ ; CHECK: sub x[[A:[0-9]+]], x29, #32 ; Address of s1 is passed on stack at sp+8 ; CHECK: str x[[A]], [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #0x9 +; CHECK: movz w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] ; FAST-LABEL: caller43_stack Index: test/CodeGen/AArch64/arm64-addrmode.ll =================================================================== --- test/CodeGen/AArch64/arm64-addrmode.ll +++ test/CodeGen/AArch64/arm64-addrmode.ll @@ -82,7 +82,7 @@ define void @t8(i64 %a) { ; CHECK-LABEL: t8: -; CHECK: movn [[REG:x[0-9]+]], #0x1235 +; CHECK: movn [[REG:x[0-9]+]], #4661 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca %2 = inttoptr i64 %1 to i64* @@ -92,7 +92,7 @@ define void @t9(i64 %a) { ; CHECK-LABEL: t9: -; CHECK: movn [[REG:x[0-9]+]], #0x1235, lsl #16 +; CHECK: movn [[REG:x[0-9]+]], #4661, lsl #16 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff %2 = inttoptr i64 %1 to i64* @@ -102,7 +102,7 @@ define void @t10(i64 %a) { ; CHECK-LABEL: t10: -; CHECK: movz [[REG:x[0-9]+]], #0x123, lsl #48 +; CHECK: movz [[REG:x[0-9]+]], #291, lsl #48 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = add i64 %a, 81909218222800896 ;0x123000000000000 %2 = inttoptr i64 %1 to i64* @@ -112,8 +112,8 @@ define void @t11(i64 %a) { ; CHECK-LABEL: t11: -; CHECK: movz w[[NUM:[0-9]+]], #0x123, lsl #16 -; CHECK: movk w[[NUM:[0-9]+]], #0x4567 +; CHECK: movz w[[NUM:[0-9]+]], #291, lsl #16 +; CHECK: movk w[[NUM:[0-9]+]], #17767 ; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]] %1 = add i64 %a, 19088743 ;0x1234567 %2 = inttoptr i64 %1 to i64* Index: test/CodeGen/AArch64/arm64-atomic.ll =================================================================== --- test/CodeGen/AArch64/arm64-atomic.ll +++ test/CodeGen/AArch64/arm64-atomic.ll @@ -103,7 +103,7 @@ define i32 @fetch_and_or(i32* %p) #0 { ; CHECK-LABEL: fetch_and_or: -; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #0x5 +; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #5 ; CHECK: [[TRYBB:.?LBB[0-9_]+]]: ; CHECK: ldaxr w[[DEST_REG:[0-9]+]], [x0] ; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], w[[DEST_REG]], [[OLDVAL_REG]] Index: test/CodeGen/AArch64/arm64-bitfield-extract.ll =================================================================== --- test/CodeGen/AArch64/arm64-bitfield-extract.ll +++ test/CodeGen/AArch64/arm64-bitfield-extract.ll @@ 
-348,8 +348,8 @@ ; CHECK-LABEL: fct16: ; CHECK: ldr [[REG1:w[0-9]+]], ; Create the constant -; CHECK: movz [[REGCST:w[0-9]+]], #0x1a, lsl #16 -; CHECK: movk [[REGCST]], #0x8160 +; CHECK: movz [[REGCST:w[0-9]+]], #26, lsl #16 +; CHECK: movk [[REGCST]], #33120 ; Do the masking ; CHECK: and [[REG2:w[0-9]+]], [[REG1]], [[REGCST]] ; CHECK-NEXT: bfxil [[REG2]], w1, #16, #3 @@ -377,8 +377,8 @@ ; CHECK-LABEL: fct17: ; CHECK: ldr [[REG1:x[0-9]+]], ; Create the constant -; CHECK: movz w[[REGCST:[0-9]+]], #0x1a, lsl #16 -; CHECK: movk w[[REGCST]], #0x8160 +; CHECK: movz w[[REGCST:[0-9]+]], #26, lsl #16 +; CHECK: movk w[[REGCST]], #33120 ; Do the masking ; CHECK: and [[REG2:x[0-9]+]], [[REG1]], x[[REGCST]] ; CHECK-NEXT: bfxil [[REG2]], x1, #16, #3 Index: test/CodeGen/AArch64/arm64-build-vector.ll =================================================================== --- test/CodeGen/AArch64/arm64-build-vector.ll +++ test/CodeGen/AArch64/arm64-build-vector.ll @@ -36,7 +36,7 @@ define <8 x i16> @build_all_zero(<8 x i16> %a) #1 { ; CHECK-LABEL: build_all_zero: -; CHECK: movz w[[GREG:[0-9]+]], #0xae80 +; CHECK: movz w[[GREG:[0-9]+]], #44672 ; CHECK-NEXT: fmov s[[FREG:[0-9]+]], w[[GREG]] ; CHECK-NEXT: mul.8h v0, v0, v[[FREG]] %b = add <8 x i16> %a, @@ -56,4 +56,4 @@ %vshl_n2 = shl <4 x i16> %vshl_n, %shuffle.i = shufflevector <4 x i16> %vshl_n2, <4 x i16> zeroinitializer, <8 x i32> ret <8 x i16> %shuffle.i -} \ No newline at end of file +} Index: test/CodeGen/AArch64/arm64-const-addr.ll =================================================================== --- test/CodeGen/AArch64/arm64-const-addr.ll +++ test/CodeGen/AArch64/arm64-const-addr.ll @@ -5,8 +5,8 @@ ; Test if the constant base address gets only materialized once. define i32 @test1() nounwind { ; CHECK-LABEL: test1 -; CHECK: movz w8, #0x40f, lsl #16 -; CHECK-NEXT: movk w8, #0xc000 +; CHECK: movz w8, #1039, lsl #16 +; CHECK-NEXT: movk w8, #49152 ; CHECK-NEXT: ldp w9, w10, [x8, #4] ; CHECK: ldr w8, [x8, #12] %at = inttoptr i64 68141056 to %T* Index: test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll +++ test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll @@ -9,7 +9,7 @@ ; CHECK: @foo ; CHECK: adrp x[[REG:[0-9]+]], _sortlist@GOTPAGE ; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _sortlist@GOTPAGEOFF] -; CHECK: movz x[[REG2:[0-9]+]], #0x4e20 +; CHECK: movz x[[REG2:[0-9]+]], #20000 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]] ; CHECK: ldr w0, [x[[REG3]]] ; CHECK: ret @@ -22,7 +22,7 @@ ; CHECK: @foo2 ; CHECK: adrp x[[REG:[0-9]+]], _sortlist2@GOTPAGE ; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _sortlist2@GOTPAGEOFF] -; CHECK: movz x[[REG2:[0-9]+]], #0x9c40 +; CHECK: movz x[[REG2:[0-9]+]], #40000 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]] ; CHECK: ldr x0, [x[[REG3]]] ; CHECK: ret @@ -37,9 +37,9 @@ define signext i8 @foo3() nounwind ssp { entry: ; CHECK: @foo3 -; CHECK: movz x[[REG:[0-9]+]], #0xb3a, lsl #32 -; CHECK: movk x[[REG]], #0x73ce, lsl #16 -; CHECK: movk x[[REG]], #0x2ff2 +; CHECK: movz x[[REG:[0-9]+]], #2874, lsl #32 +; CHECK: movk x[[REG]], #29646, lsl #16 +; CHECK: movk x[[REG]], #12274 %0 = load i8*, i8** @pd2, align 8 %arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234 %1 = load i8, i8* %arrayidx, align 1 Index: test/CodeGen/AArch64/arm64-fast-isel-gv.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-gv.ll +++ 
test/CodeGen/AArch64/arm64-fast-isel-gv.ll @@ -18,8 +18,8 @@ ; CHECK: @Rand ; CHECK: adrp [[REG1:x[0-9]+]], _seed@GOTPAGE ; CHECK: ldr [[REG2:x[0-9]+]], {{\[}}[[REG1]], _seed@GOTPAGEOFF{{\]}} -; CHECK: movz [[REG3:x[0-9]+]], #0x3619 -; CHECK: movz [[REG4:x[0-9]+]], #0x51d +; CHECK: movz [[REG3:x[0-9]+]], #13849 +; CHECK: movz [[REG4:x[0-9]+]], #1309 ; CHECK: ldr [[REG5:x[0-9]+]], {{\[}}[[REG2]]{{\]}} ; CHECK: mul [[REG6:x[0-9]+]], [[REG5]], [[REG4]] ; CHECK: add [[REG7:x[0-9]+]], [[REG6]], [[REG3]] Index: test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll +++ test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll @@ -8,7 +8,7 @@ ; ARM64: adrp x8, _message@PAGE ; ARM64: add x0, x8, _message@PAGEOFF ; ARM64: mov w9, wzr -; ARM64: movz x2, #0x50 +; ARM64: movz x2, #80 ; ARM64: uxtb w1, w9 ; ARM64: bl _memset call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i32 16, i1 false) @@ -23,7 +23,7 @@ ; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF] ; ARM64: adrp x8, _message@PAGE ; ARM64: add x1, x8, _message@PAGEOFF -; ARM64: movz x2, #0x50 +; ARM64: movz x2, #80 ; ARM64: bl _memcpy call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i32 16, i1 false) ret void @@ -37,7 +37,7 @@ ; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF] ; ARM64: adrp x8, _message@PAGE ; ARM64: add x1, x8, _message@PAGEOFF -; ARM64: movz x2, #0x14 +; ARM64: movz x2, #20 ; ARM64: bl _memmove call void @llvm.memmove.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i32 16, i1 false) ret void @@ -137,7 +137,7 @@ define void @test_distant_memcpy(i8* %dst) { ; ARM64-LABEL: test_distant_memcpy: ; ARM64: mov [[ARRAY:x[0-9]+]], sp -; ARM64: movz [[OFFSET:x[0-9]+]], #0x1f40 +; ARM64: movz [[OFFSET:x[0-9]+]], #8000 ; ARM64: add x[[ADDR:[0-9]+]], [[ARRAY]], [[OFFSET]] ; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]] ; ARM64: strb [[BYTE]], [x0] Index: test/CodeGen/AArch64/arm64-fcopysign.ll =================================================================== --- test/CodeGen/AArch64/arm64-fcopysign.ll +++ test/CodeGen/AArch64/arm64-fcopysign.ll @@ -5,7 +5,7 @@ define float @test1(float %x, float %y) nounwind { entry: ; CHECK-LABEL: test1: -; CHECK: movi.4s v2, #0x80, lsl #24 +; CHECK: movi.4s v2, #128, lsl #24 ; CHECK: bit.16b v0, v1, v2 %0 = tail call float @copysignf(float %x, float %y) nounwind readnone ret float %0 @@ -37,7 +37,7 @@ entry: ; CHECK-LABEL: test4: ; CHECK: fcvt s0, d0 -; CHECK: movi.4s v[[CONST:[0-9]+]], #0x80, lsl #24 +; CHECK: movi.4s v[[CONST:[0-9]+]], #128, lsl #24 ; CHECK: bit.16b v{{[0-9]+}}, v0, v[[CONST]] %0 = tail call double (...) 
@bar() nounwind %1 = fptrunc double %0 to float Index: test/CodeGen/AArch64/arm64-fp128.ll =================================================================== --- test/CodeGen/AArch64/arm64-fp128.ll +++ test/CodeGen/AArch64/arm64-fp128.ll @@ -174,11 +174,11 @@ iftrue: ret i32 42 ; CHECK-NEXT: BB# -; CHECK-NEXT: movz w0, #0x2a +; CHECK-NEXT: movz w0, #42 ; CHECK: ret iffalse: ret i32 29 -; CHECK: movz w0, #0x1d +; CHECK: movz w0, #29 ; CHECK: ret } Index: test/CodeGen/AArch64/arm64-memcpy-inline.ll =================================================================== --- test/CodeGen/AArch64/arm64-memcpy-inline.ll +++ test/CodeGen/AArch64/arm64-memcpy-inline.ll @@ -75,7 +75,7 @@ entry: ; CHECK-LABEL: t5: ; CHECK: strb wzr, [x0, #6] -; CHECK: movz [[REG7:w[0-9]+]], #0x5453 +; CHECK: movz [[REG7:w[0-9]+]], #21587 ; CHECK: strh [[REG7]], [x0, #4] ; CHECK: movz [[REG8:w[0-9]+]], ; CHECK: movk [[REG8]], Index: test/CodeGen/AArch64/arm64-movi.ll =================================================================== --- test/CodeGen/AArch64/arm64-movi.ll +++ test/CodeGen/AArch64/arm64-movi.ll @@ -45,30 +45,30 @@ define i32 @movz() nounwind { ; CHECK-LABEL: movz: -; CHECK: movz w0, #0x5 +; CHECK: movz w0, #5 ret i32 5 } define i64 @movz_3movk() nounwind { ; CHECK-LABEL: movz_3movk: -; CHECK: movz x0, #0x5, lsl #48 -; CHECK-NEXT: movk x0, #0x1234, lsl #32 -; CHECK-NEXT: movk x0, #0xabcd, lsl #16 -; CHECK-NEXT: movk x0, #0x5678 +; CHECK: movz x0, #5, lsl #48 +; CHECK-NEXT: movk x0, #4660, lsl #32 +; CHECK-NEXT: movk x0, #43981, lsl #16 +; CHECK-NEXT: movk x0, #22136 ret i64 1427392313513592 } define i64 @movz_movk_skip1() nounwind { ; CHECK-LABEL: movz_movk_skip1: -; CHECK: movz x0, #0x5, lsl #32 -; CHECK-NEXT: movk x0, #0x4321, lsl #16 +; CHECK: movz x0, #5, lsl #32 +; CHECK-NEXT: movk x0, #17185, lsl #16 ret i64 22601072640 } define i64 @movz_skip1_movk() nounwind { ; CHECK-LABEL: movz_skip1_movk: -; CHECK: movz x0, #0x8654, lsl #32 -; CHECK-NEXT: movk x0, #0x1234 +; CHECK: movz x0, #34388, lsl #32 +; CHECK-NEXT: movk x0, #4660 ret i64 147695335379508 } @@ -78,14 +78,14 @@ define i64 @movn() nounwind { ; CHECK-LABEL: movn: -; CHECK: movn x0, #0x29 +; CHECK: movn x0, #41 ret i64 -42 } define i64 @movn_skip1_movk() nounwind { ; CHECK-LABEL: movn_skip1_movk: -; CHECK: movn x0, #0x29, lsl #32 -; CHECK-NEXT: movk x0, #0x1234 +; CHECK: movn x0, #41, lsl #32 +; CHECK-NEXT: movk x0, #4660 ret i64 -176093720012 } @@ -97,28 +97,28 @@ define i64 @orr_movk1() nounwind { ; CHECK-LABEL: orr_movk1: ; CHECK: orr x0, xzr, #0xffff0000ffff0 -; CHECK: movk x0, #0xdead, lsl #16 +; CHECK: movk x0, #57005, lsl #16 ret i64 72056498262245120 } define i64 @orr_movk2() nounwind { ; CHECK-LABEL: orr_movk2: ; CHECK: orr x0, xzr, #0xffff0000ffff0 -; CHECK: movk x0, #0xdead, lsl #48 +; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982650836746496 } define i64 @orr_movk3() nounwind { ; CHECK-LABEL: orr_movk3: ; CHECK: orr x0, xzr, #0xffff0000ffff0 -; CHECK: movk x0, #0xdead, lsl #32 +; CHECK: movk x0, #57005, lsl #32 ret i64 72020953688702720 } define i64 @orr_movk4() nounwind { ; CHECK-LABEL: orr_movk4: ; CHECK: orr x0, xzr, #0xffff0000ffff0 -; CHECK: movk x0, #0xdead +; CHECK: movk x0, #57005 ret i64 72056494543068845 } @@ -126,30 +126,30 @@ define i64 @orr_movk5() nounwind { ; CHECK-LABEL: orr_movk5: ; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 -; CHECK: movk x0, #0xdead, lsl #16 +; CHECK: movk x0, #57005, lsl #16 ret i64 -71777214836900096 } define i64 @orr_movk6() nounwind { ; CHECK-LABEL: orr_movk6: ; CHECK: orr x0, xzr, 
#0xff00ff00ff00ff00 -; CHECK: movk x0, #0xdead, lsl #16 -; CHECK: movk x0, #0xdead, lsl #48 +; CHECK: movk x0, #57005, lsl #16 +; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982647117578496 } define i64 @orr_movk7() nounwind { ; CHECK-LABEL: orr_movk7: ; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 -; CHECK: movk x0, #0xdead, lsl #48 +; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982646575268096 } define i64 @orr_movk8() nounwind { ; CHECK-LABEL: orr_movk8: ; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 -; CHECK: movk x0, #0xdead -; CHECK: movk x0, #0xdead, lsl #48 +; CHECK: movk x0, #57005 +; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982646575276371 } @@ -157,46 +157,46 @@ define i64 @orr_movk9() nounwind { ; CHECK-LABEL: orr_movk9: ; CHECK: orr x0, xzr, #0xffffff000000000 -; CHECK: movk x0, #0xff00 -; CHECK: movk x0, #0xdead, lsl #16 +; CHECK: movk x0, #65280 +; CHECK: movk x0, #57005, lsl #16 ret i64 1152921439623315200 } define i64 @orr_movk10() nounwind { ; CHECK-LABEL: orr_movk10: ; CHECK: orr x0, xzr, #0xfffffffffffff00 -; CHECK: movk x0, #0xdead, lsl #16 +; CHECK: movk x0, #57005, lsl #16 ret i64 1152921504047824640 } define i64 @orr_movk11() nounwind { ; CHECK-LABEL: orr_movk11: ; CHECK: orr x0, xzr, #0xfff00000000000ff -; CHECK: movk x0, #0xdead, lsl #16 -; CHECK: movk x0, #0xffff, lsl #32 +; CHECK: movk x0, #57005, lsl #16 +; CHECK: movk x0, #65535, lsl #32 ret i64 -4222125209747201 } define i64 @orr_movk12() nounwind { ; CHECK-LABEL: orr_movk12: ; CHECK: orr x0, xzr, #0xfff00000000000ff -; CHECK: movk x0, #0xdead, lsl #32 +; CHECK: movk x0, #57005, lsl #32 ret i64 -4258765016661761 } define i64 @orr_movk13() nounwind { ; CHECK-LABEL: orr_movk13: ; CHECK: orr x0, xzr, #0xfffff000000 -; CHECK: movk x0, #0xdead -; CHECK: movk x0, #0xdead, lsl #48 +; CHECK: movk x0, #57005 +; CHECK: movk x0, #57005, lsl #48 ret i64 -2401245434149282131 } ; rdar://13944082 define i64 @g() nounwind { ; CHECK-LABEL: g: -; CHECK: movz x0, #0xffff, lsl #48 -; CHECK: movk x0, #0x2 +; CHECK: movz x0, #65535, lsl #48 +; CHECK: movk x0, #2 entry: ret i64 -281474976710654 } Index: test/CodeGen/AArch64/arm64-neon-2velem-high.ll =================================================================== --- test/CodeGen/AArch64/arm64-neon-2velem-high.ll +++ test/CodeGen/AArch64/arm64-neon-2velem-high.ll @@ -18,7 +18,7 @@ define <4 x i32> @test_vmull_high_n_s16_imm(<8 x i16> %a) #0 { ; CHECK-LABEL: test_vmull_high_n_s16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -42,7 +42,7 @@ define <2 x i64> @test_vmull_high_n_s32_imm(<4 x i32> %a) #0 { ; CHECK-LABEL: test_vmull_high_n_s32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1, msl #8 +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #1, msl #8 ; CHECK-NEXT: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -68,7 +68,7 @@ define <4 x i32> @test_vmull_high_n_u16_imm(<8 x i16> %a) #0 { ; CHECK-LABEL: test_vmull_high_n_u16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x11, lsl #8 +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #17, lsl #8 ; CHECK-NEXT: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -92,7 +92,7 @@ define <2 x i64> @test_vmull_high_n_u32_imm(<4 x i32> %a) #0 { ; CHECK-LABEL: test_vmull_high_n_u32_imm: -; CHECK-NEXT: mvni [[REPLICATE:v[0-9]+]].4s, #0x1, msl #8 +; CHECK-NEXT: mvni [[REPLICATE:v[0-9]+]].4s, #1, msl #8 ; CHECK-NEXT: 
umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -118,7 +118,7 @@ define <4 x i32> @test_vqdmull_high_n_s16_imm(<8 x i16> %a) #0 { ; CHECK-LABEL: test_vqdmull_high_n_s16_imm: -; CHECK-NEXT: mvni [[REPLICATE:v[0-9]+]].8h, #0x11, lsl #8 +; CHECK-NEXT: mvni [[REPLICATE:v[0-9]+]].8h, #17, lsl #8 ; CHECK-NEXT: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -142,7 +142,7 @@ define <2 x i64> @test_vqdmull_high_n_s32_imm(<4 x i32> %a) #0 { ; CHECK-LABEL: test_vqdmull_high_n_s32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define <4 x i32> @test_vmlal_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: test_vmlal_high_n_s16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -195,7 +195,7 @@ define <2 x i64> @test_vmlal_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: test_vmlal_high_n_s32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -223,7 +223,7 @@ define <4 x i32> @test_vmlal_high_n_u16_imm(<4 x i32> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: test_vmlal_high_n_u16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define <2 x i64> @test_vmlal_high_n_u32_imm(<2 x i64> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: test_vmlal_high_n_u32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -277,7 +277,7 @@ define <4 x i32> @test_vqdmlal_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: test_vqdmlal_high_n_s16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -303,7 +303,7 @@ define <2 x i64> @test_vqdmlal_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: test_vqdmlal_high_n_s32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -331,7 +331,7 @@ define <4 x i32> @test_vmlsl_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: test_vmlsl_high_n_s16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define <2 x i64> @test_vmlsl_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: test_vmlsl_high_n_s32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -385,7 +385,7 @@ define <4 x i32> @test_vmlsl_high_n_u16_imm(<4 x i32> %a, <8 x i16> %b) #0 { ; 
CHECK-LABEL: test_vmlsl_high_n_u16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -411,7 +411,7 @@ define <2 x i64> @test_vmlsl_high_n_u32_imm(<2 x i64> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: test_vmlsl_high_n_u32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: @@ -439,7 +439,7 @@ define <4 x i32> @test_vqdmlsl_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: test_vqdmlsl_high_n_s16_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #29 ; CHECK-NEXT: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h ; CHECK-NEXT: ret entry: @@ -465,7 +465,7 @@ define <2 x i64> @test_vqdmlsl_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: test_vqdmlsl_high_n_s32_imm: -; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d +; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #29 ; CHECK-NEXT: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s ; CHECK-NEXT: ret entry: Index: test/CodeGen/AArch64/arm64-neon-copy.ll =================================================================== --- test/CodeGen/AArch64/arm64-neon-copy.ll +++ test/CodeGen/AArch64/arm64-neon-copy.ll @@ -1405,7 +1405,7 @@ define <4 x i16> @concat_vector_v4i16_const_one() { ; CHECK-LABEL: concat_vector_v4i16_const_one: -; CHECK: movi {{v[0-9]+}}.4h, #0x1 +; CHECK: movi {{v[0-9]+}}.4h, #1 %r = shufflevector <1 x i16> , <1 x i16> undef, <4 x i32> zeroinitializer ret <4 x i16> %r } @@ -1433,7 +1433,7 @@ define <8 x i16> @concat_vector_v8i16_const_one() { ; CHECK-LABEL: concat_vector_v8i16_const_one: -; CHECK: movi {{v[0-9]+}}.8h, #0x1 +; CHECK: movi {{v[0-9]+}}.8h, #1 %r = shufflevector <1 x i16> , <1 x i16> undef, <8 x i32> zeroinitializer ret <8 x i16> %r } Index: test/CodeGen/AArch64/arm64-nvcast.ll =================================================================== --- test/CodeGen/AArch64/arm64-nvcast.ll +++ test/CodeGen/AArch64/arm64-nvcast.ll @@ -15,7 +15,7 @@ } ; CHECK-LABEL: _test2 -; CHECK: movi.16b v0, #0x3f +; CHECK: movi.16b v0, #63 ; CHECK: str q0, [sp] ; CHECK: mov x8, sp ; CHECK: ldr s0, [x8, w1, sxtw #2] Index: test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll =================================================================== --- test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll +++ test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll @@ -10,17 +10,17 @@ ; CHECK: str x{{.+}}, [sp, #-16]! ; CHECK-NEXT: mov x0, x{{.+}} ; CHECK: Ltmp -; CHECK-NEXT: movz x16, #0xffff, lsl #32 -; CHECK-NEXT: movk x16, #0xdead, lsl #16 -; CHECK-NEXT: movk x16, #0xbeef +; CHECK-NEXT: movz x16, #65535, lsl #32 +; CHECK-NEXT: movk x16, #57005, lsl #16 +; CHECK-NEXT: movk x16, #48879 ; CHECK-NEXT: blr x16 ; FAST-LABEL: jscall_patchpoint_codegen: ; FAST: Ltmp ; FAST: str x{{.+}}, [sp, #-16]! ; FAST: Ltmp -; FAST-NEXT: movz x16, #0xffff, lsl #32 -; FAST-NEXT: movk x16, #0xdead, lsl #16 -; FAST-NEXT: movk x16, #0xbeef +; FAST-NEXT: movz x16, #65535, lsl #32 +; FAST-NEXT: movk x16, #57005, lsl #16 +; FAST-NEXT: movk x16, #48879 ; FAST-NEXT: blr x16 %resolveCall2 = inttoptr i64 281474417671919 to i8* %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...) 
@llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2) @@ -41,9 +41,9 @@ ; CHECK-NEXT: orr w[[REG:[0-9]+]], wzr, #0x2 ; CHECK-NEXT: str x[[REG]], [sp] ; CHECK: Ltmp -; CHECK-NEXT: movz x16, #0xffff, lsl #32 -; CHECK-NEXT: movk x16, #0xdead, lsl #16 -; CHECK-NEXT: movk x16, #0xbeef +; CHECK-NEXT: movz x16, #65535, lsl #32 +; CHECK-NEXT: movk x16, #57005, lsl #16 +; CHECK-NEXT: movk x16, #48879 ; CHECK-NEXT: blr x16 ; FAST-LABEL: jscall_patchpoint_codegen2: ; FAST: Ltmp @@ -54,9 +54,9 @@ ; FAST-NEXT: str [[REG2]], [sp, #16] ; FAST-NEXT: str [[REG3]], [sp, #24] ; FAST: Ltmp -; FAST-NEXT: movz x16, #0xffff, lsl #32 -; FAST-NEXT: movk x16, #0xdead, lsl #16 -; FAST-NEXT: movk x16, #0xbeef +; FAST-NEXT: movz x16, #65535, lsl #32 +; FAST-NEXT: movk x16, #57005, lsl #16 +; FAST-NEXT: movk x16, #48879 ; FAST-NEXT: blr x16 %call = inttoptr i64 281474417671919 to i8* %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6) @@ -68,7 +68,7 @@ entry: ; CHECK-LABEL: jscall_patchpoint_codegen3: ; CHECK: Ltmp -; CHECK: movz w[[REG:[0-9]+]], #0xa +; CHECK: movz w[[REG:[0-9]+]], #10 ; CHECK-NEXT: str x[[REG]], [sp, #48] ; CHECK-NEXT: orr w[[REG:[0-9]+]], wzr, #0x8 ; CHECK-NEXT: str w[[REG]], [sp, #36] @@ -79,9 +79,9 @@ ; CHECK-NEXT: orr w[[REG:[0-9]+]], wzr, #0x2 ; CHECK-NEXT: str x[[REG]], [sp] ; CHECK: Ltmp -; CHECK-NEXT: movz x16, #0xffff, lsl #32 -; CHECK-NEXT: movk x16, #0xdead, lsl #16 -; CHECK-NEXT: movk x16, #0xbeef +; CHECK-NEXT: movz x16, #65535, lsl #32 +; CHECK-NEXT: movk x16, #57005, lsl #16 +; CHECK-NEXT: movk x16, #48879 ; CHECK-NEXT: blr x16 ; FAST-LABEL: jscall_patchpoint_codegen3: ; FAST: Ltmp @@ -89,16 +89,16 @@ ; FAST-NEXT: orr [[REG2:w[0-9]+]], wzr, #0x4 ; FAST-NEXT: orr [[REG3:x[0-9]+]], xzr, #0x6 ; FAST-NEXT: orr [[REG4:w[0-9]+]], wzr, #0x8 -; FAST-NEXT: movz [[REG5:x[0-9]+]], #0xa +; FAST-NEXT: movz [[REG5:x[0-9]+]], #10 ; FAST-NEXT: str [[REG1]], [sp, #-64]! ; FAST-NEXT: str [[REG2]], [sp, #16] ; FAST-NEXT: str [[REG3]], [sp, #24] ; FAST-NEXT: str [[REG4]], [sp, #36] ; FAST-NEXT: str [[REG5]], [sp, #48] ; FAST: Ltmp -; FAST-NEXT: movz x16, #0xffff, lsl #32 -; FAST-NEXT: movk x16, #0xdead, lsl #16 -; FAST-NEXT: movk x16, #0xbeef +; FAST-NEXT: movz x16, #65535, lsl #32 +; FAST-NEXT: movk x16, #57005, lsl #16 +; FAST-NEXT: movk x16, #48879 ; FAST-NEXT: blr x16 %call = inttoptr i64 281474417671919 to i8* %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...) 
@llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10) Index: test/CodeGen/AArch64/arm64-patchpoint.ll =================================================================== --- test/CodeGen/AArch64/arm64-patchpoint.ll +++ test/CodeGen/AArch64/arm64-patchpoint.ll @@ -6,13 +6,13 @@ define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) { entry: ; CHECK-LABEL: trivial_patchpoint_codegen: -; CHECK: movz x16, #0xdead, lsl #32 -; CHECK-NEXT: movk x16, #0xbeef, lsl #16 -; CHECK-NEXT: movk x16, #0xcafe +; CHECK: movz x16, #57005, lsl #32 +; CHECK-NEXT: movk x16, #48879, lsl #16 +; CHECK-NEXT: movk x16, #51966 ; CHECK-NEXT: blr x16 -; CHECK: movz x16, #0xdead, lsl #32 -; CHECK-NEXT: movk x16, #0xbeef, lsl #16 -; CHECK-NEXT: movk x16, #0xcaff +; CHECK: movz x16, #57005, lsl #32 +; CHECK-NEXT: movk x16, #48879, lsl #16 +; CHECK-NEXT: movk x16, #51967 ; CHECK-NEXT: blr x16 ; CHECK: ret %resolveCall2 = inttoptr i64 244837814094590 to i8* Index: test/CodeGen/AArch64/arm64-register-pairing.ll =================================================================== --- test/CodeGen/AArch64/arm64-register-pairing.ll +++ test/CodeGen/AArch64/arm64-register-pairing.ll @@ -13,7 +13,7 @@ ; CHECK: stp x24, x23, [sp, #96] ; CHECK: stp x22, x21, [sp, #112] ; CHECK: stp x20, x19, [sp, #128] -; CHECK: movz x0, #0x2a +; CHECK: movz x0, #42 ; CHECK: ldp x20, x19, [sp, #128] ; CHECK: ldp x22, x21, [sp, #112] ; CHECK: ldp x24, x23, [sp, #96] @@ -38,7 +38,7 @@ ; CHECK: stp x24, x23, [sp, #96] ; CHECK: stp x22, x21, [sp, #112] ; CHECK: stp x20, x19, [sp, #128] -; CHECK: movz x0, #0x2a +; CHECK: movz x0, #42 ; CHECK: ldp x20, x19, [sp, #128] ; CHECK: ldp x22, x21, [sp, #112] ; CHECK: ldp x24, x23, [sp, #96] Index: test/CodeGen/AArch64/arm64-shrink-wrapping.ll =================================================================== --- test/CodeGen/AArch64/arm64-shrink-wrapping.ll +++ test/CodeGen/AArch64/arm64-shrink-wrapping.ll @@ -73,7 +73,7 @@ ; DISABLE: cbz w0, [[ELSE_LABEL:LBB[0-9_]+]] ; ; CHECK: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #0xa +; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 ; ; Next BB. ; CHECK: [[LOOP:LBB[0-9_]+]]: ; %for.body @@ -140,7 +140,7 @@ ; CHECK-NEXT: stp [[CSR3:x[0-9]+]], [[CSR4:x[0-9]+]], [sp, #16] ; CHECK-NEXT: add [[NEW_SP:x[0-9]+]], sp, #16 ; CHECK: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #0xa +; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 ; Next BB. ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; CHECK: bl _something @@ -184,7 +184,7 @@ ; DISABLE: cbz w0, [[ELSE_LABEL:LBB[0-9_]+]] ; ; CHECK: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #0xa +; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 ; ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; CHECK: bl _something @@ -255,7 +255,7 @@ ; ; CHECK: bl _somethingElse ; CHECK-NEXT: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #0xa +; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 ; ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; CHECK: bl _something @@ -409,7 +409,7 @@ ; ; DISABLE: cbz w0, [[ELSE_LABEL:LBB[0-9_]+]] ; -; CHECK: movz [[IV:w[0-9]+]], #0xa +; CHECK: movz [[IV:w[0-9]+]], #10 ; ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; Inline asm statement. 
@@ -511,7 +511,7 @@ ; CHECK: and [[TEST:w[0-9]+]], w0, #0xff ; CHECK-NEXT: cbnz [[TEST]], [[ABORT:LBB[0-9_]+]] ; -; CHECK: movz w0, #0x2a +; CHECK: movz w0, #42 ; ; DISABLE-NEXT: ldp ; Index: test/CodeGen/AArch64/arm64-variadic-aapcs.ll =================================================================== --- test/CodeGen/AArch64/arm64-variadic-aapcs.ll +++ test/CodeGen/AArch64/arm64-variadic-aapcs.ll @@ -32,7 +32,7 @@ ; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #128 ; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16] -; CHECK: movn [[GR_OFFS:w[0-9]+]], #0x37 +; CHECK: movn [[GR_OFFS:w[0-9]+]], #55 ; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24] ; CHECK: orr [[VR_OFFS:w[0-9]+]], wzr, #0xffffff80 @@ -70,10 +70,10 @@ ; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #112 ; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16] -; CHECK: movn [[GR_OFFS:w[0-9]+]], #0x27 +; CHECK: movn [[GR_OFFS:w[0-9]+]], #39 ; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24] -; CHECK: movn [[VR_OFFS:w[0-9]+]], #0x6f +; CHECK: movn [[VR_OFFS:w[0-9]+]], #111 ; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28] %addr = bitcast %va_list* @var to i8* Index: test/CodeGen/AArch64/arm64-vector-ext.ll =================================================================== --- test/CodeGen/AArch64/arm64-vector-ext.ll +++ test/CodeGen/AArch64/arm64-vector-ext.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s ;CHECK: @func30 -;CHECK: movi.4h v1, #0x1 +;CHECK: movi.4h v1, #1 ;CHECK: and.8b v0, v0, v1 ;CHECK: ushll.4s v0, v0, #0 ;CHECK: str q0, [x0] Index: test/CodeGen/AArch64/arm64-vector-imm.ll =================================================================== --- test/CodeGen/AArch64/arm64-vector-imm.ll +++ test/CodeGen/AArch64/arm64-vector-imm.ll @@ -50,35 +50,35 @@ define <4 x i32> @movi_4s_imm_t1() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_4s_imm_t1: -; CHECK: movi.4s v0, #0x4b +; CHECK: movi.4s v0, #75 ret <4 x i32> } define <4 x i32> @movi_4s_imm_t2() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_4s_imm_t2: -; CHECK: movi.4s v0, #0x4b, lsl #8 +; CHECK: movi.4s v0, #75, lsl #8 ret <4 x i32> } define <4 x i32> @movi_4s_imm_t3() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_4s_imm_t3: -; CHECK: movi.4s v0, #0x4b, lsl #16 +; CHECK: movi.4s v0, #75, lsl #16 ret <4 x i32> } define <4 x i32> @movi_4s_imm_t4() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_4s_imm_t4: -; CHECK: movi.4s v0, #0x4b, lsl #24 +; CHECK: movi.4s v0, #75, lsl #24 ret <4 x i32> } define <8 x i16> @movi_8h_imm_t5() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_8h_imm_t5: -; CHECK: movi.8h v0, #0x4b +; CHECK: movi.8h v0, #75 ret <8 x i16> } @@ -86,28 +86,28 @@ define <8 x i16> @movi_8h_imm_t6() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_8h_imm_t6: -; CHECK: movi.8h v0, #0x4b, lsl #8 +; CHECK: movi.8h v0, #75, lsl #8 ret <8 x i16> } define <4 x i32> @movi_4s_imm_t7() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_4s_imm_t7: -; CHECK: movi.4s v0, #0x4b, msl #8 +; CHECK: movi.4s v0, #75, msl #8 ret <4 x i32> } define <4 x i32> @movi_4s_imm_t8() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_4s_imm_t8: -; CHECK: movi.4s v0, #0x4b, msl #16 +; CHECK: movi.4s v0, #75, msl #16 ret <4 x i32> } define <16 x i8> @movi_16b_imm_t9() nounwind readnone ssp { entry: ; CHECK-LABEL: movi_16b_imm_t9: -; CHECK: movi.16b v0, #0x4b +; CHECK: movi.16b v0, #75 ret <16 x i8> } Index: test/CodeGen/AArch64/arm64-vshift.ll =================================================================== --- test/CodeGen/AArch64/arm64-vshift.ll 
+++ test/CodeGen/AArch64/arm64-vshift.ll @@ -1315,7 +1315,7 @@ define <8 x i8> @uqshli8b_1(<8 x i8>* %A) nounwind { ;CHECK-LABEL: uqshli8b_1: -;CHECK: movi.8b [[REG:v[0-9]+]], #0x8 +;CHECK: movi.8b [[REG:v[0-9]+]], #8 ;CHECK: uqshl.8b v0, v0, [[REG]] %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> ) Index: test/CodeGen/AArch64/arm64-vshuffle.ll =================================================================== --- test/CodeGen/AArch64/arm64-vshuffle.ll +++ test/CodeGen/AArch64/arm64-vshuffle.ll @@ -36,7 +36,7 @@ } ; CHECK: test3 -; CHECK: movi.4s v{{[0-9]+}}, #0x1 +; CHECK: movi.4s v{{[0-9]+}}, #1 define <16 x i1> @test3(i1* %ptr, i32 %v) { bb: %Shuff = shufflevector <16 x i1> , <16 x i1> undef, Index: test/CodeGen/AArch64/bitreverse.ll =================================================================== --- test/CodeGen/AArch64/bitreverse.ll +++ test/CodeGen/AArch64/bitreverse.ll @@ -47,14 +47,14 @@ ; Try and match as much of the sequence as precisely as possible. ; CHECK-LABEL: g_vec: -; CHECK-DAG: movi [[M1:v.*]], #0x80 -; CHECK-DAG: movi [[M2:v.*]], #0x40 -; CHECK-DAG: movi [[M3:v.*]], #0x20 -; CHECK-DAG: movi [[M4:v.*]], #0x10 -; CHECK-DAG: movi [[M5:v.*]], #0x8 -; CHECK-DAG: movi [[M6:v.*]], #0x4{{$}} -; CHECK-DAG: movi [[M7:v.*]], #0x2{{$}} -; CHECK-DAG: movi [[M8:v.*]], #0x1{{$}} +; CHECK-DAG: movi [[M1:v.*]], #128 +; CHECK-DAG: movi [[M2:v.*]], #64 +; CHECK-DAG: movi [[M3:v.*]], #32 +; CHECK-DAG: movi [[M4:v.*]], #16 +; CHECK-DAG: movi [[M5:v.*]], #8 +; CHECK-DAG: movi [[M6:v.*]], #4{{$}} +; CHECK-DAG: movi [[M7:v.*]], #2{{$}} +; CHECK-DAG: movi [[M8:v.*]], #1{{$}} ; CHECK-DAG: shl [[S1:v.*]], v0.8b, #7 ; CHECK-DAG: shl [[S2:v.*]], v0.8b, #5 ; CHECK-DAG: shl [[S3:v.*]], v0.8b, #3 Index: test/CodeGen/AArch64/complex-int-to-fp.ll =================================================================== --- test/CodeGen/AArch64/complex-int-to-fp.ll +++ test/CodeGen/AArch64/complex-int-to-fp.ll @@ -155,7 +155,7 @@ } define <4 x float> @test_unsigned_v4i8_to_v4f32(<4 x i8> %v) nounwind readnone { ; CHECK-LABEL: test_unsigned_v4i8_to_v4f32 -; CHECK: bic.4h v0, #0xff, lsl #8 +; CHECK: bic.4h v0, #255, lsl #8 ; CHECK: ushll.4s [[VAL32:v[0-9]+]], v0, #0 ; CHECK: ucvtf.4s v0, [[VAL32]] Index: test/CodeGen/AArch64/f16-instructions.ll =================================================================== --- test/CodeGen/AArch64/f16-instructions.ll +++ test/CodeGen/AArch64/f16-instructions.ll @@ -695,7 +695,7 @@ ; CHECK-LABEL: test_copysign: ; CHECK-NEXT: fcvt s1, h1 ; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: movi.4s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v2, #128, lsl #24 ; CHECK-NEXT: bit.16b v0, v1, v2 ; CHECK-NEXT: fcvt h0, s0 ; CHECK-NEXT: ret @@ -706,7 +706,7 @@ ; CHECK-LABEL: test_copysign_f32: ; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: movi.4s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v2, #128, lsl #24 ; CHECK-NEXT: bit.16b v0, v1, v2 ; CHECK-NEXT: fcvt h0, s0 ; CHECK-NEXT: ret @@ -719,7 +719,7 @@ ; CHECK-LABEL: test_copysign_f64: ; CHECK-NEXT: fcvt s1, d1 ; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: movi.4s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v2, #128, lsl #24 ; CHECK-NEXT: bit.16b v0, v1, v2 ; CHECK-NEXT: fcvt h0, s0 ; CHECK-NEXT: ret @@ -735,7 +735,7 @@ ; CHECK-LABEL: test_copysign_extended: ; CHECK-NEXT: fcvt s1, h1 ; CHECK-NEXT: fcvt s0, h0 -; CHECK-NEXT: movi.4s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v2, #128, lsl #24 ; CHECK-NEXT: bit.16b v0, v1, v2 ; CHECK-NEXT: ret define float @test_copysign_extended(half %a, half 
%b) #0 { Index: test/CodeGen/AArch64/fast-isel-cmp-vec.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-cmp-vec.ll +++ test/CodeGen/AArch64/fast-isel-cmp-vec.ll @@ -11,7 +11,7 @@ ; CHECK: ; BB#0: ; CHECK-NEXT: cmeq.2s [[CMP:v[0-9]+]], v0, #0 ; CHECK-NEXT: ; BB#1: -; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #0x1 +; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret %c = icmp eq <2 x i32> %a, zeroinitializer @@ -26,7 +26,7 @@ ; CHECK: ; BB#0: ; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff ; CHECK-NEXT: ; BB#1: -; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #0x1 +; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b v0, v[[CMP]], [[MASK]] ; CHECK-NEXT: ret %1 = icmp eq <2 x i32> %a, %a @@ -42,7 +42,7 @@ ; CHECK-NEXT: cmeq.4s [[CMP:v[0-9]+]], v0, #0 ; CHECK-NEXT: xtn.4h [[CMPV4I16:v[0-9]+]], [[CMP]] ; CHECK-NEXT: ; BB#1: -; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #0x1 +; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], [[CMPV4I16]], [[MASK]] ; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ ; CHECK: ; BB#0: ; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff ; CHECK-NEXT: ; BB#1: -; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #0x1 +; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], v[[CMP]], [[MASK]] ; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ ; CHECK: ; BB#0: ; CHECK-NEXT: cmeq.16b [[CMP:v[0-9]+]], v0, #0 ; CHECK-NEXT: ; BB#1: -; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #0x1 +; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret %c = icmp eq <16 x i8> %a, zeroinitializer @@ -89,7 +89,7 @@ ; CHECK: ; BB#0: ; CHECK-NEXT: movi.2d [[CMP:v[0-9]+]], #0xffffffffffffffff ; CHECK-NEXT: ; BB#1: -; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #0x1 +; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret %1 = icmp eq <16 x i8> %a, %a Index: test/CodeGen/AArch64/fast-isel-gep.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-gep.ll +++ test/CodeGen/AArch64/fast-isel-gep.ll @@ -33,7 +33,7 @@ define i32* @test_array4(i32* %a) { ; CHECK-LABEL: test_array4 -; CHECK: movz [[REG:x[0-9]+]], #0x1008 +; CHECK: movz [[REG:x[0-9]+]], #4104 ; CHECK-NEXR: add x0, x0, [[REG]] %1 = getelementptr inbounds i32, i32* %a, i64 1026 ret i32* %1 Index: test/CodeGen/AArch64/fdiv_combine.ll =================================================================== --- test/CodeGen/AArch64/fdiv_combine.ll +++ test/CodeGen/AArch64/fdiv_combine.ll @@ -38,7 +38,7 @@ ; Test which should not fold due to power of 2 out of range. 
; CHECK-LABEL: @test4 ; CHECK: scvtf.2s v0, v0 -; CHECK: movi.2s v1, #0x50, lsl #24 +; CHECK: movi.2s v1, #80, lsl #24 ; CHECK: fdiv.2s v0, v0, v1 ; CHECK: ret define <2 x float> @test4(<2 x i32> %in) { @@ -96,7 +96,7 @@ ; CHECK-LABEL: @test9 ; CHECK: ucvtf.2d v0, v0 ; CHECK: fcvtn v0.2s, v0.2d -; CHECK: movi.2s v1, #0x40, lsl #24 +; CHECK: movi.2s v1, #64, lsl #24 ; CHECK: fdiv.2s v0, v0, v1 ; CHECK: ret define <2 x float> @test9(<2 x i64> %in) { Index: test/CodeGen/AArch64/fp16-v4-instructions.ll =================================================================== --- test/CodeGen/AArch64/fp16-v4-instructions.ll +++ test/CodeGen/AArch64/fp16-v4-instructions.ll @@ -15,7 +15,7 @@ define <4 x half> @build_h4(<4 x half> %a) { entry: ; CHECK-LABEL: build_h4: -; CHECK: movz [[GPR:w[0-9]+]], #0x3ccd +; CHECK: movz [[GPR:w[0-9]+]], #15565 ; CHECK: dup v0.4h, [[GPR]] ret <4 x half> } @@ -176,7 +176,7 @@ define <4 x half> @uitofp_i8(<4 x i8> %a) #0 { ; CHECK-LABEL: uitofp_i8: -; CHECK-NEXT: bic v0.4h, #0xff, lsl #8 +; CHECK-NEXT: bic v0.4h, #255, lsl #8 ; CHECK-NEXT: ushll [[OP1:v[0-9]+\.4s]], v0.4h, #0 ; CHECK-NEXT: ucvtf [[OP2:v[0-9]+\.4s]], [[OP1]] ; CHECK-NEXT: fcvtn v0.4h, [[OP2]] Index: test/CodeGen/AArch64/fp16-vector-nvcast.ll =================================================================== --- test/CodeGen/AArch64/fp16-vector-nvcast.ll +++ test/CodeGen/AArch64/fp16-vector-nvcast.ll @@ -3,7 +3,7 @@ ; Test pattern (v4f16 (AArch64NvCast (v2i32 FPR64:$src))) define void @nvcast_v2i32(<4 x half>* %a) #0 { ; CHECK-LABEL: nvcast_v2i32: -; CHECK-NEXT: movi v[[REG:[0-9]+]].2s, #0xab, lsl #16 +; CHECK-NEXT: movi v[[REG:[0-9]+]].2s, #171, lsl #16 ; CHECK-NEXT: str d[[REG]], [x0] ; CHECK-NEXT: ret store volatile <4 x half> , <4 x half>* %a @@ -14,7 +14,7 @@ ; Test pattern (v4f16 (AArch64NvCast (v4i16 FPR64:$src))) define void @nvcast_v4i16(<4 x half>* %a) #0 { ; CHECK-LABEL: nvcast_v4i16: -; CHECK-NEXT: movi v[[REG:[0-9]+]].4h, #0xab +; CHECK-NEXT: movi v[[REG:[0-9]+]].4h, #171 ; CHECK-NEXT: str d[[REG]], [x0] ; CHECK-NEXT: ret store volatile <4 x half> , <4 x half>* %a @@ -25,7 +25,7 @@ ; Test pattern (v4f16 (AArch64NvCast (v8i8 FPR64:$src))) define void @nvcast_v8i8(<4 x half>* %a) #0 { ; CHECK-LABEL: nvcast_v8i8: -; CHECK-NEXT: movi v[[REG:[0-9]+]].8b, #0xab +; CHECK-NEXT: movi v[[REG:[0-9]+]].8b, #171 ; CHECK-NEXT: str d[[REG]], [x0] ; CHECK-NEXT: ret store volatile <4 x half> , <4 x half>* %a @@ -46,7 +46,7 @@ ; Test pattern (v8f16 (AArch64NvCast (v4i32 FPR128:$src))) define void @nvcast_v4i32(<8 x half>* %a) #0 { ; CHECK-LABEL: nvcast_v4i32: -; CHECK-NEXT: movi v[[REG:[0-9]+]].4s, #0xab, lsl #16 +; CHECK-NEXT: movi v[[REG:[0-9]+]].4s, #171, lsl #16 ; CHECK-NEXT: str q[[REG]], [x0] ; CHECK-NEXT: ret store volatile <8 x half> , <8 x half>* %a @@ -57,7 +57,7 @@ ; Test pattern (v8f16 (AArch64NvCast (v8i16 FPR128:$src))) define void @nvcast_v8i16(<8 x half>* %a) #0 { ; CHECK-LABEL: nvcast_v8i16: -; CHECK-NEXT: movi v[[REG:[0-9]+]].8h, #0xab +; CHECK-NEXT: movi v[[REG:[0-9]+]].8h, #171 ; CHECK-NEXT: str q[[REG]], [x0] ; CHECK-NEXT: ret store volatile <8 x half> , <8 x half>* %a @@ -68,7 +68,7 @@ ; Test pattern (v8f16 (AArch64NvCast (v16i8 FPR128:$src))) define void @nvcast_v16i8(<8 x half>* %a) #0 { ; CHECK-LABEL: nvcast_v16i8: -; CHECK-NEXT: movi v[[REG:[0-9]+]].16b, #0xab +; CHECK-NEXT: movi v[[REG:[0-9]+]].16b, #171 ; CHECK-NEXT: str q[[REG]], [x0] ; CHECK-NEXT: ret store volatile <8 x half> , <8 x half>* %a Index: test/CodeGen/AArch64/fpimm.ll 
=================================================================== --- test/CodeGen/AArch64/fpimm.ll +++ test/CodeGen/AArch64/fpimm.ll @@ -38,18 +38,18 @@ } ; LARGE-LABEL: check_float2 -; LARGE: movz [[REG:w[0-9]+]], #0x4049, lsl #16 -; LARGE-NEXT: movk [[REG]], #0xfdb +; LARGE: movz [[REG:w[0-9]+]], #16457, lsl #16 +; LARGE-NEXT: movk [[REG]], #4059 ; LARGE-NEXT: fmov s0, [[REG]] define float @check_float2() { ret float 3.14159274101257324218750 } ; LARGE-LABEL: check_double2 -; LARGE: movz [[REG:x[0-9]+]], #0x4009, lsl #48 -; LARGE-NEXT: movk [[REG]], #0x21fb, lsl #32 -; LARGE-NEXT: movk [[REG]], #0x5444, lsl #16 -; LARGE-NEXT: movk [[REG]], #0x2d18 +; LARGE: movz [[REG:x[0-9]+]], #16393, lsl #48 +; LARGE-NEXT: movk [[REG]], #8699, lsl #32 +; LARGE-NEXT: movk [[REG]], #21572, lsl #16 +; LARGE-NEXT: movk [[REG]], #11544 ; LARGE-NEXT: fmov d0, [[REG]] define double @check_double2() { ret double 3.1415926535897931159979634685441851615905761718750 Index: test/CodeGen/AArch64/func-calls.ll =================================================================== --- test/CodeGen/AArch64/func-calls.ll +++ test/CodeGen/AArch64/func-calls.ll @@ -104,10 +104,10 @@ float -2.0, float -8.0, float 16.0, float 1.0, float 64.0) -; CHECK: movz [[SIXTY_FOUR:w[0-9]+]], #0x4280, lsl #16 +; CHECK: movz [[SIXTY_FOUR:w[0-9]+]], #17024, lsl #16 ; CHECK: str [[SIXTY_FOUR]], [sp] -; CHECK-NONEON: movz [[SIXTY_FOUR:w[0-9]+]], #0x4280, lsl #16 +; CHECK-NONEON: movz [[SIXTY_FOUR:w[0-9]+]], #17024, lsl #16 ; CHECK-NONEON: str [[SIXTY_FOUR]], [sp] ; CHECK: bl stacked_fpu Index: test/CodeGen/AArch64/hints.ll =================================================================== --- test/CodeGen/AArch64/hints.ll +++ test/CodeGen/AArch64/hints.ll @@ -63,5 +63,5 @@ } ; CHECK-LABEL: hint_undefined -; CHECK: hint #0x8 +; CHECK: hint #8 Index: test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll =================================================================== --- test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll +++ test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll @@ -5,7 +5,7 @@ ; RUN: llc -mtriple=aarch64 < %s -filetype=obj | llvm-objdump -arch=aarch64 -d - | FileCheck %s ; CHECK-LABEL: foo: -; CHECK: a0 79 95 d2 movz x0, #0xabcd +; CHECK: a0 79 95 d2 movz x0, #43981 ; CHECK: c0 03 5f d6 ret define i32 @foo() nounwind { entry: Index: test/CodeGen/AArch64/vector-fcopysign.ll =================================================================== --- test/CodeGen/AArch64/vector-fcopysign.ll +++ test/CodeGen/AArch64/vector-fcopysign.ll @@ -7,7 +7,7 @@ ; WidenVecRes same define <1 x float> @test_copysign_v1f32_v1f32(<1 x float> %a, <1 x float> %b) #0 { ; CHECK-LABEL: test_copysign_v1f32_v1f32: -; CHECK-NEXT: movi.2s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.2s v2, #128, lsl #24 ; CHECK-NEXT: bit.8b v0, v1, v2 ; CHECK-NEXT: ret %r = call <1 x float> @llvm.copysign.v1f32(<1 x float> %a, <1 x float> %b) @@ -18,7 +18,7 @@ define <1 x float> @test_copysign_v1f32_v1f64(<1 x float> %a, <1 x double> %b) #0 { ; CHECK-LABEL: test_copysign_v1f32_v1f64: ; CHECK-NEXT: fcvt s1, d1 -; CHECK-NEXT: movi.4s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v2, #128, lsl #24 ; CHECK-NEXT: bit.16b v0, v1, v2 ; CHECK-NEXT: ret %tmp0 = fptrunc <1 x double> %b to <1 x float> @@ -59,7 +59,7 @@ define <2 x float> @test_copysign_v2f32_v2f32(<2 x float> %a, <2 x float> %b) #0 { ; CHECK-LABEL: test_copysign_v2f32_v2f32: -; CHECK-NEXT: movi.2s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.2s v2, #128, lsl #24 ; CHECK-NEXT: bit.8b v0, v1, v2 ; CHECK-NEXT: ret %r = call <2 x float> 
@llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) @@ -69,7 +69,7 @@ define <2 x float> @test_copysign_v2f32_v2f64(<2 x float> %a, <2 x double> %b) #0 { ; CHECK-LABEL: test_copysign_v2f32_v2f64: ; CHECK-NEXT: fcvtn v1.2s, v1.2d -; CHECK-NEXT: movi.2s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.2s v2, #128, lsl #24 ; CHECK-NEXT: bit.8b v0, v1, v2 ; CHECK-NEXT: ret %tmp0 = fptrunc <2 x double> %b to <2 x float> @@ -83,7 +83,7 @@ define <4 x float> @test_copysign_v4f32_v4f32(<4 x float> %a, <4 x float> %b) #0 { ; CHECK-LABEL: test_copysign_v4f32_v4f32: -; CHECK-NEXT: movi.4s v2, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v2, #128, lsl #24 ; CHECK-NEXT: bit.16b v0, v1, v2 ; CHECK-NEXT: ret %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) @@ -95,7 +95,7 @@ ; CHECK-LABEL: test_copysign_v4f32_v4f64: ; CHECK-NEXT: mov s3, v0[1] ; CHECK-NEXT: mov d4, v1[1] -; CHECK-NEXT: movi.4s v5, #0x80, lsl #24 +; CHECK-NEXT: movi.4s v5, #128, lsl #24 ; CHECK-NEXT: fcvt s1, d1 ; CHECK-NEXT: mov s6, v0[2] ; CHECK-NEXT: mov s7, v0[3] Index: test/MC/AArch64/arm64-advsimd.s =================================================================== --- test/MC/AArch64/arm64-advsimd.s +++ test/MC/AArch64/arm64-advsimd.s @@ -931,19 +931,19 @@ bic.2s v0, #1, lsl #16 bic.2s v0, #1, lsl #24 -; CHECK: bic.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x2f] -; CHECK: bic.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x2f] -; CHECK: bic.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x2f] -; CHECK: bic.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x2f] -; CHECK: bic.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x2f] +; CHECK: bic.2s v0, #1 ; encoding: [0x20,0x14,0x00,0x2f] +; CHECK: bic.2s v0, #1 ; encoding: [0x20,0x14,0x00,0x2f] +; CHECK: bic.2s v0, #1, lsl #8 ; encoding: [0x20,0x34,0x00,0x2f] +; CHECK: bic.2s v0, #1, lsl #16 ; encoding: [0x20,0x54,0x00,0x2f] +; CHECK: bic.2s v0, #1, lsl #24 ; encoding: [0x20,0x74,0x00,0x2f] bic.4h v0, #1 bic.4h v0, #1, lsl #0 bic.4h v0, #1, lsl #8 -; CHECK: bic.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x2f] -; CHECK: bic.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x2f] -; CHECK: bic.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x2f] +; CHECK: bic.4h v0, #1 ; encoding: [0x20,0x94,0x00,0x2f] +; CHECK: bic.4h v0, #1 ; encoding: [0x20,0x94,0x00,0x2f] +; CHECK: bic.4h v0, #1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x2f] bic.4s v0, #1 bic.4s v0, #1, lsl #0 @@ -951,19 +951,19 @@ bic.4s v0, #1, lsl #16 bic.4s v0, #1, lsl #24 -; CHECK: bic.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x6f] -; CHECK: bic.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x6f] -; CHECK: bic.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x6f] -; CHECK: bic.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x6f] -; CHECK: bic.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x6f] +; CHECK: bic.4s v0, #1 ; encoding: [0x20,0x14,0x00,0x6f] +; CHECK: bic.4s v0, #1 ; encoding: [0x20,0x14,0x00,0x6f] +; CHECK: bic.4s v0, #1, lsl #8 ; encoding: [0x20,0x34,0x00,0x6f] +; CHECK: bic.4s v0, #1, lsl #16 ; encoding: [0x20,0x54,0x00,0x6f] +; CHECK: bic.4s v0, #1, lsl #24 ; encoding: [0x20,0x74,0x00,0x6f] bic.8h v0, #1 bic.8h v0, #1, lsl #0 bic.8h v0, #1, lsl #8 -; CHECK: bic.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x6f] -; CHECK: bic.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x6f] -; CHECK: bic.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x6f] +; CHECK: bic.8h v0, #1 ; encoding: [0x20,0x94,0x00,0x6f] +; CHECK: bic.8h v0, #1 ; encoding: [0x20,0x94,0x00,0x6f] +; CHECK: bic.8h v0, #1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x6f] fmov.2d v0, 
#1.250000e-01 @@ -981,19 +981,19 @@ orr.2s v0, #1, lsl #16 orr.2s v0, #1, lsl #24 -; CHECK: orr.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x0f] -; CHECK: orr.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x0f] -; CHECK: orr.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x0f] -; CHECK: orr.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x0f] -; CHECK: orr.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x0f] +; CHECK: orr.2s v0, #1 ; encoding: [0x20,0x14,0x00,0x0f] +; CHECK: orr.2s v0, #1 ; encoding: [0x20,0x14,0x00,0x0f] +; CHECK: orr.2s v0, #1, lsl #8 ; encoding: [0x20,0x34,0x00,0x0f] +; CHECK: orr.2s v0, #1, lsl #16 ; encoding: [0x20,0x54,0x00,0x0f] +; CHECK: orr.2s v0, #1, lsl #24 ; encoding: [0x20,0x74,0x00,0x0f] orr.4h v0, #1 orr.4h v0, #1, lsl #0 orr.4h v0, #1, lsl #8 -; CHECK: orr.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x0f] -; CHECK: orr.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x0f] -; CHECK: orr.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x0f] +; CHECK: orr.4h v0, #1 ; encoding: [0x20,0x94,0x00,0x0f] +; CHECK: orr.4h v0, #1 ; encoding: [0x20,0x94,0x00,0x0f] +; CHECK: orr.4h v0, #1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x0f] orr.4s v0, #1 orr.4s v0, #1, lsl #0 @@ -1001,19 +1001,19 @@ orr.4s v0, #1, lsl #16 orr.4s v0, #1, lsl #24 -; CHECK: orr.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x4f] -; CHECK: orr.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x4f] -; CHECK: orr.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x4f] -; CHECK: orr.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x4f] -; CHECK: orr.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x4f] +; CHECK: orr.4s v0, #1 ; encoding: [0x20,0x14,0x00,0x4f] +; CHECK: orr.4s v0, #1 ; encoding: [0x20,0x14,0x00,0x4f] +; CHECK: orr.4s v0, #1, lsl #8 ; encoding: [0x20,0x34,0x00,0x4f] +; CHECK: orr.4s v0, #1, lsl #16 ; encoding: [0x20,0x54,0x00,0x4f] +; CHECK: orr.4s v0, #1, lsl #24 ; encoding: [0x20,0x74,0x00,0x4f] orr.8h v0, #1 orr.8h v0, #1, lsl #0 orr.8h v0, #1, lsl #8 -; CHECK: orr.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x4f] -; CHECK: orr.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x4f] -; CHECK: orr.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x4f] +; CHECK: orr.8h v0, #1 ; encoding: [0x20,0x94,0x00,0x4f] +; CHECK: orr.8h v0, #1 ; encoding: [0x20,0x94,0x00,0x4f] +; CHECK: orr.8h v0, #1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x4f] movi d0, #0x000000000000ff movi.2d v0, #0x000000000000ff @@ -1027,11 +1027,11 @@ movi.2s v0, #1, lsl #16 movi.2s v0, #1, lsl #24 -; CHECK: movi.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x0f] -; CHECK: movi.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x0f] -; CHECK: movi.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x0f] -; CHECK: movi.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x0f] -; CHECK: movi.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x0f] +; CHECK: movi.2s v0, #1 ; encoding: [0x20,0x04,0x00,0x0f] +; CHECK: movi.2s v0, #1 ; encoding: [0x20,0x04,0x00,0x0f] +; CHECK: movi.2s v0, #1, lsl #8 ; encoding: [0x20,0x24,0x00,0x0f] +; CHECK: movi.2s v0, #1, lsl #16 ; encoding: [0x20,0x44,0x00,0x0f] +; CHECK: movi.2s v0, #1, lsl #24 ; encoding: [0x20,0x64,0x00,0x0f] movi.4s v0, #1 movi.4s v0, #1, lsl #0 @@ -1039,43 +1039,43 @@ movi.4s v0, #1, lsl #16 movi.4s v0, #1, lsl #24 -; CHECK: movi.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x4f] -; CHECK: movi.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x4f] -; CHECK: movi.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x4f] -; CHECK: movi.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x4f] -; CHECK: movi.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x4f] +; 
CHECK: movi.4s v0, #1 ; encoding: [0x20,0x04,0x00,0x4f] +; CHECK: movi.4s v0, #1 ; encoding: [0x20,0x04,0x00,0x4f] +; CHECK: movi.4s v0, #1, lsl #8 ; encoding: [0x20,0x24,0x00,0x4f] +; CHECK: movi.4s v0, #1, lsl #16 ; encoding: [0x20,0x44,0x00,0x4f] +; CHECK: movi.4s v0, #1, lsl #24 ; encoding: [0x20,0x64,0x00,0x4f] movi.4h v0, #1 movi.4h v0, #1, lsl #0 movi.4h v0, #1, lsl #8 -; CHECK: movi.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x0f] -; CHECK: movi.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x0f] -; CHECK: movi.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x0f] +; CHECK: movi.4h v0, #1 ; encoding: [0x20,0x84,0x00,0x0f] +; CHECK: movi.4h v0, #1 ; encoding: [0x20,0x84,0x00,0x0f] +; CHECK: movi.4h v0, #1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x0f] movi.8h v0, #1 movi.8h v0, #1, lsl #0 movi.8h v0, #1, lsl #8 -; CHECK: movi.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x4f] -; CHECK: movi.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x4f] -; CHECK: movi.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x4f] +; CHECK: movi.8h v0, #1 ; encoding: [0x20,0x84,0x00,0x4f] +; CHECK: movi.8h v0, #1 ; encoding: [0x20,0x84,0x00,0x4f] +; CHECK: movi.8h v0, #1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x4f] movi.2s v0, #1, msl #8 movi.2s v0, #1, msl #16 movi.4s v0, #1, msl #8 movi.4s v0, #1, msl #16 -; CHECK: movi.2s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x0f] -; CHECK: movi.2s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x0f] -; CHECK: movi.4s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x4f] -; CHECK: movi.4s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x4f] +; CHECK: movi.2s v0, #1, msl #8 ; encoding: [0x20,0xc4,0x00,0x0f] +; CHECK: movi.2s v0, #1, msl #16 ; encoding: [0x20,0xd4,0x00,0x0f] +; CHECK: movi.4s v0, #1, msl #8 ; encoding: [0x20,0xc4,0x00,0x4f] +; CHECK: movi.4s v0, #1, msl #16 ; encoding: [0x20,0xd4,0x00,0x4f] movi.8b v0, #1 movi.16b v0, #1 -; CHECK: movi.8b v0, #0x1 ; encoding: [0x20,0xe4,0x00,0x0f] -; CHECK: movi.16b v0, #0x1 ; encoding: [0x20,0xe4,0x00,0x4f] +; CHECK: movi.8b v0, #1 ; encoding: [0x20,0xe4,0x00,0x0f] +; CHECK: movi.16b v0, #1 ; encoding: [0x20,0xe4,0x00,0x4f] mvni.2s v0, #1 mvni.2s v0, #1, lsl #0 @@ -1083,11 +1083,11 @@ mvni.2s v0, #1, lsl #16 mvni.2s v0, #1, lsl #24 -; CHECK: mvni.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x2f] -; CHECK: mvni.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x2f] -; CHECK: mvni.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x2f] -; CHECK: mvni.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x2f] -; CHECK: mvni.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x2f] +; CHECK: mvni.2s v0, #1 ; encoding: [0x20,0x04,0x00,0x2f] +; CHECK: mvni.2s v0, #1 ; encoding: [0x20,0x04,0x00,0x2f] +; CHECK: mvni.2s v0, #1, lsl #8 ; encoding: [0x20,0x24,0x00,0x2f] +; CHECK: mvni.2s v0, #1, lsl #16 ; encoding: [0x20,0x44,0x00,0x2f] +; CHECK: mvni.2s v0, #1, lsl #24 ; encoding: [0x20,0x64,0x00,0x2f] mvni.4s v0, #1 mvni.4s v0, #1, lsl #0 @@ -1095,37 +1095,37 @@ mvni.4s v0, #1, lsl #16 mvni.4s v0, #1, lsl #24 -; CHECK: mvni.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x6f] -; CHECK: mvni.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x6f] -; CHECK: mvni.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x6f] -; CHECK: mvni.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x6f] -; CHECK: mvni.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x6f] +; CHECK: mvni.4s v0, #1 ; encoding: [0x20,0x04,0x00,0x6f] +; CHECK: mvni.4s v0, #1 ; encoding: [0x20,0x04,0x00,0x6f] +; CHECK: mvni.4s v0, #1, lsl #8 ; encoding: [0x20,0x24,0x00,0x6f] +; CHECK: mvni.4s v0, #1, lsl #16 ; encoding: 
[0x20,0x44,0x00,0x6f] +; CHECK: mvni.4s v0, #1, lsl #24 ; encoding: [0x20,0x64,0x00,0x6f] mvni.4h v0, #1 mvni.4h v0, #1, lsl #0 mvni.4h v0, #1, lsl #8 -; CHECK: mvni.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x2f] -; CHECK: mvni.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x2f] -; CHECK: mvni.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x2f] +; CHECK: mvni.4h v0, #1 ; encoding: [0x20,0x84,0x00,0x2f] +; CHECK: mvni.4h v0, #1 ; encoding: [0x20,0x84,0x00,0x2f] +; CHECK: mvni.4h v0, #1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x2f] mvni.8h v0, #1 mvni.8h v0, #1, lsl #0 mvni.8h v0, #1, lsl #8 -; CHECK: mvni.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x6f] -; CHECK: mvni.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x6f] -; CHECK: mvni.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x6f] +; CHECK: mvni.8h v0, #1 ; encoding: [0x20,0x84,0x00,0x6f] +; CHECK: mvni.8h v0, #1 ; encoding: [0x20,0x84,0x00,0x6f] +; CHECK: mvni.8h v0, #1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x6f] mvni.2s v0, #1, msl #8 mvni.2s v0, #1, msl #16 mvni.4s v0, #1, msl #8 mvni.4s v0, #1, msl #16 -; CHECK: mvni.2s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x2f] -; CHECK: mvni.2s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x2f] -; CHECK: mvni.4s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x6f] -; CHECK: mvni.4s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x6f] +; CHECK: mvni.2s v0, #1, msl #8 ; encoding: [0x20,0xc4,0x00,0x2f] +; CHECK: mvni.2s v0, #1, msl #16 ; encoding: [0x20,0xd4,0x00,0x2f] +; CHECK: mvni.4s v0, #1, msl #8 ; encoding: [0x20,0xc4,0x00,0x6f] +; CHECK: mvni.4s v0, #1, msl #16 ; encoding: [0x20,0xd4,0x00,0x6f] ;===-------------------------------------------------------------------------=== ; AdvSIMD scalar x index Index: test/MC/AArch64/arm64-aliases.s =================================================================== --- test/MC/AArch64/arm64-aliases.s +++ test/MC/AArch64/arm64-aliases.s @@ -1,4 +1,4 @@ -; RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon -output-asm-variant=1 -show-encoding < %s | FileCheck %s +; RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon -output-asm-variant=1 -show-encoding -print-imm-hex < %s | FileCheck %s foo: ;----------------------------------------------------------------------------- @@ -66,8 +66,8 @@ cmn x2, w3, uxtb #1 cmn x4, x5, uxtx #1 -; CHECK: cmn w1, #3 ; encoding: [0x3f,0x0c,0x00,0x31] -; CHECK: cmn x2, #1024, lsl #12 ; encoding: [0x5f,0x00,0x50,0xb1] +; CHECK: cmn w1, #0x3 ; encoding: [0x3f,0x0c,0x00,0x31] +; CHECK: cmn x2, #0x400, lsl #12 ; encoding: [0x5f,0x00,0x50,0xb1] ; CHECK: cmn w4, w5 ; encoding: [0x9f,0x00,0x05,0x2b] ; CHECK: cmn x6, x7 ; encoding: [0xdf,0x00,0x07,0xab] ; CHECK: cmn w8, w9, asr #3 ; encoding: [0x1f,0x0d,0x89,0x2b] @@ -92,8 +92,8 @@ cmp w9, w8, uxtw cmp wsp, w9, lsl #0 -; CHECK: cmp w1, #1024, lsl #12 ; encoding: [0x3f,0x00,0x50,0x71] -; CHECK: cmp x2, #1024 ; encoding: [0x5f,0x00,0x10,0xf1] +; CHECK: cmp w1, #0x400, lsl #12 ; encoding: [0x3f,0x00,0x50,0x71] +; CHECK: cmp x2, #0x400 ; encoding: [0x5f,0x00,0x10,0xf1] ; CHECK: cmp w4, w5 ; encoding: [0x9f,0x00,0x05,0x6b] ; CHECK: cmp x6, x7 ; encoding: [0xdf,0x00,0x07,0xeb] ; CHECK: cmp w8, w9, asr #3 ; encoding: [0x1f,0x0d,0x89,0x6b] @@ -218,8 +218,8 @@ ubfm x0, x0, #63, #62 ubfm w0, w0, #4, #31 ubfm x0, x0, #4, #63 -; CHECK: ror w1, w3, #5 -; CHECK: ror x1, x3, #5 +; CHECK: ror w1, w3, #0x5 +; CHECK: ror x1, x3, #0x5 ror w1, w3, #5 ror x1, x3, #5 ; CHECK: lsl w1, wzr, #3 @@ -745,7 +745,7 @@ movi v1.2d, #0x000000000000ff movi v2.2D, #0x000000000000ff -; CHECK: movi.16b v4, #0 ; encoding: 
[0x04,0xe4,0x00,0x4f] +; CHECK: movi.16b v4, #0x0 ; encoding: [0x04,0xe4,0x00,0x4f] ; CHECK: movi.16b v4, #0x1 ; encoding: [0x24,0xe4,0x00,0x4f] ; CHECK: movi.8b v4, #0x2 ; encoding: [0x44,0xe4,0x00,0x0f] ; CHECK: movi.8b v4, #0x3 ; encoding: [0x64,0xe4,0x00,0x0f] Index: test/MC/AArch64/arm64-arithmetic-encoding.s =================================================================== --- test/MC/AArch64/arm64-arithmetic-encoding.s +++ test/MC/AArch64/arm64-arithmetic-encoding.s @@ -494,30 +494,30 @@ movz w0, #1, lsl #16 movz x0, #1, lsl #16 -; CHECK: movz w0, #0x1 ; encoding: [0x20,0x00,0x80,0x52] -; CHECK: movz x0, #0x1 ; encoding: [0x20,0x00,0x80,0xd2] -; CHECK: movz w0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x52] -; CHECK: movz x0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xd2] +; CHECK: movz w0, #1 ; encoding: [0x20,0x00,0x80,0x52] +; CHECK: movz x0, #1 ; encoding: [0x20,0x00,0x80,0xd2] +; CHECK: movz w0, #1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x52] +; CHECK: movz x0, #1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xd2] movn w0, #2 movn x0, #2 movn w0, #2, lsl #16 movn x0, #2, lsl #16 -; CHECK: movn w0, #0x2 ; encoding: [0x40,0x00,0x80,0x12] -; CHECK: movn x0, #0x2 ; encoding: [0x40,0x00,0x80,0x92] -; CHECK: movn w0, #0x2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x12] -; CHECK: movn x0, #0x2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x92] +; CHECK: movn w0, #2 ; encoding: [0x40,0x00,0x80,0x12] +; CHECK: movn x0, #2 ; encoding: [0x40,0x00,0x80,0x92] +; CHECK: movn w0, #2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x12] +; CHECK: movn x0, #2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x92] movk w0, #1 movk x0, #1 movk w0, #1, lsl #16 movk x0, #1, lsl #16 -; CHECK: movk w0, #0x1 ; encoding: [0x20,0x00,0x80,0x72] -; CHECK: movk x0, #0x1 ; encoding: [0x20,0x00,0x80,0xf2] -; CHECK: movk w0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x72] -; CHECK: movk x0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xf2] +; CHECK: movk w0, #1 ; encoding: [0x20,0x00,0x80,0x72] +; CHECK: movk x0, #1 ; encoding: [0x20,0x00,0x80,0xf2] +; CHECK: movk w0, #1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x72] +; CHECK: movk x0, #1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xf2] ;==---------------------------------------------------------------------------== ; Conditionally set flags instructions Index: test/MC/AArch64/ldr-pseudo.s =================================================================== --- test/MC/AArch64/ldr-pseudo.s +++ test/MC/AArch64/ldr-pseudo.s @@ -1,4 +1,4 @@ -//RUN: llvm-mc -triple=aarch64-linux-gnu %s | FileCheck %s +//RUN: llvm-mc -triple=aarch64-linux-gnu -print-imm-hex %s | FileCheck %s // // Check that large constants are converted to ldr from constant pool Index: test/MC/AArch64/single-slash.s =================================================================== --- test/MC/AArch64/single-slash.s +++ test/MC/AArch64/single-slash.s @@ -2,5 +2,5 @@ // Test that a single slash is not mistaken as the start of comment. 
-//CHECK: movz x0, #0x10 +//CHECK: movz x0, #16 movz x0, #(32 / 2) Index: test/MC/Disassembler/AArch64/arm64-advsimd.txt =================================================================== --- test/MC/Disassembler/AArch64/arm64-advsimd.txt +++ test/MC/Disassembler/AArch64/arm64-advsimd.txt @@ -1,4 +1,4 @@ -# RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -output-asm-variant=1 --disassemble < %s | FileCheck %s +# RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -output-asm-variant=1 --disassemble -print-imm-hex < %s | FileCheck %s 0x00 0xb8 0x20 0x0e 0x00 0xb8 0x20 0x4e @@ -1804,48 +1804,48 @@ 0x00 0x04 0x41 0x7f 0x00 0x14 0x41 0x7f -# CHECK: shl d0, d0, #1 -# CHECK: sli d0, d0, #1 -# CHECK: sqrshrn b0, h0, #7 -# CHECK: sqrshrn h0, s0, #14 -# CHECK: sqrshrn s0, d0, #29 -# CHECK: sqrshrun b0, h0, #7 -# CHECK: sqrshrun h0, s0, #14 -# CHECK: sqrshrun s0, d0, #29 -# CHECK: sqshlu b0, b0, #1 -# CHECK: sqshlu h0, h0, #2 -# CHECK: sqshlu s0, s0, #3 -# CHECK: sqshlu d0, d0, #4 -# CHECK: sqshl b0, b0, #1 -# CHECK: sqshl h0, h0, #2 -# CHECK: sqshl s0, s0, #3 -# CHECK: sqshl d0, d0, #4 -# CHECK: sqshrn b0, h0, #7 -# CHECK: sqshrn h0, s0, #14 -# CHECK: sqshrn s0, d0, #29 -# CHECK: sqshrun b0, h0, #7 -# CHECK: sqshrun h0, s0, #14 -# CHECK: sqshrun s0, d0, #29 -# CHECK: sri d0, d0, #63 -# CHECK: srshr d0, d0, #63 -# CHECK: srsra d0, d0, #63 -# CHECK: sshr d0, d0, #63 -# CHECK: ucvtf s0, s0, #31 -# CHECK: ucvtf d0, d0, #62 -# CHECK: uqrshrn b0, h0, #7 -# CHECK: uqrshrn h0, s0, #14 -# CHECK: uqrshrn s0, d0, #29 -# CHECK: uqshl b0, b0, #1 -# CHECK: uqshl h0, h0, #2 -# CHECK: uqshl s0, s0, #3 -# CHECK: uqshl d0, d0, #4 -# CHECK: uqshrn b0, h0, #7 -# CHECK: uqshrn h0, s0, #14 -# CHECK: uqshrn s0, d0, #29 -# CHECK: urshr d0, d0, #63 -# CHECK: ursra d0, d0, #63 -# CHECK: ushr d0, d0, #63 -# CHECK: usra d0, d0, #63 +# CHECK: shl d0, d0, #0x1 +# CHECK: sli d0, d0, #0x1 +# CHECK: sqrshrn b0, h0, #0x7 +# CHECK: sqrshrn h0, s0, #0xe +# CHECK: sqrshrn s0, d0, #0x1d +# CHECK: sqrshrun b0, h0, #0x7 +# CHECK: sqrshrun h0, s0, #0xe +# CHECK: sqrshrun s0, d0, #0x1d +# CHECK: sqshlu b0, b0, #0x1 +# CHECK: sqshlu h0, h0, #0x2 +# CHECK: sqshlu s0, s0, #0x3 +# CHECK: sqshlu d0, d0, #0x4 +# CHECK: sqshl b0, b0, #0x1 +# CHECK: sqshl h0, h0, #0x2 +# CHECK: sqshl s0, s0, #0x3 +# CHECK: sqshl d0, d0, #0x4 +# CHECK: sqshrn b0, h0, #0x7 +# CHECK: sqshrn h0, s0, #0xe +# CHECK: sqshrn s0, d0, #0x1d +# CHECK: sqshrun b0, h0, #0x7 +# CHECK: sqshrun h0, s0, #0xe +# CHECK: sqshrun s0, d0, #0x1d +# CHECK: sri d0, d0, #0x3f +# CHECK: srshr d0, d0, #0x3f +# CHECK: srsra d0, d0, #0x3f +# CHECK: sshr d0, d0, #0x3f +# CHECK: ucvtf s0, s0, #0x1f +# CHECK: ucvtf d0, d0, #0x3e +# CHECK: uqrshrn b0, h0, #0x7 +# CHECK: uqrshrn h0, s0, #0xe +# CHECK: uqrshrn s0, d0, #0x1d +# CHECK: uqshl b0, b0, #0x1 +# CHECK: uqshl h0, h0, #0x2 +# CHECK: uqshl s0, s0, #0x3 +# CHECK: uqshl d0, d0, #0x4 +# CHECK: uqshrn b0, h0, #0x7 +# CHECK: uqshrn h0, s0, #0xe +# CHECK: uqshrn s0, d0, #0x1d +# CHECK: urshr d0, d0, #0x3f +# CHECK: ursra d0, d0, #0x3f +# CHECK: ushr d0, d0, #0x3f +# CHECK: usra d0, d0, #0x3f #===-------------------------------------------------------------------------=== # AdvSIMD vector + shift instructions @@ -2023,177 +2023,177 @@ 0x00 0x14 0x26 0x6f 0x00 0x14 0x47 0x6f -# CHECK: fcvtzs.2s v0, v0, #31 -# CHECK: fcvtzs.4s v0, v0, #30 -# CHECK: fcvtzs.2d v0, v0, #61 -# CHECK: fcvtzu.2s v0, v0, #31 -# CHECK: fcvtzu.4s v0, v0, #30 -# CHECK: fcvtzu.2d v0, v0, #61 -# CHECK: rshrn.8b v0, v0, #7 -# CHECK: rshrn2.16b v0, v0, #6 -# CHECK: 
rshrn.4h v0, v0, #13 -# CHECK: rshrn2.8h v0, v0, #12 -# CHECK: rshrn.2s v0, v0, #27 -# CHECK: rshrn2.4s v0, v0, #26 -# CHECK: scvtf.2s v0, v0, #31 -# CHECK: scvtf.4s v0, v0, #30 -# CHECK: scvtf.2d v0, v0, #61 -# CHECK: shl.8b v0, v0, #1 -# CHECK: shl.16b v0, v0, #2 -# CHECK: shl.4h v0, v0, #3 -# CHECK: shl.8h v0, v0, #4 -# CHECK: shl.2s v0, v0, #5 -# CHECK: shl.4s v0, v0, #6 -# CHECK: shl.2d v0, v0, #7 -# CHECK: shrn.8b v0, v0, #7 -# CHECK: shrn2.16b v0, v0, #6 -# CHECK: shrn.4h v0, v0, #13 -# CHECK: shrn2.8h v0, v0, #12 -# CHECK: shrn.2s v0, v0, #27 -# CHECK: shrn2.4s v0, v0, #26 -# CHECK: sli.8b v0, v0, #1 -# CHECK: sli.16b v0, v0, #2 -# CHECK: sli.4h v0, v0, #3 -# CHECK: sli.8h v0, v0, #4 -# CHECK: sli.2s v0, v0, #5 -# CHECK: sli.4s v0, v0, #6 -# CHECK: sli.2d v0, v0, #7 -# CHECK: sqrshrn.8b v0, v0, #7 -# CHECK: sqrshrn2.16b v0, v0, #6 -# CHECK: sqrshrn.4h v0, v0, #13 -# CHECK: sqrshrn2.8h v0, v0, #12 -# CHECK: sqrshrn.2s v0, v0, #27 -# CHECK: sqrshrn2.4s v0, v0, #26 -# CHECK: sqrshrun.8b v0, v0, #7 -# CHECK: sqrshrun2.16b v0, v0, #6 -# CHECK: sqrshrun.4h v0, v0, #13 -# CHECK: sqrshrun2.8h v0, v0, #12 -# CHECK: sqrshrun.2s v0, v0, #27 -# CHECK: sqrshrun2.4s v0, v0, #26 -# CHECK: sqshlu.8b v0, v0, #1 -# CHECK: sqshlu.16b v0, v0, #2 -# CHECK: sqshlu.4h v0, v0, #3 -# CHECK: sqshlu.8h v0, v0, #4 -# CHECK: sqshlu.2s v0, v0, #5 -# CHECK: sqshlu.4s v0, v0, #6 -# CHECK: sqshlu.2d v0, v0, #7 -# CHECK: sqshl.8b v0, v0, #1 -# CHECK: sqshl.16b v0, v0, #2 -# CHECK: sqshl.4h v0, v0, #3 -# CHECK: sqshl.8h v0, v0, #4 -# CHECK: sqshl.2s v0, v0, #5 -# CHECK: sqshl.4s v0, v0, #6 -# CHECK: sqshl.2d v0, v0, #7 -# CHECK: sqshrn.8b v0, v0, #7 -# CHECK: sqshrn2.16b v0, v0, #6 -# CHECK: sqshrn.4h v0, v0, #13 -# CHECK: sqshrn2.8h v0, v0, #12 -# CHECK: sqshrn.2s v0, v0, #27 -# CHECK: sqshrn2.4s v0, v0, #26 -# CHECK: sqshrun.8b v0, v0, #7 -# CHECK: sqshrun2.16b v0, v0, #6 -# CHECK: sqshrun.4h v0, v0, #13 -# CHECK: sqshrun2.8h v0, v0, #12 -# CHECK: sqshrun.2s v0, v0, #27 -# CHECK: sqshrun2.4s v0, v0, #26 -# CHECK: sri.8b v0, v0, #7 -# CHECK: sri.16b v0, v0, #6 -# CHECK: sri.4h v0, v0, #13 -# CHECK: sri.8h v0, v0, #12 -# CHECK: sri.2s v0, v0, #27 -# CHECK: sri.4s v0, v0, #26 -# CHECK: sri.2d v0, v0, #57 -# CHECK: srshr.8b v0, v0, #7 -# CHECK: srshr.16b v0, v0, #6 -# CHECK: srshr.4h v0, v0, #13 -# CHECK: srshr.8h v0, v0, #12 -# CHECK: srshr.2s v0, v0, #27 -# CHECK: srshr.4s v0, v0, #26 -# CHECK: srshr.2d v0, v0, #57 -# CHECK: srsra.8b v0, v0, #7 -# CHECK: srsra.16b v0, v0, #6 -# CHECK: srsra.4h v0, v0, #13 -# CHECK: srsra.8h v0, v0, #12 -# CHECK: srsra.2s v0, v0, #27 -# CHECK: srsra.4s v0, v0, #26 -# CHECK: srsra.2d v0, v0, #57 -# CHECK: sshll.8h v0, v0, #1 -# CHECK: sshll2.8h v0, v0, #2 -# CHECK: sshll.4s v0, v0, #3 -# CHECK: sshll2.4s v0, v0, #4 -# CHECK: sshll.2d v0, v0, #5 -# CHECK: sshll2.2d v0, v0, #6 -# CHECK: sshr.8b v0, v0, #7 -# CHECK: sshr.16b v0, v0, #6 -# CHECK: sshr.4h v0, v0, #13 -# CHECK: sshr.8h v0, v0, #12 -# CHECK: sshr.2s v0, v0, #27 -# CHECK: sshr.4s v0, v0, #26 -# CHECK: sshr.2d v0, v0, #57 -# CHECK: sshr.8b v0, v0, #7 -# CHECK: ssra.16b v0, v0, #6 -# CHECK: ssra.4h v0, v0, #13 -# CHECK: ssra.8h v0, v0, #12 -# CHECK: ssra.2s v0, v0, #27 -# CHECK: ssra.4s v0, v0, #26 -# CHECK: ssra.2d v0, v0, #57 -# CHECK: ssra d0, d0, #64 -# CHECK: ucvtf.2s v0, v0, #31 -# CHECK: ucvtf.4s v0, v0, #30 -# CHECK: ucvtf.2d v0, v0, #61 -# CHECK: uqrshrn.8b v0, v0, #7 -# CHECK: uqrshrn2.16b v0, v0, #6 -# CHECK: uqrshrn.4h v0, v0, #13 -# CHECK: uqrshrn2.8h v0, v0, #12 -# CHECK: uqrshrn.2s v0, v0, #27 -# CHECK: 
uqrshrn2.4s v0, v0, #26 -# CHECK: uqshl.8b v0, v0, #1 -# CHECK: uqshl.16b v0, v0, #2 -# CHECK: uqshl.4h v0, v0, #3 -# CHECK: uqshl.8h v0, v0, #4 -# CHECK: uqshl.2s v0, v0, #5 -# CHECK: uqshl.4s v0, v0, #6 -# CHECK: uqshl.2d v0, v0, #7 -# CHECK: uqshrn.8b v0, v0, #7 -# CHECK: uqshrn2.16b v0, v0, #6 -# CHECK: uqshrn.4h v0, v0, #13 -# CHECK: uqshrn2.8h v0, v0, #12 -# CHECK: uqshrn.2s v0, v0, #27 -# CHECK: uqshrn2.4s v0, v0, #26 -# CHECK: urshr.8b v0, v0, #7 -# CHECK: urshr.16b v0, v0, #6 -# CHECK: urshr.4h v0, v0, #13 -# CHECK: urshr.8h v0, v0, #12 -# CHECK: urshr.2s v0, v0, #27 -# CHECK: urshr.4s v0, v0, #26 -# CHECK: urshr.2d v0, v0, #57 -# CHECK: ursra.8b v0, v0, #7 -# CHECK: ursra.16b v0, v0, #6 -# CHECK: ursra.4h v0, v0, #13 -# CHECK: ursra.8h v0, v0, #12 -# CHECK: ursra.2s v0, v0, #27 -# CHECK: ursra.4s v0, v0, #26 -# CHECK: ursra.2d v0, v0, #57 -# CHECK: ushll.8h v0, v0, #1 -# CHECK: ushll2.8h v0, v0, #2 -# CHECK: ushll.4s v0, v0, #3 -# CHECK: ushll2.4s v0, v0, #4 -# CHECK: ushll.2d v0, v0, #5 -# CHECK: ushll2.2d v0, v0, #6 -# CHECK: ushr.8b v0, v0, #7 -# CHECK: ushr.16b v0, v0, #6 -# CHECK: ushr.4h v0, v0, #13 -# CHECK: ushr.8h v0, v0, #12 -# CHECK: ushr.2s v0, v0, #27 -# CHECK: ushr.4s v0, v0, #26 -# CHECK: ushr.2d v0, v0, #57 -# CHECK: usra.8b v0, v0, #7 -# CHECK: usra.16b v0, v0, #6 -# CHECK: usra.4h v0, v0, #13 -# CHECK: usra.8h v0, v0, #12 -# CHECK: usra.2s v0, v0, #27 -# CHECK: usra.4s v0, v0, #26 -# CHECK: usra.2d v0, v0, #57 +# CHECK: fcvtzs.2s v0, v0, #0x1f +# CHECK: fcvtzs.4s v0, v0, #0x1e +# CHECK: fcvtzs.2d v0, v0, #0x3d +# CHECK: fcvtzu.2s v0, v0, #0x1f +# CHECK: fcvtzu.4s v0, v0, #0x1e +# CHECK: fcvtzu.2d v0, v0, #0x3d +# CHECK: rshrn.8b v0, v0, #0x7 +# CHECK: rshrn2.16b v0, v0, #0x6 +# CHECK: rshrn.4h v0, v0, #0xd +# CHECK: rshrn2.8h v0, v0, #0xc +# CHECK: rshrn.2s v0, v0, #0x1b +# CHECK: rshrn2.4s v0, v0, #0x1a +# CHECK: scvtf.2s v0, v0, #0x1f +# CHECK: scvtf.4s v0, v0, #0x1e +# CHECK: scvtf.2d v0, v0, #0x3d +# CHECK: shl.8b v0, v0, #0x1 +# CHECK: shl.16b v0, v0, #0x2 +# CHECK: shl.4h v0, v0, #0x3 +# CHECK: shl.8h v0, v0, #0x4 +# CHECK: shl.2s v0, v0, #0x5 +# CHECK: shl.4s v0, v0, #0x6 +# CHECK: shl.2d v0, v0, #0x7 +# CHECK: shrn.8b v0, v0, #0x7 +# CHECK: shrn2.16b v0, v0, #0x6 +# CHECK: shrn.4h v0, v0, #0xd +# CHECK: shrn2.8h v0, v0, #0xc +# CHECK: shrn.2s v0, v0, #0x1b +# CHECK: shrn2.4s v0, v0, #0x1a +# CHECK: sli.8b v0, v0, #0x1 +# CHECK: sli.16b v0, v0, #0x2 +# CHECK: sli.4h v0, v0, #0x3 +# CHECK: sli.8h v0, v0, #0x4 +# CHECK: sli.2s v0, v0, #0x5 +# CHECK: sli.4s v0, v0, #0x6 +# CHECK: sli.2d v0, v0, #0x7 +# CHECK: sqrshrn.8b v0, v0, #0x7 +# CHECK: sqrshrn2.16b v0, v0, #0x6 +# CHECK: sqrshrn.4h v0, v0, #0xd +# CHECK: sqrshrn2.8h v0, v0, #0xc +# CHECK: sqrshrn.2s v0, v0, #0x1b +# CHECK: sqrshrn2.4s v0, v0, #0x1a +# CHECK: sqrshrun.8b v0, v0, #0x7 +# CHECK: sqrshrun2.16b v0, v0, #0x6 +# CHECK: sqrshrun.4h v0, v0, #0xd +# CHECK: sqrshrun2.8h v0, v0, #0xc +# CHECK: sqrshrun.2s v0, v0, #0x1b +# CHECK: sqrshrun2.4s v0, v0, #0x1a +# CHECK: sqshlu.8b v0, v0, #0x1 +# CHECK: sqshlu.16b v0, v0, #0x2 +# CHECK: sqshlu.4h v0, v0, #0x3 +# CHECK: sqshlu.8h v0, v0, #0x4 +# CHECK: sqshlu.2s v0, v0, #0x5 +# CHECK: sqshlu.4s v0, v0, #0x6 +# CHECK: sqshlu.2d v0, v0, #0x7 +# CHECK: sqshl.8b v0, v0, #0x1 +# CHECK: sqshl.16b v0, v0, #0x2 +# CHECK: sqshl.4h v0, v0, #0x3 +# CHECK: sqshl.8h v0, v0, #0x4 +# CHECK: sqshl.2s v0, v0, #0x5 +# CHECK: sqshl.4s v0, v0, #0x6 +# CHECK: sqshl.2d v0, v0, #0x7 +# CHECK: sqshrn.8b v0, v0, #0x7 +# CHECK: sqshrn2.16b v0, v0, #0x6 +# CHECK: sqshrn.4h v0, 
v0, #0xd +# CHECK: sqshrn2.8h v0, v0, #0xc +# CHECK: sqshrn.2s v0, v0, #0x1b +# CHECK: sqshrn2.4s v0, v0, #0x1a +# CHECK: sqshrun.8b v0, v0, #0x7 +# CHECK: sqshrun2.16b v0, v0, #0x6 +# CHECK: sqshrun.4h v0, v0, #0xd +# CHECK: sqshrun2.8h v0, v0, #0xc +# CHECK: sqshrun.2s v0, v0, #0x1b +# CHECK: sqshrun2.4s v0, v0, #0x1a +# CHECK: sri.8b v0, v0, #0x7 +# CHECK: sri.16b v0, v0, #0x6 +# CHECK: sri.4h v0, v0, #0xd +# CHECK: sri.8h v0, v0, #0xc +# CHECK: sri.2s v0, v0, #0x1b +# CHECK: sri.4s v0, v0, #0x1a +# CHECK: sri.2d v0, v0, #0x39 +# CHECK: srshr.8b v0, v0, #0x7 +# CHECK: srshr.16b v0, v0, #0x6 +# CHECK: srshr.4h v0, v0, #0xd +# CHECK: srshr.8h v0, v0, #0xc +# CHECK: srshr.2s v0, v0, #0x1b +# CHECK: srshr.4s v0, v0, #0x1a +# CHECK: srshr.2d v0, v0, #0x39 +# CHECK: srsra.8b v0, v0, #0x7 +# CHECK: srsra.16b v0, v0, #0x6 +# CHECK: srsra.4h v0, v0, #0xd +# CHECK: srsra.8h v0, v0, #0xc +# CHECK: srsra.2s v0, v0, #0x1b +# CHECK: srsra.4s v0, v0, #0x1a +# CHECK: srsra.2d v0, v0, #0x39 +# CHECK: sshll.8h v0, v0, #0x1 +# CHECK: sshll2.8h v0, v0, #0x2 +# CHECK: sshll.4s v0, v0, #0x3 +# CHECK: sshll2.4s v0, v0, #0x4 +# CHECK: sshll.2d v0, v0, #0x5 +# CHECK: sshll2.2d v0, v0, #0x6 +# CHECK: sshr.8b v0, v0, #0x7 +# CHECK: sshr.16b v0, v0, #0x6 +# CHECK: sshr.4h v0, v0, #0xd +# CHECK: sshr.8h v0, v0, #0xc +# CHECK: sshr.2s v0, v0, #0x1b +# CHECK: sshr.4s v0, v0, #0x1a +# CHECK: sshr.2d v0, v0, #0x39 +# CHECK: sshr.8b v0, v0, #0x7 +# CHECK: ssra.16b v0, v0, #0x6 +# CHECK: ssra.4h v0, v0, #0xd +# CHECK: ssra.8h v0, v0, #0xc +# CHECK: ssra.2s v0, v0, #0x1b +# CHECK: ssra.4s v0, v0, #0x1a +# CHECK: ssra.2d v0, v0, #0x39 +# CHECK: ssra d0, d0, #0x40 +# CHECK: ucvtf.2s v0, v0, #0x1f +# CHECK: ucvtf.4s v0, v0, #0x1e +# CHECK: ucvtf.2d v0, v0, #0x3d +# CHECK: uqrshrn.8b v0, v0, #0x7 +# CHECK: uqrshrn2.16b v0, v0, #0x6 +# CHECK: uqrshrn.4h v0, v0, #0xd +# CHECK: uqrshrn2.8h v0, v0, #0xc +# CHECK: uqrshrn.2s v0, v0, #0x1b +# CHECK: uqrshrn2.4s v0, v0, #0x1a +# CHECK: uqshl.8b v0, v0, #0x1 +# CHECK: uqshl.16b v0, v0, #0x2 +# CHECK: uqshl.4h v0, v0, #0x3 +# CHECK: uqshl.8h v0, v0, #0x4 +# CHECK: uqshl.2s v0, v0, #0x5 +# CHECK: uqshl.4s v0, v0, #0x6 +# CHECK: uqshl.2d v0, v0, #0x7 +# CHECK: uqshrn.8b v0, v0, #0x7 +# CHECK: uqshrn2.16b v0, v0, #0x6 +# CHECK: uqshrn.4h v0, v0, #0xd +# CHECK: uqshrn2.8h v0, v0, #0xc +# CHECK: uqshrn.2s v0, v0, #0x1b +# CHECK: uqshrn2.4s v0, v0, #0x1a +# CHECK: urshr.8b v0, v0, #0x7 +# CHECK: urshr.16b v0, v0, #0x6 +# CHECK: urshr.4h v0, v0, #0xd +# CHECK: urshr.8h v0, v0, #0xc +# CHECK: urshr.2s v0, v0, #0x1b +# CHECK: urshr.4s v0, v0, #0x1a +# CHECK: urshr.2d v0, v0, #0x39 +# CHECK: ursra.8b v0, v0, #0x7 +# CHECK: ursra.16b v0, v0, #0x6 +# CHECK: ursra.4h v0, v0, #0xd +# CHECK: ursra.8h v0, v0, #0xc +# CHECK: ursra.2s v0, v0, #0x1b +# CHECK: ursra.4s v0, v0, #0x1a +# CHECK: ursra.2d v0, v0, #0x39 +# CHECK: ushll.8h v0, v0, #0x1 +# CHECK: ushll2.8h v0, v0, #0x2 +# CHECK: ushll.4s v0, v0, #0x3 +# CHECK: ushll2.4s v0, v0, #0x4 +# CHECK: ushll.2d v0, v0, #0x5 +# CHECK: ushll2.2d v0, v0, #0x6 +# CHECK: ushr.8b v0, v0, #0x7 +# CHECK: ushr.16b v0, v0, #0x6 +# CHECK: ushr.4h v0, v0, #0xd +# CHECK: ushr.8h v0, v0, #0xc +# CHECK: ushr.2s v0, v0, #0x1b +# CHECK: ushr.4s v0, v0, #0x1a +# CHECK: ushr.2d v0, v0, #0x39 +# CHECK: usra.8b v0, v0, #0x7 +# CHECK: usra.16b v0, v0, #0x6 +# CHECK: usra.4h v0, v0, #0xd +# CHECK: usra.8h v0, v0, #0xc +# CHECK: usra.2s v0, v0, #0x1b +# CHECK: usra.4s v0, v0, #0x1a +# CHECK: usra.2d v0, v0, #0x39 0x00 0xe0 0x20 0x0e Index: 
test/MC/Disassembler/AArch64/arm64-arithmetic.txt =================================================================== --- test/MC/Disassembler/AArch64/arm64-arithmetic.txt +++ test/MC/Disassembler/AArch64/arm64-arithmetic.txt @@ -452,30 +452,30 @@ 0x20 0x00 0xa0 0x52 0x20 0x00 0xa0 0xd2 -# CHECK: movz w0, #0x1 -# CHECK: movz x0, #0x1 -# CHECK: movz w0, #0x1, lsl #16 -# CHECK: movz x0, #0x1, lsl #16 +# CHECK: movz w0, #1 +# CHECK: movz x0, #1 +# CHECK: movz w0, #1, lsl #16 +# CHECK: movz x0, #1, lsl #16 0x40 0x00 0x80 0x12 0x40 0x00 0x80 0x92 0x40 0x00 0xa0 0x12 0x40 0x00 0xa0 0x92 -# CHECK: movn w0, #0x2 -# CHECK: movn x0, #0x2 -# CHECK: movn w0, #0x2, lsl #16 -# CHECK: movn x0, #0x2, lsl #16 +# CHECK: movn w0, #2 +# CHECK: movn x0, #2 +# CHECK: movn w0, #2, lsl #16 +# CHECK: movn x0, #2, lsl #16 0x20 0x00 0x80 0x72 0x20 0x00 0x80 0xf2 0x20 0x00 0xa0 0x72 0x20 0x00 0xa0 0xf2 -# CHECK: movk w0, #0x1 -# CHECK: movk x0, #0x1 -# CHECK: movk w0, #0x1, lsl #16 -# CHECK: movk x0, #0x1, lsl #16 +# CHECK: movk w0, #1 +# CHECK: movk x0, #1 +# CHECK: movk w0, #1, lsl #16 +# CHECK: movk x0, #1, lsl #16 #==---------------------------------------------------------------------------== # Conditionally set flags instructions Index: test/MC/Disassembler/AArch64/armv8.2a-statistical-profiling.txt =================================================================== --- test/MC/Disassembler/AArch64/armv8.2a-statistical-profiling.txt +++ test/MC/Disassembler/AArch64/armv8.2a-statistical-profiling.txt @@ -3,7 +3,7 @@ [0x3f,0x22,0x03,0xd5] # CHECK: psb csync -# NO_SPE: hint #0x11 +# NO_SPE: hint #17 [0x00,0x9a,0x18,0xd5] [0x20,0x9a,0x18,0xd5] Index: test/MC/Disassembler/AArch64/neon-instructions.txt =================================================================== --- test/MC/Disassembler/AArch64/neon-instructions.txt +++ test/MC/Disassembler/AArch64/neon-instructions.txt @@ -87,10 +87,10 @@ # Vector Bitwise Bit Clear (AND NOT) - immediate # Vector Bitwise OR - immedidate #------------------------------------------------------------------------------ -# CHECK: movi v31.4s, #0xff, lsl #24 +# CHECK: movi v31.4s, #255, lsl #24 # CHECK: mvni v0.2s, #{{0x0|0}} -# CHECK: bic v15.4h, #0xf, lsl #8 -# CHECK: orr v16.8h, #0x1f +# CHECK: bic v15.4h, #15, lsl #8 +# CHECK: orr v16.8h, #31 0xff 0x67 0x07 0x4f 0x00 0x04 0x00 0x2f 0xef 0xb5 0x00 0x2f @@ -100,8 +100,8 @@ # Vector Move Immediate Masked # Vector Move Inverted Immediate Masked #------------------------------------------------------------------------------ -# CHECK: movi v8.2s, #0x8, msl #8 -# CHECK: mvni v16.4s, #0x10, msl #16 +# CHECK: movi v8.2s, #8, msl #8 +# CHECK: mvni v16.4s, #16, msl #16 0x08 0xc5 0x00 0x0f 0x10 0xd6 0x00 0x6f @@ -110,8 +110,8 @@ # Vector Move Immediate - bytemask, per doubleword # Vector Move Immediate - bytemask, one doubleword #------------------------------------------------------------------------------ -# CHECK: movi v16.8b, #0xff -# CHECK: movi v31.16b, #0x1f +# CHECK: movi v16.8b, #255 +# CHECK: movi v31.16b, #31 # CHECK: movi d15, #0xff00ff00ff00ff # CHECK: movi v31.2d, #0xff0000ff0000ffff 0xf0 0xe7 0x07 0x0f Index: test/tools/llvm-objdump/AArch64/macho-symbolized-disassembly.test =================================================================== --- test/tools/llvm-objdump/AArch64/macho-symbolized-disassembly.test +++ test/tools/llvm-objdump/AArch64/macho-symbolized-disassembly.test @@ -7,7 +7,7 @@ OBJ: 0000000000000020 add x0, x0, L_.str@PAGEOFF OBJ: 0000000000000024 bl _printf -EXE: 0000000100007f58 add x0, x0, #4008 ; literal 
pool for: "Hello world +EXE: 0000000100007f58 add x0, x0, #0xfa8 ; literal pool for: "Hello world " EXE: 0000000100007f5c bl 0x100007f78 ; symbol stub for: _printf @@ -15,9 +15,9 @@ ObjC-OBJ: 0000000000000010 add x8, x8, L_OBJC_SELECTOR_REFERENCES_3@PAGEOFF ObjC-OBJ:0000000000000044 bl _objc_msgSend -ObjC-EXE: 0000000100007ed0 add x8, x8, #80 ; Objc selector ref: date -ObjC-EXE: 0000000100007ed8 add x9, x9, #96 ; Objc class ref: _OBJC_CLASS_$_NSDate +ObjC-EXE: 0000000100007ed0 add x8, x8, #0x50 ; Objc selector ref: date +ObjC-EXE: 0000000100007ed8 add x9, x9, #0x60 ; Objc class ref: _OBJC_CLASS_$_NSDate ObjC-EXE: 0000000100007f04 bl 0x100007f50 ; Objc message: +[NSObject new] ObjC-EXE: 0000000100007f1c bl 0x100007f50 ; Objc message: -[x0 new] -ObjC-EXE: 0000000100007f2c add x0, x0, #32 ; Objc cfstring ref: @"The current date and time is: %@" +ObjC-EXE: 0000000100007f2c add x0, x0, #0x20 ; Objc cfstring ref: @"The current date and time is: %@" ObjC-EXE: 0000000100007f30 bl 0x100007f44 ; symbol stub for: _NSLog