Index: llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
===================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -168,10 +168,7 @@
   switch (MO.getType()) {
   case MachineOperand::MO_Register: {
     // The MI is INLINEASM ONLY and UseVSXReg is always false.
-    unsigned Reg =
-        PPCInstrInfo::getRegNumForOperand(MI->getDesc(), MO.getReg(), OpNo);
-
-    const char *RegName = PPCInstPrinter::getRegisterName(Reg);
+    const char *RegName = PPCInstPrinter::getRegisterName(MO.getReg());
 
     // Linux assembler (Others?) does not take register mnemonics.
     // FIXME - What about special registers used in mfspr/mtspr?
Index: llvm/trunk/lib/Target/PowerPC/PPCInstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrFormats.td
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrFormats.td
@@ -37,14 +37,6 @@
   let TSFlags{2} = PPC970_Cracked;
   let TSFlags{5-3} = PPC970_Unit;
 
-  /// Indicate that the VSX instruction is to use VSX numbering/encoding.
-  /// Since ISA 3.0, there are scalar instructions that use the upper
-  /// half of the VSX register set only. Rather than adding further complexity
-  /// to the register class set, the VSX registers just include the Altivec
-  /// registers and this flag decides the numbering to be used for them.
-  bits<1> UseVSXReg = 0;
-  let TSFlags{6} = UseVSXReg;
-
   // Indicate that this instruction is of type X-Form Load or Store
   bits<1> XFormMemOp = 0;
   let TSFlags{7} = XFormMemOp;
@@ -73,7 +65,6 @@
 class PPC970_Unit_VPERM { bits<3> PPC970_Unit = 6; }
 class PPC970_Unit_BRU { bits<3> PPC970_Unit = 7; }
 
-class UseVSXReg { bits<1> UseVSXReg = 1; }
 class XFormMemOp { bits<1> XFormMemOp = 1; }
 
 // Two joined instructions; used to emit two adjacent instructions as one.
Index: llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h
===================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h
@@ -65,9 +65,6 @@
   /// Shift count to bypass PPC970 flags
   NewDef_Shift = 6,
 
-  /// The VSX instruction that uses VSX register (vs0-vs63), instead of VMX
-  /// register (v0-v31).
-  UseVSXReg = 0x1 << NewDef_Shift,
   /// This instruction is an X-Form memory operation.
   XFormMemOp = 0x1 << (NewDef_Shift+1)
 };
@@ -440,11 +437,24 @@
   /// operands).
   static unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg,
                                       unsigned OpNo) {
-    if (Desc.TSFlags & PPCII::UseVSXReg) {
-      if (isVRRegister(Reg))
-        Reg = PPC::VSX32 + (Reg - PPC::V0);
-      else if (isVFRegister(Reg))
-        Reg = PPC::VSX32 + (Reg - PPC::VF0);
+    int16_t regClass = Desc.OpInfo[OpNo].RegClass;
+    switch (regClass) {
+      // We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31,
+      // VSX32-VSX63 during encoding/disassembling
+      case PPC::VSSRCRegClassID:
+      case PPC::VSFRCRegClassID:
+        if (isVFRegister(Reg))
+          return PPC::VSX32 + (Reg - PPC::VF0);
+        break;
+      // We store VSL0-VSL31, V0-V31 in MCOperand and it should be VSL0-VSL31,
+      // VSX32-VSX63 during encoding/disassembling
+      case PPC::VSRCRegClassID:
+        if (isVRRegister(Reg))
+          return PPC::VSX32 + (Reg - PPC::V0);
+        break;
+      // Other RegClass doesn't need mapping
+      default:
+        break;
     }
     return Reg;
   }
Index: llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
===================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
@@ -123,7 +123,6 @@
 let Predicates = [HasVSX] in {
 let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
 
-let UseVSXReg = 1 in {
 let hasSideEffects = 0 in { // VSX instructions don't have side effects.
 let Uses = [RM] in {
 
@@ -894,11 +893,10 @@
                        (PPCxxsplt v4i32:$XB, imm32SExt16:$UIM))]>;
   let isCodeGenOnly = 1 in
   def XXSPLTWs : XX2Form_2<60, 164,
-                       (outs vsrc:$XT), (ins vfrc:$XB, u2imm:$UIM),
+                       (outs vsrc:$XT), (ins vsfrc:$XB, u2imm:$UIM),
                        "xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;
 } // hasSideEffects
-} // UseVSXReg = 1
 
 // SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
 // instruction selection into a branch sequence.
@@ -1247,7 +1245,7 @@
 def NoP9Vector : Predicate<"!PPCSubTarget->hasP9Vector()">;
 let Predicates = [HasP8Vector] in {
 let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
-  let isCommutable = 1, UseVSXReg = 1 in {
+  let isCommutable = 1 in {
   def XXLEQV : XX3Form<60, 186,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xxleqv $XT, $XA, $XB", IIC_VecGeneral,
@@ -1257,12 +1255,11 @@
                        "xxlnand $XT, $XA, $XB", IIC_VecGeneral,
                        [(set v4i32:$XT, (vnot_ppc (and v4i32:$XA,
                                                   v4i32:$XB)))]>;
-  } // isCommutable, UseVSXReg
+  } // isCommutable
 
   def : Pat<(int_ppc_vsx_xxleqv v4i32:$A, v4i32:$B),
             (XXLEQV $A, $B)>;
 
-  let UseVSXReg = 1 in {
   def XXLORC : XX3Form<60, 170,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xxlorc $XT, $XA, $XB", IIC_VecGeneral,
@@ -1311,7 +1308,6 @@
                       "#STIWX",
                       [(PPCstfiwx f64:$XT, xoaddr:$dst)]>;
   } // mayStore
-  } // UseVSXReg = 1
 
   def : Pat<(f64 (extloadf32 xoaddr:$src)),
             (COPY_TO_REGCLASS (XFLOADf32 xoaddr:$src), VSFRC)>;
@@ -1341,7 +1337,6 @@
   def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETNE)),
             (SELECT_VSSRC (CRXOR $lhs, $rhs), $tval, $fval)>;
 
-  let UseVSXReg = 1 in {
   // VSX Elementary Scalar FP arithmetic (SP)
   let isCommutable = 1 in {
   def XSADDSP : XX3Form<60, 0,
@@ -1469,7 +1464,6 @@
                           "xscvdpspn $XT, $XB", IIC_VecFP, []>;
   def XSCVSPDPN : XX2Form<60, 331, (outs vssrc:$XT), (ins vsrc:$XB),
                           "xscvspdpn $XT, $XB", IIC_VecFP, []>;
-  } // UseVSXReg = 1
 
 let Predicates = [IsLittleEndian] in {
 def : Pat, Requires<[In64BitMode]>;
 let isCodeGenOnly = 1 in
-  def MFVRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$rA), (ins vrrc:$XT),
+  def MFVRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$rA), (ins vsrc:$XT),
                              "mfvsrd $rA, $XT", IIC_VecGeneral,
                              []>,
       Requires<[In64BitMode]>;
@@ -1556,7 +1550,7 @@
       []>, Requires<[In64BitMode]>;
 } // IsISA3_0, HasDirectMove
-} // UseVSXReg = 1
+} // AddedComplexity = 400
 
 // We want to parse this from asm, but we don't want to emit this as it would
 // be emitted with a VSX reg. So leave Emit = 0 here.
@@ -2414,7 +2408,6 @@
                     list pattern>
     : X_VT5_XO5_VB5_VSFR, isDOT;
 
-  let UseVSXReg = 1 in {
   // [PO T XO B XO BX /]
   class XX2_RT5_XO5_XB6 opcode, bits<5> xo2, bits<9> xo, string opc,
                         list pattern>
@@ -2433,7 +2426,6 @@
                         InstrItinClass itin, list pattern>
     : XX3Form;
-  } // UseVSXReg = 1
 
   // [PO VRT VRA VRB XO /]
   class X_VT5_VA5_VB5 opcode, bits<10> xo, string opc,
@@ -2571,8 +2563,7 @@
   // DP/QP Compare Exponents
   def XSCMPEXPDP : XX3Form_1<60, 59,
                              (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
-                             "xscmpexpdp $crD, $XA, $XB", IIC_FPCompare, []>,
-                             UseVSXReg;
+                             "xscmpexpdp $crD, $XA, $XB", IIC_FPCompare, []>;
   def XSCMPEXPQP : X_BF3_VA5_VB5<63, 164, "xscmpexpqp", []>;
 
   // DP Compare ==, >=, >, !=
@@ -2630,7 +2621,6 @@
   def : Pat<(f128 (uint_to_fp (i32 (load xoaddr:$src)))),
             (f128 (XSCVUDQP (LIWZX xoaddr:$src)))>;
 
-  let UseVSXReg = 1 in {
   //===--------------------------------------------------------------------===//
   // Round to Floating-Point Integer Instructions
@@ -2647,8 +2637,6 @@
                                     [(set v4f32:$XT,
                                           (int_ppc_vsx_xvcvsphp v4f32:$XB))]>;
-  } // UseVSXReg = 1
-
   // Pattern for matching Vector HP -> Vector SP intrinsic. Defined as a
   // separate pattern so that it can convert the input register class from
   // VRRC(v8i16) to VSRC.
@@ -2690,7 +2678,7 @@
   // Insert Exponent DP/QP
   // XT NOTE: XT.dword[1] = 0xUUUU_UUUU_UUUU_UUUU
   def XSIEXPDP : XX1Form <60, 918, (outs vsrc:$XT), (ins g8rc:$rA, g8rc:$rB),
-                          "xsiexpdp $XT, $rA, $rB", IIC_VecFP, []>, UseVSXReg;
+                          "xsiexpdp $XT, $rA, $rB", IIC_VecFP, []>;
   // vB NOTE: only vB.dword[0] is used, that's why we don't use
   // X_VT5_VA5_VB5 form
   def XSIEXPQP : XForm_18<63, 868, (outs vrrc:$vT), (ins vrrc:$vA, vsfrc:$vB),
@@ -2711,7 +2699,6 @@
                                               (v2i64 (XSXEXPQP $vA)), sub_64)))>;
 
   // Vector Insert Word
-  let UseVSXReg = 1 in {
   // XB NOTE: Only XB.dword[1] is used, but we use vsrc on XB.
   def XXINSERTW : XX2_RD6_UIM5_RS6<60, 181, (outs vsrc:$XT),
@@ -2725,7 +2712,6 @@
   def XXEXTRACTUW : XX2_RD6_UIM5_RS6<60, 165,
                                      (outs vsfrc:$XT), (ins vsrc:$XB, u4imm:$UIMM),
                                      "xxextractuw $XT, $XB, $UIMM", IIC_VecFP, []>;
-  } // UseVSXReg = 1
 
   // Vector Insert Exponent DP/SP
   def XVIEXPDP : XX3_XT5_XA5_XB5<60, 248, "xviexpdp", vsrc, vsrc, vsrc,
@@ -2758,20 +2744,17 @@
   //===--------------------------------------------------------------------===//
   // Test Data Class SP/DP/QP
 
-  let UseVSXReg = 1 in {
   def XSTSTDCSP : XX2_BF3_DCMX7_RS6<60, 298,
                                     (outs crrc:$BF), (ins u7imm:$DCMX, vsfrc:$XB),
                                     "xststdcsp $BF, $XB, $DCMX", IIC_VecFP, []>;
   def XSTSTDCDP : XX2_BF3_DCMX7_RS6<60, 362,
                                     (outs crrc:$BF), (ins u7imm:$DCMX, vsfrc:$XB),
                                     "xststdcdp $BF, $XB, $DCMX", IIC_VecFP, []>;
-  } // UseVSXReg = 1
   def XSTSTDCQP : X_BF3_DCMX7_RS5 <63, 708,
                                    (outs crrc:$BF), (ins u7imm:$DCMX, vrrc:$vB),
                                    "xststdcqp $BF, $vB, $DCMX", IIC_VecFP, []>;
 
   // Vector Test Data Class SP/DP
-  let UseVSXReg = 1 in {
   def XVTSTDCSP : XX2_RD6_DCMX7_RS6<60, 13, 5,
                                     (outs vsrc:$XT), (ins u7imm:$DCMX, vsrc:$XB),
                                     "xvtstdcsp $XT, $XB, $DCMX", IIC_VecFP,
@@ -2782,7 +2765,6 @@
                                     "xvtstdcdp $XT, $XB, $DCMX", IIC_VecFP,
                                     [(set v2i64: $XT,
                                           (int_ppc_vsx_xvtstdcdp v2f64:$XB, imm:$DCMX))]>;
-  } // UseVSXReg = 1
 
   //===--------------------------------------------------------------------===//
@@ -2823,7 +2805,7 @@
   // Vector Splat Immediate Byte
   def XXSPLTIB : X_RD6_IMM8<60, 360, (outs vsrc:$XT), (ins u8imm:$IMM8),
-                            "xxspltib $XT, $IMM8", IIC_VecPerm, []>, UseVSXReg;
+                            "xxspltib $XT, $IMM8", IIC_VecPerm, []>;
 
   //===--------------------------------------------------------------------===//
   // Vector/Scalar Load/Store Instructions
@@ -2833,7 +2815,7 @@
   let mayLoad = 1, mayStore = 0 in {
   // Load Vector
   def LXV : DQ_RD6_RS5_DQ12<61, 1, (outs vsrc:$XT), (ins memrix16:$src),
-                            "lxv $XT, $src", IIC_LdStLFD, []>, UseVSXReg;
+                            "lxv $XT, $src", IIC_LdStLFD, []>;
   // Load DWord
   def LXSD : DSForm_1<57, 2, (outs vfrc:$vD), (ins memrix:$src),
                       "lxsd $vD, $src", IIC_LdStLFD, []>;
@@ -2846,7 +2828,7 @@
   class X_XT6_RA5_RB5 opcode, bits<10> xo, string opc,
                       RegisterOperand vtype, list pattern>
     : XX1Form_memOp, UseVSXReg;
+                     !strconcat(opc, " $XT, $src"), IIC_LdStLFD, pattern>;
 
   // Load as Integer Byte/Halfword & Zero Indexed
   def LXSIBZX : X_XT6_RA5_RB5<31, 781, "lxsibzx", vsfrc,
@@ -2864,12 +2846,10 @@
   // Load Vector (Left-justified) with Length
   def LXVL : XX1Form_memOp<31, 269, (outs vsrc:$XT), (ins memr:$src, g8rc:$rB),
                            "lxvl $XT, $src, $rB", IIC_LdStLoad,
-                           [(set v4i32:$XT, (int_ppc_vsx_lxvl addr:$src, i64:$rB))]>,
-                           UseVSXReg;
+                           [(set v4i32:$XT, (int_ppc_vsx_lxvl addr:$src, i64:$rB))]>;
   def LXVLL : XX1Form_memOp<31,301, (outs vsrc:$XT), (ins memr:$src, g8rc:$rB),
                            "lxvll $XT, $src, $rB", IIC_LdStLoad,
-                           [(set v4i32:$XT, (int_ppc_vsx_lxvll addr:$src, i64:$rB))]>,
-                           UseVSXReg;
+                           [(set v4i32:$XT, (int_ppc_vsx_lxvll addr:$src, i64:$rB))]>;
 
   // Load Vector Word & Splat Indexed
   def LXVWSX : X_XT6_RA5_RB5<31, 364, "lxvwsx" , vsrc, []>;
@@ -2880,7 +2860,7 @@
   let mayStore = 1, mayLoad = 0 in {
   // Store Vector
   def STXV : DQ_RD6_RS5_DQ12<61, 5, (outs), (ins vsrc:$XT, memrix16:$dst),
-                             "stxv $XT, $dst", IIC_LdStSTFD, []>, UseVSXReg;
+                             "stxv $XT, $dst", IIC_LdStSTFD, []>;
   // Store DWord
   def STXSD : DSForm_1<61, 2, (outs), (ins vfrc:$vS, memrix:$dst),
                        "stxsd $vS, $dst", IIC_LdStSTFD, []>;
@@ -2892,7 +2872,7 @@
   class X_XS6_RA5_RB5 opcode, bits<10> xo, string opc,
                       RegisterOperand vtype, list pattern>
     : XX1Form_memOp, UseVSXReg;
+                     !strconcat(opc, " $XT, $dst"), IIC_LdStSTFD, pattern>;
 
   // Store as Integer Byte/Halfword Indexed
   def STXSIBX : X_XS6_RA5_RB5<31, 909, "stxsibx" , vsfrc,
@@ -2900,8 +2880,8 @@
   def STXSIHX : X_XS6_RA5_RB5<31, 941, "stxsihx" , vsfrc,
                               [(PPCstxsix f64:$XT, xoaddr:$dst, 2)]>;
   let isCodeGenOnly = 1 in {
-    def STXSIBXv : X_XS6_RA5_RB5<31, 909, "stxsibx" , vrrc, []>;
-    def STXSIHXv : X_XS6_RA5_RB5<31, 941, "stxsihx" , vrrc, []>;
+    def STXSIBXv : X_XS6_RA5_RB5<31, 909, "stxsibx" , vsrc, []>;
+    def STXSIHXv : X_XS6_RA5_RB5<31, 941, "stxsihx" , vsrc, []>;
   }
 
   // Store Vector Halfword*8/Byte*16 Indexed
@@ -2917,14 +2897,12 @@
                             (ins vsrc:$XT, memr:$dst, g8rc:$rB),
                             "stxvl $XT, $dst, $rB", IIC_LdStLoad,
                             [(int_ppc_vsx_stxvl v4i32:$XT, addr:$dst,
-                              i64:$rB)]>,
-                              UseVSXReg;
+                              i64:$rB)]>;
   def STXVLL : XX1Form_memOp<31, 429, (outs),
                             (ins vsrc:$XT, memr:$dst, g8rc:$rB),
                             "stxvll $XT, $dst, $rB", IIC_LdStLoad,
                             [(int_ppc_vsx_stxvll v4i32:$XT, addr:$dst,
-                              i64:$rB)]>,
-                              UseVSXReg;
+                              i64:$rB)]>;
   } // mayStore
 
 let Predicates = [IsLittleEndian] in {
@@ -3158,109 +3136,109 @@
 let Predicates = [IsBigEndian, HasP9Vector] in {
 // Scalar stores of i8
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 0)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 9)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 9)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 1)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 10)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 2)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 11)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 11)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 3)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 12)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 4)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 13)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 13)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 5)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 14)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 6)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 15)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 15)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 7)), xoaddr:$dst),
-          (STXSIBXv $S, xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 8)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 1)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 1)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 9)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 2)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 10)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 3)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 3)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 11)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 4)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 12)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 5)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 5)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 13)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 6)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 14)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 7)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 7)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 15)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 8)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
 
 // Scalar stores of i16
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 0)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 10)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 1)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 12)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 2)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 14)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 3)), xoaddr:$dst),
-          (STXSIHXv $S, xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 4)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 2)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 5)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 4)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 6)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 6)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 7)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 8)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
 } // IsBigEndian, HasP9Vector
 
 let Predicates = [IsLittleEndian, HasP9Vector] in {
 // Scalar stores of i8
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 0)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 8)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 1)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 7)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 7)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 2)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 6)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 3)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 5)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 5)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 4)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 4)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 5)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 3)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 3)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 6)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 2)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 7)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 1)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 1)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 8)), xoaddr:$dst),
-          (STXSIBXv $S, xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 9)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 15)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 15)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 10)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 14)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 11)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 13)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 13)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 12)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 12)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 13)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 11)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 11)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 14)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 10)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 15)), xoaddr:$dst),
-          (STXSIBXv (v16i8 (VSLDOI $S, $S, 9)), xoaddr:$dst)>;
+          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 9)), VSRC), xoaddr:$dst)>;
 
 // Scalar stores of i16
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 0)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 8)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 1)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 6)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 2)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 4)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 3)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 2)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 4)), xoaddr:$dst),
-          (STXSIHXv $S, xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 5)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 14)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 6)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 12)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
 def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 7)), xoaddr:$dst),
-          (STXSIHXv (v16i8 (VSLDOI $S, $S, 10)), xoaddr:$dst)>;
+          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
 } // IsLittleEndian, HasP9Vector
Index: llvm/trunk/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
===================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
+++ llvm/trunk/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
@@ -41,11 +41,11 @@
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat.splat
 ; CHECK-LABEL: vecuiuc
-; CHECK: lxsibzx 34, 0, 3
-; CHECK-NEXT: xxspltw 34, 34, 1
+; CHECK: lxsibzx 0, 0, 3
+; CHECK-NEXT: xxspltw 34, 0, 1
 ; CHECK-BE-LABEL: vecuiuc
-; CHECK-BE: lxsibzx 34, 0, 3
-; CHECK-BE-NEXT: xxspltw 34, 34, 1
+; CHECK-BE: lxsibzx 0, 0, 3
+; CHECK-BE-NEXT: xxspltw 34, 0, 1
 }
 
 ; Function Attrs: norecurse nounwind readonly
@@ -104,11 +104,11 @@
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat.splat
 ; CHECK-LABEL: vecsiuc
-; CHECK: lxsibzx 34, 0, 3
-; CHECK-NEXT: xxspltw 34, 34, 1
+; CHECK: lxsibzx 0, 0, 3
+; CHECK-NEXT: xxspltw 34, 0, 1
 ; CHECK-BE-LABEL: vecsiuc
-; CHECK-BE: lxsibzx 34, 0, 3
-; CHECK-BE-NEXT: xxspltw 34, 34, 1
+; CHECK-BE: lxsibzx 0, 0, 3
+; CHECK-BE-NEXT: xxspltw 34, 0, 1
 }
 
 ; Function Attrs: norecurse nounwind readonly
@@ -350,11 +350,11 @@
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat.splat
 ; CHECK-LABEL: vecuius
-; CHECK: lxsihzx 34, 0, 3
-; CHECK-NEXT: xxspltw 34, 34, 1
+; CHECK: lxsihzx 0, 0, 3
+; CHECK-NEXT: xxspltw 34, 0, 1
 ; CHECK-BE-LABEL: vecuius
-; CHECK-BE: lxsihzx 34, 0, 3
-; CHECK-BE-NEXT: xxspltw 34, 34, 1
+; CHECK-BE: lxsihzx 0, 0, 3
+; CHECK-BE-NEXT: xxspltw 34, 0, 1
 }
 
 ; Function Attrs: norecurse nounwind readonly
@@ -414,11 +414,11 @@
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat.splat
 ; CHECK-LABEL: vecsius
-; CHECK: lxsihzx 34, 0, 3
-; CHECK-NEXT: xxspltw 34, 34, 1
+; CHECK: lxsihzx 0, 0, 3
+; CHECK-NEXT: xxspltw 34, 0, 1
 ; CHECK-BE-LABEL: vecsius
-; CHECK-BE: lxsihzx 34, 0, 3
-; CHECK-BE-NEXT: xxspltw 34, 34, 1
+; CHECK-BE: lxsihzx 0, 0, 3
+; CHECK-BE-NEXT: xxspltw 34, 0, 1
 }
 
 ; Function Attrs: norecurse nounwind readonly
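Editor's note on the register-numbering change (not part of the patch): the PPCInstrInfo.h hunk above drops the per-instruction UseVSXReg TSFlags bit and instead lets the operand's register class decide whether a register is renumbered into the VSX range during encoding and disassembly. The standalone C++ sketch below illustrates that rule in isolation. It is not LLVM code: the RegClass enum and the VF0/V0/VSX32 constants are invented stand-ins for PPC::VSSRCRegClassID, PPC::VSFRCRegClassID, PPC::VSRCRegClassID and the corresponding generated register numbers; only the mapping logic mirrors the patch.

// Standalone illustration only: register numbers and class tags are invented
// for this example; only the mapping rule mirrors the patch above.
#include <cstdio>

namespace demo {

// Hypothetical contiguous register numbers, in the spirit of PPC::VF0,
// PPC::V0 and PPC::VSX32 (the real values live in the generated PPC register
// enum and are not reproduced here).
constexpr unsigned VF0   = 100; // VF0-VF31: scalar halves of vs32-vs63
constexpr unsigned V0    = 200; // V0-V31:   Altivec registers
constexpr unsigned VSX32 = 300; // VSX32-VSX63: full VSX numbering of vs32-vs63

// Stand-ins for PPC::VSSRCRegClassID, PPC::VSFRCRegClassID, PPC::VSRCRegClassID.
enum class RegClass { VSSRC, VSFRC, VSRC, Other };

bool isVFRegister(unsigned Reg) { return Reg >= VF0 && Reg < VF0 + 32; }
bool isVRRegister(unsigned Reg) { return Reg >= V0 && Reg < V0 + 32; }

// Same decision structure as the new getRegNumForOperand: the operand's
// register class, not a per-instruction flag, selects the VSX renumbering.
unsigned getRegNumForOperand(RegClass RC, unsigned Reg) {
  switch (RC) {
  case RegClass::VSSRC:
  case RegClass::VSFRC:
    if (isVFRegister(Reg))
      return VSX32 + (Reg - VF0); // VF0-VF31 -> vs32-vs63
    break;
  case RegClass::VSRC:
    if (isVRRegister(Reg))
      return VSX32 + (Reg - V0);  // V0-V31 -> vs32-vs63
    break;
  case RegClass::Other:
    break;                        // other register classes need no mapping
  }
  return Reg;
}

} // namespace demo

int main() {
  using namespace demo;
  // VF3 used as a VSFRC operand is renumbered; the same register used by an
  // operand of some other class is left alone.
  std::printf("VF3 as VSFRC -> %u (expected %u)\n",
              getRegNumForOperand(RegClass::VSFRC, VF0 + 3), VSX32 + 3);
  std::printf("V5  as VSRC  -> %u (expected %u)\n",
              getRegNumForOperand(RegClass::VSRC, V0 + 5), VSX32 + 5);
  std::printf("VF3 as Other -> %u (unchanged)\n",
              getRegNumForOperand(RegClass::Other, VF0 + 3));
  return 0;
}

Compiling and running the sketch prints the two renumbered cases and one unchanged case; in this toy numbering VF3 and V5 both land in the vs32-vs63 range, which is the behaviour the test updates above rely on.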