Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7159,10 +7159,11 @@
 /// uses features that we can't model on machineinstrs, we have SDISel do the
 /// allocation.  This produces generally horrible, but correct, code.
 ///
-/// OpInfo describes the operand.
+/// OpInfo describes the operand
+/// RefOpInfo describes the matching operand if any, the operand otherwise
 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
-                                 const SDLoc &DL,
-                                 SDISelAsmOperandInfo &OpInfo) {
+                                 const SDLoc &DL, SDISelAsmOperandInfo &OpInfo,
+                                 SDISelAsmOperandInfo &RefOpInfo) {
   LLVMContext &Context = *DAG.getContext();
 
   MachineFunction &MF = DAG.getMachineFunction();
@@ -7172,8 +7173,8 @@
   // If this is a constraint for a single physreg, or a constraint for a
   // register class, find it.
   std::pair<unsigned, const TargetRegisterClass *> PhysReg =
-      TLI.getRegForInlineAsmConstraint(&TRI, OpInfo.ConstraintCode,
-                                       OpInfo.ConstraintVT);
+      TLI.getRegForInlineAsmConstraint(&TRI, RefOpInfo.ConstraintCode,
+                                       RefOpInfo.ConstraintVT);
 
   unsigned NumRegs = 1;
   if (OpInfo.ConstraintVT != MVT::Other) {
@@ -7215,6 +7216,11 @@
     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
   }
 
+  // No need to allocate a matching input constraint since the constraint it's
+  // matching to has already been allocated.
+  if (OpInfo.isMatchingInputConstraint())
+    return;
+
   MVT RegVT;
   EVT ValueVT = OpInfo.ConstraintVT;
 
@@ -7463,19 +7469,27 @@
 
     // If this constraint is for a specific register, allocate it before
    // anything else.
-    if (OpInfo.ConstraintType == TargetLowering::C_Register)
-      GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
+    SDISelAsmOperandInfo &RefOpInfo =
+        OpInfo.isMatchingInputConstraint()
+            ? ConstraintOperands[OpInfo.getMatchedOperand()]
+            : ConstraintOperands[i];
+    if (RefOpInfo.ConstraintType == TargetLowering::C_Register)
+      GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
   }
 
   // Third pass - Loop over all of the operands, assigning virtual or physregs
   // to register class operands.
   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
+    SDISelAsmOperandInfo &RefOpInfo =
+        OpInfo.isMatchingInputConstraint()
+            ? ConstraintOperands[OpInfo.getMatchedOperand()]
+            : ConstraintOperands[i];
 
     // C_Register operands have already been allocated, Other/Memory don't need
     // to be.
-    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
-      GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
+    if (RefOpInfo.ConstraintType == TargetLowering::C_RegisterClass)
+      GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
   }
 
   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
Index: test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll
===================================================================
--- test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll
+++ test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll
@@ -17,6 +17,42 @@
   ret double %1
 }
 
+; Check support for returning a float in GPR with matching float input with
+; soft float ABI
+define arm_aapcscc float @flt_gpr_matching_in_op_soft(float %f) #0 {
+; CHECK-LABEL: flt_gpr_matching_in_op_soft
+; CHECK: mov r0, r0
+  %1 = call float asm "mov $0, $1", "=&r,0"(float %f)
+  ret float %1
+}
+
+; Check support for returning a double in GPR with matching double input with
+; soft float ABI
+define arm_aapcscc double @dbl_gpr_matching_in_op_soft(double %d) #0 {
+; CHECK-LABEL: dbl_gpr_matching_in_op_soft
+; CHECK: mov r1, r0
+  %1 = call double asm "mov ${0:R}, ${1:Q}", "=&r,0"(double %d)
+  ret double %1
+}
+
+; Check support for returning a float in specific GPR with matching float input
+; with soft float ABI
+define arm_aapcscc float @flt_gpr_matching_spec_reg_in_op_soft(float %f) #0 {
+; CHECK-LABEL: flt_gpr_matching_spec_reg_in_op_soft
+; CHECK: mov r3, r3
+  %1 = call float asm "mov $0, $1", "=&{r3},0"(float %f)
+  ret float %1
+}
+
+; Check support for returning a double in specific GPR with matching double
+; input with soft float ABI
+define arm_aapcscc double @dbl_gpr_matching_spec_reg_in_op_soft(double %d) #0 {
+; CHECK-LABEL: dbl_gpr_matching_spec_reg_in_op_soft
+; CHECK: mov r3, r2
+  %1 = call double asm "mov ${0:R}, ${1:Q}", "=&{r2},0"(double %d)
+  ret double %1
+}
+
 attributes #0 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="true" }
 
 
@@ -39,4 +75,48 @@
   ret double %1
 }
 
+; Check support for returning a float in GPR with matching float input with
+; hard float ABI
+define float @flt_gpr_matching_in_op_hard(float %f) #1 {
+; CHECK-LABEL: flt_gpr_matching_in_op_hard
+; CHECK: vmov r0, s0
+; CHECK: mov r0, r0
+; CHECK: vmov s0, r0
+  %1 = call float asm "mov $0, $1", "=&r,0"(float %f)
+  ret float %1
+}
+
+; Check support for returning a double in GPR with matching double input with
+; hard float ABI
+define double @dbl_gpr_matching_in_op_hard(double %d) #1 {
+; CHECK-LABEL: dbl_gpr_matching_in_op_hard
+; CHECK: vmov r0, r1, d0
+; CHECK: mov r1, r0
+; CHECK: vmov d0, r0, r1
+  %1 = call double asm "mov ${0:R}, ${1:Q}", "=&r,0"(double %d)
+  ret double %1
+}
+
+; Check support for returning a float in specific GPR with matching float
+; input with hard float ABI
+define float @flt_gpr_matching_spec_reg_in_op_hard(float %f) #1 {
+; CHECK-LABEL: flt_gpr_matching_spec_reg_in_op_hard
+; CHECK: vmov r3, s0
+; CHECK: mov r3, r3
+; CHECK: vmov s0, r3
+  %1 = call float asm "mov $0, $1", "=&{r3},0"(float %f)
+  ret float %1
+}
+
+; Check support for returning a double in specific GPR with matching double
+; input with hard float ABI
+define double @dbl_gpr_matching_spec_reg_in_op_hard(double %d) #1 {
+; CHECK-LABEL: dbl_gpr_matching_spec_reg_in_op_hard
+; CHECK: vmov r2, r3, d0
+; CHECK: mov r3, r2
+; CHECK: vmov d0, r2, r3
+  %1 = call double asm "mov ${0:R}, ${1:Q}", "=&{r2},0"(double %d)
+  ret double %1
+}
+
 attributes #1 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="false" }