Index: lib/Target/MSP430/MSP430CallingConv.td
===================================================================
--- lib/Target/MSP430/MSP430CallingConv.td
+++ lib/Target/MSP430/MSP430CallingConv.td
@@ -23,18 +23,15 @@
 //===----------------------------------------------------------------------===//
 // MSP430 Argument Calling Conventions
 //===----------------------------------------------------------------------===//
-def CC_MSP430 : CallingConv<[
+def CC_MSP430_AssignStack : CallingConv<[
   // Pass by value if the byval attribute is given
   CCIfByVal<CCPassByVal<2, 2>>,
 
   // Promote i8 arguments to i16.
   CCIfType<[i8], CCPromoteToType<i16>>,
 
-  // The first 4 integer arguments of non-varargs functions are passed in
-  // integer registers.
-  CCIfNotVarArg<CCIfType<[i16], CCAssignToReg<[R15W, R14W, R13W, R12W]>>>,
-
   // Integer values get stored in stack slots that are 2 bytes in
   // size and 2-byte aligned.
   CCIfType<[i16], CCAssignToStack<2, 2>>
 ]>;
+
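With the register rules removed, CC_MSP430_AssignStack only ever assigns stack slots: it is used directly for varargs functions and as the spill path in the custom C++ logic below. As a minimal sketch of the layout it produces (plain C++ model, not part of the patch; the argument mix is made up for illustration), each 16-bit piece gets a 2-byte slot at 2-byte alignment, in source order:

#include <cstdio>

int main() {
  // Hypothetical varargs call f(i16, i32, i64, ...): 1, 2 and 4 pieces.
  const unsigned PieceCounts[] = {1, 2, 4};
  unsigned Offset = 0;
  for (unsigned Arg = 0; Arg < 3; ++Arg)
    for (unsigned Piece = 0; Piece < PieceCounts[Arg]; ++Piece) {
      std::printf("arg %u piece %u -> stack offset %u\n", Arg, Piece, Offset);
      Offset += 2; // CCAssignToStack<2, 2>: 2-byte slot, 2-byte aligned
    }
  return 0;
}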
Index: lib/Target/MSP430/MSP430ISelLowering.cpp
===================================================================
--- lib/Target/MSP430/MSP430ISelLowering.cpp
+++ lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -250,6 +250,123 @@
 
 #include "MSP430GenCallingConv.inc"
 
+/// For each argument in a function store the number of pieces it is composed
+/// of.
+template<typename ArgT>
+static void ParseFunctionArgs(const SmallVectorImpl<ArgT> &Args,
+                              SmallVectorImpl<unsigned> &Out) {
+  unsigned CurrentArgIndex = ~0U;
+  for (unsigned i = 0, e = Args.size(); i != e; i++) {
+    if (CurrentArgIndex == Args[i].OrigArgIndex) {
+      Out.back()++;
+    } else {
+      Out.push_back(1);
+      CurrentArgIndex++;
+    }
+  }
+}
+
+static void AnalyzeVarArgs(CCState &State,
+                           const SmallVectorImpl<ISD::OutputArg> &Outs) {
+  State.AnalyzeCallOperands(Outs, CC_MSP430_AssignStack);
+}
+
+static void AnalyzeVarArgs(CCState &State,
+                           const SmallVectorImpl<ISD::InputArg> &Ins) {
+  State.AnalyzeFormalArguments(Ins, CC_MSP430_AssignStack);
+}
+
+/// Analyze incoming and outgoing function arguments. We need custom C++ code
+/// to handle special constraints in the ABI like reversing the order of the
+/// pieces of split arguments. In addition, all pieces of a certain argument
+/// have to be passed either using registers or the stack but never mixing both.
+template<typename ArgT>
+static void AnalyzeArguments(CCState &State,
+                             SmallVectorImpl<CCValAssign> &ArgLocs,
+                             const SmallVectorImpl<ArgT> &Args) {
+  static const uint16_t RegList[] = {
+    MSP430::R15W, MSP430::R14W, MSP430::R13W, MSP430::R12W
+  };
+  static const unsigned NbRegs = array_lengthof(RegList);
+
+  if (State.isVarArg()) {
+    AnalyzeVarArgs(State, Args);
+    return;
+  }
+
+  SmallVector<unsigned, 4> ArgsParts;
+  ParseFunctionArgs(Args, ArgsParts);
+
+  unsigned RegsLeft = NbRegs;
+  bool UseStack = false;
+  unsigned ValNo = 0;
+
+  for (unsigned i = 0, e = ArgsParts.size(); i != e; i++) {
+    MVT ArgVT = Args[ValNo].VT;
+    ISD::ArgFlagsTy ArgFlags = Args[ValNo].Flags;
+    MVT LocVT = ArgVT;
+    CCValAssign::LocInfo LocInfo = CCValAssign::Full;
+
+    // Promote i8 to i16
+    if (LocVT == MVT::i8) {
+      LocVT = MVT::i16;
+      if (ArgFlags.isSExt())
+        LocInfo = CCValAssign::SExt;
+      else if (ArgFlags.isZExt())
+        LocInfo = CCValAssign::ZExt;
+      else
+        LocInfo = CCValAssign::AExt;
+    }
+
+    // Handle byval arguments
+    if (ArgFlags.isByVal()) {
+      State.HandleByVal(ValNo++, ArgVT, LocVT, LocInfo, 2, 2, ArgFlags);
+      continue;
+    }
+
+    unsigned Parts = ArgsParts[i];
+
+    if (!UseStack && Parts <= RegsLeft) {
+      unsigned FirstVal = ValNo;
+      for (unsigned j = 0; j < Parts; j++) {
+        unsigned Reg = State.AllocateReg(RegList, NbRegs);
+        State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
+        RegsLeft--;
+      }
+
+      // Reverse the order of the pieces to agree with the "big endian" format
+      // required in the calling convention ABI.
+      SmallVectorImpl<CCValAssign>::iterator B = ArgLocs.begin() + FirstVal;
+      std::reverse(B, B + Parts);
+    } else {
+      UseStack = true;
+      for (unsigned j = 0; j < Parts; j++)
+        CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
+    }
+  }
+}
+
+static void AnalyzeRetResult(CCState &State,
+                             const SmallVectorImpl<ISD::InputArg> &Ins) {
+  State.AnalyzeCallResult(Ins, RetCC_MSP430);
+}
+
+static void AnalyzeRetResult(CCState &State,
+                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
+  State.AnalyzeReturn(Outs, RetCC_MSP430);
+}
+
+template<typename ArgT>
+static void AnalyzeReturnValues(CCState &State,
+                                SmallVectorImpl<CCValAssign> &RVLocs,
+                                const SmallVectorImpl<ArgT> &Args) {
+  AnalyzeRetResult(State, Args);
+
+  // Reverse split return values to get the "big endian" format required
+  // to agree with the calling convention ABI.
+  std::reverse(RVLocs.begin(), RVLocs.end());
+}
+
 SDValue
 MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv,
@@ -325,7 +442,7 @@
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), ArgLocs, *DAG.getContext());
-  CCInfo.AnalyzeFormalArguments(Ins, CC_MSP430);
+  AnalyzeArguments(CCInfo, ArgLocs, Ins);
 
   // Create frame index for the start of the first vararg value
   if (isVarArg) {
@@ -423,7 +540,7 @@
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), RVLocs, *DAG.getContext());
   // Analize return values.
-  CCInfo.AnalyzeReturn(Outs, RetCC_MSP430);
+  AnalyzeReturnValues(CCInfo, RVLocs, Outs);
 
   SDValue Flag;
   SmallVector<SDValue, 4> RetOps(1, Chain);
@@ -471,8 +588,7 @@
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), ArgLocs, *DAG.getContext());
-
-  CCInfo.AnalyzeCallOperands(Outs, CC_MSP430);
+  AnalyzeArguments(CCInfo, ArgLocs, Outs);
 
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -610,7 +726,7 @@
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), RVLocs, *DAG.getContext());
 
-  CCInfo.AnalyzeCallResult(Ins, RetCC_MSP430);
+  AnalyzeReturnValues(CCInfo, RVLocs, Ins);
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
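For non-varargs functions, the net effect of AnalyzeArguments is: each argument takes as many registers from R15, R14, R13, R12 as it has 16-bit pieces; the pieces of a multi-register argument are reversed so the most significant word lands in the highest-numbered register of its group; and once one argument spills to the stack, every later argument follows it there. The standalone model below (plain C++, not LLVM code) reproduces the assignment that the f_i16_i32_i16 case in the new test checks: r15 gets the first i16, r13/r14 the low/high words of the i32, and r12 the trailing i16.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const char *RegList[] = {"r15", "r14", "r13", "r12"};
  const unsigned NbRegs = 4;

  // f_i16_i32_i16(i16, i32, i16): 1 + 2 + 1 sixteen-bit pieces,
  // as ParseFunctionArgs would compute from OrigArgIndex.
  std::vector<unsigned> ArgsParts = {1, 2, 1};

  unsigned RegsUsed = 0, StackOffset = 0;
  bool UseStack = false;
  for (unsigned Arg = 0; Arg < ArgsParts.size(); ++Arg) {
    unsigned Parts = ArgsParts[Arg];
    if (!UseStack && Parts <= NbRegs - RegsUsed) {
      // Take the next Parts registers, then reverse so the highest piece
      // sits in the first (highest-numbered) register of the group.
      std::vector<const char *> Regs;
      for (unsigned j = 0; j < Parts; ++j)
        Regs.push_back(RegList[RegsUsed++]);
      std::reverse(Regs.begin(), Regs.end());
      for (unsigned j = 0; j < Parts; ++j)
        std::printf("arg %u piece %u -> %s\n", Arg, j, Regs[j]);
    } else {
      // Once any argument spills, all remaining arguments use the stack.
      UseStack = true;
      for (unsigned j = 0; j < Parts; ++j, StackOffset += 2)
        std::printf("arg %u piece %u -> stack offset %u\n", Arg, j,
                    StackOffset);
    }
  }
  return 0;
}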
Index: test/CodeGen/MSP430/cc_args.ll
===================================================================
--- /dev/null
+++ test/CodeGen/MSP430/cc_args.ll
@@ -0,0 +1,118 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: mov.w #1, r15
+; CHECK: call #f_i16
+  call void @f_i16(i16 1)
+
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: call #f_i32
+  call void @f_i32(i32 16909060)
+
+; CHECK: mov.w #1800, r12
+; CHECK: mov.w #1286, r13
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: call #f_i64
+  call void @f_i64(i64 72623859790382856)
+
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: mov.w #1800, r12
+; CHECK: mov.w #1286, r13
+; CHECK: call #f_i32_i32
+  call void @f_i32_i32(i32 16909060, i32 84281096)
+
+; CHECK: mov.w #1, r15
+; CHECK: mov.w #772, r13
+; CHECK: mov.w #258, r14
+; CHECK: mov.w #2, r12
+; CHECK: call #f_i16_i32_i16
+  call void @f_i16_i32_i16(i16 1, i32 16909060, i16 2)
+
+; CHECK: mov.w #2, 8(r1)
+; CHECK: mov.w #258, 6(r1)
+; CHECK: mov.w #772, 4(r1)
+; CHECK: mov.w #1286, 2(r1)
+; CHECK: mov.w #1800, 0(r1)
+; CHECK: mov.w #1, r15
+; CHECK: call #f_i16_i64_i16
+  call void @f_i16_i64_i16(i16 1, i64 72623859790382856, i16 2)
+
+  ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 2
+
+define void @f_i16(i16 %a) #0 {
+; CHECK: f_i16:
+; CHECK: mov.w r15, &g_i16
+  store volatile i16 %a, i16* @g_i16, align 2
+  ret void
+}
+
+define void @f_i32(i32 %a) #0 {
+; CHECK: f_i32:
+; CHECK: mov.w r15, &g_i32+2
+; CHECK: mov.w r14, &g_i32
+  store volatile i32 %a, i32* @g_i32, align 2
+  ret void
+}
+
+define void @f_i64(i64 %a) #0 {
+; CHECK: f_i64:
+; CHECK: mov.w r15, &g_i64+6
+; CHECK: mov.w r14, &g_i64+4
+; CHECK: mov.w r13, &g_i64+2
+; CHECK: mov.w r12, &g_i64
+  store volatile i64 %a, i64* @g_i64, align 2
+  ret void
+}
+
+define void @f_i32_i32(i32 %a, i32 %b) #0 {
+; CHECK: f_i32_i32:
+; CHECK: mov.w r15, &g_i32+2
+; CHECK: mov.w r14, &g_i32
+  store volatile i32 %a, i32* @g_i32, align 2
+; CHECK: mov.w r13, &g_i32+2
+; CHECK: mov.w r12, &g_i32
+  store volatile i32 %b, i32* @g_i32, align 2
+  ret void
+}
+
+define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
+; CHECK: f_i16_i32_i16:
+; CHECK: mov.w r15, &g_i16
+  store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: mov.w r14, &g_i32+2
+; CHECK: mov.w r13, &g_i32
+  store volatile i32 %b, i32* @g_i32, align 2
+; CHECK: mov.w r12, &g_i16
+  store volatile i16 %c, i16* @g_i16, align 2
+  ret void
+}
+
+define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
+; CHECK: f_i16_i64_i16:
+; CHECK: mov.w r15, &g_i16
+  store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: mov.w 10(r4), &g_i64+6
+; CHECK: mov.w 8(r4), &g_i64+4
+; CHECK: mov.w 6(r4), &g_i64+2
+; CHECK: mov.w 4(r4), &g_i64
+  store volatile i64 %b, i64* @g_i64, align 2
+; CHECK: mov.w 12(r4), &g_i16
+  store volatile i16 %c, i16* @g_i16, align 2
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
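A note on the constants in this test: they are chosen so every 16-bit word is distinct and easy to match in the CHECK lines. 16909060 is 0x01020304 and 72623859790382856 is 0x0102030405060708, so the words are 258 (0x0102), 772 (0x0304), 1286 (0x0506) and 1800 (0x0708). A quick standalone check (plain C++, illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t V = 72623859790382856ULL; // 0x0102030405060708, as in @f_i64
  for (int i = 0; i < 4; ++i)        // lowest word first
    std::printf("word %d = %llu\n", i,
                (unsigned long long)((V >> (16 * i)) & 0xFFFF));
  // word 0 = 1800, word 1 = 1286, word 2 = 772, word 3 = 258 -- the
  // immediates the test expects in r12, r13, r14 and r15 respectively.
  return 0;
}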
"no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } Index: test/CodeGen/MSP430/cc_ret.ll =================================================================== --- /dev/null +++ test/CodeGen/MSP430/cc_ret.ll @@ -0,0 +1,61 @@ +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16" +target triple = "msp430---elf" + +define void @test() #0 { +entry: +; CHECK: test: + +; CHECK: call #f_i16 +; CHECK: mov.w r15, &g_i16 + %0 = call i16 @f_i16() + store volatile i16 %0, i16* @g_i16 + +; CHECK: call #f_i32 +; CHECK: mov.w r15, &g_i32+2 +; CHECK: mov.w r14, &g_i32 + %1 = call i32 @f_i32() + store volatile i32 %1, i32* @g_i32 + +; CHECK: call #f_i64 +; CHECK: mov.w r15, &g_i64+6 +; CHECK: mov.w r14, &g_i64+4 +; CHECK: mov.w r13, &g_i64+2 +; CHECK: mov.w r12, &g_i64 + %2 = call i64 @f_i64() + store volatile i64 %2, i64* @g_i64 + + ret void +} + +@g_i16 = common global i16 0, align 2 +@g_i32 = common global i32 0, align 2 +@g_i64 = common global i64 0, align 2 + +define i16 @f_i16() #0 { +; CHECK: f_i16: +; CHECK: mov.w #1, r15 +; CHECK: ret + ret i16 1 +} + +define i32 @f_i32() #0 { +; CHECK: f_i32: +; CHECK: mov.w #772, r14 +; CHECK: mov.w #258, r15 +; CHECK: ret + ret i32 16909060 +} + +define i64 @f_i64() #0 { +; CHECK: f_i64: +; CHECK: mov.w #1800, r12 +; CHECK: mov.w #1286, r13 +; CHECK: mov.w #772, r14 +; CHECK: mov.w #258, r15 +; CHECK: ret + ret i64 72623859790382856 +} + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }