clang/lib/CodeGen/TargetInfo.cpp

Show First 20 Lines • Show All 9,180 Lines • ▼ Show 20 Lines
//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;
public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we overload it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                llvm::Type *&Field2Ty, CharUnits &Field2Off,
                                int &NeededArgGPRs, int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace
void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect or if the type size is greater than 2*xlen. e.g. fp128
  // is passed direct in LLVM IR, relying on the backend lowering code to
  // rewrite the argument list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect ||
                       getContext().getTypeSize(RetTy) > (2 * XLen);

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}
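One consequence of the IsFixed flag above is worth illustrating (an editorial example, assuming an lp64d target; the declaration below is hypothetical and not from the patch):

    void f(double a, ...);
    f(1.0, 2.0);

Only the named argument a is classified with IsFixed == true and may use an FPR; the 2.0 passed through the variadic part has IsFixed == false, so it follows the integer calling convention (GPRs or the stack), since the FPR paths in classifyArgumentType below all require IsFixed.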
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on RISC-V and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;

rjmccall: Is this the only consideration for floating-point types? Clang does have increasing support for half / various float16 types.

asb: These types aren't supported on RISC-V currently. As the ABI hasn't really been explicitly confirmed, I've defaulted to the integer ABI in that case. Could move to an assert if you prefer, though obviously any future move to enable half floats for RISC-V should include ABI tests too.

rjmccall: Defaulting to the integer ABI is fine.
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;

rjmccall: The comment here is wrong because fp+fp is allowed, right? Is this not already caught by the post-processing checks you do in detectFPCCEligibleStruct? Would it make more sense to just do all those checks there?

asb: Thanks, I meant to say int+int isn't eligible. Reworded to say that. I don't think it would simplify things to do all checks in detectFPCCEligibleStruct. More repetition would be required in order to do checks on both Field1Ty and Field2Ty.

rjmccall: Okay. It just seemed to me that responsibility was oddly split between the functions.

asb: I added a comment to document this. It's not something I'd expose in a public API, but I think it's defensible to catch this case outside of the helper. I had another look at refactoring but the readability just seemed to be reduced when pulling out all the checks to the caller (rather than catching the single case that detectFPCCEligibleStructHelper can't handle).
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      assert(CurOff.isZero() && "Unexpected offset for first field");
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }
  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;

rjmccall: `Field2Ty = Field1Ty`, please.

    Field1Ty = CGT.ConvertType(EltTy);
    assert(CurOff.isZero() && "Unexpected offset for first field");
    Field2Ty = Field1Ty;
    Field2Off = getContext().getTypeSizeInChars(EltTy);
    return true;
  }
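  // For the complex case above (illustrative note, assuming FLen is large
  // enough for the element type): _Complex double flattens to two doubles with
  // Field2Off == 8, and _Complex float to two floats with Field2Off == 4.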
  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }
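  // For the array case above (illustrative note): a double[2] field at offset 0
  // flattens to doubles at offsets 0 and 8, while double[3] produces a third
  // element and is rejected.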
  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).

rjmccall: I really expect there to be something in this block about whether the field is a bit-field. What exactly does your ABI specify if there's a bit-field?

asb: I've updated to handle bitfields and submitted a pull request to the RISC-V psABI to improve the documentation. Unfortunately the handling of zero-width bitfields is a little weird, but the preference seems to be to just document what GCC does.

    if (RD->isUnion())
      return false;
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
rjmccall: Okay. So consecutive bit-fields are considered individually, not packed into a single storage unit and then considered? Unfortunate ABI rule, but if it's what you have to implement, so be it.

asb: I'm afraid that's the rule as written, and what gcc seems to implement.
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }
      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;

rogfer01: I found some mismatch in behaviour between gcc and g++ that we may want to address in the psABI first. For instance, given the following struct (I'm using gcc 8.3.0)

    // t.c
    struct A {
      int :0;
      double d;
      int :0;
      long x;
      int :0;
    };

    extern void bar(struct A);
    void foo(struct A a) {
      a.d =- a.d;
      a.x += 1;
      return bar(a);
    }

we are emitting this

    $ clang --target=riscv64 -march=rv64gc -mabi=lp64d -S -o- t.c -O2
    ...
    foo:                                    # @foo
    # %bb.0:                                # %entry
            addi    a2, zero, -1
            slli    a2, a2, 63
            xor     a0, a0, a2
            addi    a1, a1, 1
            tail    bar

which matches with what g++ does (i.e. in both cases a0 is a.d and a1 is a.x)

    $ ./riscv64-unknown-linux-gnu-g++ -S -O2 -o- -x c test.cc
    ...
    foo:
            fmv.d.x fa5,a0
            addi    sp,sp,-16
            fneg.d  fa5,fa5
            addi    a1,a1,1
            addi    sp,sp,16
            fmv.x.d a0,fa5
            tail    bar

But I found a mismatch while using C++. Clang emits the same for C and C++ (modulo .cfi stuff)

    $ clang --target=riscv64 -march=rv64gc -mabi=lp64d -S -o- -x c++ t.c -O2
    _Z3foo1A:                               # @_Z3foo1A
            .cfi_startproc
    # %bb.0:                                # %entry
            addi    a2, zero, -1
            slli    a2, a2, 63
            xor     a0, a0, a2
            addi    a1, a1, 1
            .cfi_def_cfa_offset 0
            tail    _Z3bar1A

But g++ seems to ignore the zero-width bitfields: fa0 is a.d and a0 is a.x

    $ riscv64-unknown-linux-gnu-g++ -S -O2 -x c++ t.c -o-
    ...
    _Z3foo1A:
    .LFB0:
            .cfi_startproc
            fneg.d  fa0,fa0
            addi    sp,sp,-16
            .cfi_def_cfa_offset 16
            addi    a0,a0,1
            addi    sp,sp,16
            .cfi_def_cfa_offset 0
            tail    _Z3bar1A
            .cfi_endproc

This is a bit worrying as it might complicate interoperability between C and C++ (I tried wrapping everything inside an extern "C" just in case but it didn't change g++'s behaviour). Do you mind to confirm this issue?

asb: Thanks, I'm seeing this in GCC 9.1.0 as well. I left a comment (https://github.com/riscv/riscv-elf-psabi-doc/issues/99#issuecomment-509233798) on the relevant psABI issue. It seems there is a GCC bug here, but hopefully someone can confirm what the "correct" behaviour is.
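To make the rule concrete, a few illustrative cases (derived from the check above, assuming -mabi=lp64d; these structs are not taken from the patch or the psABI text):

    struct A { float f; int : 0; };           // still a single-fp struct: eligible
    struct B { float f; int : 0; float g; };  // not eligible: zero-width bit-field plus two members
    struct C { int : 0; double d; int : 0; long x; int : 0; };
                                              // rogfer01's struct above: not eligible,
                                              // so it is passed in GPRs (a0/a1)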
    }
    return true;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
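// For example (illustrative cases, not from the psABI text), with FLen == 64
// (the ilp32d/lp64d ABIs): struct { double d; }, struct { float f; double d; }
// and struct { double d; int i; } are all eligible, while
// struct { int a; int b; } and struct { double d[3]; } are not.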
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return IsCandidate;
}
// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field2Ty, Field2Off. This method will create an appropriate
// coerceToType and unpaddedCoerceToType.

rogfer01: Typo in `Filed2Ty` and `Filed2Off`

ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, llvm::Type *Field2Ty, CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, false),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1Size =
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1Size)
    Padding = Field2Off - Field1Size;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
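A worked illustration of the coerce-and-expand types this produces (my own example, not output quoted from the patch): for a struct that flattens to a float at offset 0 and a double at offset 8, the double already falls at offset 8 in LLVM's natural struct layout, so no padding element is needed:

    coerceToType         = { float, double }
    unpaddedCoerceToType = { float, double }

If the second field instead sits beyond its natural offset, say a float at offset 8 following a float at offset 0, a byte-array padding element is inserted:

    coerceToType         = { float, [4 x i8], float }
    unpaddedCoerceToType = { float, float }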
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();
  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->getAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs;
    int NeededArgFPRs;
    bool IsCandidate = detectFPCCEligibleStruct(
        Ty, Field1Ty, Field2Ty, Field2Off, NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field2Ty, Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
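  // For instance (illustrative note, not a comment from the patch): on RV64 an
  // __int128_t vararg has 2*XLen alignment, so it is passed in an aligned
  // register pair; if an odd number of argument GPRs remains, one of them is
  // effectively skipped, hence the possible count of 3.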
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines (within RISCVABIInfo::classifyArgumentType)
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;
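  // Illustrative note (not from the patch): with FLen == 64 this lets e.g.
  // struct { double a; double b; } or double _Complex be returned in two FPRs,
  // while return values needing more than two GPRs/FPRs fall back to the
  // integer convention or an indirect return.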
  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}
Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
Show All 18 Lines (within RISCVABIInfo::extendType)
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}
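As an illustration of the sign-extension choice above (my example, not taken from the patch or its tests), a C function such as

    void g(unsigned int x);

is lowered on riscv64 with a signext attribute on the i32 parameter rather than zeroext, because RV64 keeps 32-bit values sign-extended in 64-bit registers.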
namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
▲ Show 20 Lines • Show All 122 Lines • ▼ Show 20 Lines (within CodeGenModule::getTargetCodeGenInfo)
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
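    // e.g. (editorial note) ABIFLen ends up 0 for ilp32/ilp32e/lp64, 32 for
    // ilp32f/lp64f, and 64 for ilp32d/lp64d.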
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }
  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
▲ Show 20 Lines • Show All 160 Lines • Show Last 20 Lines |