Index: llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h =================================================================== --- llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h +++ llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h @@ -208,7 +208,7 @@ Value *optimizeIsAscii(CallInst *CI, IRBuilderBase &B); Value *optimizeToAscii(CallInst *CI, IRBuilderBase &B); Value *optimizeAtoi(CallInst *CI, IRBuilderBase &B); - Value *optimizeStrtol(CallInst *CI, IRBuilderBase &B); + Value *optimizeStrToInt(CallInst *CI, IRBuilderBase &B, bool AsSigned); // Formatting and IO Library Call Optimizations Value *optimizeErrorReporting(CallInst *CI, IRBuilderBase &B, Index: llvm/lib/Analysis/ValueTracking.cpp =================================================================== --- llvm/lib/Analysis/ValueTracking.cpp +++ llvm/lib/Analysis/ValueTracking.cpp @@ -4266,9 +4266,10 @@ return true; } -/// This function computes the length of a null-terminated C string pointed to -/// by V. If successful, it returns true and returns the string in Str. -/// If unsuccessful, it returns false. +/// Extract bytes from the initializer of the constant array V, which need +/// not be a nul-terminated string. On success, store the bytes in Str and +/// return true. When TrimAtNul is set, Str will contain only the bytes up +/// to but not including the first nul. Return false on failure. bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, uint64_t Offset, bool TrimAtNul) { ConstantDataArraySlice Slice; Index: llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp =================================================================== --- llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -75,29 +75,91 @@ }); } -static Value *convertStrToNumber(CallInst *CI, StringRef &Str, int64_t Base) { +// Convert the entire string Str representing an integer in Base, up to +// the terminating nul if present, to a constant according to the rules +// of strtoul[l] or, when AsSigned is set, of strtol[l]. On success +// return the result, otherwise null. +// The function assumes the string is encoded in ASCII. +static Value *convertStrToInt(CallInst *CI, StringRef &Str, uint64_t Base, + bool AsSigned) { if (Base < 2 || Base > 36) - // handle special zero base if (Base != 0) + // Fail for an invalid base (required by POSIX). return nullptr; - char *End; - std::string nptr = Str.str(); - errno = 0; - long long int Result = strtoll(nptr.c_str(), &End, Base); - if (errno) - return nullptr; + // Strip leading whitespace. + for (unsigned i = 0; i != Str.size(); ++i) + if (!isSpace((unsigned char)Str[i])) { + Str = Str.substr(i); + break; + } - // if we assume all possible target locales are ASCII supersets, - // then if strtoll successfully parses a number on the host, - // it will also successfully parse the same way on the target - if (*End != '\0') - return nullptr; + if (Str.empty()) + // Fail for empty subject sequences (POSIX allows but doesn't require + // strtol[l]/strtoul[l] to fail with EINVAL). + return nullptr; + + // Strip but remember the sign. + bool Negate = Str[0] == '-'; + if (Str[0] == '-' || Str[0] == '+') + Str = Str.drop_front(); + + // Set Max to the maximum positive representable value in the type. + Type *RetTy = CI->getType(); + unsigned NBits = RetTy->getPrimitiveSizeInBits(); + uint64_t Max = AsSigned && Negate ? 1 : 0; + Max += AsSigned ? maxIntN(NBits) : maxUIntN(NBits); + + if (Base == 0) { + // Autodetect Base. 
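+    // A leading "0x"/"0X" prefix selects base 16, a plain leading '0'
+    // selects base 8, and anything else selects base 10, mirroring the
+    // prefix rules of strtol.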
+    if (Str.size() > 1) {
+      if (Str[0] == '0') {
+        if (toUpper((unsigned char)Str[1]) == 'X') {
+          Base = 16;
+          Str = Str.drop_front(2);
+        } else
+          Base = 8;
+      } else
+        Base = 10;
+    } else
+      Base = 10;
+  }
+
+  // Convert the rest of the subject sequence, not including the sign,
+  // to its uint64_t representation (this assumes the source character
+  // set is ASCII).
+  uint64_t Result = 0;
+  for (unsigned i = 0; i != Str.size(); ++i) {
+    unsigned char DigVal = Str[i];
+    if (isDigit(DigVal))
+      DigVal = DigVal - '0';
+    else {
+      DigVal = toUpper(DigVal);
+      if (isAlpha(DigVal))
+        DigVal = DigVal - 'A' + 10;
+      else
+        return nullptr;
+    }
 
-  if (!isIntN(CI->getType()->getPrimitiveSizeInBits(), Result))
-    return nullptr;
+    if (DigVal >= Base)
+      // Fail if the digit is not valid in the Base.
+      return nullptr;
 
-  return ConstantInt::get(CI->getType(), Result);
+    // Add the digit and fail if the result is not representable in
+    // the (unsigned form of the) destination type.
+    bool VFlow;
+    Result = SaturatingMultiplyAdd(Result, Base, (uint64_t)DigVal, &VFlow);
+    if (VFlow || Result > Max)
+      return nullptr;
+  }
+
+  if (Negate)
+    // Unsigned negation doesn't overflow.
+    Result = -Result;
+
+  return ConstantInt::get(RetTy, Result);
 }
 
 static bool isOnlyUsedInComparisonWithZero(Value *V) {
@@ -2512,15 +2574,26 @@
                      ConstantInt::get(CI->getType(), 0x7F));
 }
 
+// Fold calls to atoi, atol, and atoll.
 Value *LibCallSimplifier::optimizeAtoi(CallInst *CI, IRBuilderBase &B) {
+  CI->addParamAttr(0, Attribute::NoCapture);
+
   StringRef Str;
   if (!getConstantStringInfo(CI->getArgOperand(0), Str))
     return nullptr;
 
-  return convertStrToNumber(CI, Str, 10);
+  return convertStrToInt(CI, Str, 10, /*AsSigned=*/true);
 }
 
-Value *LibCallSimplifier::optimizeStrtol(CallInst *CI, IRBuilderBase &B) {
+// Fold calls to strtol, strtoll, strtoul, and strtoull.
+Value *LibCallSimplifier::optimizeStrToInt(CallInst *CI, IRBuilderBase &B,
+                                           bool AsSigned) {
+  Value *EndPtr = CI->getArgOperand(1);
+  if (isa<ConstantPointerNull>(EndPtr))
+    // With a null EndPtr, this function won't capture the main argument.
+    // It would be readonly too, except that it still may write to errno.
+    CI->addParamAttr(0, Attribute::NoCapture);
+
   StringRef Str;
   if (!getConstantStringInfo(CI->getArgOperand(0), Str))
     return nullptr;
 
@@ -2529,7 +2602,7 @@
     return nullptr;
 
   if (ConstantInt *CInt = dyn_cast<ConstantInt>(CI->getArgOperand(2))) {
-    return convertStrToNumber(CI, Str, CInt->getSExtValue());
+    return convertStrToInt(CI, Str, CInt->getSExtValue(), AsSigned);
   }
 
   return nullptr;
@@ -3368,7 +3441,10 @@
     return optimizeAtoi(CI, Builder);
   case LibFunc_strtol:
   case LibFunc_strtoll:
-    return optimizeStrtol(CI, Builder);
+    return optimizeStrToInt(CI, Builder, /*AsSigned=*/true);
+  case LibFunc_strtoul:
+  case LibFunc_strtoull:
+    return optimizeStrToInt(CI, Builder, /*AsSigned=*/false);
   case LibFunc_printf:
     return optimizePrintF(CI, Builder);
   case LibFunc_sprintf:
Index: llvm/test/Transforms/InstCombine/str-int-2.ll
===================================================================
--- llvm/test/Transforms/InstCombine/str-int-2.ll
+++ llvm/test/Transforms/InstCombine/str-int-2.ll
@@ -70,7 +70,7 @@
 
 define i32 @atoi_not_const_str(i8* %s) #0 {
 ; CHECK-LABEL: @atoi_not_const_str(
-; CHECK-NEXT:    [[CALL:%.*]] = call i32 @atoi(i8* [[S:%.*]])
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @atoi(i8* nocapture [[S:%.*]])
 ; CHECK-NEXT:    ret i32 [[CALL]]
 ;
   %call = call i32 @atoi(i8* %s) #4
Index: llvm/test/Transforms/InstCombine/str-int-4.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/str-int-4.ll
@@ -0,0 +1,337 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; Verify that calls to strtol and strtoll are interpreted correctly even
+; in corner cases (or not folded).
+;
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare i32 @strtol(i8*, i8**, i32)
+declare i64 @strtoll(i8*, i8**, i32)
+
+
+; All POSIX whitespace characters.
+@ws = constant [7 x i8] c"\09\0d\0a\0b\0c \00"
+
+; A negative and positive number preceded by all POSIX whitespace.
+@ws_im123 = constant [11 x i8] c"\09\0d\0a\0b\0c -123\00"
+@ws_ip234 = constant [11 x i8] c"\09\0d\0a\0b\0c +234\00"
+
+@i0 = constant [3 x i8] c" 0\00"
+@i9 = constant [3 x i8] c" 9\00"
+@i19azAZ = constant [7 x i8] c"19azAZ\00"
+@i32min = constant [13 x i8] c" -2147483648\00"
+@i32min_m1 = constant [13 x i8] c" -2147483649\00"
+@o32min = constant [15 x i8] c" +020000000000\00"
+@mo32min = constant [15 x i8] c" -020000000000\00"
+@x32min = constant [13 x i8] c" +0x80000000\00"
+@mx32min = constant [13 x i8] c" -0x80000000\00"
+
+@i32max = constant [12 x i8] c" 2147483647\00"
+@x32max = constant [12 x i8] c" 0x7fffffff\00"
+@i32max_p1 = constant [12 x i8] c" 2147483648\00"
+
+@ui32max = constant [12 x i8] c" 4294967295\00"
+@ui32max_p1 = constant [12 x i8] c" 4294967296\00"
+
+
+; Exercise folding calls to 32-bit strtol.
+ +define void @fold_strtol(i32* %ps) { +; CHECK-LABEL: @fold_strtol( +; CHECK-NEXT: store i32 -123, i32* [[PS:%.*]], align 4 +; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 +; CHECK-NEXT: store i32 234, i32* [[PS1]], align 4 +; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 +; CHECK-NEXT: store i32 0, i32* [[PS2]], align 4 +; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 +; CHECK-NEXT: store i32 9, i32* [[PS3]], align 4 +; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4 +; CHECK-NEXT: store i32 76095035, i32* [[PS4]], align 4 +; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS5]], align 4 +; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS6]], align 4 +; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS7]], align 4 +; CHECK-NEXT: [[PS8:%.*]] = getelementptr i32, i32* [[PS]], i64 8 +; CHECK-NEXT: store i32 2147483647, i32* [[PS8]], align 4 +; CHECK-NEXT: [[PS9:%.*]] = getelementptr i32, i32* [[PS]], i64 9 +; CHECK-NEXT: store i32 2147483647, i32* [[PS9]], align 4 +; CHECK-NEXT: ret void +; +; Fold a valid sequence with leading POSIX whitespace and a minus to -123. + %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 + %im123 = call i32 @strtol(i8* %pwsm123, i8** null, i32 10) + %ps0 = getelementptr i32, i32* %ps, i32 0 + store i32 %im123, i32* %ps0 + +; Fold a valid sequence with leading POSIX whitespace and a plus to +234. + %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 + %ip234 = call i32 @strtol(i8* %pwsp234, i8** null, i32 10) + %ps1 = getelementptr i32, i32* %ps, i32 1 + store i32 %ip234, i32* %ps1 + +; Fold "0" in base 0 to verify correct base autodetection. + %psi0 = getelementptr [3 x i8], [3 x i8]* @i0, i32 0, i32 0 + %i0 = call i32 @strtol(i8* %psi0, i8** null, i32 0) + %ps2 = getelementptr i32, i32* %ps, i32 2 + store i32 %i0, i32* %ps2 + +; Fold "9" in base 0 to verify correct base autodetection. + %psi9 = getelementptr [3 x i8], [3 x i8]* @i9, i32 0, i32 0 + %i9 = call i32 @strtol(i8* %psi9, i8** null, i32 0) + %ps3 = getelementptr i32, i32* %ps, i32 3 + store i32 %i9, i32* %ps3 + +; Fold "19azAZ" in base 36 to 76095035. + %psi19azAZ = getelementptr [7 x i8], [7 x i8]* @i19azAZ, i32 0, i32 0 + %i19azAZ = call i32 @strtol(i8* %psi19azAZ, i8** null, i32 36) + %ps4 = getelementptr i32, i32* %ps, i32 4 + store i32 %i19azAZ, i32* %ps4 + +; Fold INT32_MIN. + %psmin = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0 + %min = call i32 @strtol(i8* %psmin, i8** null, i32 10) + %ps5 = getelementptr i32, i32* %ps, i32 5 + store i32 %min, i32* %ps5 + +; Fold -INT32_MIN in octal. + %psmo32min = getelementptr [15 x i8], [15 x i8]* @mo32min, i32 0, i32 0 + %mo32min = call i32 @strtol(i8* %psmo32min, i8** null, i32 0) + %ps6 = getelementptr i32, i32* %ps, i32 6 + store i32 %mo32min, i32* %ps6 + +; Fold -INT32_MIN in hex. + %psmx32min = getelementptr [13 x i8], [13 x i8]* @mx32min, i32 0, i32 0 + %mx32min = call i32 @strtol(i8* %psmx32min, i8** null, i32 0) + %ps7 = getelementptr i32, i32* %ps, i32 7 + store i32 %mx32min, i32* %ps7 + +; Fold INT32_MAX. + %psmax = getelementptr [12 x i8], [12 x i8]* @i32max, i32 0, i32 0 + %max = call i32 @strtol(i8* %psmax, i8** null, i32 10) + %ps8 = getelementptr i32, i32* %ps, i32 8 + store i32 %max, i32* %ps8 + +; Fold INT32_MAX in hex. 
+ %psxmax = getelementptr [12 x i8], [12 x i8]* @x32max, i32 0, i32 0 + %xmax = call i32 @strtol(i8* %psxmax, i8** null, i32 0) + %ps9 = getelementptr i32, i32* %ps, i32 9 + store i32 %xmax, i32* %ps9 + + ret void +} + + +; Exercise not folding calls to 32-bit strtol. + +define void @call_strtol(i32* %ps) { +; CHECK-LABEL: @call_strtol( +; CHECK-NEXT: [[MINM1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([13 x i8], [13 x i8]* @i32min_m1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: store i32 [[MINM1]], i32* [[PS:%.*]], align 4 +; CHECK-NEXT: [[MAXP1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([12 x i8], [12 x i8]* @i32max_p1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 +; CHECK-NEXT: store i32 [[MAXP1]], i32* [[PS1]], align 4 +; CHECK-NEXT: [[O32MIN:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 +; CHECK-NEXT: store i32 [[O32MIN]], i32* [[PS2]], align 4 +; CHECK-NEXT: [[X32MIN:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 +; CHECK-NEXT: store i32 [[X32MIN]], i32* [[PS3]], align 4 +; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4 +; CHECK-NEXT: store i32 [[NWS]], i32* [[PS4]], align 4 +; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10) +; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5 +; CHECK-NEXT: store i32 [[NWSP6]], i32* [[PS5]], align 4 +; CHECK-NEXT: [[I0B1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 0), i8** null, i32 1) +; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6 +; CHECK-NEXT: store i32 [[I0B1]], i32* [[PS6]], align 4 +; CHECK-NEXT: [[I0B256:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 0), i8** null, i32 256) +; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7 +; CHECK-NEXT: store i32 [[I0B256]], i32* [[PS7]], align 4 +; CHECK-NEXT: ret void +; + +; Do not fold the result of conversion that's less than INT32_MIN. + %psminm1 = getelementptr [13 x i8], [13 x i8]* @i32min_m1, i32 0, i32 0 + %minm1 = call i32 @strtol(i8* %psminm1, i8** null, i32 10) + %ps0 = getelementptr i32, i32* %ps, i32 0 + store i32 %minm1, i32* %ps0 + +; Do not fold the result of conversion that's greater than INT32_MAX. + %psmaxp1 = getelementptr [12 x i8], [12 x i8]* @i32max_p1, i32 0, i32 0 + %maxp1 = call i32 @strtol(i8* %psmaxp1, i8** null, i32 10) + %ps1 = getelementptr i32, i32* %ps, i32 1 + store i32 %maxp1, i32* %ps1 + +; Do not fold INT32_MIN in octal. + %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0 + %o32min = call i32 @strtol(i8* %pso32min, i8** null, i32 0) + %ps2 = getelementptr i32, i32* %ps, i32 2 + store i32 %o32min, i32* %ps2 + +; Do not fold INT32_MIN in hex. 
+ %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0 + %x32min = call i32 @strtol(i8* %psx32min, i8** null, i32 0) + %ps3 = getelementptr i32, i32* %ps, i32 3 + store i32 %x32min, i32* %ps3 + +; Do not fold a sequence consisting of just whitespace characters. + %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0 + %nws = call i32 @strtol(i8* %psws, i8** null, i32 10) + %ps4 = getelementptr i32, i32* %ps, i32 4 + store i32 %nws, i32* %ps4 + +; Do not fold an empty sequence. + %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6 + %nwsp6 = call i32 @strtol(i8* %pswsp6, i8** null, i32 10) + %ps5 = getelementptr i32, i32* %ps, i32 5 + store i32 %nwsp6, i32* %ps5 + +; Do not fold the invalid base 1. + %psi0 = getelementptr [3 x i8], [3 x i8]* @i0, i32 0, i32 0 + %i0b1 = call i32 @strtol(i8* %psi0, i8** null, i32 1) + %ps6 = getelementptr i32, i32* %ps, i32 6 + store i32 %i0b1, i32* %ps6 + +; Do not fold the invalid base 256. + %i0b256 = call i32 @strtol(i8* %psi0, i8** null, i32 256) + %ps7 = getelementptr i32, i32* %ps, i32 7 + store i32 %i0b256, i32* %ps7 + + ret void +} + + +@i64min = constant [22 x i8] c" -9223372036854775808\00" +@i64min_m1 = constant [22 x i8] c" -9223372036854775809\00" + +@i64max = constant [21 x i8] c" 9223372036854775807\00" +@i64max_p1 = constant [21 x i8] c" 9223372036854775808\00" + +@ui64max = constant [22 x i8] c" 18446744073709551615\00" +@ui64max_p1 = constant [22 x i8] c" 18446744073709551616\00" + + +; Exercise folding calls to the 64-bit strtoll. + +define void @fold_strtoll(i64* %ps) { +; CHECK-LABEL: @fold_strtoll( +; CHECK-NEXT: store i64 -123, i64* [[PS:%.*]], align 4 +; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1 +; CHECK-NEXT: store i64 234, i64* [[PS1]], align 4 +; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 +; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS2]], align 4 +; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 +; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS3]], align 4 +; CHECK-NEXT: ret void +; +; Fold a valid sequence with leading POSIX whitespace and a minus to -123. + %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 + %im123 = call i64 @strtoll(i8* %pwsm123, i8** null, i32 10) + %ps0 = getelementptr i64, i64* %ps, i32 0 + store i64 %im123, i64* %ps0 + +; Fold a valid sequence with leading POSIX whitespace and a plus to +234. + %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 + %ip234 = call i64 @strtoll(i8* %pwsp234, i8** null, i32 10) + %ps1 = getelementptr i64, i64* %ps, i32 1 + store i64 %ip234, i64* %ps1 + +; Fold INT64_MIN. + %psmin = getelementptr [22 x i8], [22 x i8]* @i64min, i32 0, i32 0 + %min = call i64 @strtoll(i8* %psmin, i8** null, i32 10) + %ps2 = getelementptr i64, i64* %ps, i32 2 + store i64 %min, i64* %ps2 + +; Fold INT64_MAX. + %psmax = getelementptr [21 x i8], [21 x i8]* @i64max, i32 0, i32 0 + %max = call i64 @strtoll(i8* %psmax, i8** null, i32 10) + %ps3 = getelementptr i64, i64* %ps, i32 3 + store i64 %max, i64* %ps3 + + ret void +} + + +; Exercise not folding calls to the 64-bit strtoll. 
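+; None of the calls below can be folded: the converted values overflow the
+; 64-bit result or the subject sequence is empty, and a fold would also have
+; to account for the errno side effect.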
+
+define void @call_strtoll(i64* %ps) {
+; CHECK-LABEL: @call_strtoll(
+; CHECK-NEXT:    [[MINM1:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    store i64 [[MINM1]], i64* [[PS:%.*]], align 4
+; CHECK-NEXT:    [[MAXP1:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([21 x i8], [21 x i8]* @i64max_p1, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1
+; CHECK-NEXT:    store i64 [[MAXP1]], i64* [[PS1]], align 4
+; CHECK-NEXT:    [[NWS:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2
+; CHECK-NEXT:    store i64 [[NWS]], i64* [[PS2]], align 4
+; CHECK-NEXT:    [[NWSP6:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10)
+; CHECK-NEXT:    [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3
+; CHECK-NEXT:    store i64 [[NWSP6]], i64* [[PS3]], align 4
+; CHECK-NEXT:    ret void
+;
+; Do not fold the result of conversion that's less than INT64_MIN.
+  %psminm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0
+  %minm1 = call i64 @strtoll(i8* %psminm1, i8** null, i32 10)
+  %ps0 = getelementptr i64, i64* %ps, i32 0
+  store i64 %minm1, i64* %ps0
+
+; Do not fold the result of conversion that's greater than INT64_MAX.
+  %psmaxp1 = getelementptr [21 x i8], [21 x i8]* @i64max_p1, i32 0, i32 0
+  %maxp1 = call i64 @strtoll(i8* %psmaxp1, i8** null, i32 10)
+  %ps1 = getelementptr i64, i64* %ps, i32 1
+  store i64 %maxp1, i64* %ps1
+
+; Do not fold a sequence consisting of just whitespace characters.
+  %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0
+  %nws = call i64 @strtoll(i8* %psws, i8** null, i32 10)
+  %ps2 = getelementptr i64, i64* %ps, i32 2
+  store i64 %nws, i64* %ps2
+
+; Do not fold an empty sequence.
+  %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6
+  %nwsp6 = call i64 @strtoll(i8* %pswsp6, i8** null, i32 10)
+  %ps3 = getelementptr i64, i64* %ps, i32 3
+  store i64 %nwsp6, i64* %ps3
+
+  ret void
+}
+
+@i_1_2_3_ = constant [9 x i8] c" 1 2\09\3\0a\00";
+
+; Verify that strings of digits that are followed by whitespace are not
+; folded (in locales other than C the whitespace could be interpreted as
+; part of the digits; for example, "123 456" is interpreted as 123456 in
+; the French locale).
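+; The folder therefore requires the digit sequence to run all the way to
+; the terminating nul.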
+
+define void @call_strtol_trailing_space(i32* %ps) {
+; CHECK-LABEL: @call_strtol_trailing_space(
+; CHECK-NEXT:    [[N1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS1:%.*]] = getelementptr i32, i32* [[PS:%.*]], i64 1
+; CHECK-NEXT:    store i32 [[N1]], i32* [[PS1]], align 4
+; CHECK-NEXT:    [[N2:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 2), i8** null, i32 10)
+; CHECK-NEXT:    [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2
+; CHECK-NEXT:    store i32 [[N2]], i32* [[PS2]], align 4
+; CHECK-NEXT:    [[N3:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 4), i8** null, i32 10)
+; CHECK-NEXT:    [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3
+; CHECK-NEXT:    store i32 [[N3]], i32* [[PS3]], align 4
+; CHECK-NEXT:    ret void
+;
+  %p1 = getelementptr [9 x i8], [9 x i8]* @i_1_2_3_, i32 0, i32 0
+  %n1 = call i32 @strtol(i8* %p1, i8** null, i32 10)
+  %ps1 = getelementptr i32, i32* %ps, i32 1
+  store i32 %n1, i32* %ps1
+
+  %p2 = getelementptr [9 x i8], [9 x i8]* @i_1_2_3_, i32 0, i32 2
+  %n2 = call i32 @strtol(i8* %p2, i8** null, i32 10)
+  %ps2 = getelementptr i32, i32* %ps, i32 2
+  store i32 %n2, i32* %ps2
+
+  %p3 = getelementptr [9 x i8], [9 x i8]* @i_1_2_3_, i32 0, i32 4
+  %n3 = call i32 @strtol(i8* %p3, i8** null, i32 10)
+  %ps3 = getelementptr i32, i32* %ps, i32 3
+  store i32 %n3, i32* %ps3
+
+  ret void
+}
Index: llvm/test/Transforms/InstCombine/str-int-5.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/str-int-5.ll
@@ -0,0 +1,331 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; Verify that calls to strtoul and strtoull are interpreted correctly even
+; in corner cases (or not folded).
+;
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare i32 @strtoul(i8*, i8**, i32)
+declare i64 @strtoull(i8*, i8**, i32)
+
+
+; All POSIX whitespace characters.
+@ws = constant [7 x i8] c"\09\0d\0a\0b\0c \00"
+
+; A negative and positive number preceded by all POSIX whitespace.
+@ws_im123 = constant [11 x i8] c"\09\0d\0a\0b\0c -123\00"
+@ws_ip234 = constant [11 x i8] c"\09\0d\0a\0b\0c +234\00"
+
+@i32min = constant [13 x i8] c" -2147483648\00"
+@i32min_m1 = constant [13 x i8] c" -2147483649\00"
+@o32min = constant [15 x i8] c" +020000000000\00"
+@mo32min = constant [15 x i8] c" -020000000000\00"
+@x32min = constant [13 x i8] c" +0x80000000\00"
+@mx32min = constant [13 x i8] c" -0x80000000\00"
+
+@i32max = constant [12 x i8] c" 2147483647\00"
+@i32max_p1 = constant [12 x i8] c" 2147483648\00"
+@mX01 = constant [6 x i8] c" -0X1\00"
+
+@ui32max = constant [12 x i8] c" 4294967295\00"
+@ui32max_p1 = constant [12 x i8] c" 4294967296\00"
+
+@i64min = constant [22 x i8] c" -9223372036854775808\00"
+@i64min_m1 = constant [22 x i8] c" -9223372036854775809\00"
+
+@i64max = constant [21 x i8] c" 9223372036854775807\00"
+@i64max_p1 = constant [21 x i8] c" 9223372036854775808\00"
+
+@ui64max = constant [22 x i8] c" 18446744073709551615\00"
+@x64max = constant [20 x i8] c" 0xffffffffffffffff\00"
+@ui64max_p1 = constant [22 x i8] c" 18446744073709551616\00"
+
+
+; Exercise folding calls to 32-bit strtoul.
+ +define void @fold_strtoul(i32* %ps) { +; CHECK-LABEL: @fold_strtoul( +; CHECK-NEXT: store i32 -123, i32* [[PS:%.*]], align 4 +; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 +; CHECK-NEXT: store i32 234, i32* [[PS1]], align 4 +; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 +; CHECK-NEXT: store i32 2147483647, i32* [[PS2]], align 4 +; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS3]], align 4 +; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS4]], align 4 +; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS5]], align 4 +; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS6]], align 4 +; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS7]], align 4 +; CHECK-NEXT: [[PS8:%.*]] = getelementptr i32, i32* [[PS]], i64 8 +; CHECK-NEXT: store i32 2147483647, i32* [[PS8]], align 4 +; CHECK-NEXT: [[PS9:%.*]] = getelementptr i32, i32* [[PS]], i64 9 +; CHECK-NEXT: store i32 -1, i32* [[PS9]], align 4 +; CHECK-NEXT: [[PS10:%.*]] = getelementptr i32, i32* [[PS]], i64 10 +; CHECK-NEXT: store i32 -2147483648, i32* [[PS10]], align 4 +; CHECK-NEXT: [[PS11:%.*]] = getelementptr i32, i32* [[PS]], i64 11 +; CHECK-NEXT: store i32 -1, i32* [[PS11]], align 4 +; CHECK-NEXT: ret void +; +; Fold a valid sequence with leading POSIX whitespace and a minus to +; (uint32_t)-123. + %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 + %im123 = call i32 @strtoul(i8* %pwsm123, i8** null, i32 10) + %ps0 = getelementptr i32, i32* %ps, i32 0 + store i32 %im123, i32* %ps0 + +; Fold a valid sequence with leading POSIX whitespace and a plus to +234. + %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 + %ip234 = call i32 @strtoul(i8* %pwsp234, i8** null, i32 10) + %ps1 = getelementptr i32, i32* %ps, i32 1 + store i32 %ip234, i32* %ps1 + +; Fold the result of conversion that's equal to INT32_MIN - 1. + %psi32minm1 = getelementptr [13 x i8], [13 x i8]* @i32min_m1, i32 0, i32 0 + %i32min32m1 = call i32 @strtoul(i8* %psi32minm1, i8** null, i32 10) + %ps2 = getelementptr i32, i32* %ps, i32 2 + store i32 %i32min32m1, i32* %ps2 + +; Fold INT32_MIN. + %psi32min = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0 + %i32min = call i32 @strtoul(i8* %psi32min, i8** null, i32 10) + %ps3 = getelementptr i32, i32* %ps, i32 3 + store i32 %i32min, i32* %ps3 + +; Fold INT32_MIN in octal. + %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0 + %o32min = call i32 @strtoul(i8* %pso32min, i8** null, i32 0) + %ps4 = getelementptr i32, i32* %ps, i32 4 + store i32 %o32min, i32* %ps4 + +; Fold -INT32_MIN in octal. + %psmo32min = getelementptr [15 x i8], [15 x i8]* @mo32min, i32 0, i32 0 + %mo32min = call i32 @strtoul(i8* %psmo32min, i8** null, i32 0) + %ps5 = getelementptr i32, i32* %ps, i32 5 + store i32 %mo32min, i32* %ps5 + +; Fold INT32_MIN in hex. + %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0 + %x32min = call i32 @strtoul(i8* %psx32min, i8** null, i32 0) + %ps6 = getelementptr i32, i32* %ps, i32 6 + store i32 %x32min, i32* %ps6 + +; Fold -INT32_MIN in hex. 
+  %psmx32min = getelementptr [13 x i8], [13 x i8]* @mx32min, i32 0, i32 0
+  %mx32min = call i32 @strtoul(i8* %psmx32min, i8** null, i32 0)
+  %ps7 = getelementptr i32, i32* %ps, i32 7
+  store i32 %mx32min, i32* %ps7
+
+; Fold INT32_MAX.
+  %psi32max = getelementptr [12 x i8], [12 x i8]* @i32max, i32 0, i32 0
+  %i32max = call i32 @strtoul(i8* %psi32max, i8** null, i32 10)
+  %ps8 = getelementptr i32, i32* %ps, i32 8
+  store i32 %i32max, i32* %ps8
+
+; Fold -0x01.
+  %psmX01 = getelementptr [6 x i8], [6 x i8]* @mX01, i32 0, i32 0
+  %mX01 = call i32 @strtoul(i8* %psmX01, i8** null, i32 0)
+  %ps9 = getelementptr i32, i32* %ps, i32 9
+  store i32 %mX01, i32* %ps9
+
+; Fold the result of conversion that's equal to INT32_MAX + 1.
+  %psmax32p1 = getelementptr [12 x i8], [12 x i8]* @i32max_p1, i32 0, i32 0
+  %i32max32p1 = call i32 @strtoul(i8* %psmax32p1, i8** null, i32 10)
+  %ps10 = getelementptr i32, i32* %ps, i32 10
+  store i32 %i32max32p1, i32* %ps10
+
+; Fold UINT32_MAX.
+  %psmax = getelementptr [12 x i8], [12 x i8]* @ui32max, i32 0, i32 0
+  %ui32max = call i32 @strtoul(i8* %psmax, i8** null, i32 10)
+  %ps11 = getelementptr i32, i32* %ps, i32 11
+  store i32 %ui32max, i32* %ps11
+
+  ret void
+}
+
+
+; Exercise not folding calls to 32-bit strtoul.
+
+define void @call_strtoul(i32* %ps) {
+; CHECK-LABEL: @call_strtoul(
+; CHECK-NEXT:    [[MINM1:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    store i32 [[MINM1]], i32* [[PS:%.*]], align 4
+; CHECK-NEXT:    [[MAXP1:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([12 x i8], [12 x i8]* @ui32max_p1, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1
+; CHECK-NEXT:    store i32 [[MAXP1]], i32* [[PS1]], align 4
+; CHECK-NEXT:    [[NWS:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2
+; CHECK-NEXT:    store i32 [[NWS]], i32* [[PS2]], align 4
+; CHECK-NEXT:    [[NWSP6:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10)
+; CHECK-NEXT:    [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3
+; CHECK-NEXT:    store i32 [[NWSP6]], i32* [[PS3]], align 4
+; CHECK-NEXT:    ret void
+;
+
+; Do not fold the result of conversion that overflows uint32_t. This
+; could be folded into a constant provided errno were set to ERANGE.
+  %psminm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0
+  %minm1 = call i32 @strtoul(i8* %psminm1, i8** null, i32 10)
+  %ps0 = getelementptr i32, i32* %ps, i32 0
+  store i32 %minm1, i32* %ps0
+
+; Do not fold the result of conversion that's greater than UINT32_MAX
+; (same logic as above applies here).
+  %psui32maxp1 = getelementptr [12 x i8], [12 x i8]* @ui32max_p1, i32 0, i32 0
+  %maxp1 = call i32 @strtoul(i8* %psui32maxp1, i8** null, i32 10)
+  %ps1 = getelementptr i32, i32* %ps, i32 1
+  store i32 %maxp1, i32* %ps1
+
+; Do not fold a sequence consisting of just whitespace characters.
+  %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0
+  %nws = call i32 @strtoul(i8* %psws, i8** null, i32 10)
+  %ps2 = getelementptr i32, i32* %ps, i32 2
+  store i32 %nws, i32* %ps2
+
+; Do not fold an empty sequence. The library call may or may not end up
+; storing EINVAL in errno.
+ %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6 + %nwsp6 = call i32 @strtoul(i8* %pswsp6, i8** null, i32 10) + %ps3 = getelementptr i32, i32* %ps, i32 3 + store i32 %nwsp6, i32* %ps3 + + ret void +} + + +; Exercise folding calls to 64-bit strtoull. + +define void @fold_strtoull(i64* %ps) { +; CHECK-LABEL: @fold_strtoull( +; CHECK-NEXT: store i64 -123, i64* [[PS:%.*]], align 4 +; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1 +; CHECK-NEXT: store i64 234, i64* [[PS1]], align 4 +; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 +; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS2]], align 4 +; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 +; CHECK-NEXT: store i64 -2147483648, i64* [[PS3]], align 4 +; CHECK-NEXT: [[PS4:%.*]] = getelementptr i64, i64* [[PS]], i64 4 +; CHECK-NEXT: store i64 2147483648, i64* [[PS4]], align 4 +; CHECK-NEXT: [[PS5:%.*]] = getelementptr i64, i64* [[PS]], i64 5 +; CHECK-NEXT: store i64 2147483648, i64* [[PS5]], align 4 +; CHECK-NEXT: [[PS6:%.*]] = getelementptr i64, i64* [[PS]], i64 6 +; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS6]], align 4 +; CHECK-NEXT: [[PS7:%.*]] = getelementptr i64, i64* [[PS]], i64 7 +; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS7]], align 4 +; CHECK-NEXT: [[PS8:%.*]] = getelementptr i64, i64* [[PS]], i64 8 +; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS8]], align 4 +; CHECK-NEXT: [[PS9:%.*]] = getelementptr i64, i64* [[PS]], i64 9 +; CHECK-NEXT: store i64 -1, i64* [[PS9]], align 4 +; CHECK-NEXT: [[PS10:%.*]] = getelementptr i64, i64* [[PS]], i64 10 +; CHECK-NEXT: store i64 -1, i64* [[PS10]], align 4 +; CHECK-NEXT: ret void +; +; Fold a valid sequence with leading POSIX whitespace and a minus to +; (uint64_t)-123. + %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 + %im123 = call i64 @strtoull(i8* %pwsm123, i8** null, i32 10) + %ps0 = getelementptr i64, i64* %ps, i32 0 + store i64 %im123, i64* %ps0 + +; Fold a valid sequence with leading POSIX whitespace and a plus to +234. + %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 + %ip234 = call i64 @strtoull(i8* %pwsp234, i8** null, i32 10) + %ps1 = getelementptr i64, i64* %ps, i32 1 + store i64 %ip234, i64* %ps1 + +; Fold the result of conversion that's equal to INT64_MIN - 1. + %psi64minm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0 + %i64min32m1 = call i64 @strtoull(i8* %psi64minm1, i8** null, i32 10) + %ps2 = getelementptr i64, i64* %ps, i32 2 + store i64 %i64min32m1, i64* %ps2 + +; Fold INT32_MIN. + %psi32min = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0 + %i32min = call i64 @strtoull(i8* %psi32min, i8** null, i32 10) + %ps3 = getelementptr i64, i64* %ps, i32 3 + store i64 %i32min, i64* %ps3 + +; Fold INT32_MIN in octal. + %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0 + %o32min = call i64 @strtoull(i8* %pso32min, i8** null, i32 0) + %ps4 = getelementptr i64, i64* %ps, i32 4 + store i64 %o32min, i64* %ps4 + +; Fold INT32_MIN in hex. + %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0 + %x32min = call i64 @strtoull(i8* %psx32min, i8** null, i32 0) + %ps5 = getelementptr i64, i64* %ps, i32 5 + store i64 %x32min, i64* %ps5 + +; Fold INT64_MIN. + %psi64min = getelementptr [22 x i8], [22 x i8]* @i64min, i32 0, i32 0 + %i64min = call i64 @strtoull(i8* %psi64min, i8** null, i32 10) + %ps6 = getelementptr i64, i64* %ps, i32 6 + store i64 %i64min, i64* %ps6 + +; Fold INT64_MAX. 
+  %psi64max = getelementptr [21 x i8], [21 x i8]* @i64max, i32 0, i32 0
+  %i64max = call i64 @strtoull(i8* %psi64max, i8** null, i32 10)
+  %ps7 = getelementptr i64, i64* %ps, i32 7
+  store i64 %i64max, i64* %ps7
+
+; Fold the result of conversion that's equal to INT64_MAX + 1 to INT64_MIN.
+  %psmax32p1 = getelementptr [21 x i8], [21 x i8]* @i64max_p1, i32 0, i32 0
+  %i64max32p1 = call i64 @strtoull(i8* %psmax32p1, i8** null, i32 10)
+  %ps8 = getelementptr i64, i64* %ps, i32 8
+  store i64 %i64max32p1, i64* %ps8
+
+; Fold UINT64_MAX.
+  %psmax = getelementptr [22 x i8], [22 x i8]* @ui64max, i32 0, i32 0
+  %ui64max = call i64 @strtoull(i8* %psmax, i8** null, i32 10)
+  %ps9 = getelementptr i64, i64* %ps, i32 9
+  store i64 %ui64max, i64* %ps9
+
+; Fold UINT64_MAX in hex.
+  %psxmax = getelementptr [20 x i8], [20 x i8]* @x64max, i32 0, i32 0
+  %x64max = call i64 @strtoull(i8* %psxmax, i8** null, i32 0)
+  %ps10 = getelementptr i64, i64* %ps, i32 10
+  store i64 %x64max, i64* %ps10
+
+  ret void
+}
+
+
+; Exercise not folding calls to 64-bit strtoull.
+
+define void @call_strtoull(i64* %ps) {
+; CHECK-LABEL: @call_strtoull(
+; CHECK-NEXT:    [[MAXP1:%.*]] = call i64 @strtoull(i8* nocapture getelementptr inbounds ([22 x i8], [22 x i8]* @ui64max_p1, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS1:%.*]] = getelementptr i64, i64* [[PS:%.*]], i64 1
+; CHECK-NEXT:    store i64 [[MAXP1]], i64* [[PS1]], align 4
+; CHECK-NEXT:    [[NWS:%.*]] = call i64 @strtoull(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10)
+; CHECK-NEXT:    [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2
+; CHECK-NEXT:    store i64 [[NWS]], i64* [[PS2]], align 4
+; CHECK-NEXT:    [[NWSP6:%.*]] = call i64 @strtoull(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10)
+; CHECK-NEXT:    [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3
+; CHECK-NEXT:    store i64 [[NWSP6]], i64* [[PS3]], align 4
+; CHECK-NEXT:    ret void
+;
+; Do not fold the result of conversion that overflows uint64_t. This
+; could be folded into a constant provided errno were set to ERANGE.
+  %psui64maxp1 = getelementptr [22 x i8], [22 x i8]* @ui64max_p1, i32 0, i32 0
+  %maxp1 = call i64 @strtoull(i8* %psui64maxp1, i8** null, i32 10)
+  %ps1 = getelementptr i64, i64* %ps, i32 1
+  store i64 %maxp1, i64* %ps1
+
+; Do not fold a sequence consisting of just whitespace characters.
+  %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0
+  %nws = call i64 @strtoull(i8* %psws, i8** null, i32 10)
+  %ps2 = getelementptr i64, i64* %ps, i32 2
+  store i64 %nws, i64* %ps2
+
+; Do not fold an empty sequence. The library call may or may not end up
+; storing EINVAL in errno.
+  %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6
+  %nwsp6 = call i64 @strtoull(i8* %pswsp6, i8** null, i32 10)
+  %ps3 = getelementptr i64, i64* %ps, i32 3
+  store i64 %nwsp6, i64* %ps3
+
+  ret void
+}
Index: llvm/test/Transforms/InstCombine/str-int.ll
===================================================================
--- llvm/test/Transforms/InstCombine/str-int.ll
+++ llvm/test/Transforms/InstCombine/str-int.ll
@@ -70,7 +70,7 @@
 
 define i32 @atoi_not_const_str(i8* %s) #0 {
 ; CHECK-LABEL: @atoi_not_const_str(
-; CHECK-NEXT:    [[CALL:%.*]] = call i32 @atoi(i8* [[S:%.*]])
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @atoi(i8* nocapture [[S:%.*]])
 ; CHECK-NEXT:    ret i32 [[CALL]]
 ;
   %call = call i32 @atoi(i8* %s) #4
@@ -116,7 +116,7 @@
 
 define i32 @atoll_test() #0 {
 ; CHECK-LABEL: @atoll_test(
-; CHECK-NEXT:    [[CALL:%.*]] = call i32 @atoll(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.5, i64 0, i64 0))
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @atoll(i8* nocapture getelementptr inbounds ([11 x i8], [11 x i8]* @.str.5, i64 0, i64 0))
 ; CHECK-NEXT:    ret i32 [[CALL]]
 ;
   %call = call i32 @atoll(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.5, i32 0, i32 0)) #3
Index: llvm/test/Transforms/InstCombine/strcall-no-nul.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strcall-no-nul.ll
+++ llvm/test/Transforms/InstCombine/strcall-no-nul.ll
@@ -31,6 +31,14 @@
 declare i64 @strspn(i8*, i8*)
 declare i64 @strcspn(i8*, i8*)
 
+declare i32 @atoi(i8*)
+declare i64 @atol(i8*)
+declare i64 @atoll(i8*)
+declare i64 @strtol(i8*, i8**, i32)
+declare i64 @strtoll(i8*, i8**, i32)
+declare i64 @strtoul(i8*, i8**, i32)
+declare i64 @strtoull(i8*, i8**, i32)
+
 declare i32 @sprintf(i8*, i8*, ...)
 declare i32 @snprintf(i8*, i64, i8*, ...)
 
@@ -267,6 +275,74 @@
 }
 
 
+; TODO: Fold the 32-bit atoi(a5 + 5) to zero.
+; Verify that processing the invalid call doesn't run into trouble.
+
+define i32 @fold_atoi_past_end() {
+; CHECK-LABEL: @fold_atoi_past_end(
+; CHECK-NEXT:    [[I:%.*]] = call i32 @atoi(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0))
+; CHECK-NEXT:    ret i32 [[I]]
+;
+  %p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
+  %i = call i32 @atoi(i8* %p5)
+  ret i32 %i
+}
+
+; TODO: Likewise, fold the 64-bit atol(a5 + 5) to zero, and similarly
+; for atoll, strtol, and the rest.
+; Verify that processing the invalid call doesn't run into trouble.
+ +define void @fold_atol_strtol_past_end(i64* %ps) { +; CHECK-LABEL: @fold_atol_strtol_past_end( +; CHECK-NEXT: [[I0:%.*]] = call i64 @atol(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0)) +; CHECK-NEXT: store i64 [[I0]], i64* [[PS:%.*]], align 4 +; CHECK-NEXT: [[I1:%.*]] = call i64 @atoll(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0)) +; CHECK-NEXT: [[P1:%.*]] = getelementptr i64, i64* [[PS]], i64 1 +; CHECK-NEXT: store i64 [[I1]], i64* [[P1]], align 4 +; CHECK-NEXT: [[I2:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[P2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 +; CHECK-NEXT: store i64 [[I2]], i64* [[P2]], align 4 +; CHECK-NEXT: [[I3:%.*]] = call i64 @strtoul(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0), i8** null, i32 8) +; CHECK-NEXT: [[P3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 +; CHECK-NEXT: store i64 [[I3]], i64* [[P3]], align 4 +; CHECK-NEXT: [[I4:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[P4:%.*]] = getelementptr i64, i64* [[PS]], i64 4 +; CHECK-NEXT: store i64 [[I4]], i64* [[P4]], align 4 +; CHECK-NEXT: [[I5:%.*]] = call i64 @strtoul(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0), i8** null, i32 16) +; CHECK-NEXT: [[P5:%.*]] = getelementptr i64, i64* [[PS]], i64 5 +; CHECK-NEXT: store i64 [[I5]], i64* [[P5]], align 4 +; CHECK-NEXT: ret void +; + %pa5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5 + + %i0 = call i64 @atol(i8* %pa5) + %p0 = getelementptr i64, i64* %ps, i32 0 + store i64 %i0, i64* %p0 + + %i1 = call i64 @atoll(i8* %pa5) + %p1 = getelementptr i64, i64* %ps, i32 1 + store i64 %i1, i64* %p1 + + %i2 = call i64 @strtol(i8* %pa5, i8** null, i32 0) + %p2 = getelementptr i64, i64* %ps, i32 2 + store i64 %i2, i64* %p2 + + %i3 = call i64 @strtoul(i8* %pa5, i8** null, i32 8) + %p3 = getelementptr i64, i64* %ps, i32 3 + store i64 %i3, i64* %p3 + + %i4 = call i64 @strtoll(i8* %pa5, i8** null, i32 10) + %p4 = getelementptr i64, i64* %ps, i32 4 + store i64 %i4, i64* %p4 + + %i5 = call i64 @strtoul(i8* %pa5, i8** null, i32 16) + %p5 = getelementptr i64, i64* %ps, i32 5 + store i64 %i5, i64* %p5 + + ret void +} + + ; Fold sprintf(dst, a5 + 5) to zero, and also ; TODO: fold sprintf(dst, "%s", a5 + 5) to zero.