diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2520,6 +2520,23 @@
   void visitAShr(BinaryOperator &I) { handleShift(I); }
   void visitLShr(BinaryOperator &I) { handleShift(I); }
 
+  void handleFunnelShift(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    // If any of the S2 bits are poisoned, the whole thing is poisoned.
+    // Otherwise perform the same shift on S0 and S1.
+    Value *S0 = getShadow(&I, 0);
+    Value *S1 = getShadow(&I, 1);
+    Value *S2 = getShadow(&I, 2);
+    Value *S2Conv =
+        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
+    Value *V2 = I.getOperand(2);
+    Function *Intrin = Intrinsic::getDeclaration(
+        I.getModule(), I.getIntrinsicID(), S2Conv->getType());
+    Value *Shift = IRB.CreateCall(Intrin, {S0, S1, V2});
+    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
+    setOriginForNaryOp(I);
+  }
+
   /// Instrument llvm.memmove
   ///
   /// At this point we don't know if llvm.memmove will be inlined or not.
@@ -3512,6 +3529,11 @@
       handleBinarySdIntrinsic(I);
       break;
 
+    case Intrinsic::fshl:
+    case Intrinsic::fshr:
+      handleFunnelShift(I);
+      break;
+
     case Intrinsic::is_constant:
       // The result of llvm.is.constant() is always defined.
       setShadow(&I, getCleanShadow(&I));
diff --git a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
@@ -19,18 +19,26 @@
 ; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
 ; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <8 x i64>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[I64:%.*]] = call i64 @llvm.fshl.i64(i64 [[A64:%.*]], i64 [[B64:%.*]], i64 [[C64:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <2 x i64> [[TMP4]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <2 x i64> [[_MSPROP2]], [[TMP6]]
-; CHECK-NEXT: [[V2I64:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[A128:%.*]], <2 x i64> [[B128:%.*]], <2 x i64> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i64> [[TMP7]], [[TMP8]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <4 x i64> [[_MSPROP4]], [[TMP9]]
-; CHECK-NEXT: [[V4I64:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[A256:%.*]], <4 x i64> [[B256:%.*]], <4 x i64> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <8 x i64> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <8 x i64> [[_MSPROP6]], [[TMP12]]
-; CHECK-NEXT: [[V8I64:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[A512:%.*]], <8 x i64> [[B512:%.*]], <8 x i64> [[C512:%.*]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i64
+; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP1]], i64 [[TMP2]], i64 [[C64:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or i64 [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[I64:%.*]] = call i64 @llvm.fshl.i64(i64 [[A64:%.*]], i64 [[B64:%.*]], i64 [[C64]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <2 x i64> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <2 x i1> [[TMP17]] to <2 x i64>
+; CHECK-NEXT: [[TMP19:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[TMP4]], <2 x i64> [[TMP5]], <2 x i64> [[C128:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <2 x i64> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V2I64:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[A128:%.*]], <2 x i64> [[B128:%.*]], <2 x i64> [[C128]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i64> [[TMP9]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i64>
+; CHECK-NEXT: [[TMP23:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <4 x i64> [[C256:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <4 x i64> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V4I64:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[A256:%.*]], <4 x i64> [[B256:%.*]], <4 x i64> [[C256]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <8 x i64> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = sext <8 x i1> [[TMP25]] to <8 x i64>
+; CHECK-NEXT: [[TMP27:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP10]], <8 x i64> [[TMP11]], <8 x i64> [[C512:%.*]])
+; CHECK-NEXT: [[TMP28:%.*]] = or <8 x i64> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[V8I64:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[A512:%.*]], <8 x i64> [[B512:%.*]], <8 x i64> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I64 = call i64 @llvm.fshl.i64(i64 %a64, i64 %b64, i64 %c64)
@@ -55,18 +63,26 @@
 ; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
 ; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <16 x i32>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[I32:%.*]] = call i32 @llvm.fshl.i32(i32 [[A32:%.*]], i32 [[B32:%.*]], i32 [[C32:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP4]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP6]]
-; CHECK-NEXT: [[V2I32:%.*]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[A128:%.*]], <4 x i32> [[B128:%.*]], <4 x i32> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[TMP7]], [[TMP8]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <8 x i32> [[_MSPROP4]], [[TMP9]]
-; CHECK-NEXT: [[V4I32:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[A256:%.*]], <8 x i32> [[B256:%.*]], <8 x i32> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <16 x i32> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <16 x i32> [[_MSPROP6]], [[TMP12]]
-; CHECK-NEXT: [[V8I32:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[A512:%.*]], <16 x i32> [[B512:%.*]], <16 x i32> [[C512:%.*]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP3]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i32
+; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP1]], i32 [[TMP2]], i32 [[C32:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or i32 [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[I32:%.*]] = call i32 @llvm.fshl.i32(i32 [[A32:%.*]], i32 [[B32:%.*]], i32 [[C32]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <4 x i32> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <4 x i1> [[TMP17]] to <4 x i32>
+; CHECK-NEXT: [[TMP19:%.*]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> [[C128:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <4 x i32> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V2I32:%.*]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[A128:%.*]], <4 x i32> [[B128:%.*]], <4 x i32> [[C128]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP9]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32>
+; CHECK-NEXT: [[TMP23:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[TMP7]], <8 x i32> [[TMP8]], <8 x i32> [[C256:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <8 x i32> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V4I32:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[A256:%.*]], <8 x i32> [[B256:%.*]], <8 x i32> [[C256]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i32> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = sext <16 x i1> [[TMP25]] to <16 x i32>
+; CHECK-NEXT: [[TMP27:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP10]], <16 x i32> [[TMP11]], <16 x i32> [[C512:%.*]])
+; CHECK-NEXT: [[TMP28:%.*]] = or <16 x i32> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[V8I32:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[A512:%.*]], <16 x i32> [[B512:%.*]], <16 x i32> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I32 = call i32 @llvm.fshl.i32(i32 %a32, i32 %b32, i32 %c32)
@@ -91,18 +107,26 @@
 ; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
 ; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <32 x i16>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[I16:%.*]] = call i16 @llvm.fshl.i16(i16 [[A16:%.*]], i16 [[B16:%.*]], i16 [[C16:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i16> [[TMP4]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i16> [[_MSPROP2]], [[TMP6]]
-; CHECK-NEXT: [[V8I16:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[A128:%.*]], <8 x i16> [[B128:%.*]], <8 x i16> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <16 x i16> [[TMP7]], [[TMP8]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <16 x i16> [[_MSPROP4]], [[TMP9]]
-; CHECK-NEXT: [[V16I16:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[A256:%.*]], <16 x i16> [[B256:%.*]], <16 x i16> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <32 x i16> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <32 x i16> [[_MSPROP6]], [[TMP12]]
-; CHECK-NEXT: [[V32I16:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[A512:%.*]], <32 x i16> [[B512:%.*]], <32 x i16> [[C512:%.*]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i16 [[TMP3]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i16
+; CHECK-NEXT: [[TMP15:%.*]] = call i16 @llvm.fshl.i16(i16 [[TMP1]], i16 [[TMP2]], i16 [[C16:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or i16 [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[I16:%.*]] = call i16 @llvm.fshl.i16(i16 [[A16:%.*]], i16 [[B16:%.*]], i16 [[C16]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <8 x i16> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <8 x i1> [[TMP17]] to <8 x i16>
+; CHECK-NEXT: [[TMP19:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[C128:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <8 x i16> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V8I16:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[A128:%.*]], <8 x i16> [[B128:%.*]], <8 x i16> [[C128]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i16> [[TMP9]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <16 x i1> [[TMP21]] to <16 x i16>
+; CHECK-NEXT: [[TMP23:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[TMP7]], <16 x i16> [[TMP8]], <16 x i16> [[C256:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <16 x i16> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V16I16:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[A256:%.*]], <16 x i16> [[B256:%.*]], <16 x i16> [[C256]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i16> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = sext <32 x i1> [[TMP25]] to <32 x i16>
+; CHECK-NEXT: [[TMP27:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[TMP10]], <32 x i16> [[TMP11]], <32 x i16> [[C512:%.*]])
+; CHECK-NEXT: [[TMP28:%.*]] = or <32 x i16> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[V32I16:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[A512:%.*]], <32 x i16> [[B512:%.*]], <32 x i16> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I16 = call i16 @llvm.fshl.i16(i16 %a16, i16 %b16, i16 %c16)
@@ -127,18 +151,26 @@
 ; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
 ; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <64 x i8>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[I8:%.*]] = call i8 @llvm.fshl.i8(i8 [[A8:%.*]], i8 [[B8:%.*]], i8 [[C8:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <16 x i8> [[_MSPROP2]], [[TMP6]]
-; CHECK-NEXT: [[V16I8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[A128:%.*]], <16 x i8> [[B128:%.*]], <16 x i8> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <32 x i8> [[TMP7]], [[TMP8]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <32 x i8> [[_MSPROP4]], [[TMP9]]
-; CHECK-NEXT: [[V32I8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[A256:%.*]], <32 x i8> [[B256:%.*]], <32 x i8> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <64 x i8> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <64 x i8> [[_MSPROP6]], [[TMP12]]
-; CHECK-NEXT: [[V64I8:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[A512:%.*]], <64 x i8> [[B512:%.*]], <64 x i8> [[C512:%.*]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP3]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i8
+; CHECK-NEXT: [[TMP15:%.*]] = call i8 @llvm.fshl.i8(i8 [[TMP1]], i8 [[TMP2]], i8 [[C8:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or i8 [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[I8:%.*]] = call i8 @llvm.fshl.i8(i8 [[A8:%.*]], i8 [[B8:%.*]], i8 [[C8]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <16 x i8> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <16 x i1> [[TMP17]] to <16 x i8>
+; CHECK-NEXT: [[TMP19:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <16 x i8> [[C128:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <16 x i8> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V16I8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[A128:%.*]], <16 x i8> [[B128:%.*]], <16 x i8> [[C128]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[TMP9]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <32 x i1> [[TMP21]] to <32 x i8>
+; CHECK-NEXT: [[TMP23:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP7]], <32 x i8> [[TMP8]], <32 x i8> [[C256:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <32 x i8> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V32I8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[A256:%.*]], <32 x i8> [[B256:%.*]], <32 x i8> [[C256]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <64 x i8> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = sext <64 x i1> [[TMP25]] to <64 x i8>
+; CHECK-NEXT: [[TMP27:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[TMP10]], <64 x i8> [[TMP11]], <64 x i8> [[C512:%.*]])
+; CHECK-NEXT: [[TMP28:%.*]] = or <64 x i8> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[V64I8:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[A512:%.*]], <64 x i8> [[B512:%.*]], <64 x i8> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I8 = call i8 @llvm.fshl.i8(i8 %a8, i8 %b8, i8 %c8)
@@ -159,18 +191,26 @@
 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
 ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP1]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], [[TMP2]]
-; CHECK-NEXT: [[I64:%.*]] = call i64 @llvm.fshl.i64(i64 [[A64:%.*]], i64 [[A64]], i64 [[C64:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <2 x i64> [[TMP3]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <2 x i64> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[V2I64:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[A128:%.*]], <2 x i64> [[A128]], <2 x i64> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i64> [[TMP5]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <4 x i64> [[_MSPROP4]], [[TMP6]]
-; CHECK-NEXT: [[V4I64:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[A256:%.*]], <4 x i64> [[A256]], <4 x i64> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <8 x i64> [[TMP7]], [[TMP7]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <8 x i64> [[_MSPROP6]], [[TMP8]]
-; CHECK-NEXT: [[V8I64:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[A512:%.*]], <8 x i64> [[A512]], <8 x i64> [[C512:%.*]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP1]], i64 [[TMP1]], i64 [[C64:%.*]])
+; CHECK-NEXT: [[TMP12:%.*]] = or i64 [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[I64:%.*]] = call i64 @llvm.fshl.i64(i64 [[A64:%.*]], i64 [[A64]], i64 [[C64]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <2 x i64> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = sext <2 x i1> [[TMP13]] to <2 x i64>
+; CHECK-NEXT: [[TMP15:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[TMP3]], <2 x i64> [[TMP3]], <2 x i64> [[C128:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or <2 x i64> [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[V2I64:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[A128:%.*]], <2 x i64> [[A128]], <2 x i64> [[C128]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <4 x i64> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <4 x i1> [[TMP17]] to <4 x i64>
+; CHECK-NEXT: [[TMP19:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP5]], <4 x i64> [[TMP5]], <4 x i64> [[C256:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <4 x i64> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V4I64:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[A256:%.*]], <4 x i64> [[A256]], <4 x i64> [[C256]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i64> [[TMP8]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i64>
+; CHECK-NEXT: [[TMP23:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP7]], <8 x i64> [[TMP7]], <8 x i64> [[C512:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <8 x i64> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V8I64:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[A512:%.*]], <8 x i64> [[A512]], <8 x i64> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I64 = call i64 @llvm.fshl.i64(i64 %a64, i64 %a64, i64 %c64)
@@ -191,18 +231,26 @@
 ; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
 ; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP1]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], [[TMP2]]
-; CHECK-NEXT: [[I32:%.*]] = call i32 @llvm.fshl.i32(i32 [[A32:%.*]], i32 [[A32]], i32 [[C32:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP3]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[V2I32:%.*]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[A128:%.*]], <4 x i32> [[A128]], <4 x i32> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[TMP5]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <8 x i32> [[_MSPROP4]], [[TMP6]]
-; CHECK-NEXT: [[V4I32:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[A256:%.*]], <8 x i32> [[A256]], <8 x i32> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <16 x i32> [[TMP7]], [[TMP7]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <16 x i32> [[_MSPROP6]], [[TMP8]]
-; CHECK-NEXT: [[V8I32:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[A512:%.*]], <16 x i32> [[A512]], <16 x i32> [[C512:%.*]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i32
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP1]], i32 [[TMP1]], i32 [[C32:%.*]])
+; CHECK-NEXT: [[TMP12:%.*]] = or i32 [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[I32:%.*]] = call i32 @llvm.fshl.i32(i32 [[A32:%.*]], i32 [[A32]], i32 [[C32]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <4 x i32> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = sext <4 x i1> [[TMP13]] to <4 x i32>
+; CHECK-NEXT: [[TMP15:%.*]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[TMP3]], <4 x i32> [[TMP3]], <4 x i32> [[C128:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or <4 x i32> [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[V2I32:%.*]] = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> [[A128:%.*]], <4 x i32> [[A128]], <4 x i32> [[C128]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <8 x i32> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <8 x i1> [[TMP17]] to <8 x i32>
+; CHECK-NEXT: [[TMP19:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[TMP5]], <8 x i32> [[TMP5]], <8 x i32> [[C256:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <8 x i32> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V4I32:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[A256:%.*]], <8 x i32> [[A256]], <8 x i32> [[C256]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i32> [[TMP8]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <16 x i1> [[TMP21]] to <16 x i32>
+; CHECK-NEXT: [[TMP23:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP7]], <16 x i32> [[TMP7]], <16 x i32> [[C512:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <16 x i32> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V8I32:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[A512:%.*]], <16 x i32> [[A512]], <16 x i32> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I32 = call i32 @llvm.fshl.i32(i32 %a32, i32 %a32, i32 %c32)
@@ -223,18 +271,26 @@
 ; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
 ; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP1]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], [[TMP2]]
-; CHECK-NEXT: [[I16:%.*]] = call i16 @llvm.fshl.i16(i16 [[A16:%.*]], i16 [[A16]], i16 [[C16:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i16> [[TMP3]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i16> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[V8I16:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[A128:%.*]], <8 x i16> [[A128]], <8 x i16> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <16 x i16> [[TMP5]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <16 x i16> [[_MSPROP4]], [[TMP6]]
-; CHECK-NEXT: [[V16I16:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[A256:%.*]], <16 x i16> [[A256]], <16 x i16> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <32 x i16> [[TMP7]], [[TMP7]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <32 x i16> [[_MSPROP6]], [[TMP8]]
-; CHECK-NEXT: [[V32I16:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[A512:%.*]], <32 x i16> [[A512]], <32 x i16> [[C512:%.*]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i16 [[TMP2]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i16
+; CHECK-NEXT: [[TMP11:%.*]] = call i16 @llvm.fshl.i16(i16 [[TMP1]], i16 [[TMP1]], i16 [[C16:%.*]])
+; CHECK-NEXT: [[TMP12:%.*]] = or i16 [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[I16:%.*]] = call i16 @llvm.fshl.i16(i16 [[A16:%.*]], i16 [[A16]], i16 [[C16]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <8 x i16> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = sext <8 x i1> [[TMP13]] to <8 x i16>
+; CHECK-NEXT: [[TMP15:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[TMP3]], <8 x i16> [[TMP3]], <8 x i16> [[C128:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or <8 x i16> [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[V8I16:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[A128:%.*]], <8 x i16> [[A128]], <8 x i16> [[C128]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <16 x i16> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <16 x i1> [[TMP17]] to <16 x i16>
+; CHECK-NEXT: [[TMP19:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[TMP5]], <16 x i16> [[TMP5]], <16 x i16> [[C256:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <16 x i16> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V16I16:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[A256:%.*]], <16 x i16> [[A256]], <16 x i16> [[C256]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i16> [[TMP8]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <32 x i1> [[TMP21]] to <32 x i16>
+; CHECK-NEXT: [[TMP23:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[TMP7]], <32 x i16> [[TMP7]], <32 x i16> [[C512:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <32 x i16> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V32I16:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[A512:%.*]], <32 x i16> [[A512]], <32 x i16> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I16 = call i16 @llvm.fshl.i16(i16 %a16, i16 %a16, i16 %c16)
@@ -255,18 +311,26 @@
 ; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
 ; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP1]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], [[TMP2]]
-; CHECK-NEXT: [[I8:%.*]] = call i8 @llvm.fshl.i8(i8 [[A8:%.*]], i8 [[A8]], i8 [[C8:%.*]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i8> [[TMP3]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <16 x i8> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[V16I8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[A128:%.*]], <16 x i8> [[A128]], <16 x i8> [[C128:%.*]])
-; CHECK-NEXT: [[_MSPROP4:%.*]] = or <32 x i8> [[TMP5]], [[TMP5]]
-; CHECK-NEXT: [[_MSPROP5:%.*]] = or <32 x i8> [[_MSPROP4]], [[TMP6]]
-; CHECK-NEXT: [[V32I8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[A256:%.*]], <32 x i8> [[A256]], <32 x i8> [[C256:%.*]])
-; CHECK-NEXT: [[_MSPROP6:%.*]] = or <64 x i8> [[TMP7]], [[TMP7]]
-; CHECK-NEXT: [[_MSPROP7:%.*]] = or <64 x i8> [[_MSPROP6]], [[TMP8]]
-; CHECK-NEXT: [[V64I8:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[A512:%.*]], <64 x i8> [[A512]], <64 x i8> [[C512:%.*]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP2]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i8
+; CHECK-NEXT: [[TMP11:%.*]] = call i8 @llvm.fshl.i8(i8 [[TMP1]], i8 [[TMP1]], i8 [[C8:%.*]])
+; CHECK-NEXT: [[TMP12:%.*]] = or i8 [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[I8:%.*]] = call i8 @llvm.fshl.i8(i8 [[A8:%.*]], i8 [[A8]], i8 [[C8]])
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP3]], <16 x i8> [[TMP3]], <16 x i8> [[C128:%.*]])
+; CHECK-NEXT: [[TMP16:%.*]] = or <16 x i8> [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[V16I8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[A128:%.*]], <16 x i8> [[A128]], <16 x i8> [[C128]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <32 x i8> [[TMP6]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = sext <32 x i1> [[TMP17]] to <32 x i8>
+; CHECK-NEXT: [[TMP19:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP5]], <32 x i8> [[C256:%.*]])
+; CHECK-NEXT: [[TMP20:%.*]] = or <32 x i8> [[TMP19]], [[TMP18]]
+; CHECK-NEXT: [[V32I8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[A256:%.*]], <32 x i8> [[A256]], <32 x i8> [[C256]])
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[TMP8]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <64 x i1> [[TMP21]] to <64 x i8>
+; CHECK-NEXT: [[TMP23:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[TMP7]], <64 x i8> [[TMP7]], <64 x i8> [[C512:%.*]])
+; CHECK-NEXT: [[TMP24:%.*]] = or <64 x i8> [[TMP23]], [[TMP22]]
+; CHECK-NEXT: [[V64I8:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[A512:%.*]], <64 x i8> [[A512]], <64 x i8> [[C512]])
 ; CHECK-NEXT: ret void
 ;
   %I8 = call i8 @llvm.fshl.i8(i8 %a8, i8 %a8, i8 %c8)
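
Note (illustrative, not part of the patch): MSan tracks one shadow bit per data bit, and a funnel shift only moves bits between its first two operands, so the operand shadows can be funnel-shifted by the concrete shift amount. Separately, any poison in the shift amount must poison the entire result, since every result bit's position depends on it. A minimal hand-written LLVM IR sketch of the emitted pattern for one i32 case (value and function names are illustrative; the real instrumentation loads the shadows %sa/%sb/%sc from the __msan_param_tls block, as in the CHECK lines above, rather than taking them as parameters):

define i32 @fshl_sketch(i32 %a, i32 %b, i32 %c, i32 %sa, i32 %sb, i32 %sc) {
  ; All-ones if any shift-amount shadow bit is set, all-zeros otherwise.
  %c_dirty = icmp ne i32 %sc, 0
  %c_mask = sext i1 %c_dirty to i32
  ; Shadow bits move exactly like the data bits they track, so apply the
  ; same funnel shift to the shadows, using the concrete amount %c.
  %s_shift = call i32 @llvm.fshl.i32(i32 %sa, i32 %sb, i32 %c)
  ; Result shadow: the shifted operand shadows, forced to all-ones when the
  ; shift amount itself carries poison.
  %s_res = or i32 %s_shift, %c_mask
  %res = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
  ; %s_res would then be stored as the shadow of %res.
  ret i32 %res
}

declare i32 @llvm.fshl.i32(i32, i32, i32)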