Index: llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
@@ -671,7 +671,7 @@
     NumFastpaths++;
     return true;
   }
-  if (Alignment == 0 || Alignment >= 8 || (Alignment % TypeSizeBytes) == 0)
+  if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
     OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
   else
     OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
@@ -832,7 +832,7 @@
 
   // getMemoryAccessFuncIndex has already ruled out a size larger than 16
   // and thus larger than a cache line for platforms this tool targets
   // (and our shadow memory setup assumes 64-byte cache lines).
-  assert(TypeSize <= 64);
+  assert(TypeSize <= 128);
   if (!(TypeSize == 8 || (Alignment % (TypeSize / 8)) == 0)) {
     if (ClAssumeIntraCacheLine)
Index: llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
===================================================================
--- llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
+++ llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
@@ -90,6 +90,27 @@
 ; CHECK-NEXT: ret i64 %tmp1
 }
 
+define i128 @aligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 16
+  ret i128 %tmp1
+; CHECK: %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT: %1 = and i64 %0, 17592186044415
+; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT: %3 = lshr i64 %2, 6
+; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: %5 = load i8, i8* %4
+; CHECK-NEXT: %6 = and i8 %5, -127
+; CHECK-NEXT: %7 = icmp ne i8 %6, -127
+; CHECK-NEXT: br i1 %7, label %8, label %11
+; CHECK: %9 = or i8 %5, -127
+; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: br label %11
+; CHECK: %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT: ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Not guaranteed to be intra-cache-line, but our defaults are to
 ; assume they are:
@@ -157,6 +178,27 @@
 ; CHECK-NEXT: ret i64 %tmp1
 }
 
+define i128 @unaligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 8
+  ret i128 %tmp1
+; CHECK: %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT: %1 = and i64 %0, 17592186044415
+; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT: %3 = lshr i64 %2, 6
+; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: %5 = load i8, i8* %4
+; CHECK-NEXT: %6 = and i8 %5, -127
+; CHECK-NEXT: %7 = icmp ne i8 %6, -127
+; CHECK-NEXT: br i1 %7, label %8, label %11
+; CHECK: %9 = or i8 %5, -127
+; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: br label %11
+; CHECK: %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT: ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Ensure that esan converts intrinsics to calls:
 
Index: llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
===================================================================
--- llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
+++ llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
@@ -91,6 +91,27 @@
 ; CHECK-NEXT: ret i64 %tmp1
 }
 
+define i128 @aligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 16
+  ret i128 %tmp1
+; CHECK: %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT: %1 = and i64 %0, 17592186044415
+; CHECK-NEXT: %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT: %3 = lshr i64 %2, 6
+; CHECK-NEXT: %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: %5 = load i8, i8* %4
+; CHECK-NEXT: %6 = and i8 %5, -127
+; CHECK-NEXT: %7 = icmp ne i8 %6, -127
+; CHECK-NEXT: br i1 %7, label %8, label %11
+; CHECK: %9 = or i8 %5, -127
+; CHECK-NEXT: %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: br label %11
+; CHECK: %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT: ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Not guaranteed to be intra-cache-line
 
@@ -123,3 +144,13 @@
 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 4
 ; CHECK-NEXT: ret i64 %tmp1
 }
+
+define i128 @unaligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 8
+  ret i128 %tmp1
+; CHECK: %0 = bitcast i128* %a to i8*
+; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
+; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT: ret i128 %tmp1
+}