diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -28,6 +28,7 @@
 namespace llvm {

 class AllocaInst;
+class AAResults;
 class Argument;
 class CallInst;
 class ConstantPointerNull;
@@ -152,6 +153,8 @@
   /// though they can't be evaluated. Otherwise, null is always considered to
   /// point to a 0 byte region of memory.
   bool NullIsUnknownSize = false;
+  /// If set, used for more accurate evaluation.
+  AAResults *AA = nullptr;
 };

 /// Compute the size of the object pointed by Ptr. Returns true and the
@@ -171,8 +174,9 @@
 /// argument of the call to objectsize.
 Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
                            const TargetLibraryInfo *TLI, bool MustSucceed);
-
-
+Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
+                           const TargetLibraryInfo *TLI, AAResults *AA,
+                           bool MustSucceed);

 using SizeOffsetType = std::pair<APInt, APInt>;
@@ -229,6 +233,10 @@
   SizeOffsetType visitInstruction(Instruction &I);

 private:
+  SizeOffsetType findLoadSizeOffset(
+      LoadInst &LoadFrom, BasicBlock &BB, BasicBlock::iterator From,
+      SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
+      unsigned &ScannedInstCount);
   SizeOffsetType combineSizeOffset(SizeOffsetType LHS, SizeOffsetType RHS);
   SizeOffsetType computeImpl(Value *V);
   bool CheckedZextOrTrunc(APInt &I);
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -17,6 +17,7 @@
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/TargetFolder.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/Utils/Local.h"
@@ -153,7 +154,6 @@
     {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
     {LibFunc_dunder_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
     {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1, MallocFamily::KmpcAllocShared}},
-    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
 };
 // clang-format on
@@ -569,11 +569,21 @@
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI,
                                  bool MustSucceed) {
+  return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
+                             MustSucceed);
+}
+
+Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
+                                 const DataLayout &DL,
+                                 const TargetLibraryInfo *TLI, AAResults *AA,
+                                 bool MustSucceed) {
   assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
          "ObjectSize must be a call to llvm.objectsize!");

   bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
   ObjectSizeOpts EvalOptions;
+  EvalOptions.AA = AA;
+
   // Unless we have to fold this to something, try to be as accurate as
   // possible.
   if (MustSucceed)
@@ -803,9 +813,130 @@
   return unknown();
 }

-SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
-  ++ObjectVisitorLoad;
-  return unknown();
+SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
+    LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
+    SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
+    unsigned &ScannedInstCount) {
+  constexpr unsigned MaxInstsToScan = 128;
+
+  auto Where = VisitedBlocks.find(&BB);
+  if (Where != VisitedBlocks.end())
+    return Where->second;
+
+  auto Unknown = [this, &BB, &VisitedBlocks]() {
+    return VisitedBlocks[&BB] = unknown();
+  };
+  auto Known = [this, &BB, &VisitedBlocks](SizeOffsetType SO) {
+    return VisitedBlocks[&BB] = SO;
+  };
+
+  do {
+    Instruction &I = *From;
+
+    if (I.isDebugOrPseudoInst())
+      continue;
+
+    if (++ScannedInstCount > MaxInstsToScan)
+      return Unknown();
+
+    if (!I.mayWriteToMemory())
+      continue;
+
+    if (auto *SI = dyn_cast<StoreInst>(&I)) {
+      AliasResult AR =
+          Options.AA->alias(SI->getPointerOperand(), Load.getPointerOperand());
+      switch ((AliasResult::Kind)AR) {
+      case AliasResult::NoAlias:
+        continue;
+      case AliasResult::MustAlias:
+        if (SI->getValueOperand()->getType()->isPointerTy())
+          return Known(compute(SI->getValueOperand()));
+        else
+          return Unknown(); // No handling of non-pointer values by `compute`.
+      default:
+        return Unknown();
+      }
+    }
+
+    if (auto *CB = dyn_cast<CallBase>(&I)) {
+      Function *Callee = CB->getCalledFunction();
+      // Bail out on indirect call.
+      if (!Callee)
+        return Unknown();
+
+      LibFunc TLIFn;
+      if (!TLI || !TLI->getLibFunc(*CB->getCalledFunction(), TLIFn) ||
+          !TLI->has(TLIFn))
+        return Unknown();
+
+      // TODO: There are probably more interesting cases to support here.
+      if (TLIFn != LibFunc_posix_memalign)
+        return Unknown();
+
+      AliasResult AR =
+          Options.AA->alias(CB->getOperand(0), Load.getPointerOperand());
+      switch ((AliasResult::Kind)AR) {
+      case AliasResult::NoAlias:
+        continue;
+      case AliasResult::MustAlias:
+        break;
+      default:
+        return Unknown();
+      }
+
+      // Is the error status of posix_memalign correctly checked? If not, it
+      // would be incorrect to assume it succeeds and that the load doesn't
+      // see the previous value.
+      Optional<bool> Checked = isImpliedByDomCondition(
+          ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL);
+      if (!Checked || !*Checked)
+        return Unknown();
+
+      Value *Size = CB->getOperand(2);
+      auto *C = dyn_cast<ConstantInt>(Size);
+      if (!C)
+        return Unknown();
+
+      return Known({C->getValue(), APInt(C->getValue().getBitWidth(), 0)});
+    }
+
+    return Unknown();
+  } while (From-- != BB.begin());
+
+  SmallVector<SizeOffsetType> PredecessorSizeOffsets;
+  for (auto *PredBB : predecessors(&BB)) {
+    PredecessorSizeOffsets.push_back(findLoadSizeOffset(
+        Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()),
+        VisitedBlocks, ScannedInstCount));
+    if (!bothKnown(PredecessorSizeOffsets.back()))
+      return Unknown();
+  }
+
+  if (PredecessorSizeOffsets.empty())
+    return Unknown();
+
+  return Known(std::accumulate(PredecessorSizeOffsets.begin() + 1,
+                               PredecessorSizeOffsets.end(),
+                               PredecessorSizeOffsets.front(),
+                               [this](SizeOffsetType LHS, SizeOffsetType RHS) {
+                                 return combineSizeOffset(LHS, RHS);
+                               }));
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
+  if (!Options.AA) {
+    ++ObjectVisitorLoad;
+    return unknown();
+  }
+
+  SmallDenseMap<BasicBlock *, SizeOffsetType, 8> VisitedBlocks;
+  unsigned ScannedInstCount = 0;
+  SizeOffsetType SO =
+      findLoadSizeOffset(LI, *LI.getParent(), BasicBlock::iterator(LI),
+                         VisitedBlocks, ScannedInstCount);
+  if (!bothKnown(SO))
+    ++ObjectVisitorLoad;
+  return SO;
 }

 SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
@@ -1010,7 +1141,7 @@
   return unknown();
 }

-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
   return unknown();
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1188,7 +1188,7 @@
   Intrinsic::ID IID = II->getIntrinsicID();
   switch (IID) {
   case Intrinsic::objectsize:
-    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
+    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false))
       return replaceInstUsesWith(CI, V);
     return nullptr;
   case Intrinsic::abs: {
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2810,7 +2810,7 @@
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
       if (II->getIntrinsicID() == Intrinsic::objectsize) {
         Value *Result =
-            lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/true);
+            lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/true);
         replaceInstUsesWith(*I, Result);
         eraseInstFromFunction(*I);
         Users[i] = nullptr; // Skip examining in the next loop.
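A minimal sketch of how a client pass could opt into the more precise behavior. Only the lowerObjectSizeCall overload, the ObjectSizeOpts::AA field, and the AAResults parameter come from this patch; the wrapper function and its name are hypothetical:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Hypothetical helper: fold one llvm.objectsize call, threading AA through
// the new overload so stored pointers and posix_memalign results can be
// resolved by the visitor.
static bool foldObjectSize(IntrinsicInst *II, const DataLayout &DL,
                           const TargetLibraryInfo *TLI, AAResults *AA) {
  if (II->getIntrinsicID() != Intrinsic::objectsize)
    return false;
  // AA may be null; the analysis then degrades to the previous behavior,
  // where visitLoadInst bails out with unknown().
  if (Value *V = lowerObjectSizeCall(II, DL, TLI, AA, /*MustSucceed=*/false)) {
    II->replaceAllUsesWith(V);
    II->eraseFromParent();
    return true;
  }
  return false;
}

As the InstCombine changes above show, passing a null AAResults keeps the old two-argument overload's semantics, so existing callers are unaffected.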
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
+
+
+define dso_local i64 @check_store_load(i1 %cond) local_unnamed_addr {
+; CHECK-LABEL: @check_store_load(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTR01:%.*]] = alloca [10 x i8], align 1
+; CHECK-NEXT:    br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[PTR01_SUB:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[PTR01]], i64 0, i64 0
+; CHECK-NEXT:    br label [[RETURN:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[PTR12:%.*]] = alloca [12 x i8], align 1
+; CHECK-NEXT:    [[PTR12_SUB:%.*]] = getelementptr inbounds [12 x i8], [12 x i8]* [[PTR12]], i64 0, i64 0
+; CHECK-NEXT:    br label [[RETURN]]
+; CHECK:       return:
+; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi i8* [ [[PTR12_SUB]], [[IF_END]] ], [ [[PTR01_SUB]], [[IF_THEN]] ]
+; CHECK-NEXT:    ret i64 12
+;
+entry:
+  %holder = alloca i8*
+  %ptr0 = alloca i8, i64 10
+  br i1 %cond, label %if.then, label %if.end
+
+if.then:
+  store i8* %ptr0, i8** %holder
+  br label %return
+
+if.end:
+  %ptr1 = alloca i8, i64 12
+  store i8* %ptr1, i8** %holder
+  br label %return
+
+return:
+  %held = load i8*, i8** %holder
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %held, i1 false, i1 true, i1 false)
+  ret i64 %objsize
+
+}
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll
@@ -0,0 +1,219 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare dso_local i32 @posix_memalign(i8** noundef, i64 noundef, i64 noundef)
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
+
+; Check posix_memalign call with proper handling of the return value.
+define dso_local i64 @check_posix_memalign(i32 noundef %n) local_unnamed_addr {
+; CHECK-LABEL: @check_posix_memalign(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
+
+
+; Same test case as above but with idiomatic NULL initialization.
+define dso_local i64 @check_posix_memalign_null() {
+; CHECK-LABEL: @check_posix_memalign_null(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    store i8* null, i8** [[OBJ]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  store i8* null, i8** %obj
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+}
+
+; Using argument storage instead of local storage for the allocated pointer.
+define dso_local i64 @check_posix_memalign_arg(i8** noalias noundef %obj) {
+; CHECK-LABEL: @check_posix_memalign_arg(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef [[OBJ:%.*]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
+
+; posix_memalign can fail; in that case, no objectsize can be guessed.
+define dso_local i64 @check_posix_memalign_unchecked() {
+; CHECK-LABEL: @check_posix_memalign_unchecked(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[VAL:%.*]] = load i8*, i8** [[OBJ]], align 8
+; CHECK-NEXT:    ret i64 -1
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  ret i64 %objsize
+}
+
+; Checks that objectsize behaves correctly on the posix_memalign failure path.
+define dso_local i64 @check_posix_memalign_inverted_cond() {
+; CHECK-LABEL: @check_posix_memalign_inverted_cond(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    store i8* null, i8** [[OBJ]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL]], label [[EXIT:%.*]], label [[COND_FALSE:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    [[VAL:%.*]] = load i8*, i8** [[OBJ]], align 8
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ -1, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  store i8* null, i8** %obj
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp eq i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+}
+
+; Check posix_memalign call with a runtime condition check.
+define dso_local i64 @check_posix_memalign_runtime_cond(i32 noundef %n) local_unnamed_addr {
+; CHECK-LABEL: @check_posix_memalign_runtime_cond(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    [[VAL:%.*]] = load i8*, i8** [[OBJ]], align 8
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ -1, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, %n
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
+
+; Check posix_memalign call with two different paths leading to the same alloc.
+define dso_local i64 @check_posix_memalign_diamond() local_unnamed_addr {
+; CHECK-LABEL: @check_posix_memalign_diamond(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
+; CHECK:       cond.true:
+; CHECK-NEXT:    br label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[COND_TRUE]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %cond.true, label %cond.false
+
+cond.true:
+  br label %exit
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %cond.true ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
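For reference, a source-level analogue of the check_posix_memalign test above. This is a sketch only: the function name is made up, and the constants (alignment 8, size 10, the -2 failure value) simply mirror the IR:

#include <stdlib.h>

size_t checked_alloc_size(void) {
  void *p;
  // The error status is checked, so isImpliedByDomCondition can prove that
  // on this path posix_memalign succeeded and wrote a pointer to a 10-byte
  // allocation into p.
  if (posix_memalign(&p, 8, 10) != 0)
    return (size_t)-2; // failure path: mirrors the -2 incoming phi value
  // With AA available, the objectsize query below now folds to 10.
  return __builtin_object_size(p, 0);
}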