1//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7//===----------------------------------------------------------------------===//
9// This file defines several CodeGen-specific LLVM IR analysis utilities.
11//===----------------------------------------------------------------------===//
30/// Compute the linearized index of a member in a nested aggregate/struct/array
31/// by recursing and accumulating CurIndex as long as there are indices in the
34 const unsigned *Indices,
35 const unsigned *IndicesEnd,
37 // Base case: We're done.
38 if (Indices && Indices == IndicesEnd)
41 // Given a struct type, recursively traverse the elements.
45 if (Indices && *Indices ==
I.index())
49 assert(!Indices &&
"Unexpected out of bound");
52 // Given an array type, recursively traverse the elements.
54 Type *EltTy = ATy->getElementType();
55 unsigned NumElts = ATy->getNumElements();
56 // Compute the Linear offset when jumping one element of the array
59 assert(*Indices < NumElts &&
"Unexpected out of bound");
60 // If the index is inside the array, compute the index to the requested
61 // elt and recurse inside the element with the end of the indices list
62 CurIndex += EltLinearOffset* *Indices;
65 CurIndex += EltLinearOffset*NumElts;
68 // We haven't found the type we're looking for, so keep searching.
77 StartingOffset.
isZero()) &&
78 "Offset/TypeSize mismatch!");
79 // Given a struct type, recursively traverse the elements.
81 // If the Offsets aren't needed, don't query the struct layout. This allows
82 // us to support structs with scalable vectors for operations that don't
84 const StructLayout *SL = Offsets ?
DL.getStructLayout(STy) :
nullptr;
86 EE = STy->element_end();
88 // Don't compute the element offset if we didn't get a StructLayout above.
95 // Given an array type, recursively traverse the elements.
97 Type *EltTy = ATy->getElementType();
99 for (
unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
101 StartingOffset + i * EltSize);
104 // Interpret void as zero return values.
109 Offsets->push_back(StartingOffset);
112/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
113/// EVTs that represent all the individual underlying
114/// non-aggregate types that comprise it.
116/// If Offsets is non-null, it points to a vector to be filled in
117/// with the in-memory offsets of each of the individual values.
126 for (
Type *Ty : Types) {
153 // Given a struct type, recursively traverse the elements.
155 // If the Offsets aren't needed, don't query the struct layout. This allows
156 // us to support structs with scalable vectors for operations that don't
158 const StructLayout *SL = Offsets ?
DL.getStructLayout(STy) :
nullptr;
159 for (
unsigned I = 0, E = STy->getNumElements();
I != E; ++
I) {
162 StartingOffset + EltOffset);
166 // Given an array type, recursively traverse the elements.
168 Type *EltTy = ATy->getElementType();
169 uint64_t EltSize =
DL.getTypeAllocSize(EltTy).getFixedValue();
170 for (
unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
172 StartingOffset + i * EltSize);
175 // Interpret void as zero return values.
178 // Base case: we can get an LLT for this LLVM IR type.
180 if (Offsets !=
nullptr)
181 Offsets->push_back(StartingOffset * 8);
184/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
190 if (Var && Var->
getName() ==
"llvm.eh.catch.all.value") {
192 "The EH catch-all value must have an initializer");
199 "TypeInfo must be a global variable or NULL");
203/// getFCmpCondCode - Return the ISD condition code corresponding to
204/// the given LLVM IR floating-point condition code. This includes
205/// consideration of global floating-point math flags.
287 return T1 == T2 || (
T1->isPointerTy() && T2->isPointerTy()) ||
292/// Look through operations that will be free to find the earliest source of
295/// @param ValLoc If V has aggregate type, we will be interested in a particular
296/// scalar component. This records its address; the reverse of this list gives a
297/// sequence of indices appropriate for an extractvalue to locate the important
298/// value. This value is updated during the function and on exit will indicate
299/// similar information for the Value returned.
301/// @param DataBits If this function looks through truncate instructions, this
302/// will record the smallest size attained.
309 // Try to look through V1; if V1 is not an instruction, it can't be looked
312 if (!
I ||
I->getNumOperands() == 0)
return V;
313 const Value *NoopInput =
nullptr;
317 // Look through truly no-op bitcasts.
321 // Look through getelementptr
325 // Look through inttoptr.
326 // Make sure this isn't a truncating or extending cast. We could
327 // support this eventually, but don't bother for now.
329 DL.getPointerSizeInBits() ==
333 // Look through ptrtoint.
334 // Make sure this isn't a truncating or extending cast. We could
335 // support this eventually, but don't bother for now.
337 DL.getPointerSizeInBits() ==
344 I->getType()->getPrimitiveSizeInBits().getFixedValue());
347 const Value *ReturnedOp = CB->getReturnedArgOperand();
349 NoopInput = ReturnedOp;
351 // Value may come from either the aggregate or the scalar
353 if (ValLoc.
size() >= InsertLoc.
size() &&
354 std::equal(InsertLoc.
begin(), InsertLoc.
end(), ValLoc.
rbegin())) {
355 // The type being inserted is a nested sub-type of the aggregate; we
356 // have to remove those initial indices to get the location we're
357 // interested in for the operand.
359 NoopInput = IVI->getInsertedValueOperand();
361 // The struct we're inserting into has the value we're interested in, no
362 // change of address.
366 // The part we're interested in will inevitably be some sub-section of the
367 // previous aggregate. Combine the two paths to obtain the true address of
373 // Terminate if we couldn't find anything to look through.
381/// Return true if this scalar return value only has bits discarded on its path
382/// from the "tail call" to the "ret". This includes the obvious noop
383/// instructions handled by getNoopInput above as well as free truncations (or
384/// extensions prior to the call).
388 bool AllowDifferingSizes,
392 // Trace the sub-value needed by the return value as far back up the graph as
393 // possible, in the hope that it will intersect with the value produced by the
394 // call. In the simple case with no "returned" attribute, the hope is actually
395 // that we end up back at the tail call instruction itself.
396 unsigned BitsRequired = UINT_MAX;
399 // If this slot in the value returned is undef, it doesn't matter what the
400 // call puts there, it'll be fine.
404 // Now do a similar search up through the graph to find where the value
405 // actually returned by the "tail call" comes from. In the simple case without
406 // a "returned" attribute, the search will be blocked immediately and the loop
408 unsigned BitsProvided = UINT_MAX;
409 CallVal =
getNoopInput(CallVal, CallIndices, BitsProvided, TLI,
DL);
411 // There's no hope if we can't actually trace them to (the same part of!) the
413 if (CallVal != RetVal || CallIndices != RetIndices)
416 // However, intervening truncates may have made the call non-tail. Make sure
417 // all the bits that are needed by the "ret" have been provided by the "tail
418 // call". FIXME: with sufficiently cunning bit-tracking, we could look through
420 if (BitsProvided < BitsRequired ||
421 (!AllowDifferingSizes && BitsProvided != BitsRequired))
427/// For an aggregate type, determine whether a given index is within bounds or
431 return Idx < AT->getNumElements();
433 return Idx < cast<StructType>(
T)->getNumElements();
436/// Move the given iterators to the next leaf type in depth first traversal.
438/// Performs a depth-first traversal of the type as specified by its arguments,
439/// stopping at the next leaf node (which may be a legitimate scalar type or an
440/// empty struct or array).
442/// @param SubTypes List of the partial components making up the type from
443/// outermost to innermost non-empty aggregate. The element currently
444/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
446/// @param Path Set of extractvalue indices leading from the outermost type
447/// (SubTypes[0]) to the leaf node currently represented.
449/// @returns true if a new type was found, false otherwise. Calling this
450/// function again on a finished iterator will repeatedly return
451/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
452/// aggregate or a non-aggregate
455 // First march back up the tree until we can successfully increment one of the
456 // coordinates in Path.
462 // If we reached the top, then the iterator is done.
466 // We know there's *some* valid leaf now, so march back down the tree picking
467 // out the left-most element at each node.
484/// Find the first non-empty, scalar-like type in Next and setup the iterator
487/// Assuming Next is an aggregate of some kind, this function will traverse the
488/// tree from left to right (i.e. depth-first) looking for the first
489/// non-aggregate type which will play a role in function return.
491/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
492/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
496 // First initialise the iterator components to the first "leaf" node
497 // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
498 // despite nominally being an aggregate).
505 // If there's no Path now, Next was originally scalar already (or empty
506 // leaf). We're done.
510 // Otherwise, use normal iteration to keep looking through the tree until we
511 // find a non-aggregate type.
513 ->isAggregateType()) {
521/// Set the iterator data-structures to the next non-empty, non-aggregate
529 assert(!Path.empty() &&
"found a leaf but didn't set the path?");
531 ->isAggregateType());
537/// Test if the given instruction is in a position to be optimized
538/// with a tail-call. This roughly means that it's in a block with
539/// a return and there's nothing that needs to be scheduled
540/// between it and the return.
542/// This function only tests target-independent requirements.
544 bool ReturnsFirstArg) {
549 // The block must end in a return statement or unreachable.
551 // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
552 // an unreachable, for now. The way tailcall optimization is currently
553 // implemented means it will add an epilogue followed by a jump. That is
554 // not profitable. Also, if the callee is a special function (e.g.
555 // longjmp on x86), it can end up causing miscompilation that has not
556 // been fully understood.
557 if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
563 // If I will have a chain, make sure no other instruction that will have a
564 // chain interposes between I and the return.
565 // Check for all calls including speculatable functions.
569 // Debug info intrinsics do not get in the way of tail call optimization.
570 // Pseudo probe intrinsics do not block tail call optimization either.
571 if (BBI->isDebugOrPseudoInst())
573 // A lifetime end, assume or noalias.decl intrinsic should not stop tail
574 // call optimization.
576 if (
II->getIntrinsicID() == Intrinsic::lifetime_end ||
577 II->getIntrinsicID() == Intrinsic::assume ||
578 II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl ||
579 II->getIntrinsicID() == Intrinsic::fake_use)
581 if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
588 F, &
Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering(),
595 bool *AllowDifferingSizes) {
596 // ADS may be null, so don't write to it directly.
598 bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
601 AttrBuilder CallerAttrs(
F->getContext(),
F->getAttributes().getRetAttrs());
602 AttrBuilder CalleeAttrs(
F->getContext(),
605 // Following attributes are completely benign as far as calling convention
606 // goes, they shouldn't affect whether the call is a tail call.
607 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
608 Attribute::DereferenceableOrNull, Attribute::NoAlias,
609 Attribute::NonNull, Attribute::NoUndef,
610 Attribute::Range, Attribute::NoFPClass}) {
611 CallerAttrs.removeAttribute(Attr);
612 CalleeAttrs.removeAttribute(Attr);
615 if (CallerAttrs.contains(Attribute::ZExt)) {
616 if (!CalleeAttrs.contains(Attribute::ZExt))
620 CallerAttrs.removeAttribute(Attribute::ZExt);
621 CalleeAttrs.removeAttribute(Attribute::ZExt);
622 }
else if (CallerAttrs.contains(Attribute::SExt)) {
623 if (!CalleeAttrs.contains(Attribute::SExt))
627 CallerAttrs.removeAttribute(Attribute::SExt);
628 CalleeAttrs.removeAttribute(Attribute::SExt);
631 // Drop sext and zext return attributes if the result is not used.
632 // This enables tail calls for code like:
634 // define void @caller() {
636 // %unused_result = tail call zeroext i1 @callee()
637 // br label %retlabel
641 if (
I->use_empty()) {
642 CalleeAttrs.removeAttribute(Attribute::SExt);
643 CalleeAttrs.removeAttribute(Attribute::ZExt);
646 // If they're still different, there's some facet we don't understand
647 // (currently only "inreg", but in future who knows). It may be OK but the
648 // only safe option is to reject the tail call.
649 return CallerAttrs == CalleeAttrs;
656 bool ReturnsFirstArg) {
657 // If the block ends with a void return or unreachable, it doesn't matter
658 // what the call's return type is.
659 if (!Ret || Ret->getNumOperands() == 0)
return true;
661 // If the return value is undef, it doesn't matter what the call's
665 // Make sure the attributes attached to each return are compatible.
666 bool AllowDifferingSizes;
670 // If the return value is the first argument of the call.
674 const Value *RetVal = Ret->getOperand(0), *CallVal =
I;
679 bool CallEmpty = !
firstRealType(CallVal->getType(), CallSubTypes, CallPath);
681 // Nothing's actually returned, it doesn't matter what the callee put there
682 // it's a valid tail call.
686 // Iterate pairwise through each of the value types making up the tail call
687 // and the corresponding return. For each one we want to know whether it's
688 // essentially going directly from the tail call to the ret, via operations
689 // that end up not generating any code.
691 // We allow a certain amount of covariance here. For example it's permitted
692 // for the tail call to define more bits than the ret actually cares about
693 // (e.g. via a truncate).
696 // We've exhausted the values produced by the tail call instruction, the
697 // rest are essentially undef. The type doesn't really matter, but we need
704 // The manipulations performed when we're looking through an insertvalue or
705 // an extractvalue would happen at the front of the RetPath list, so since
706 // we have to copy it anyway it's more efficient to create a reversed copy.
710 // Finally, we can check whether the value produced by the tail call at this
711 // index is compatible with the value we return.
713 AllowDifferingSizes, TLI,
725 Value *RetVal = Ret ? Ret->getReturnValue() :
nullptr;
726 bool ReturnsFirstArg =
false;
728 ReturnsFirstArg =
true;
729 return ReturnsFirstArg;
736 while (!Worklist.
empty()) {
738 // Don't follow blocks which start new scopes.
742 // Add this MBB to our scope.
743 auto P = EHScopeMembership.
insert(std::make_pair(Visiting, EHScope));
745 // Don't revisit blocks.
747 assert(
P.first->second == EHScope &&
"MBB is part of two scopes!");
751 // Returns are boundaries where scope transfer can occur, don't follow
764 // We don't have anything to do if there aren't any EH pads.
766 return EHScopeMembership;
778 if (
MBB.isEHScopeEntry()) {
780 }
else if (IsSEH &&
MBB.isEHPad()) {
782 }
else if (
MBB.pred_empty()) {
788 // CatchPads are not scopes for SEH so do not consider CatchRet to
789 // transfer control to another scope.
790 if (
MBBI ==
MBB.end() ||
MBBI->getOpcode() !=
TII->getCatchReturnOpcode())
793 // FIXME: SEH CatchPads are not necessarily in the parent function:
794 // they could be inside a finally block.
801 // We don't have anything to do if there aren't any EH pads.
802 if (EHScopeBlocks.
empty())
803 return EHScopeMembership;
805 // Identify all the basic blocks reachable from the function entry.
807 // All blocks not part of a scope are in the parent function.
810 // Next, identify all the blocks inside the scopes.
813 // SEH CatchPads aren't really scopes, handle them separately.
816 // Finally, identify all the targets of a catchret.
817 for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
821 return EHScopeMembership;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isNoopBitcast(Type *T1, Type *T2, const TargetLoweringBase &TLI)
static bool firstRealType(Type *Next, SmallVectorImpl< Type * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Find the first non-empty, scalar-like type in Next and setup the iterator components.
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal, SmallVectorImpl< unsigned > &RetIndices, SmallVectorImpl< unsigned > &CallIndices, bool AllowDifferingSizes, const TargetLoweringBase &TLI, const DataLayout &DL)
Return true if this scalar return value only has bits discarded on its path from the "tail call" to t...
static void collectEHScopeMembers(DenseMap< const MachineBasicBlock *, int > &EHScopeMembership, int EHScope, const MachineBasicBlock *MBB)
static bool indexReallyValid(Type *T, unsigned Idx)
For an aggregate type, determine whether a given index is within bounds or not.
static bool nextRealType(SmallVectorImpl< Type * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Set the iterator data-structures to the next non-empty, non-aggregate subtype.
static bool advanceToNextLeafType(SmallVectorImpl< Type * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Move the given iterators to the next leaf type in depth first traversal.
static const Value * getNoopInput(const Value *V, SmallVectorImpl< unsigned > &ValLoc, unsigned &DataBits, const TargetLoweringBase &TLI, const DataLayout &DL)
Look through operations that will be free to find the earliest source of this value.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
reverse_iterator rend() const
size_t size() const
size - Get the array size.
reverse_iterator rbegin() const
Class to represent array types.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::const_iterator const_iterator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
const Constant * stripPointerCasts() const
A parsed version of the target data layout string in and methods for querying it.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
This instruction inserts a struct field of array element value into an aggregate value.
A wrapper class for inspecting calls to intrinsic functions.
bool isEHPad() const
Returns true if the block is a landing pad.
MachineInstrBundleIterator< const MachineInstr > const_iterator
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
iterator_range< succ_iterator > successors()
bool isEHScopeReturnBlock() const
Convenience function that returns true if the block ends in an EH scope return instruction.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineBasicBlock & front() const
Return a value (possibly void), from a function.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
Type::subtype_iterator element_iterator
TargetInstrInfo - Interface to description of machine instruction set.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
The instances of the Type class are immutable: once they are created, they are never changed.
bool isAggregateType() const
Return true if the type is an aggregate type.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isZero() const
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This is an optimization pass for GlobalISel generic memory operations.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto reverse(ContainerTy &&C)
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI, bool ReturnsFirstArg=false)
Test if given that the input instruction is in the tail call position if the return type or any attri...
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
bool attributesPermitTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI, bool *AllowDifferingSizes=nullptr)
Test if given that the input instruction is in the tail call position, if there is an attribute misma...
FunctionAddr VTableAddr Next
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
DenseMap< const MachineBasicBlock *, int > getEHScopeMembership(const MachineFunction &MF)
LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.