//===-- Operator.cpp - Implement the LLVM operators ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constexpr only
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  case Instruction::ICmp:
    return cast<ICmpInst>(this)->hasSameSign();
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}
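
// Illustrative note (hypothetical example, not part of upstream Operator.cpp):
// poison-generating flags are assumptions that, when violated, turn the result
// into poison. For IR such as
//   %x = add nsw i32 %a, %b   ; poison on signed overflow
//   %y = add i32 %a, %b       ; never poison from flags
// hasPoisonGeneratingFlags() is true for %x and false for %y.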

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}

Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  /// Compute the worst possible offset for every level of the GEP and
  /// accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      /// If the index isn't known, we take 1 because it is the index that
      /// will give the worst alignment of the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }

    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}
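
// Illustrative sketch (hypothetical example, not part of upstream
// Operator.cpp): for a GEP like
//   getelementptr {i8, i32}, ptr %p, i64 %n, i32 1
// the variable index %n is treated as 1 element of the 8-byte struct (the
// worst case) and field 1 adds a 4-byte offset, so the GEP is only guaranteed
// to preserve MinAlign(8, 4) == 4-byte alignment of %p.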
119 "The offset bit width does not match DL specification.");
128 // Fast path for canonical getelementptr i8 form.
129 if (SourceType->
isIntegerTy(8) && !Index.empty() && !ExternalAnalysis) {
131 if (CI && CI->getType()->isIntegerTy()) {
132 Offset += CI->getValue().sextOrTrunc(
Offset.getBitWidth());
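
  // Illustrative note (hypothetical example, not part of upstream
  // Operator.cpp): for the canonical form `getelementptr i8, ptr %p, i64 16`
  // the single constant index is already a byte count, so the fast path above
  // simply adds 16 to Offset without walking the indexed types.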

  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    // Truncate if type size exceeds index space.
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size, /*isSigned=*/false,
                              /*implicitTrunc=*/true);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // External analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
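
  // Illustrative note (hypothetical example, not part of upstream
  // Operator.cpp): smul_ov/sadd_ov report wraparound through the Overflow
  // out-parameter, e.g.
  //   bool Ov = false;
  //   APInt(64, INT64_MAX).smul_ov(APInt(64, 8), Ov);  // Ov == true
  // so an out-of-range external-analysis result makes the walk fail instead
  // of silently returning a wrapped offset.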

  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero
      // (vscale * n * 0 = 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant, check if an external analysis was provided.
    // External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}
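
// Illustrative usage sketch (hypothetical caller, not part of upstream
// Operator.cpp): callers size the accumulator to the pointer index width
// before accumulating, e.g.
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ...; // e.g. Offset == 12 for `getelementptr i32, ptr %p, i64 3`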

bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    // Truncate if type size exceeds index space.
    APInt IndexedSize = APInt(BitWidth, Size, /*isSigned=*/false,
                              /*implicitTrunc=*/true);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero
      // (vscale * n * 0 = 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    if (STy || ScalableType)
      return false;
    // Truncate if type size exceeds index space.
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL),
                              /*isSigned=*/false, /*implicitTrunc=*/true);
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}
} // namespace llvm
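
// Illustrative usage sketch (hypothetical example, not part of upstream
// Operator.cpp): for
//   getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// with a 64-bit index width, collectOffset(DL, 64, VariableOffsets,
// ConstantOffset) leaves ConstantOffset at 0 and records {%i -> 4} in
// VariableOffsets, i.e. the address is %p plus 4 * %i bytes.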