llvm-project/llvm/lib/IR/Instruction.cpp


//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
using namespace llvm;
InsertPosition::InsertPosition(Instruction *InsertBefore)
: InsertAt(InsertBefore ? InsertBefore->getIterator()
: InstListType::iterator()) {}
InsertPosition::InsertPosition(BasicBlock *InsertAtEnd)
: InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {}
Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo,
InsertPosition InsertBefore)
: User(ty, Value::InstructionVal + it, AllocInfo) {
// When called with an iterator, there must be a block to insert into.
if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) {
BasicBlock *BB = InsertIt.getNodeParent();
assert(BB && "Instruction to insert before is not in a basic block!");
insertInto(BB, InsertBefore);
}
}
Instruction::~Instruction() {
assert(!getParent() && "Instruction still linked in the program!");
// Replace any extant metadata uses of this instruction with poison to
// preserve debug info accuracy. Some alternatives include:
// - Treat Instruction like any other Value, and point its extant metadata
// uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
// trivially dead (i.e. fair game for deletion in many passes), leading to
// stale dbg.values being in effect for too long.
// - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
// correct. OTOH results in wasted work in some common cases (e.g. when all
// instructions in a BasicBlock are deleted).
if (isUsedByMetadata())
ValueAsMetadata::handleRAUW(this, PoisonValue::get(getType()));
// Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
// mapping in LLVMContext.
setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
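// Illustrative sketch (not code from this file; the dbg_value record and
// metadata IDs below are hypothetical): deleting an instruction that is
// referenced from debug metadata rewrites the reference to poison instead
// of leaving it dangling:
//
//   %add = add i32 %a, %b
//     #dbg_value(i32 %add, !10, !DIExpression(), !20)
//   ; after the instruction is deleted, the record reads:
//     #dbg_value(i32 poison, !10, !DIExpression(), !20)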
const Module *Instruction::getModule() const {
return getParent()->getModule();
}
const Function *Instruction::getFunction() const {
return getParent()->getParent();
}
const DataLayout &Instruction::getDataLayout() const {
return getModule()->getDataLayout();
}
void Instruction::removeFromParent() {
// Perform any debug-info maintenance required.
handleMarkerRemoval();
getParent()->getInstList().remove(getIterator());
}
void Instruction::handleMarkerRemoval() {
if (!getParent()->IsNewDbgInfoFormat || !DebugMarker)
return;
DebugMarker->removeMarker();
}
BasicBlock::iterator Instruction::eraseFromParent() {
handleMarkerRemoval();
return getParent()->getInstList().erase(getIterator());
}
void Instruction::insertBefore(Instruction *InsertPos) {
insertBefore(InsertPos->getIterator());
}
/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
insertBefore(*InsertPos->getParent(), InsertPos);
}
/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
BasicBlock *DestParent = InsertPos->getParent();
DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
}
void Instruction::insertAfter(BasicBlock::iterator InsertPos) {
BasicBlock *DestParent = InsertPos->getParent();
DestParent->getInstList().insertAfter(InsertPos, this);
}
BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
BasicBlock::iterator It) {
assert(getParent() == nullptr && "Expected detached instruction");
assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
"It not in ParentBB");
insertBefore(*ParentBB, It);
return getIterator();
}
extern cl::opt<bool> UseNewDbgInfoFormat;
void Instruction::insertBefore(BasicBlock &BB,
InstListType::iterator InsertPos) {
assert(!DebugMarker);
BB.getInstList().insert(InsertPos, this);
if (!BB.IsNewDbgInfoFormat)
return;
// We've inserted "this": if InsertAtHead is set then it comes before any
// DbgVariableRecords attached to InsertPos. But if it's not set, then any
// DbgRecords should now come before "this".
bool InsertAtHead = InsertPos.getHeadBit();
if (!InsertAtHead) {
DbgMarker *SrcMarker = BB.getMarker(InsertPos);
if (SrcMarker && !SrcMarker->empty()) {
// If this assertion fires, the calling code is about to insert a PHI
// after debug-records, which would form a sequence like:
// %0 = PHI
// #dbg_value
// %1 = PHI
// Which is de-normalised and undesired -- hence the assertion. To avoid
// this, you must insert at that position using an iterator, and it must
// be acquired by calling getFirstNonPHIIt / begin or similar methods on
// the block. This will signal to this behind-the-scenes debug-info
// maintenance code that you intend the PHI to be ahead of everything,
// including any debug-info.
assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
adoptDbgRecords(&BB, InsertPos, false);
}
}
// If we're inserting a terminator, check if we need to flush out
// TrailingDbgRecords. Inserting instructions at the end of an incomplete
// block is handled by the code block above.
if (isTerminator())
getParent()->flushTerminatorDbgRecords();
}
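// Usage sketch for the PHI rule asserted above (hedged; 'PN', 'Ty' and 'BB'
// are hypothetical caller-side names, not part of this file):
//
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "p");
//   PN->insertBefore(BB->begin()); // the iterator carries the head bit, so
//                                  // the PHI lands ahead of debug records.
//
// Inserting via a plain 'Instruction *' position does not carry the head
// bit and can trip the assertion when debug records precede the first
// non-PHI instruction in the block.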
/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
}
void Instruction::moveBefore(BasicBlock::iterator MovePos) {
moveBeforeImpl(*MovePos->getParent(), MovePos, false);
}
void Instruction::moveBeforePreserving(Instruction *MovePos) {
moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
}
void Instruction::moveBeforePreserving(BasicBlock::iterator MovePos) {
moveBeforeImpl(*MovePos->getParent(), MovePos, true);
}
void Instruction::moveAfter(Instruction *MovePos) {
auto NextIt = std::next(MovePos->getIterator());
// We want this instruction to be moved to before NextIt in the instruction
// list, but before NextIt's debug value range.
NextIt.setHeadBit(true);
moveBeforeImpl(*MovePos->getParent(), NextIt, false);
}
void Instruction::moveAfterPreserving(Instruction *MovePos) {
auto NextIt = std::next(MovePos->getIterator());
// We want this instruction and its debug range to be moved to before NextIt
// in the instruction list, but before NextIt's debug value range.
NextIt.setHeadBit(true);
moveBeforeImpl(*MovePos->getParent(), NextIt, true);
}
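// For illustration (hedged; 'A' and 'B' are hypothetical instructions in
// the same function):
//
//   A->moveAfter(B);
//
// 'A' ends up immediately after 'B' in the instruction list but ahead of
// any debug records attached to 'B's successor, because the head bit is
// set on the destination iterator above.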
void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
moveBeforeImpl(BB, I, false);
}
void Instruction::moveBeforePreserving(BasicBlock &BB,
InstListType::iterator I) {
moveBeforeImpl(BB, I, true);
}
void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
bool Preserve) {
assert(I == BB.end() || I->getParent() == &BB);
bool InsertAtHead = I.getHeadBit();
// If we've been given the "Preserve" flag, then just move the DbgRecords with
// the instruction, no more special handling needed.
if (BB.IsNewDbgInfoFormat && DebugMarker && !Preserve) {
if (I != this->getIterator() || InsertAtHead) {
// "this" is definitely moving in the list, or it's moving ahead of its
// attached DbgVariableRecords. Detach any existing DbgRecords.
handleMarkerRemoval();
}
}
// Move this single instruction. Use the list splice method directly, not
// the block splicer, which will do more debug-info things.
BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
if (BB.IsNewDbgInfoFormat && !Preserve) {
DbgMarker *NextMarker = getParent()->getNextMarker(this);
// If we're inserting at point I, and not in front of the DbgRecords
// attached there, then we should absorb the DbgRecords attached to I.
if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
adoptDbgRecords(&BB, I, false);
}
}
if (isTerminator())
getParent()->flushTerminatorDbgRecords();
}
iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
bool InsertAtHead) {
if (!From->DebugMarker)
return DbgMarker::getEmptyDbgRecordRange();
assert(getParent()->IsNewDbgInfoFormat);
assert(getParent()->IsNewDbgInfoFormat ==
From->getParent()->IsNewDbgInfoFormat);
if (!DebugMarker)
getParent()->createMarker(this);
return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
InsertAtHead);
}
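// Typical use when duplicating an instruction (a sketch assuming the header
// supplies the default arguments; 'Orig' and 'InsertPt' are hypothetical):
//
//   Instruction *New = Orig->clone();
//   New->insertBefore(InsertPt);
//   New->cloneDebugInfoFrom(Orig); // copy every DbgRecord attached to Orig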
std::optional<DbgRecord::self_iterator>
Instruction::getDbgReinsertionPosition() {
// Is there a marker on the next instruction?
DbgMarker *NextMarker = getParent()->getNextMarker(this);
if (!NextMarker)
return std::nullopt;
// Are there any DbgRecords in the next marker?
if (NextMarker->StoredDbgRecords.empty())
return std::nullopt;
return NextMarker->StoredDbgRecords.begin();
}
bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }
void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
bool InsertAtHead) {
DbgMarker *SrcMarker = BB->getMarker(It);
auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
if (BB->end() == It) {
SrcMarker->eraseFromParent();
BB->deleteTrailingDbgRecords();
}
};
if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
ReleaseTrailingDbgRecords();
return;
}
// If we have DbgMarkers attached to this instruction, we have to honour the
// ordering of DbgRecords between this and the other marker. Fall back to just
// absorbing from the source.
if (DebugMarker || It == BB->end()) {
// Ensure we _do_ have a marker.
getParent()->createMarker(this);
DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);
// Having transferred everything out of SrcMarker, we _could_ clean it up
// and free the marker now. However, that's a lot of heap-accounting for a
// small amount of memory with a good chance of re-use. Leave it for the
// moment. It will be released when the Instruction is freed in the worst
// case.
// However: if we transferred from a trailing marker off the end of the
// block, it's important to not leave the empty marker trailing. It will
// give a misleading impression that some debug records have been left
// trailing.
ReleaseTrailingDbgRecords();
} else {
// Optimisation: we're transferring all the DbgRecords from the source
// marker onto this empty location: just adopt the other instruction's
// marker.
DebugMarker = SrcMarker;
DebugMarker->MarkedInstr = this;
It->DebugMarker = nullptr;
}
}
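// Conceptual sketch (hedged, adapted from the motivating example for this
// optimisation): in
//
//   %foo = add i32 ...
//     #dbg_value(...)
//   %baz = add i32 ...
//   %qux = add i32 ...
//
// erasing %baz makes its records "fall down" onto %qux; when %qux has no
// marker of its own, ownership of %baz's marker is transferred wholesale
// instead of splicing the records across one at a time.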
void Instruction::dropDbgRecords() {
if (DebugMarker)
DebugMarker->dropDbgRecords();
}
void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
DebugMarker->dropOneDbgRecord(DVR);
}
bool Instruction::comesBefore(const Instruction *Other) const {
assert(getParent() && Other->getParent() &&
"instructions without BB parents have no order");
assert(getParent() == Other->getParent() &&
"cross-BB instruction order comparison");
if (!getParent()->isInstrOrderValid())
const_cast<BasicBlock *>(getParent())->renumberInstructions();
return Order < Other->Order;
}
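// Example (hedged; 'Def' and 'Use' are hypothetical instructions in the
// same block): the first query may renumber the block once, after which
// comparisons are constant-time until the block is mutated again:
//
//   if (Def->comesBefore(Use))
//     ... // Def precedes Use in program order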
std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
assert(!getType()->isVoidTy() && "Instruction must define result");
BasicBlock *InsertBB;
BasicBlock::iterator InsertPt;
if (auto *PN = dyn_cast<PHINode>(this)) {
InsertBB = PN->getParent();
InsertPt = InsertBB->getFirstInsertionPt();
} else if (auto *II = dyn_cast<InvokeInst>(this)) {
InsertBB = II->getNormalDest();
InsertPt = InsertBB->getFirstInsertionPt();
} else if (isa<CallBrInst>(this)) {
// Def is available in multiple successors; there's no single dominating
// insertion point.
return std::nullopt;
} else {
assert(!isTerminator() && "Only invoke/callbr terminators return value");
InsertBB = getParent();
InsertPt = std::next(getIterator());
// Any instruction inserted immediately after "this" will come before any
// debug-info records take effect -- thus, set the head bit indicating that
// to debug-info-transfer code.
InsertPt.setHeadBit(true);
}
// catchswitch blocks don't have any legal insertion point (because they
// are both an exception pad and a terminator).
if (InsertPt == InsertBB->end())
return std::nullopt;
return InsertPt;
}
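// Sketch of the intended use (hypothetical 'Def' and 'Repl'):
//
//   if (std::optional<BasicBlock::iterator> IP =
//           Def->getInsertionPointAfterDef())
//     Repl->insertBefore(*IP); // e.g. for an invoke this is the first
//                              // insertion point of the normal dest
//   else
//     ; // callbr / catchswitch: no single dominating point exists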
bool Instruction::isOnlyUserOfAnyOperand() {
return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}
void Instruction::setHasNoUnsignedWrap(bool b) {
if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
Inst->setHasNoUnsignedWrap(b);
else
cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
}
void Instruction::setHasNoSignedWrap(bool b) {
if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
Inst->setHasNoSignedWrap(b);
else
cast<TruncInst>(this)->setHasNoSignedWrap(b);
}
void Instruction::setIsExact(bool b) {
cast<PossiblyExactOperator>(this)->setIsExact(b);
}
void Instruction::setNonNeg(bool b) {
assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
(b * PossiblyNonNegInst::NonNeg);
}
bool Instruction::hasNoUnsignedWrap() const {
if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
return Inst->hasNoUnsignedWrap();
return cast<TruncInst>(this)->hasNoUnsignedWrap();
}
bool Instruction::hasNoSignedWrap() const {
if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
return Inst->hasNoSignedWrap();
return cast<TruncInst>(this)->hasNoSignedWrap();
}
bool Instruction::hasNonNeg() const {
assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
}
bool Instruction::hasPoisonGeneratingFlags() const {
return cast<Operator>(this)->hasPoisonGeneratingFlags();
}
void Instruction::dropPoisonGeneratingFlags() {
switch (getOpcode()) {
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
case Instruction::Shl:
cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
break;
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::AShr:
case Instruction::LShr:
cast<PossiblyExactOperator>(this)->setIsExact(false);
break;
case Instruction::Or:
cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
break;
case Instruction::GetElementPtr:
cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
break;
case Instruction::UIToFP:
case Instruction::ZExt:
setNonNeg(false);
break;
case Instruction::Trunc:
cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
cast<TruncInst>(this)->setHasNoSignedWrap(false);
break;
case Instruction::ICmp:
cast<ICmpInst>(this)->setSameSign(false);
break;
}
if (isa<FPMathOperator>(this)) {
setHasNoNaNs(false);
setHasNoInfs(false);
}
assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
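// For example (illustrative IR, before -> after):
//
//   %x = add nuw nsw i32 %a, %b          -->  %x = add i32 %a, %b
//   %d = sdiv exact i32 %a, %b           -->  %d = sdiv i32 %a, %b
//   %f = fadd nnan ninf nsz float %u, %v -->  %f = fadd nsz float %u, %v
//
// Note that for FP operations only nnan and ninf are cleared here; flags
// such as nsz do not generate poison and are left in place.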
bool Instruction::hasPoisonGeneratingMetadata() const {
return any_of(Metadata::PoisonGeneratingIDs,
[this](unsigned ID) { return hasMetadata(ID); });
}
bool Instruction::hasNonDebugLocLoopMetadata() const {
// If there is no loop metadata at all, we also don't have
// non-debug loop metadata, obviously.
if (!hasMetadata(LLVMContext::MD_loop))
return false;
// If we do have loop metadata, retrieve it.
MDNode *LoopMD = getMetadata(LLVMContext::MD_loop);
// Check if the existing operands are debug locations. This loop
// should terminate after at most three iterations. Skip
// the first item because it is a self-reference.
for (const MDOperand &Op : llvm::drop_begin(LoopMD->operands())) {
// Check whether the operand is a debug location.
if (!isa<DILocation>(Op)) {
return true;
}
}
// If we get here, then all we have is debug locations in the loop metadata.
return false;
}
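// Illustrative metadata shapes (hedged sketch of typical !llvm.loop
// payloads):
//
//   !0 = distinct !{!0, !1, !2}   ; !1, !2 = !DILocation(...)
//     -> returns false: only debug locations besides the self-reference
//   !0 = distinct !{!0, !1, !3}   ; !3 = !{!"llvm.loop.unroll.disable"}
//     -> returns true: a non-DILocation operand is present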
void Instruction::dropPoisonGeneratingMetadata() {
for (unsigned ID : Metadata::PoisonGeneratingIDs)
eraseMetadata(ID);
}
bool Instruction::hasPoisonGeneratingReturnAttributes() const {
if (const auto *CB = dyn_cast<CallBase>(this)) {
AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
return RetAttrs.hasAttribute(Attribute::Range) ||
RetAttrs.hasAttribute(Attribute::Alignment) ||
RetAttrs.hasAttribute(Attribute::NonNull);
}
return false;
}
void Instruction::dropPoisonGeneratingReturnAttributes() {
if (auto *CB = dyn_cast<CallBase>(this)) {
AttributeMask AM;
AM.addAttribute(Attribute::Range);
AM.addAttribute(Attribute::Alignment);
AM.addAttribute(Attribute::NonNull);
CB->removeRetAttrs(AM);
}
assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
}
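// E.g. (illustrative IR): a call such as
//
//   %p = call nonnull align 8 ptr @f()
//
// becomes '%p = call ptr @f()' once the poison-generating return
// attributes have been dropped.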
void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
ArrayRef<unsigned> KnownIDs) {
dropUnknownNonDebugMetadata(KnownIDs);
auto *CB = dyn_cast<CallBase>(this);
if (!CB)
return;
// For call instructions, we also need to drop parameter and return attributes
// that can cause UB if the call is moved to a location where the
// attribute is not valid.
AttributeList AL = CB->getAttributes();
if (AL.isEmpty())
return;
AttributeMask UBImplyingAttributes =
AttributeFuncs::getUBImplyingAttributes();
for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
CB->removeRetAttrs(UBImplyingAttributes);
}
void Instruction::dropUBImplyingAttrsAndMetadata() {
// !annotation metadata does not impact semantics.
// !range, !nonnull and !align produce poison, so they are safe to speculate.
// !noundef and various AA metadata must be dropped, as they generally
// produce immediate undefined behavior.
unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
LLVMContext::MD_nonnull, LLVMContext::MD_align};
dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
}
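// Hedged example of why the split matters when speculating (the load and
// metadata IDs below are hypothetical):
//
//   %v = load i32, ptr %p, !noundef !0, !range !1
//
// If this load is hoisted above a guarding branch, !noundef must be
// dropped (a violation is immediate UB), while !range may be kept by
// listing it in KnownIDs, since a !range violation only yields poison.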
bool Instruction::isExact() const {
return cast<PossiblyExactOperator>(this)->isExact();
}
void Instruction::setFast(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setFast(B);
}
void Instruction::setHasAllowReassoc(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}
void Instruction::setHasNoNaNs(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasNoNaNs(B);
}
void Instruction::setHasNoInfs(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasNoInfs(B);
}
void Instruction::setHasNoSignedZeros(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}
void Instruction::setHasAllowReciprocal(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}
void Instruction::setHasAllowContract(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasAllowContract(B);
}
void Instruction::setHasApproxFunc(bool B) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setHasApproxFunc(B);
}
void Instruction::setFastMathFlags(FastMathFlags FMF) {
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}
bool Instruction::isFast() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->isFast();
}
bool Instruction::hasAllowReassoc() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasAllowReassoc();
}
bool Instruction::hasNoNaNs() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasNoNaNs();
}
bool Instruction::hasNoInfs() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasNoInfs();
}
bool Instruction::hasNoSignedZeros() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasNoSignedZeros();
}
bool Instruction::hasAllowReciprocal() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasAllowReciprocal();
}
bool Instruction::hasAllowContract() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasAllowContract();
}
bool Instruction::hasApproxFunc() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasApproxFunc();
}
FastMathFlags Instruction::getFastMathFlags() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->getFastMathFlags();
}
void Instruction::copyFastMathFlags(const Instruction *I) {
copyFastMathFlags(I->getFastMathFlags());
}
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
// Copy the wrapping flags.
if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
setHasNoSignedWrap(OB->hasNoSignedWrap());
setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
}
}
if (auto *TI = dyn_cast<TruncInst>(V)) {
if (isa<TruncInst>(this)) {
setHasNoSignedWrap(TI->hasNoSignedWrap());
setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
}
}
// Copy the exact flag.
if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
if (isa<PossiblyExactOperator>(this))
setIsExact(PE->isExact());
if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
DestPD->setIsDisjoint(SrcPD->isDisjoint());
// Copy the fast-math flags.
if (auto *FP = dyn_cast<FPMathOperator>(V))
if (isa<FPMathOperator>(this))
copyFastMathFlags(FP->getFastMathFlags());
if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
DestGEP->getNoWrapFlags());
if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
if (isa<PossiblyNonNegInst>(this))
setNonNeg(NNI->hasNonNeg());
if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
if (auto *DestICmp = dyn_cast<ICmpInst>(this))
DestICmp->setSameSign(SrcICmp->hasSameSign());
}
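// Illustrative sketch (hypothetical rewrite, not part of this file): when a
// transform builds a replacement `NewI` that computes the same value as
// `OldI`, the poison-generating flags can be carried over wholesale:
//
//   NewI->copyIRFlags(OldI, /*IncludeWrapFlags=*/true);
//   OldI->replaceAllUsesWith(NewI);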
void Instruction::andIRFlags(const Value *V) {
if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
if (isa<OverflowingBinaryOperator>(this)) {
setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
}
}
if (auto *TI = dyn_cast<TruncInst>(V)) {
if (isa<TruncInst>(this)) {
setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
}
}
if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
if (isa<PossiblyExactOperator>(this))
setIsExact(isExact() && PE->isExact());
if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());
if (auto *FP = dyn_cast<FPMathOperator>(V)) {
if (isa<FPMathOperator>(this)) {
FastMathFlags FM = getFastMathFlags();
FM &= FP->getFastMathFlags();
copyFastMathFlags(FM);
}
}
if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
DestGEP->getNoWrapFlags());
if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
if (isa<PossiblyNonNegInst>(this))
setNonNeg(hasNonNeg() && NNI->hasNonNeg());
if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
if (auto *DestICmp = dyn_cast<ICmpInst>(this))
DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
}
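// Illustrative sketch (hypothetical CSE-style merge, not part of this file):
// when one of two identical instructions is kept, its flags must hold in
// both original contexts, so they are intersected rather than copied:
//
//   Kept->andIRFlags(Erased);
//   Erased->replaceAllUsesWith(Kept);
//   Erased->eraseFromParent();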
const char *Instruction::getOpcodeName(unsigned OpCode) {
switch (OpCode) {
// Terminators
case Ret: return "ret";
case Br: return "br";
case Switch: return "switch";
case IndirectBr: return "indirectbr";
case Invoke: return "invoke";
case Resume: return "resume";
case Unreachable: return "unreachable";
case CleanupRet: return "cleanupret";
case CatchRet: return "catchret";
case CatchPad: return "catchpad";
case CatchSwitch: return "catchswitch";
case CallBr: return "callbr";
// Standard unary operators...
case FNeg: return "fneg";
// Standard binary operators...
case Add: return "add";
case FAdd: return "fadd";
case Sub: return "sub";
case FSub: return "fsub";
case Mul: return "mul";
case FMul: return "fmul";
case UDiv: return "udiv";
case SDiv: return "sdiv";
case FDiv: return "fdiv";
case URem: return "urem";
case SRem: return "srem";
case FRem: return "frem";
// Logical operators...
case And: return "and";
case Or : return "or";
case Xor: return "xor";
// Memory instructions...
case Alloca: return "alloca";
case Load: return "load";
case Store: return "store";
case AtomicCmpXchg: return "cmpxchg";
case AtomicRMW: return "atomicrmw";
case Fence: return "fence";
case GetElementPtr: return "getelementptr";
// Convert instructions...
case Trunc: return "trunc";
case ZExt: return "zext";
case SExt: return "sext";
case FPTrunc: return "fptrunc";
case FPExt: return "fpext";
case FPToUI: return "fptoui";
case FPToSI: return "fptosi";
case UIToFP: return "uitofp";
case SIToFP: return "sitofp";
case IntToPtr: return "inttoptr";
case PtrToInt: return "ptrtoint";
case BitCast: return "bitcast";
case AddrSpaceCast: return "addrspacecast";
// Other instructions...
case ICmp: return "icmp";
case FCmp: return "fcmp";
case PHI: return "phi";
case Select: return "select";
case Call: return "call";
case Shl: return "shl";
case LShr: return "lshr";
case AShr: return "ashr";
case VAArg: return "va_arg";
case ExtractElement: return "extractelement";
case InsertElement: return "insertelement";
case ShuffleVector: return "shufflevector";
case ExtractValue: return "extractvalue";
case InsertValue: return "insertvalue";
case LandingPad: return "landingpad";
case CleanupPad: return "cleanuppad";
case Freeze: return "freeze";
default: return "<Invalid operator> ";
}
}
/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::hasSameSpecialState(const Instruction *I2,
bool IgnoreAlignment,
bool IntersectAttrs) const {
auto I1 = this;
assert(I1->getOpcode() == I2->getOpcode() &&
"Can not compare special state of different instructions");
auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
const CallBase *CB1) {
return IntersectAttrs
? CB0->getAttributes()
.intersectWith(CB0->getContext(), CB1->getAttributes())
.has_value()
: CB0->getAttributes() == CB1->getAttributes();
};
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
(AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
IgnoreAlignment);
if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
(LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
IgnoreAlignment) &&
LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
(SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
IgnoreAlignment) &&
SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(I1))
return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
CheckAttrsSame(CI, cast<CallInst>(I2)) &&
CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
CheckAttrsSame(CI, cast<InvokeInst>(I2)) &&
CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
CheckAttrsSame(CI, cast<CallBrInst>(I2)) &&
CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
CXI->getSuccessOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
CXI->getFailureOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
CXI->getSyncScopeID() ==
cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
return SVI->getShuffleMask() ==
cast<ShuffleVectorInst>(I2)->getShuffleMask();
if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
return GEP->getSourceElementType() ==
cast<GetElementPtrInst>(I2)->getSourceElementType();
return true;
}
bool Instruction::isIdenticalTo(const Instruction *I) const {
return isIdenticalToWhenDefined(I) &&
SubclassOptionalData == I->SubclassOptionalData;
}
bool Instruction::isIdenticalToWhenDefined(const Instruction *I,
bool IntersectAttrs) const {
if (getOpcode() != I->getOpcode() ||
getNumOperands() != I->getNumOperands() || getType() != I->getType())
return false;
// If both instructions have no operands, they are identical.
if (getNumOperands() == 0 && I->getNumOperands() == 0)
return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
IntersectAttrs);
// We have two instructions of identical opcode and #operands. Check to see
// if all operands are the same.
if (!std::equal(op_begin(), op_end(), I->op_begin()))
return false;
// WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
const PHINode *otherPHI = cast<PHINode>(I);
return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
otherPHI->block_begin());
}
return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
IntersectAttrs);
}
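// Illustrative sketch (hypothetical local CSE, not part of this file):
// assuming `A` and `B` sit in one block with `A` first, `B` can be folded
// into `A`; flags still need intersecting because this predicate ignores
// SubclassOptionalData:
//
//   if (A->isIdenticalToWhenDefined(B)) {
//     A->andIRFlags(B);
//     B->replaceAllUsesWith(A);
//     B->eraseFromParent();
//   }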
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
unsigned flags) const {
bool IgnoreAlignment = flags & CompareIgnoringAlignment;
bool UseScalarTypes = flags & CompareUsingScalarTypes;
bool IntersectAttrs = flags & CompareUsingIntersectedAttrs;
if (getOpcode() != I->getOpcode() ||
getNumOperands() != I->getNumOperands() ||
(UseScalarTypes ?
getType()->getScalarType() != I->getType()->getScalarType() :
getType() != I->getType()))
return false;
// We have two instructions of identical opcode and #operands. Check to see
// if all operands are the same type.
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (UseScalarTypes ?
getOperand(i)->getType()->getScalarType() !=
I->getOperand(i)->getType()->getScalarType() :
getOperand(i)->getType() != I->getOperand(i)->getType())
return false;
return this->hasSameSpecialState(I, IgnoreAlignment, IntersectAttrs);
}
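// Illustrative sketch (hypothetical query, not part of this file): comparing
// two loads while tolerating a difference in alignment; `combineLoads` is an
// assumed helper:
//
//   if (LoadA->isSameOperationAs(LoadB,
//                                Instruction::CompareIgnoringAlignment))
//     combineLoads(LoadA, LoadB);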
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
for (const Use &U : uses()) {
// PHI nodes use values in the corresponding predecessor block. For other
// instructions, just check to see whether the parent of the use matches up.
const Instruction *I = cast<Instruction>(U.getUser());
const PHINode *PN = dyn_cast<PHINode>(I);
if (!PN) {
if (I->getParent() != BB)
return true;
continue;
}
if (PN->getIncomingBlock(U) != BB)
return true;
}
return false;
}
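// Illustrative sketch (hypothetical sinking decision, not part of this
// file): an instruction whose uses are all local to its block is a sinking
// candidate; `trySinkInstruction` is an assumed helper:
//
//   if (!I->isUsedOutsideOfBlock(I->getParent()))
//     trySinkInstruction(I);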
bool Instruction::mayReadFromMemory() const {
switch (getOpcode()) {
default: return false;
case Instruction::VAArg:
case Instruction::Load:
case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW:
case Instruction::CatchPad:
case Instruction::CatchRet:
return true;
case Instruction::Call:
case Instruction::Invoke:
case Instruction::CallBr:
return !cast<CallBase>(this)->onlyWritesMemory();
case Instruction::Store:
return !cast<StoreInst>(this)->isUnordered();
}
}
bool Instruction::mayWriteToMemory() const {
switch (getOpcode()) {
default: return false;
case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
case Instruction::Store:
case Instruction::VAArg:
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW:
case Instruction::CatchPad:
case Instruction::CatchRet:
return true;
case Instruction::Call:
case Instruction::Invoke:
case Instruction::CallBr:
return !cast<CallBase>(this)->onlyReadsMemory();
case Instruction::Load:
return !cast<LoadInst>(this)->isUnordered();
}
}
bool Instruction::isAtomic() const {
switch (getOpcode()) {
default:
return false;
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW:
case Instruction::Fence:
return true;
case Instruction::Load:
return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
case Instruction::Store:
return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
}
}
bool Instruction::hasAtomicLoad() const {
assert(isAtomic());
switch (getOpcode()) {
default:
return false;
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW:
case Instruction::Load:
return true;
}
}
bool Instruction::hasAtomicStore() const {
assert(isAtomic());
switch (getOpcode()) {
default:
return false;
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW:
case Instruction::Store:
return true;
}
}
bool Instruction::isVolatile() const {
switch (getOpcode()) {
default:
return false;
case Instruction::AtomicRMW:
return cast<AtomicRMWInst>(this)->isVolatile();
case Instruction::Store:
return cast<StoreInst>(this)->isVolatile();
case Instruction::Load:
return cast<LoadInst>(this)->isVolatile();
case Instruction::AtomicCmpXchg:
return cast<AtomicCmpXchgInst>(this)->isVolatile();
case Instruction::Call:
case Instruction::Invoke:
// There are a very limited number of intrinsics with volatile flags.
if (auto *II = dyn_cast<IntrinsicInst>(this)) {
if (auto *MI = dyn_cast<MemIntrinsic>(II))
return MI->isVolatile();
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::matrix_column_major_load:
return cast<ConstantInt>(II->getArgOperand(2))->isOne();
case Intrinsic::matrix_column_major_store:
return cast<ConstantInt>(II->getArgOperand(3))->isOne();
}
}
return false;
}
}
Type *Instruction::getAccessType() const {
switch (getOpcode()) {
case Instruction::Store:
return cast<StoreInst>(this)->getValueOperand()->getType();
case Instruction::Load:
case Instruction::AtomicRMW:
return getType();
case Instruction::AtomicCmpXchg:
return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
case Instruction::Call:
case Instruction::Invoke:
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
switch (II->getIntrinsicID()) {
case Intrinsic::masked_load:
case Intrinsic::masked_gather:
case Intrinsic::masked_expandload:
case Intrinsic::vp_load:
case Intrinsic::vp_gather:
case Intrinsic::experimental_vp_strided_load:
return II->getType();
case Intrinsic::masked_store:
case Intrinsic::masked_scatter:
case Intrinsic::masked_compressstore:
case Intrinsic::vp_store:
case Intrinsic::vp_scatter:
case Intrinsic::experimental_vp_strided_store:
return II->getOperand(0)->getType();
default:
break;
}
}
}
return nullptr;
}
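// Illustrative sketch (hypothetical size query, not part of this file): the
// access type translates into a byte size via an assumed DataLayout `DL`:
//
//   if (Type *AccessTy = I->getAccessType())
//     TypeSize Size = DL.getTypeStoreSize(AccessTy);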
static bool canUnwindPastLandingPad(const LandingPadInst *LP,
bool IncludePhaseOneUnwind) {
// Because phase one unwinding skips cleanup landingpads, we effectively
// unwind past this frame, and callers need to have valid unwind info.
if (LP->isCleanup())
return IncludePhaseOneUnwind;
for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
Constant *Clause = LP->getClause(I);
// catch ptr null catches all exceptions.
if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
return false;
// filter [0 x ptr] catches all exceptions.
if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
return false;
}
// May catch only some subset of exceptions, in which case other exceptions
// will continue unwinding.
return true;
}
bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
switch (getOpcode()) {
case Instruction::Call:
return !cast<CallInst>(this)->doesNotThrow();
case Instruction::CleanupRet:
return cast<CleanupReturnInst>(this)->unwindsToCaller();
case Instruction::CatchSwitch:
return cast<CatchSwitchInst>(this)->unwindsToCaller();
case Instruction::Resume:
return true;
case Instruction::Invoke: {
// Landingpads themselves don't unwind -- however, an invoke of a skipped
// landingpad may continue unwinding.
BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
BasicBlock::iterator Pad = UnwindDest->getFirstNonPHIIt();
if (auto *LP = dyn_cast<LandingPadInst>(Pad))
return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
return false;
}
case Instruction::CleanupPad:
// Treat the same as cleanup landingpad.
return IncludePhaseOneUnwind;
default:
return false;
}
}
bool Instruction::mayHaveSideEffects() const {
return mayWriteToMemory() || mayThrow() || !willReturn();
}
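// Illustrative sketch (hypothetical dead-code test, not part of this file):
// an unused, effect-free instruction that is neither a terminator nor an EH
// pad can be erased without changing observable behavior:
//
//   if (I.use_empty() && !I.isTerminator() && !I.isEHPad() &&
//       !I.mayHaveSideEffects())
//     I.eraseFromParent();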
bool Instruction::isSafeToRemove() const {
return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
!this->isTerminator() && !this->isEHPad();
}
bool Instruction::willReturn() const {
// Volatile store isn't guaranteed to return; see LangRef.
if (auto *SI = dyn_cast<StoreInst>(this))
return !SI->isVolatile();
if (const auto *CB = dyn_cast<CallBase>(this))
return CB->hasFnAttr(Attribute::WillReturn);
return true;
}
bool Instruction::isLifetimeStartOrEnd() const {
auto *II = dyn_cast<IntrinsicInst>(this);
if (!II)
return false;
Intrinsic::ID ID = II->getIntrinsicID();
return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}
bool Instruction::isLaunderOrStripInvariantGroup() const {
auto *II = dyn_cast<IntrinsicInst>(this);
if (!II)
return false;
Intrinsic::ID ID = II->getIntrinsicID();
return ID == Intrinsic::launder_invariant_group ||
ID == Intrinsic::strip_invariant_group;
}
bool Instruction::isDebugOrPseudoInst() const {
return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}
const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
return I;
return nullptr;
}
const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
return I;
return nullptr;
}
const DebugLoc &Instruction::getStableDebugLoc() const {
if (isa<DbgInfoIntrinsic>(this))
if (const Instruction *Next = getNextNonDebugInstruction())
return Next->getDebugLoc();
return getDebugLoc();
}
bool Instruction::isAssociative() const {
if (auto *II = dyn_cast<IntrinsicInst>(this))
return II->isAssociative();
unsigned Opcode = getOpcode();
if (isAssociative(Opcode))
return true;
switch (Opcode) {
case FMul:
case FAdd:
return cast<FPMathOperator>(this)->hasAllowReassoc() &&
cast<FPMathOperator>(this)->hasNoSignedZeros();
default:
return false;
}
}
bool Instruction::isCommutative() const {
if (auto *II = dyn_cast<IntrinsicInst>(this))
return II->isCommutative();
// TODO: Should allow icmp/fcmp?
return isCommutative(getOpcode());
}
unsigned Instruction::getNumSuccessors() const {
switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS) \
case Instruction::OPC: \
return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
default:
break;
}
llvm_unreachable("not a terminator");
}
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS) \
case Instruction::OPC: \
return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
default:
break;
}
llvm_unreachable("not a terminator");
}
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS) \
case Instruction::OPC: \
return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
default:
break;
}
llvm_unreachable("not a terminator");
}
void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
Idx != NumSuccessors; ++Idx)
if (getSuccessor(Idx) == OldBB)
setSuccessor(Idx, NewBB);
}
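// Illustrative sketch (hypothetical edge split, not part of this file):
// redirect every edge from `Term` into `OldBB` through a fresh `NewBB` that
// falls through to the original target (PHIs in `OldBB` would also need
// their incoming blocks updated):
//
//   Term->replaceSuccessorWith(OldBB, NewBB);
//   BranchInst::Create(OldBB, NewBB); // unconditional br at end of NewBB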
Instruction *Instruction::cloneImpl() const {
llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
void Instruction::swapProfMetadata() {
MDNode *ProfileData = getBranchWeightMDNode(*this);
if (!ProfileData)
return;
unsigned FirstIdx = getBranchWeightOffset(ProfileData);
if (ProfileData->getNumOperands() != 2 + FirstIdx)
return;
unsigned SecondIdx = FirstIdx + 1;
SmallVector<Metadata *, 4> Ops;
// If there are more weights past the second, we can't swap them.
if (ProfileData->getNumOperands() > SecondIdx + 1)
return;
for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) {
Ops.push_back(ProfileData->getOperand(Idx));
}
// Switch the order of the weights
Ops.push_back(ProfileData->getOperand(SecondIdx));
Ops.push_back(ProfileData->getOperand(FirstIdx));
setMetadata(LLVMContext::MD_prof,
MDNode::get(ProfileData->getContext(), Ops));
}
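// Illustrative sketch (hypothetical branch inversion, not part of this
// file): swapping the weights belongs with swapping the successors, e.g.
// when a conditional branch is rewritten on its negated condition:
//
//   BI->setCondition(Builder.CreateNot(BI->getCondition()));
//   BI->swapSuccessors(); // swaps targets and the !prof weights with them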
void Instruction::copyMetadata(const Instruction &SrcInst,
ArrayRef<unsigned> WL) {
if (!SrcInst.hasMetadata())
return;
SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end());
// Otherwise, enumerate and copy over metadata from the old instruction to the
// new one.
SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
for (const auto &MD : TheMDs) {
if (WL.empty() || WLS.count(MD.first))
setMetadata(MD.first, MD.second);
}
if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
setDebugLoc(SrcInst.getDebugLoc());
}
Instruction *Instruction::clone() const {
Instruction *New = nullptr;
switch (getOpcode()) {
default:
llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas) \
case Instruction::opc: \
New = cast<clas>(this)->cloneImpl(); \
break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
}
New->SubclassOptionalData = SubclassOptionalData;
New->copyMetadata(*this);
return New;
}
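// Illustrative sketch (hypothetical duplication, not part of this file): a
// clone is detached and still refers to the original operands, so it must be
// inserted and, when copied into a different context, have its operands
// remapped (see llvm::RemapInstruction in ValueMapper.h). `InsertPt` is an
// assumed instruction:
//
//   Instruction *Copy = I->clone();
//   Copy->insertBefore(InsertPt);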