2018-03-08 13:05:02 +00:00
|
|
|
//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
|
|
|
|
//
|
2019-01-19 08:50:56 +00:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2018-03-08 13:05:02 +00:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
/// \file
|
|
|
|
///
|
|
|
|
/// This file implements the InstrBuilder interface.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2018-12-17 08:08:31 +00:00
|
|
|
#include "llvm/MCA/InstrBuilder.h"
|
2018-06-20 10:08:11 +00:00
|
|
|
#include "llvm/ADT/APInt.h"
|
2018-06-04 12:23:07 +00:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2024-06-28 10:46:46 -07:00
|
|
|
#include "llvm/ADT/Hashing.h"
|
2022-06-05 12:06:01 -07:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2018-03-08 13:05:02 +00:00
|
|
|
#include "llvm/MC/MCInst.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2018-05-04 13:52:12 +00:00
|
|
|
#include "llvm/Support/WithColor.h"
|
2018-07-09 12:30:55 +00:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2018-03-08 13:05:02 +00:00
|
|
|
|
2022-06-05 12:06:01 -07:00
|
|
|
#define DEBUG_TYPE "llvm-mca-instrbuilder"
|
2018-03-08 13:05:02 +00:00
|
|
|
|
2018-10-30 15:56:08 +00:00
|
|
|
namespace llvm {
|
2018-03-08 13:05:02 +00:00
|
|
|
namespace mca {
|
|
|
|
|
2022-06-05 12:06:01 -07:00
|
|
|
// Out-of-line static ID anchor for RecycledInstErr — the conventional
// llvm::ErrorInfo pattern (used for error-type identification); presumably
// declared in InstrBuilder.h. Its value is irrelevant; only its address is used.
char RecycledInstErr::ID = 0;
|
|
|
|
|
2018-10-25 11:51:34 +00:00
|
|
|
/// Constructs an InstrBuilder bound to a subtarget description.
///
/// \param sti  Subtarget info; must expose an instruction scheduling model.
/// \param mcii Opcode descriptors used to analyze instruction operands.
/// \param mri  Register info used to resolve physical registers.
/// \param mcia Optional instruction analysis (may be null).
/// \param im   Instrument manager that may override scheduling class IDs.
/// \param cl   Latency (in cycles) assumed for call instructions.
InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
                           const llvm::MCInstrInfo &mcii,
                           const llvm::MCRegisterInfo &mri,
                           const llvm::MCInstrAnalysis *mcia,
                           const mca::InstrumentManager &im, unsigned cl)
    : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), IM(im), FirstCallInst(true),
      FirstReturnInst(true), CallLatency(cl) {
  const MCSchedModel &SM = STI.getSchedModel();
  // Precompute one resource mask per processor resource kind. Reuse the
  // already-bound scheduling model instead of querying STI a second time.
  ProcResourceMasks.resize(SM.getNumProcResourceKinds());
  computeProcResourceMasks(SM, ProcResourceMasks);
}
|
|
|
|
|
2018-03-24 16:05:36 +00:00
|
|
|
/// Populates ID with the processor resources consumed by scheduling class
/// SCDesc: the per-resource cycle usage (ID.Resources), the set of consumed
/// buffered resources (ID.UsedBuffers), the masks of used resource units and
/// groups, and the MustIssueImmediately / HasPartiallyOverlappingGroups flags.
/// Cycle counts claimed by individual units are subtracted from the groups
/// that contain them, mirroring Tablegen's ExpandProcResource() expansion.
static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  SmallVector<ResourcePlusCycles, 4> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource is
  // part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;

  unsigned NumProcResources = SM.getNumProcResourceKinds();
  APInt Buffers(NumProcResources, 0);

  bool AllInOrderResources = true;
  bool AnyDispatchHazards = false;
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    // A zero-cycle write is a scheduling-model bug; warn (debug builds only)
    // and skip the entry rather than polluting the resource usage data.
    if (!PRE->ReleaseAtCycle) {
#ifndef NDEBUG
      WithColor::warning()
          << "Ignoring invalid write of zero cycles on processor resource "
          << PR.Name << "\n";
      WithColor::note() << "found in scheduling class " << SCDesc.Name
                        << " (write index #" << I << ")\n";
#endif
      continue;
    }

    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    // BufferSize < 0 denotes an in-order (unbuffered) resource; BufferSize == 0
    // denotes a dispatch hazard (instruction must issue immediately).
    if (PR.BufferSize < 0) {
      AllInOrderResources = false;
    } else {
      Buffers.setBit(getResourceStateIndex(Mask));
      AnyDispatchHazards |= (PR.BufferSize == 0);
      AllInOrderResources &= (PR.BufferSize <= 1);
    }

    CycleSegment RCy(0, PRE->ReleaseAtCycle, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->ReleaseAtCycle;
    }
  }

  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = llvm::popcount(A.first);
    unsigned popcntB = llvm::popcount(B.first);
    if (popcntA < popcntB)
      return true;
    if (popcntA > popcntB)
      return false;
    return A.first < B.first;
  });

  uint64_t UsedResourceUnits = 0;
  uint64_t UsedResourceGroups = 0;
  uint64_t UnitsFromResourceGroups = 0;

  // Remove cycles contributed by smaller resources, and check if there
  // are partially overlapping resource groups.
  ID.HasPartiallyOverlappingGroups = false;

  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    // A zero-size entry can only be a group whose cycles were fully claimed
    // by smaller resources processed earlier; record the group bit and drop it.
    if (!A.second.size()) {
      assert(llvm::popcount(A.first) > 1 && "Expected a group!");
      UsedResourceGroups |= llvm::bit_floor(A.first);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;

    if (llvm::popcount(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= llvm::bit_floor(NormalizedMask);
      if (UnitsFromResourceGroups & NormalizedMask)
        ID.HasPartiallyOverlappingGroups = true;

      UnitsFromResourceGroups |= NormalizedMask;
      UsedResourceGroups |= (A.first ^ NormalizedMask);
    }

    // Subtract A's cycles (minus any cycles already accounted to A as a
    // "Super" resource) from every larger entry that contains A.
    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (llvm::popcount(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ReleaseAtCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2 cycles latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (llvm::popcount(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ llvm::bit_floor(RPC.first);
      uint64_t MaxResourceUnits = llvm::popcount(Mask);
      if (RPC.second.NumUnits > (unsigned)llvm::popcount(Mask)) {
        RPC.second.setReserved();
        RPC.second.NumUnits = MaxResourceUnits;
      }
    }
  }

  // Identify extra buffers that are consumed through super resources.
  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
      const MCProcResourceDesc &PR = *SM.getProcResource(I);
      if (PR.BufferSize == -1)
        continue;

      uint64_t Mask = ProcResourceMasks[I];
      // A strict sub-resource of SR that is buffered also consumes a buffer.
      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
        Buffers.setBit(getResourceStateIndex(Mask));
    }
  }

  ID.UsedBuffers = Buffers.getZExtValue();
  ID.UsedProcResUnits = UsedResourceUnits;
  ID.UsedProcResGroups = UsedResourceGroups;

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tResource Mask=" << format_hex(R.first, 16) << ", "
             << "Reserved=" << R.second.isReserved() << ", "
             << "#Units=" << R.second.NumUnits << ", "
             << "cy=" << R.second.size() << '\n';
    uint64_t BufferIDs = ID.UsedBuffers;
    while (BufferIDs) {
      uint64_t Current = BufferIDs & (-BufferIDs);
      dbgs() << "\t\tBuffer Mask=" << format_hex(Current, 16) << '\n';
      BufferIDs ^= Current;
    }
    dbgs() << "\t\t Used Units=" << format_hex(ID.UsedProcResUnits, 16) << '\n';
    dbgs() << "\t\tUsed Groups=" << format_hex(ID.UsedProcResGroups, 16)
           << '\n';
    dbgs() << "\t\tHasPartiallyOverlappingGroups="
           << ID.HasPartiallyOverlappingGroups << '\n';
  });
}
|
|
|
|
|
|
|
|
/// Computes ID.MaxLatency for an instruction. Calls — and opcodes whose
/// scheduling model reports an unknown (negative) latency — are modeled with
/// the user-configurable CallLatency; everything else uses the scheduling
/// model's computed latency.
static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI,
                              unsigned CallLatency) {
  // We cannot estimate how long a call will take; artificially assign it the
  // configured (arbitrarily high) call latency.
  if (MCDesc.isCall()) {
    ID.MaxLatency = CallLatency;
    return;
  }

  const int ModelLatency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  if (ModelLatency < 0) {
    // Latency is unknown; conservatively fall back to the call latency.
    ID.MaxLatency = CallLatency;
    return;
  }
  ID.MaxLatency = static_cast<unsigned>(ModelLatency);
}
|
|
|
|
|
2018-11-23 20:26:57 +00:00
|
|
|
/// Verifies that MCI provides enough register operands to cover every explicit
/// register definition declared by MCDesc, and — when an optional definition
/// exists — that the last declared operand slot holds a register.
static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
  // Walk the operand list, consuming one expected definition per register
  // operand and skipping anything that is not a register.
  unsigned RemainingDefs = MCDesc.getNumDefs();
  const unsigned NumOps = MCI.getNumOperands();
  unsigned Idx = 0;
  while (RemainingDefs && Idx < NumOps) {
    if (MCI.getOperand(Idx).isReg())
      --RemainingDefs;
    ++Idx;
  }

  if (RemainingDefs) {
    return make_error<InstructionError<MCInst>>(
        "Expected more register operand definitions.", MCI);
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand.
    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
    if (Idx == NumOps || !Op.isReg()) {
      std::string Message =
          "expected a register operand for an optional definition. Instruction "
          "has not been correctly analyzed.";
      return make_error<InstructionError<MCInst>>(Message, MCI);
    }
  }

  return ErrorSuccess();
}
|
|
|
|
|
|
|
|
/// Populates ID.Writes with one WriteDescriptor per register definition of
/// MCI: explicit defs first, then implicit defs, then the optional def (if
/// any), then variadic defs (only when the opcode declares variadic operands
/// as defs). Each write gets a latency from the scheduling class's write
/// latency table, falling back to ID.MaxLatency, so ID.MaxLatency must be
/// computed before calling this.
void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                  unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // Assumptions made by this algorithm:
  //  1. The number of explicit and implicit register definitions in a MCInst
  //     matches the number of explicit and implicit definitions according to
  //     the opcode descriptor (MCInstrDesc).
  //  2. Uses start at index #(MCDesc.getNumDefs()).
  //  3. There can only be a single optional register definition, an it is
  //     either the last operand of the sequence (excluding extra operands
  //     contributed by variadic opcodes) or one of the explicit register
  //     definitions. The latter occurs for some Thumb1 instructions.
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions is
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // About assumption 1.
  // The algorithm allows non-register operands between register operand
  // definitions. This helps to handle some special ARM instructions with
  // implicit operand increment (-mtriple=armv7):
  //
  // vld1.32  {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
  //                             @  <MCOperand Reg:59>
  //                             @  <MCOperand Imm:0>     (!!)
  //                             @  <MCOperand Reg:67>
  //                             @  <MCOperand Imm:0>
  //                             @  <MCOperand Imm:14>
  //                             @  <MCOperand Reg:0>>
  //
  // MCDesc reports:
  //  6 explicit operands.
  //  1 optional definition
  //  2 explicit definitions (!!)
  //
  // The presence of an 'Imm' operand between the two register definitions
  // breaks the assumption that "register definitions are always at the
  // beginning of the operand sequence".
  //
  // To workaround this issue, this algorithm ignores (i.e. skips) any
  // non-register operands between register definitions. The optional
  // definition is still at index #(NumOperands-1).
  //
  // According to assumption 2. register reads start at #(NumExplicitDefs-1).
  // That means, register R1 from the example is both read and written.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.implicit_defs().size();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;

  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  ID.Writes.resize(TotalDefs + NumVariadicOps);
  // Iterate over the operands list, and skip non-register or constant register
  // operands. The first NumExplicitDefs register operands are expected to be
  // register definitions.
  unsigned CurrentDef = 0;
  unsigned OptionalDefIdx = MCDesc.getNumOperands() - 1;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    // An explicit def slot flagged as the optional def: remember its index and
    // emit the write later in the hasOptionalDef() block below.
    if (MCDesc.operands()[CurrentDef].isOptionalDef()) {
      OptionalDefIdx = CurrentDef++;
      continue;
    }
    // Writes to constant registers (e.g. an always-zero register) are not
    // modeled; consume the def slot without emitting a descriptor.
    if (MRI.isConstant(Op.getReg())) {
      CurrentDef++;
      continue;
    }

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  assert(CurrentDef == NumExplicitDefs &&
         "Expected more register operand definitions.");
  // Implicit definitions: OpIndex is the bitwise complement of the implicit
  // def index, which distinguishes them from explicit operand indices.
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.implicit_defs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (MCDesc.hasOptionalDef()) {
    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
    Write.OpIndex = OptionalDefIdx;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (!NumVariadicOps)
    return;

  // Variadic operands are treated as writes only when the opcode declares
  // them as defs; otherwise they are handled as reads by populateReads().
  bool AssumeUsesOnly = !MCDesc.variadicOpsAreDefs();
  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;
    if (MRI.isConstant(Op.getReg()))
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = OpIndex;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
    ++CurrentDef;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  // Drop unused slots (e.g. skipped constant-register or non-register defs).
  ID.Writes.resize(CurrentDef);
}
|
|
|
|
|
2018-11-23 20:26:57 +00:00
|
|
|
/// Populates ID.Reads with one ReadDescriptor per register use of MCI:
/// explicit uses first, then implicit uses, then variadic uses (unless the
/// opcode declares variadic operands as defs). UseIndex reflects that layout,
/// since ReadAdvance entries are indexed with implicit uses placed directly
/// after explicit uses.
void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
                                 unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
  unsigned NumImplicitUses = MCDesc.implicit_uses().size();
  // Remove the optional definition.
  if (MCDesc.hasOptionalDef())
    --NumExplicitUses;
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
  ID.Reads.resize(TotalUses);
  unsigned CurrentUse = 0;
  // Explicit uses start right after the explicit defs (assumption 2 in
  // populateWrites). Non-register and constant-register operands are skipped.
  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
       ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;
    if (MRI.isConstant(Op.getReg()))
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // For the purpose of ReadAdvance, implicit uses come directly after explicit
  // uses. The "UseIndex" must be updated according to that implicit layout.
  // OpIndex is the bitwise complement of the implicit use index, which
  // distinguishes implicit uses from explicit operand indices.
  for (unsigned I = 0; I < NumImplicitUses; ++I) {
    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
    Read.OpIndex = ~I;
    Read.UseIndex = NumExplicitUses + I;
    Read.RegisterID = MCDesc.implicit_uses()[I];
    // Constant-register uses keep their slot but get no SchedClassID.
    if (MRI.isConstant(Read.RegisterID))
      continue;
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
                      << MRI.getName(Read.RegisterID) << '\n');
  }

  CurrentUse += NumImplicitUses;

  // Variadic operands are treated as reads unless the opcode declares them as
  // defs (in which case populateWrites() handles them).
  bool AssumeDefsOnly = MCDesc.variadicOpsAreDefs();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // Drop unused slots (e.g. skipped non-register or variadic-def operands).
  ID.Reads.resize(CurrentUse);
}
|
|
|
|
|
2024-06-28 10:46:46 -07:00
|
|
|
/// Hashes an MCOperand by its kind, folding in the register number for
/// register operands. Non-register operands hash only by kind: their payload
/// (immediates, expressions, ...) is deliberately not part of the hash.
hash_code hashMCOperand(const MCOperand &MCO) {
  // Combine the kind predicates first so operands of different kinds never
  // collide purely on payload.
  const hash_code KindHash =
      hash_combine(MCO.isReg(), MCO.isImm(), MCO.isSFPImm(), MCO.isDFPImm(),
                   MCO.isExpr(), MCO.isInst());
  if (!MCO.isReg())
    return KindHash;

  return hash_combine(KindHash, MCO.getReg());
}
|
|
|
|
|
|
|
|
/// Hashes an MCInst from its opcode, flags, and per-operand hashes (see
/// hashMCOperand). Operand order matters: the hashes are folded sequentially.
hash_code hashMCInst(const MCInst &MCI) {
  hash_code Hash = hash_combine(MCI.getOpcode(), MCI.getFlags());
  for (unsigned Idx = 0, NumOps = MCI.getNumOperands(); Idx < NumOps; ++Idx)
    Hash = hash_combine(Hash, hashMCOperand(MCI.getOperand(Idx)));
  return Hash;
}
|
|
|
|
|
2018-10-04 10:36:49 +00:00
|
|
|
/// Sanity-checks an InstrDesc: an instruction that decodes to zero micro
/// opcodes must not consume any scheduler resources (buffers or pipeline
/// resources); otherwise the scheduling model is inconsistent.
Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
                                    const MCInst &MCI) const {
  // Instructions with micro opcodes are always acceptable here.
  if (ID.NumMicroOps != 0)
    return ErrorSuccess();

  const bool ConsumesBuffers = ID.UsedBuffers;
  const bool ConsumesResources = !ID.Resources.empty();
  if (!ConsumesBuffers && !ConsumesResources)
    return ErrorSuccess();

  // FIXME: see PR44797. We should revisit these checks and possibly move them
  // in CodeGenSchedule.cpp.
  StringRef Message = "found an inconsistent instruction that decodes to zero "
                      "opcodes and that consumes scheduler resources.";
  return make_error<InstructionError<MCInst>>(std::string(Message), MCI);
}
|
|
|
|
|
2024-06-28 10:46:46 -07:00
|
|
|
/// Resolves a (possibly variant) scheduling class for MCI to a concrete,
/// non-variant class ID, or returns an error when resolution fails.
Expected<unsigned> InstrBuilder::getVariantSchedClassID(const MCInst &MCI,
                                                        unsigned SchedClassID) {
  const MCSchedModel &SM = STI.getSchedModel();
  const unsigned CPUID = SM.getProcessorID();
  // Keep resolving while the current class is a variant; resolution may chain
  // through several variant classes, or yield 0 on failure.
  while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
    SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, &MCII, CPUID);

  if (SchedClassID)
    return SchedClassID;

  return make_error<InstructionError<MCInst>>(
      "unable to resolve scheduling class for write variant.", MCI);
}
|
|
|
|
|
2018-08-13 18:11:48 +00:00
|
|
|
/// Build a new InstrDesc for \p MCI and insert it into the appropriate cache.
///
/// The descriptor aggregates the scheduling information (number of micro-ops,
/// maximum latency, consumed processor resources) and the read/write operand
/// layout for the instruction. Returns an error if a variant scheduling class
/// cannot be resolved, or if the opcode has no scheduling information in the
/// model. On success, ownership of the descriptor is transferred to either
/// `Descriptors` (recyclable, keyed by opcode + sched class) or
/// `VariantDescriptors` (keyed by instruction hash + sched class), and a
/// reference into the cache is returned.
Expected<const InstrDesc &>
InstrBuilder::createInstrDescImpl(const MCInst &MCI,
                                  const SmallVector<Instrument *> &IVec) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  // Obtain the instruction descriptor from the opcode.
  unsigned short Opcode = MCI.getOpcode();
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  // Allow InstrumentManager to override and use a different SchedClassID.
  unsigned SchedClassID = IM.getSchedClassID(MCII, MCI, IVec);
  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();

  // Try to solve variant scheduling classes.
  if (IsVariant) {
    Expected<unsigned> VariantSchedClassIDOrErr =
        getVariantSchedClassID(MCI, SchedClassID);
    if (!VariantSchedClassIDOrErr)
      return VariantSchedClassIDOrErr.takeError();

    SchedClassID = *VariantSchedClassIDOrErr;
  }

  // Check if this instruction is supported. Otherwise, report an error.
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
    return make_error<InstructionError<MCInst>>(
        "found an unsupported instruction in the input assembly sequence", MCI);
  }

  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');
  LLVM_DEBUG(dbgs() << "\t\tOpcode=" << Opcode << '\n');

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = std::make_unique<InstrDesc>();
  ID->NumMicroOps = SCDesc.NumMicroOps;
  ID->SchedClassID = SchedClassID;

  // Warn (once) that calls are only approximated with a fixed latency.
  if (MCDesc.isCall() && FirstCallInst) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of " << CallLatency << "cy.\n";
    FirstCallInst = false;
  }

  // Warn (once) that program-counter updates from returns are ignored.
  if (MCDesc.isReturn() && FirstReturnInst) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
    FirstReturnInst = false;
  }

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI, CallLatency);

  if (Error Err = verifyOperands(MCDesc, MCI))
    return std::move(Err);

  populateWrites(*ID, MCI, SchedClassID);
  populateReads(*ID, MCI, SchedClassID);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Validation check on the instruction descriptor.
  if (Error Err = verifyInstrDesc(*ID, MCI))
    return std::move(Err);

  // Now add the new descriptor. Bind a reference to the cache slot once so
  // insertion and the returned reference share a single map lookup (the
  // original code performed the lookup twice: once for operator[] assignment
  // and once for the return expression).
  bool IsVariadic = MCDesc.isVariadic();
  if ((ID->IsRecyclable = !IsVariadic && !IsVariant)) {
    auto DKey = std::make_pair(MCI.getOpcode(), SchedClassID);
    std::unique_ptr<InstrDesc> &CachedDesc = Descriptors[DKey];
    CachedDesc = std::move(ID);
    return *CachedDesc;
  }

  // Non-recyclable (variant or variadic) descriptors are keyed by a hash of
  // the full instruction, since the resolved class may depend on operands.
  auto VDKey = std::make_pair(hashMCInst(MCI), SchedClassID);
  assert(
      !VariantDescriptors.contains(VDKey) &&
      "Expected VariantDescriptors to not already have a value for this key.");
  std::unique_ptr<InstrDesc> &CachedVariantDesc = VariantDescriptors[VDKey];
  CachedVariantDesc = std::move(ID);
  return *CachedVariantDesc;
}
|
|
|
|
|
2022-11-15 08:03:41 -08:00
|
|
|
/// Return the cached InstrDesc for \p MCI, creating it on a cache miss.
///
/// Probes the non-variant cache first (keyed by opcode + instrument-provided
/// sched class), then resolves any variant scheduling class and probes the
/// variant cache (keyed by instruction hash + resolved class). Falls back to
/// createInstrDescImpl, which populates the appropriate cache.
Expected<const InstrDesc &>
InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI,
                                   const SmallVector<Instrument *> &IVec) {
  // Cache lookup using SchedClassID from Instrumentation.
  unsigned SchedClassID = IM.getSchedClassID(MCII, MCI, IVec);

  // Reuse the iterator from find_as instead of re-looking the key up with
  // operator[] (the original code performed the lookup twice).
  auto DKey = std::make_pair(MCI.getOpcode(), SchedClassID);
  auto DescIt = Descriptors.find_as(DKey);
  if (DescIt != Descriptors.end())
    return *DescIt->second;

  // Miss in the non-variant cache: resolve the variant scheduling class
  // (a no-op resolution error is propagated to the caller).
  Expected<unsigned> VariantSchedClassIDOrErr =
      getVariantSchedClassID(MCI, SchedClassID);
  if (!VariantSchedClassIDOrErr)
    return VariantSchedClassIDOrErr.takeError();

  SchedClassID = *VariantSchedClassIDOrErr;

  // Variant descriptors are keyed by a hash of the whole MCInst because the
  // resolved class may depend on operand values, not just the opcode.
  auto VDKey = std::make_pair(hashMCInst(MCI), SchedClassID);
  auto It = VariantDescriptors.find(VDKey);
  if (It != VariantDescriptors.end())
    return *It->second;

  // Full miss: build (and cache) a fresh descriptor.
  return createInstrDescImpl(MCI, IVec);
}
|
|
|
|
|
2022-06-05 12:06:01 -07:00
|
|
|
STATISTIC(NumVariantInst, "Number of MCInsts that doesn't have static Desc");

/// Materialize an mca::Instruction for \p MCI.
///
/// Obtains (or builds) the InstrDesc for the instruction, then either recycles
/// a previously-retired Instruction via the InstRecycleCB callback (when the
/// descriptor is recyclable) or allocates a fresh one. Register reads and
/// writes are populated from the descriptor, including dependency-breaking and
/// super-register-clearing information queried from MCInstrAnalysis. When an
/// instruction was recycled, ownership stays with the recycler and the result
/// is signalled through a RecycledInstErr carrying the raw pointer; otherwise
/// the newly created Instruction is returned by value.
Expected<std::unique_ptr<Instruction>>
InstrBuilder::createInstruction(const MCInst &MCI,
                                const SmallVector<Instrument *> &IVec) {
  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI, IVec);
  if (!DescOrErr)
    return DescOrErr.takeError();
  const InstrDesc &D = *DescOrErr;
  // NewIS points at the instruction being populated, whether recycled or
  // freshly allocated; CreatedIS owns it only in the non-recycled case.
  Instruction *NewIS = nullptr;
  std::unique_ptr<Instruction> CreatedIS;
  bool IsInstRecycled = false;

  // Count instructions whose descriptor cannot be cached/recycled (variant or
  // variadic) for the -stats report.
  if (!D.IsRecyclable)
    ++NumVariantInst;

  // Prefer recycling a retired Instruction when a recycle callback is set and
  // the descriptor allows it; reset() clears the previous dynamic state.
  if (D.IsRecyclable && InstRecycleCB) {
    if (auto *I = InstRecycleCB(D)) {
      NewIS = I;
      NewIS->reset();
      IsInstRecycled = true;
    }
  }
  if (!IsInstRecycled) {
    CreatedIS = std::make_unique<Instruction>(D, MCI.getOpcode());
    NewIS = CreatedIS.get();
  }

  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedClassDesc &SCDesc =
      *STI.getSchedModel().getSchedClassDesc(D.SchedClassID);

  // Copy per-opcode and per-sched-class flags onto the dynamic instruction.
  NewIS->setMayLoad(MCDesc.mayLoad());
  NewIS->setMayStore(MCDesc.mayStore());
  NewIS->setHasSideEffects(MCDesc.hasUnmodeledSideEffects());
  NewIS->setBeginGroup(SCDesc.BeginGroup);
  NewIS->setEndGroup(SCDesc.EndGroup);
  NewIS->setRetireOOO(SCDesc.RetireOOO);

  // Check if this is a dependency breaking instruction.
  APInt Mask;

  bool IsZeroIdiom = false;
  bool IsDepBreaking = false;
  if (MCIA) {
    unsigned ProcID = STI.getSchedModel().getProcessorID();
    // A zero idiom is a special case of dependency breaking, hence the OR.
    IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
    IsDepBreaking =
        IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
    if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
      NewIS->setOptimizableMove();
  }

  // Initialize Reads first.
  MCPhysReg RegID = 0;
  // Idx tracks how many ReadStates have been (re)used so that, on the recycled
  // path, existing slots are overwritten in place and leftovers trimmed below.
  size_t Idx = 0U;
  for (const ReadDescriptor &RD : D.Reads) {
    if (!RD.isImplicitRead()) {
      // explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    ReadState *RS = nullptr;
    if (IsInstRecycled && Idx < NewIS->getUses().size()) {
      NewIS->getUses()[Idx] = ReadState(RD, RegID);
      RS = &NewIS->getUses()[Idx++];
    } else {
      NewIS->getUses().emplace_back(RD, RegID);
      RS = &NewIS->getUses().back();
      ++Idx;
    }

    if (IsDepBreaking) {
      // A mask of all zeroes means: explicit input operands are not
      // independent.
      if (Mask.isZero()) {
        if (!RD.isImplicitRead())
          RS->setIndependentFromDef();
      } else {
        // Check if this register operand is independent according to `Mask`.
        // Note that Mask may not have enough bits to describe all explicit and
        // implicit input operands. If this register operand doesn't have a
        // corresponding bit in Mask, then conservatively assume that it is
        // dependent.
        if (Mask.getBitWidth() > RD.UseIndex) {
          // Okay. This map describe register use `RD.UseIndex`.
          if (Mask[RD.UseIndex])
            RS->setIndependentFromDef();
        }
      }
    }
  }
  // Recycled instruction may carry more ReadStates than this MCI needs; drop
  // the stale tail.
  if (IsInstRecycled && Idx < NewIS->getUses().size())
    NewIS->getUses().pop_back_n(NewIS->getUses().size() - Idx);

  // Early exit if there are no writes.
  if (D.Writes.empty()) {
    if (IsInstRecycled)
      return llvm::make_error<RecycledInstErr>(NewIS);
    else
      return std::move(CreatedIS);
  }

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
  if (MCIA)
    MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);

  // Initialize writes.
  // WriteIndex tracks the position within D.Writes (and WriteMask) even for
  // skipped writes; Idx tracks only the WriteStates actually emitted.
  unsigned WriteIndex = 0;
  Idx = 0U;
  for (const WriteDescriptor &WD : D.Writes) {
    RegID = WD.isImplicitWrite() ? MCRegister(WD.RegisterID)
                                 : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg or a write
    // to a constant register.
    if ((WD.IsOptionalDef && !RegID) || MRI.isConstant(RegID)) {
      ++WriteIndex;
      continue;
    }

    assert(RegID && "Expected a valid register ID!");
    if (IsInstRecycled && Idx < NewIS->getDefs().size()) {
      NewIS->getDefs()[Idx++] =
          WriteState(WD, RegID,
                     /* ClearsSuperRegs */ WriteMask[WriteIndex],
                     /* WritesZero */ IsZeroIdiom);
    } else {
      NewIS->getDefs().emplace_back(WD, RegID,
                                    /* ClearsSuperRegs */ WriteMask[WriteIndex],
                                    /* WritesZero */ IsZeroIdiom);
      ++Idx;
    }
    ++WriteIndex;
  }
  // As with the reads, trim stale WriteStates left over from recycling.
  if (IsInstRecycled && Idx < NewIS->getDefs().size())
    NewIS->getDefs().pop_back_n(NewIS->getDefs().size() - Idx);

  if (IsInstRecycled)
    return llvm::make_error<RecycledInstErr>(NewIS);
  else
    return std::move(CreatedIS);
}
|
|
|
|
} // namespace mca
|
2018-10-30 15:56:08 +00:00
|
|
|
} // namespace llvm
|