[ModuloSchedule] Peel out prologs and epilogs, generate actual code
Summary:
This extends the PeelingModuloScheduleExpander to generate prolog and epilog code,
and correctly stitch uses through the prolog, kernel, epilog DAG.
The key concept in this patch is to ensure that all transforms are *local*: each is a
function only of a block and its immediate predecessor and successor. By defining the
problem this way we can inductively rewrite the entire DAG using only local knowledge
that is easy to reason about.
For example, we assume that all prologs and epilogs are near-perfect clones of the
steady-state kernel. This means that if a block has an instruction that is predicated out,
we can redirect all users of that instruction to the equivalent instruction in its
immediate predecessor. As all blocks are clones, every instruction must have an equivalent
in every other block.
Similarly, we can assume by construction that if a value defined in a block is used
outside that block, the only possible users are its immediate successors. We maintain
this even for values that are used outside the loop by creating a limited form of LCSSA.
This code isn't small, but it isn't complex.
Enabled a bunch of testing from Hexagon. There are a couple of tests not enabled yet;
I'm about 80% sure there isn't buggy codegen but the tests are checking for patterns
that we don't produce. Those still need a bit more investigation. In the meantime we
(Google) are happy with the code produced by this on our downstream SMS implementation,
and believe it generates correct code.
Subscribers: mgorny, hiraditya, jsji, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68205
llvm-svn: 373462
2019-10-02 12:46:44 +00:00
//=- MachineLoopUtils.cpp - Functions for manipulating loops ----------------=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineLoopUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
using namespace llvm;

namespace {
// MI's parent and BB are clones of each other. Find the equivalent copy of MI
// in BB.
MachineInstr &findEquivalentInstruction(MachineInstr &MI,
                                        MachineBasicBlock *BB) {
  MachineBasicBlock *PB = MI.getParent();
  unsigned Offset =
      std::distance(PB->instr_begin(), MachineBasicBlock::instr_iterator(MI));
  return *std::next(BB->instr_begin(), Offset);
}
} // namespace

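// Peel one iteration of the single-block loop Loop by cloning it into a new
// block. The clone is placed before the loop for LPD_Front peeling or after it
// for LPD_Back peeling; the CFG, branches, PHIs and virtual-register uses are
// rewired so that the peeled copy executes exactly once.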
MachineBasicBlock *llvm::PeelSingleBlockLoop(LoopPeelDirection Direction,
                                             MachineBasicBlock *Loop,
                                             MachineRegisterInfo &MRI,
                                             const TargetInstrInfo *TII) {
  MachineFunction &MF = *Loop->getParent();
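  // A single-block loop has exactly two predecessors (itself and the
  // preheader) and two successors (itself and the exit block).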
  MachineBasicBlock *Preheader = *Loop->pred_begin();
  if (Preheader == Loop)
    Preheader = *std::next(Loop->pred_begin());
  MachineBasicBlock *Exit = *Loop->succ_begin();
  if (Exit == Loop)
    Exit = *std::next(Loop->succ_begin());

  MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock(Loop->getBasicBlock());
  if (Direction == LPD_Front)
    MF.insert(Loop->getIterator(), NewBB);
  else
    MF.insert(std::next(Loop->getIterator()), NewBB);

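  // Remaps maps each virtual register defined inside the original loop to the
  // fresh register defined by its clone in NewBB.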
  DenseMap<Register, Register> Remaps;
  auto InsertPt = NewBB->end();
  for (MachineInstr &MI : *Loop) {
    MachineInstr *NewMI = MF.CloneMachineInstr(&MI);
    NewBB->insert(InsertPt, NewMI);
    for (MachineOperand &MO : NewMI->defs()) {
      Register OrigR = MO.getReg();
      if (OrigR.isPhysical())
        continue;
      Register &R = Remaps[OrigR];
      R = MRI.createVirtualRegister(MRI.getRegClass(OrigR));
      MO.setReg(R);

      if (Direction == LPD_Back) {
        // Replace all uses outside the original loop with the new register.
        // FIXME: is the use_iterator stable enough to mutate register uses
        // while iterating?
        SmallVector<MachineOperand *, 4> Uses;
        for (auto &Use : MRI.use_operands(OrigR))
          if (Use.getParent()->getParent() != Loop)
            Uses.push_back(&Use);
        for (auto *Use : Uses) {
          const TargetRegisterClass *ConstrainRegClass =
              MRI.constrainRegClass(R, MRI.getRegClass(Use->getReg()));
          assert(ConstrainRegClass &&
                 "Expected a valid constrained register class!");
          (void)ConstrainRegClass;
          Use->setReg(R);
        }
      }
    }
  }

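  // Rewrite every remapped register use in the cloned non-PHI instructions so
  // they refer to the values defined inside NewBB.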
  for (auto I = NewBB->getFirstNonPHI(); I != NewBB->end(); ++I)
    for (MachineOperand &MO : I->uses())
      if (MO.isReg() && Remaps.count(MO.getReg()))
        MO.setReg(Remaps[MO.getReg()]);

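  // Rewrite the PHIs of the cloned block. A machine PHI has the form
  //   %def = PHI %reg1, %bb1, %reg2, %bb2
  // so operands (1, 2) and (3, 4) are the incoming (value, block) pairs;
  // InitRegIdx indexes the value incoming from the preheader and LoopRegIdx
  // the loop-carried value.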
  for (auto I = NewBB->begin(); I->isPHI(); ++I) {
    MachineInstr &MI = *I;
    unsigned LoopRegIdx = 3, InitRegIdx = 1;
    if (MI.getOperand(2).getMBB() != Preheader)
      std::swap(LoopRegIdx, InitRegIdx);
    MachineInstr &OrigPhi = findEquivalentInstruction(MI, Loop);
    assert(OrigPhi.isPHI());
    if (Direction == LPD_Front) {
      // When peeling front, we are only left with the initial value from the
      // preheader.
      Register R = MI.getOperand(LoopRegIdx).getReg();
      if (Remaps.count(R))
        R = Remaps[R];
      OrigPhi.getOperand(InitRegIdx).setReg(R);
      MI.removeOperand(LoopRegIdx + 1);
      MI.removeOperand(LoopRegIdx + 0);
    } else {
      // When peeling back, the initial value is the loop-carried value from
      // the original loop.
      Register LoopReg = OrigPhi.getOperand(LoopRegIdx).getReg();
      MI.getOperand(LoopRegIdx).setReg(LoopReg);
      MI.removeOperand(InitRegIdx + 1);
      MI.removeOperand(InitRegIdx + 0);
    }
  }

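  // Finally, rewire the CFG and the branch instructions so the peeled block
  // sits on the path into the loop (front peeling) or on the path to the exit
  // (back peeling).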
  DebugLoc DL;
  if (Direction == LPD_Front) {
    Preheader->ReplaceUsesOfBlockWith(Loop, NewBB);
    NewBB->addSuccessor(Loop);
    Loop->replacePhiUsesWith(Preheader, NewBB);
    Preheader->updateTerminator(Loop);
    TII->removeBranch(*NewBB);
    TII->insertBranch(*NewBB, Loop, nullptr, {}, DL);
  } else {
    Loop->replaceSuccessor(Exit, NewBB);
    Exit->replacePhiUsesWith(Loop, NewBB);
    NewBB->addSuccessor(Exit);

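    // The original loop must end in an analyzable branch; retarget its exit
    // edge at the peeled block, then branch from the peeled block to the real
    // exit.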
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    SmallVector<MachineOperand, 4> Cond;
    bool CanAnalyzeBr = !TII->analyzeBranch(*Loop, TBB, FBB, Cond);
    (void)CanAnalyzeBr;
    assert(CanAnalyzeBr && "Must be able to analyze the loop branch!");
    TII->removeBranch(*Loop);
    TII->insertBranch(*Loop, TBB == Exit ? NewBB : TBB,
                      FBB == Exit ? NewBB : FBB, Cond, DL);
    if (TII->removeBranch(*NewBB) > 0)
      TII->insertBranch(*NewBB, Exit, nullptr, {}, DL);
  }

  return NewBB;
}
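For context, here is a minimal sketch of how a peeling client such as the
PeelingModuloScheduleExpander might drive this helper. The wrapper function and
names below are hypothetical illustrations, not part of this patch:
```
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
using namespace llvm;

// Hypothetical illustration: peel one copy of the kernel to the front (a
// prolog) and one to the back (an epilog) of a single-block loop.
static void peelPrologAndEpilog(MachineBasicBlock *KernelBB) {
  MachineFunction &MF = *KernelBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  // The clone placed before the kernel runs once before the steady state.
  MachineBasicBlock *Prolog =
      PeelSingleBlockLoop(LPD_Front, KernelBB, MRI, TII);
  // The clone placed after the kernel runs once when the loop exits.
  MachineBasicBlock *Epilog =
      PeelSingleBlockLoop(LPD_Back, KernelBB, MRI, TII);
  (void)Prolog;
  (void)Epilog;
}
```
The real expander then stitches the registers and PHIs of these peeled blocks
together, which is the part the locality argument in the commit message above
is about.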