llvm/tools/llvm-mca/RetireControlUnit.cpp
Matt Davis cf35b20cbe [llvm-mca] Add the RetireStage.
Summary:
This class maintains the same logic as the original RetireControlUnit.

This is just an intermediate patch that turns the RCU into a Stage. Future patches will remove the dependency on the DispatchStage and then properly populate the pre/execute/post Stage interface.

Reviewers: andreadb, RKSimon, courbet

Reviewed By: andreadb, courbet

Subscribers: javed.absar, mgorny, tschuett, gbedwell, llvm-commits

Differential Revision: https://reviews.llvm.org/D47244

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@333292 91177308-0d34-0410-b5e6-96231b3b80d8
2018-05-25 18:00:25 +00:00

89 lines
3.2 KiB
C++

//===---------------------- RetireControlUnit.cpp ---------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file simulates the hardware responsible for retiring instructions.
///
//===----------------------------------------------------------------------===//

#include "RetireControlUnit.h"
#include "DispatchStage.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "llvm-mca"

namespace mca {

RetireControlUnit::RetireControlUnit(const llvm::MCSchedModel &SM)
    : NextAvailableSlotIdx(0), CurrentInstructionSlotIdx(0),
      AvailableSlots(SM.MicroOpBufferSize), MaxRetirePerCycle(0) {
  // Check if the scheduling model provides extra information about the machine
  // processor. If so, then use that information to set the reorder buffer size
  // and the maximum number of instructions retired per cycle.
  if (SM.hasExtraProcessorInfo()) {
    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
    if (EPI.ReorderBufferSize)
      AvailableSlots = EPI.ReorderBufferSize;
    MaxRetirePerCycle = EPI.MaxRetirePerCycle;
  }

  assert(AvailableSlots && "Invalid reorder buffer size!");
  Queue.resize(AvailableSlots);
}

// Reserves a number of slots, and returns a new token.
unsigned RetireControlUnit::reserveSlot(const InstRef &IR,
                                        unsigned NumMicroOps) {
  assert(isAvailable(NumMicroOps));
  unsigned NormalizedQuantity =
      std::min(NumMicroOps, static_cast<unsigned>(Queue.size()));
  // Zero latency instructions may have zero mOps. Artificially bump this
  // value to 1. Although zero latency instructions don't consume scheduler
  // resources, they still consume one slot in the retire queue.
  NormalizedQuantity = std::max(NormalizedQuantity, 1U);
  unsigned TokenID = NextAvailableSlotIdx;
  Queue[NextAvailableSlotIdx] = {IR, NormalizedQuantity, false};
  NextAvailableSlotIdx += NormalizedQuantity;
  NextAvailableSlotIdx %= Queue.size();
  AvailableSlots -= NormalizedQuantity;
  return TokenID;
}
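
// Returns the token at the head of the circular queue, i.e. the entry for the
// oldest instruction that has not been retired yet.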
const RetireControlUnit::RUToken &RetireControlUnit::peekCurrentToken() const {
  return Queue[CurrentInstructionSlotIdx];
}
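
// Retires the token at the head of the queue: its slots are returned to the
// pool of available slots, and the head index advances to the next token.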
void RetireControlUnit::consumeCurrentToken() {
  const RetireControlUnit::RUToken &Current = peekCurrentToken();
  assert(Current.NumSlots && "Reserved zero slots?");
  assert(Current.IR.isValid() && "Invalid RUToken in the RCU queue.");

  // Update the slot index to be the next item in the circular queue.
  CurrentInstructionSlotIdx += Current.NumSlots;
  CurrentInstructionSlotIdx %= Queue.size();
  AvailableSlots += Current.NumSlots;
}
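
// Marks the instruction associated with TokenID as executed, making it
// eligible for retirement once it reaches the head of the queue.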
void RetireControlUnit::onInstructionExecuted(unsigned TokenID) {
  assert(Queue.size() > TokenID);
  assert(Queue[TokenID].Executed == false && Queue[TokenID].IR.isValid());
  Queue[TokenID].Executed = true;
}
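
// Prints the total number of retire queue slots and how many of them are
// currently available (debug builds only).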
#ifndef NDEBUG
void RetireControlUnit::dump() const {
  dbgs() << "Retire Unit: { Total Slots=" << Queue.size()
         << ", Available Slots=" << AvailableSlots << " }\n";
}
#endif

} // namespace mca
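
For context, the per-cycle retire loop that a retire stage runs on top of this unit boils down to roughly the pattern sketched below. This is an illustrative sketch only, not code from this patch; it assumes the isEmpty(), getMaxRetirePerCycle(), peekCurrentToken() and consumeCurrentToken() members declared in RetireControlUnit.h, and uses a hypothetical OnRetired callback in place of llvm-mca's event listeners.

// Illustrative sketch only -- not code from this patch.
#include "RetireControlUnit.h"
#include <functional>

// Retire executed instructions in program order, up to the scheduling model's
// per-cycle retirement limit. `OnRetired` is a hypothetical hook standing in
// for the notifications llvm-mca sends to its listeners.
static void retireCycle(mca::RetireControlUnit &RCU,
                        const std::function<void(const mca::InstRef &)> &OnRetired) {
  const unsigned MaxRetirePerCycle = RCU.getMaxRetirePerCycle();
  unsigned NumRetired = 0;
  while (!RCU.isEmpty()) {
    // A limit of zero means "no per-cycle limit".
    if (MaxRetirePerCycle != 0 && NumRetired == MaxRetirePerCycle)
      break;
    const mca::RetireControlUnit::RUToken &Current = RCU.peekCurrentToken();
    // Retirement is in-order: stop at the first instruction still in flight.
    if (!Current.Executed)
      break;
    OnRetired(Current.IR);     // Hypothetical notification hook.
    RCU.consumeCurrentToken(); // Free the token's retire queue slots.
    ++NumRetired;
  }
}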