Shivani Parekh has uploaded this change for review. (
https://gem5-review.googlesource.com/c/public/gem5/+/33635 )
Change subject: mem,gpu-compute,dev,cpu,arch: masterId() to requestorId()
......................................................................
mem,gpu-compute,dev,cpu,arch: masterId() to requestorId()
Change-Id: I5854dc9ccd8f3200b03f71123718f297e2b71869
---
M src/arch/arm/stage2_lookup.hh
M src/arch/gcn3/gpu_mem_helpers.hh
M src/arch/gcn3/insts/op_encodings.hh
M src/cpu/base_dyn_inst.hh
M src/cpu/minor/lsq.cc
M src/cpu/o3/lsq.hh
M src/cpu/o3/lsq_impl.hh
M src/cpu/testers/rubytest/Check.cc
M src/cpu/testers/rubytest/RubyTester.hh
M src/dev/arm/amba.hh
M src/gpu-compute/compute_unit.cc
M src/gpu-compute/compute_unit.hh
M src/gpu-compute/fetch_unit.cc
M src/gpu-compute/shader.cc
M src/mem/abstract_mem.cc
M src/mem/cache/base.cc
M src/mem/cache/base.hh
M src/mem/cache/cache.cc
M src/mem/cache/noncoherent_cache.cc
M src/mem/cache/prefetch/base.cc
M src/mem/cache/tags/base.cc
M src/mem/dram_ctrl.cc
M src/mem/dram_ctrl.hh
M src/mem/packet.hh
M src/mem/qos/mem_ctrl.cc
M src/mem/qos/mem_ctrl.hh
M src/mem/qos/mem_sink.cc
M src/mem/qos/policy.cc
M src/mem/qos/q_policy.cc
M src/mem/request.hh
M src/mem/ruby/system/RubyPort.cc
M src/mem/ruby/system/RubySystem.cc
M src/sim/probe/mem.hh
M src/sim/system.cc
34 files changed, 93 insertions(+), 92 deletions(-)
diff --git a/src/arch/arm/stage2_lookup.hh b/src/arch/arm/stage2_lookup.hh
index a5a984f..66b1359 100644
--- a/src/arch/arm/stage2_lookup.hh
+++ b/src/arch/arm/stage2_lookup.hh
@@ -82,7 +82,7 @@
{
req = std::make_shared<Request>();
req->setVirt(s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
- s1Req->getFlags(), s1Req->masterId(), 0);
+ s1Req->getFlags(), s1Req->requestorId(), 0);
}
Fault getTe(ThreadContext *tc, TlbEntry *destTe);
diff --git a/src/arch/gcn3/gpu_mem_helpers.hh
b/src/arch/gcn3/gpu_mem_helpers.hh
index 562158d..9846f41 100644
--- a/src/arch/gcn3/gpu_mem_helpers.hh
+++ b/src/arch/gcn3/gpu_mem_helpers.hh
@@ -87,14 +87,14 @@
assert(!misaligned_acc);
req = std::make_shared<Request>(vaddr, sizeof(T), 0,
- gpuDynInst->computeUnit()->masterId(), 0,
+ gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId,
gpuDynInst->makeAtomicOpFunctor<T>(
&(reinterpret_cast<T*>(gpuDynInst->a_data))[lane],
&(reinterpret_cast<T*>(gpuDynInst->x_data))[lane]));
} else {
req = std::make_shared<Request>(vaddr, req_size, 0,
- gpuDynInst->computeUnit()->masterId(), 0,
+
gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId);
}
@@ -158,7 +158,7 @@
bool misaligned_acc = split_addr > vaddr;
RequestPtr req = std::make_shared<Request>(vaddr, req_size, 0,
- gpuDynInst->computeUnit()->masterId(), 0,
+ gpuDynInst->computeUnit()->requestorId(),
0,
gpuDynInst->wfDynId);
if (misaligned_acc) {
diff --git a/src/arch/gcn3/insts/op_encodings.hh
b/src/arch/gcn3/insts/op_encodings.hh
index b35fb3d..e9dcac7 100644
--- a/src/arch/gcn3/insts/op_encodings.hh
+++ b/src/arch/gcn3/insts/op_encodings.hh
@@ -584,7 +584,7 @@
gpuDynInst->setStatusVector(0, 1);
RequestPtr req = std::make_shared<Request>(0, 0, 0,
gpuDynInst->computeUnit()->
- masterId(), 0,
+ requestorId(), 0,
gpuDynInst->wfDynId);
gpuDynInst->setRequestFlags(req);
gpuDynInst->computeUnit()->
diff --git a/src/cpu/base_dyn_inst.hh b/src/cpu/base_dyn_inst.hh
index b98cbaa..dfc2b2b 100644
--- a/src/cpu/base_dyn_inst.hh
+++ b/src/cpu/base_dyn_inst.hh
@@ -441,7 +441,7 @@
uint32_t socketId() const { return cpu->socketId(); }
/** Read this CPU's data requestor ID */
- MasterID masterId() const { return cpu->dataMasterId(); }
+ MasterID requestorId() const { return cpu->dataMasterId(); }
/** Read this context's system-wide ID **/
ContextID contextId() const { return thread->contextId(); }
diff --git a/src/cpu/minor/lsq.cc b/src/cpu/minor/lsq.cc
index e4a9dc0..f55d3a1 100644
--- a/src/cpu/minor/lsq.cc
+++ b/src/cpu/minor/lsq.cc
@@ -498,7 +498,7 @@
if (byte_enable.empty()) {
fragment->setVirt(
fragment_addr, fragment_size, request->getFlags(),
- request->masterId(), request->getPC());
+ request->requestorId(), request->getPC());
} else {
// Set up byte-enable mask for the current fragment
auto it_start = byte_enable.begin() +
@@ -508,7 +508,7 @@
if (isAnyActiveElement(it_start, it_end)) {
fragment->setVirt(
fragment_addr, fragment_size, request->getFlags(),
- request->masterId(), request->getPC());
+ request->requestorId(), request->getPC());
fragment->setByteEnable(std::vector<bool>(it_start,
it_end));
} else {
disabled_fragment = true;
diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh
index 9ef3b0c..1a90865 100644
--- a/src/cpu/o3/lsq.hh
+++ b/src/cpu/o3/lsq.hh
@@ -409,7 +409,7 @@
if (byte_enable.empty() ||
isAnyActiveElement(byte_enable.begin(),
byte_enable.end())) {
auto request = std::make_shared<Request>(
- addr, size, _flags, _inst->masterId(),
+ addr, size, _flags, _inst->requestorId(),
_inst->instAddr(), _inst->contextId(),
std::move(_amo_op));
if (!byte_enable.empty()) {
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index 1ca7d53..fee4468 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -895,7 +895,7 @@
uint32_t size_so_far = 0;
mainReq = std::make_shared<Request>(base_addr,
- _size, _flags, _inst->masterId(),
+ _size, _flags, _inst->requestorId(),
_inst->instAddr(), _inst->contextId());
if (!_byteEnable.empty()) {
mainReq->setByteEnable(_byteEnable);
diff --git a/src/cpu/testers/rubytest/Check.cc
b/src/cpu/testers/rubytest/Check.cc
index e3732bf..cf60097 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -108,7 +108,7 @@
// Prefetches are assumed to be 0 sized
RequestPtr req = std::make_shared<Request>(
- m_address, 0, flags, m_tester_ptr->masterId());
+ m_address, 0, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
req->setContext(index);
@@ -147,7 +147,7 @@
Request::Flags flags;
RequestPtr req = std::make_shared<Request>(
- m_address, CHECK_SIZE, flags, m_tester_ptr->masterId());
+ m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
Packet::Command cmd;
@@ -181,7 +181,7 @@
// Stores are assumed to be 1 byte-sized
RequestPtr req = std::make_shared<Request>(
- writeAddr, 1, flags, m_tester_ptr->masterId());
+ writeAddr, 1, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
req->setContext(index);
@@ -246,7 +246,7 @@
// Checks are sized depending on the number of bytes written
RequestPtr req = std::make_shared<Request>(
- m_address, CHECK_SIZE, flags, m_tester_ptr->masterId());
+ m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
req->setContext(index);
diff --git a/src/cpu/testers/rubytest/RubyTester.hh
b/src/cpu/testers/rubytest/RubyTester.hh
index e63729a..2eac9f6 100644
--- a/src/cpu/testers/rubytest/RubyTester.hh
+++ b/src/cpu/testers/rubytest/RubyTester.hh
@@ -117,7 +117,7 @@
void print(std::ostream& out) const;
bool getCheckFlush() { return m_check_flush; }
- MasterID masterId() { return _masterId; }
+ MasterID requestorId() { return _masterId; }
protected:
EventFunctionWrapper checkStartEvent;
diff --git a/src/dev/arm/amba.hh b/src/dev/arm/amba.hh
index 4bfba34..891d1a4 100644
--- a/src/dev/arm/amba.hh
+++ b/src/dev/arm/amba.hh
@@ -48,7 +48,7 @@
static OrderID
orderId(PacketPtr pkt)
{
- return pkt->req->masterId();
+ return pkt->req->requestorId();
}
} // namespace AMBA
diff --git a/src/gpu-compute/compute_unit.cc
b/src/gpu-compute/compute_unit.cc
index 9a41233..075e926 100644
--- a/src/gpu-compute/compute_unit.cc
+++ b/src/gpu-compute/compute_unit.cc
@@ -1228,7 +1228,7 @@
if (!req) {
req = std::make_shared<Request>(
- 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
+ 0, 0, 0, requestorId(), 0, gpuDynInst->wfDynId);
}
// all mem sync requests have Paddr == 0
@@ -1493,7 +1493,7 @@
RequestPtr prefetch_req = std::make_shared<Request>(
vaddr + stride * pf * TheISA::PageBytes,
sizeof(uint8_t), 0,
- computeUnit->masterId(),
+ computeUnit->requestorId(),
0, 0, nullptr);
PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
diff --git a/src/gpu-compute/compute_unit.hh
b/src/gpu-compute/compute_unit.hh
index 211dd53..a821859 100644
--- a/src/gpu-compute/compute_unit.hh
+++ b/src/gpu-compute/compute_unit.hh
@@ -458,7 +458,7 @@
void processFetchReturn(PacketPtr pkt);
void updatePageDivergenceDist(Addr addr);
- MasterID masterId() { return _masterId; }
+ MasterID requestorId() { return _masterId; }
bool isDone() const;
bool isVectorAluIdle(uint32_t simdId) const;
diff --git a/src/gpu-compute/fetch_unit.cc b/src/gpu-compute/fetch_unit.cc
index ac9a5a6..1029ce5 100644
--- a/src/gpu-compute/fetch_unit.cc
+++ b/src/gpu-compute/fetch_unit.cc
@@ -160,7 +160,7 @@
// set up virtual request
RequestPtr req = std::make_shared<Request>(
vaddr, computeUnit.cacheLineSize(), Request::INST_FETCH,
- computeUnit.masterId(), 0, 0, nullptr);
+ computeUnit.requestorId(), 0, 0, nullptr);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
diff --git a/src/gpu-compute/shader.cc b/src/gpu-compute/shader.cc
index 1d88e85..3b8d6c8 100644
--- a/src/gpu-compute/shader.cc
+++ b/src/gpu-compute/shader.cc
@@ -205,7 +205,7 @@
// create a request to hold INV info; the request's fields will
// be updated in cu before use
auto req = std::make_shared<Request>(0, 0, 0,
- cuList[i_cu]->masterId(),
+ cuList[i_cu]->requestorId(),
0, -1);
_dispatcher.updateInvCounter(kernId, +1);
@@ -456,7 +456,7 @@
RequestPtr req = std::make_shared<Request>(
gen.addr(), gen.size(), 0,
- cuList[0]->masterId(), 0, 0, nullptr);
+ cuList[0]->requestorId(), 0, 0, nullptr);
doFunctionalAccess(req, cmd, data_buf, suppress_func_errors,
cu_id);
data_buf += gen.size();
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index f1e9dba..cdece0c 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -350,14 +350,15 @@
if (size == 1 || size == 2 || size == 4 || size == 8) {
ByteOrder byte_order = sys->getGuestByteOrder();
DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
- "%#x %c\n", label,
sys->getMasterName(pkt->req->masterId()),
+ "%#x %c\n", label, sys->getRequestorName(pkt->req->
+ requestorId()),
size, pkt->getAddr(), pkt->getUintX(byte_order),
pkt->req->isUncacheable() ? 'U' : 'C');
return;
}
#endif
DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
- label, sys->getMasterName(pkt->req->masterId()),
+ label, sys->getRequestorName(pkt->req->requestorId()),
size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}
@@ -424,7 +425,7 @@
assert(!pkt->req->isInstFetch());
TRACE_PACKET("Read/Write");
- stats.numOther[pkt->req->masterId()]++;
+ stats.numOther[pkt->req->requestorId()]++;
}
} else if (pkt->isRead()) {
assert(!pkt->isWrite());
@@ -438,10 +439,10 @@
pkt->setData(host_addr);
}
TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
- stats.numReads[pkt->req->masterId()]++;
- stats.bytesRead[pkt->req->masterId()] += pkt->getSize();
+ stats.numReads[pkt->req->requestorId()]++;
+ stats.bytesRead[pkt->req->requestorId()] += pkt->getSize();
if (pkt->req->isInstFetch())
- stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize();
+ stats.bytesInstRead[pkt->req->requestorId()] += pkt->getSize();
} else if (pkt->isInvalidate() || pkt->isClean()) {
assert(!pkt->isWrite());
// in a fastmem system invalidating and/or cleaning packets
@@ -457,8 +458,8 @@
}
assert(!pkt->req->isInstFetch());
TRACE_PACKET("Write");
- stats.numWrites[pkt->req->masterId()]++;
- stats.bytesWritten[pkt->req->masterId()] += pkt->getSize();
+ stats.numWrites[pkt->req->requestorId()]++;
+ stats.bytesWritten[pkt->req->requestorId()] += pkt->getSize();
}
} else {
panic("Unexpected packet %s", pkt->print());
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 0187703..c93fbef 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -270,8 +270,8 @@
DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
pkt->print());
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).mshr_hits[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).mshr_hits[pkt->req->requestorId()]++;
// We use forward_time here because it is the same
// considering new targets. We have multiple
@@ -294,8 +294,8 @@
}
} else {
// no MSHR
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
// We use forward_time here because there is an
@@ -441,13 +441,13 @@
const QueueEntry::Target *initial_tgt = mshr->getTarget();
const Tick miss_latency = curTick() - initial_tgt->recvTime;
if (pkt->req->isUncacheable()) {
- assert(pkt->req->masterId() < system->maxMasters());
+ assert(pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(initial_tgt->pkt)
- .mshr_uncacheable_lat[pkt->req->masterId()] += miss_latency;
+ .mshr_uncacheable_lat[pkt->req->requestorId()] += miss_latency;
} else {
- assert(pkt->req->masterId() < system->maxMasters());
+ assert(pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(initial_tgt->pkt)
- .mshr_miss_latency[pkt->req->masterId()] += miss_latency;
+ .mshr_miss_latency[pkt->req->requestorId()] += miss_latency;
}
PacketList writebacks;
@@ -774,8 +774,8 @@
!writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
// Update statistic on number of prefetches issued
// (hwpf_mshr_misses)
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
// allocate an MSHR and return it, note
// that we send the packet straight away, so do not
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index c129661..c5b7f37 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -1219,8 +1219,8 @@
void incMissCount(PacketPtr pkt)
{
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).misses[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).misses[pkt->req->requestorId()]++;
pkt->req->incAccessDepth();
if (missCount) {
--missCount;
@@ -1230,8 +1230,8 @@
}
void incHitCount(PacketPtr pkt)
{
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).hits[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).hits[pkt->req->requestorId()]++;
}
/**
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 6fb6f11..ea2cf76 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -326,7 +326,7 @@
// should have flushed and have no valid block
assert(!blk || !blk->isValid());
- stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;
+ stats.cmdStats(pkt).mshr_uncacheable[pkt->req->requestorId()]++;
if (pkt->isWrite()) {
allocateWriteBuffer(pkt, forward_time);
@@ -371,9 +371,9 @@
if (!mshr) {
// copy the request and create a new SoftPFReq packet
RequestPtr req =
std::make_shared<Request>(pkt->req->getPaddr(),
- pkt->req->getSize(),
-
pkt->req->getFlags(),
-
pkt->req->masterId());
+ pkt->req->getSize(),
+ pkt->req->getFlags(),
+
pkt->req->requestorId());
pf = new Packet(req, pkt->cmd);
pf->allocate();
assert(pf->matchAddr(pkt));
@@ -774,9 +774,9 @@
assert(!tgt_pkt->req->isUncacheable());
- assert(tgt_pkt->req->masterId() < system->maxMasters());
+ assert(tgt_pkt->req->requestorId() <
system->maxRequestors());
stats.cmdStats(tgt_pkt)
- .missLatency[tgt_pkt->req->masterId()] +=
+ .missLatency[tgt_pkt->req->requestorId()] +=
completion_time - target.recvTime;
} else if (pkt->cmd == MemCmd::UpgradeFailResp) {
// failed StoreCond upgrade
diff --git a/src/mem/cache/noncoherent_cache.cc
b/src/mem/cache/noncoherent_cache.cc
index 01c7072..9e936ea 100644
--- a/src/mem/cache/noncoherent_cache.cc
+++ b/src/mem/cache/noncoherent_cache.cc
@@ -269,8 +269,8 @@
completion_time += clockEdge(responseLatency) +
(transfer_offset ? pkt->payloadDelay : 0);
- assert(tgt_pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->masterId()]
+=
+ assert(tgt_pkt->req->requestorId() < system->maxRequestors());
+
stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->requestorId()] +=
completion_time - target.recvTime;
tgt_pkt->makeTimingResponse();
diff --git a/src/mem/cache/prefetch/base.cc b/src/mem/cache/prefetch/base.cc
index d4223aa..d52f7fd 100644
--- a/src/mem/cache/prefetch/base.cc
+++ b/src/mem/cache/prefetch/base.cc
@@ -57,7 +57,7 @@
Base::PrefetchInfo::PrefetchInfo(PacketPtr pkt, Addr addr, bool miss)
: address(addr), pc(pkt->req->hasPC() ? pkt->req->getPC() : 0),
- masterId(pkt->req->masterId()), validPC(pkt->req->hasPC()),
+ masterId(pkt->req->requestorId()), validPC(pkt->req->hasPC()),
secure(pkt->isSecure()), size(pkt->req->getSize()),
write(pkt->isWrite()),
paddress(pkt->req->getPaddr()), cacheMiss(miss)
{
diff --git a/src/mem/cache/tags/base.cc b/src/mem/cache/tags/base.cc
index faad7be..38698c9 100644
--- a/src/mem/cache/tags/base.cc
+++ b/src/mem/cache/tags/base.cc
@@ -105,8 +105,8 @@
// to insert the new one
// Deal with what we are bringing in
- MasterID master_id = pkt->req->masterId();
- assert(master_id < system->maxMasters());
+ MasterID master_id = pkt->req->requestorId();
+ assert(master_id < system->maxRequestors());
stats.occupancies[master_id]++;
// Insert block with tag, src master id and task id
diff --git a/src/mem/dram_ctrl.cc b/src/mem/dram_ctrl.cc
index 5f0fcc7..e451e4b 100644
--- a/src/mem/dram_ctrl.cc
+++ b/src/mem/dram_ctrl.cc
@@ -402,7 +402,7 @@
base_addr + pkt->getSize()) - addr;
stats.readPktSize[ceilLog2(size)]++;
stats.readBursts++;
- stats.masterReadAccesses[pkt->masterId()]++;
+ stats.masterReadAccesses[pkt->requestorId()]++;
// First check write buffer to see if the data is already at
// the controller
@@ -456,7 +456,7 @@
++dram_pkt->rankRef.readEntries;
// log packet
- logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
+ logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
dram_pkt->addr, 1);
// Update stats
@@ -501,7 +501,7 @@
base_addr + pkt->getSize()) - addr;
stats.writePktSize[ceilLog2(size)]++;
stats.writeBursts++;
- stats.masterWriteAccesses[pkt->masterId()]++;
+ stats.masterWriteAccesses[pkt->requestorId()]++;
// see if we can merge with an existing item in the write
// queue and keep track of whether we have merged or not
@@ -522,7 +522,7 @@
isInWriteQueue.insert(burstAlign(addr));
// log packet
- logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
+ logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
dram_pkt->addr, 1);
assert(totalWriteQueueSize == isInWriteQueue.size());
@@ -1451,20 +1451,20 @@
// Update latency stats
stats.totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
- stats.masterReadTotalLat[dram_pkt->masterId()] +=
+ stats.masterReadTotalLat[dram_pkt->requestorId()] +=
dram_pkt->readyTime - dram_pkt->entryTime;
stats.totBusLat += tBURST;
stats.totQLat += cmd_at - dram_pkt->entryTime;
- stats.masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
+ stats.masterReadBytes[dram_pkt->requestorId()] += dram_pkt->size;
} else {
++writesThisTime;
if (row_hit)
stats.writeRowHits++;
stats.bytesWritten += burstSize;
stats.perBankWrBursts[dram_pkt->bankId]++;
- stats.masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
- stats.masterWriteTotalLat[dram_pkt->masterId()] +=
+ stats.masterWriteBytes[dram_pkt->requestorId()] += dram_pkt->size;
+ stats.masterWriteTotalLat[dram_pkt->requestorId()] +=
dram_pkt->readyTime - dram_pkt->entryTime;
}
}
@@ -1630,7 +1630,7 @@
assert(dram_pkt->readyTime >= curTick());
// log the response
- logResponse(MemCtrl::READ, (*to_read)->masterId(),
+ logResponse(MemCtrl::READ, (*to_read)->requestorId(),
dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
dram_pkt->readyTime - dram_pkt->entryTime);
@@ -1730,7 +1730,7 @@
isInWriteQueue.erase(burstAlign(dram_pkt->addr));
// log the response
- logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
+ logResponse(MemCtrl::WRITE, dram_pkt->requestorId(),
dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
dram_pkt->readyTime - dram_pkt->entryTime);
diff --git a/src/mem/dram_ctrl.hh b/src/mem/dram_ctrl.hh
index 0fe78da..a902150 100644
--- a/src/mem/dram_ctrl.hh
+++ b/src/mem/dram_ctrl.hh
@@ -677,7 +677,7 @@
* Get the packet MasterID
* (interface compatibility with Packet)
*/
- inline MasterID masterId() const { return _masterId; }
+ inline MasterID requestorId() const { return _masterId; }
/**
* Get the packet size
@@ -708,7 +708,7 @@
uint32_t _row, uint16_t bank_id, Addr _addr,
unsigned int _size, Bank& bank_ref, Rank& rank_ref)
: entryTime(curTick()), readyTime(curTick()), pkt(_pkt),
- _masterId(pkt->masterId()),
+ _masterId(pkt->requestorId()),
read(is_read), rank(_rank), bank(_bank), row(_row),
bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
bankRef(bank_ref), rankRef(rank_ref),
_qosValue(_pkt->qosValue())
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 4af0d0b..365e13a 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -706,7 +706,7 @@
inline void qosValue(const uint8_t qos_value)
{ _qosValue = qos_value; }
- inline MasterID masterId() const { return req->masterId(); }
+ inline MasterID requestorId() const { return req->requestorId(); }
// Network error conditions... encapsulate them as methods since
// their encoding keeps changing (from result field to command
diff --git a/src/mem/qos/mem_ctrl.cc b/src/mem/qos/mem_ctrl.cc
index 50e6035..9e71fcf 100644
--- a/src/mem/qos/mem_ctrl.cc
+++ b/src/mem/qos/mem_ctrl.cc
@@ -228,7 +228,7 @@
assert(pkt->req);
if (policy) {
- return schedule(pkt->req->masterId(), pkt->getSize());
+ return schedule(pkt->req->requestorId(), pkt->getSize());
} else {
DPRINTF(QOS, "QoSScheduler::schedule Packet received [Qv %d], "
"but QoS scheduler not initialized\n",
diff --git a/src/mem/qos/mem_ctrl.hh b/src/mem/qos/mem_ctrl.hh
index 0e29fcc..eec9561 100644
--- a/src/mem/qos/mem_ctrl.hh
+++ b/src/mem/qos/mem_ctrl.hh
@@ -364,10 +364,10 @@
DPRINTF(QOS,
"QoSMemCtrl::escalate checking priority %d packet "
"m_id %d address %d\n", curr_prio,
- pkt->masterId(), pkt->getAddr());
+ pkt->requestorId(), pkt->getAddr());
// Found a packet to move
- if (pkt->masterId() == m_id) {
+ if (pkt->requestorId() == m_id) {
uint64_t moved_entries = divCeil(pkt->getSize(),
queue_entry_size);
@@ -485,7 +485,7 @@
// Call the scheduling function on all other masters.
for (const auto& m : masters) {
- if (m.first == pkt->masterId())
+ if (m.first == pkt->requestorId())
continue;
uint8_t prio = schedule(m.first, 0);
@@ -505,9 +505,9 @@
DPRINTF(QOS,
"QoSMemCtrl::qosSchedule: escalating "
"MASTER %s to assigned priority %d\n",
- _system->getMasterName(pkt->masterId()),
+ _system->getRequestorName(pkt->requestorId()),
pkt_priority);
- escalate(queues, queue_entry_size, pkt->masterId(), pkt_priority);
+ escalate(queues, queue_entry_size, pkt->requestorId(),
pkt_priority);
}
// Update last service tick for selected priority
diff --git a/src/mem/qos/mem_sink.cc b/src/mem/qos/mem_sink.cc
index 1f104e4..b73dc19 100644
--- a/src/mem/qos/mem_sink.cc
+++ b/src/mem/qos/mem_sink.cc
@@ -134,7 +134,7 @@
DPRINTF(QOS,
"%s: MASTER %s request %s addr %lld size %d\n",
__func__,
- _system->getMasterName(pkt->req->masterId()),
+ _system->getRequestorName(pkt->req->requestorId()),
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);
@@ -178,7 +178,7 @@
if (req_accepted) {
// The packet is accepted - log it
logRequest(pkt->isRead()? READ : WRITE,
- pkt->req->masterId(),
+ pkt->req->requestorId(),
pkt->qosValue(),
pkt->getAddr(),
required_entries);
@@ -221,7 +221,7 @@
for (uint8_t i = 0; i < numPriorities(); ++i) {
std::string plist = "";
for (auto& e : (busState == WRITE ? writeQueue[i]:
readQueue[i])) {
- plist += (std::to_string(e->req->masterId())) + " ";
+ plist += (std::to_string(e->req->requestorId())) + " ";
}
DPRINTF(QOS,
"%s priority Queue [%i] contains %i elements, "
@@ -253,7 +253,7 @@
DPRINTF(QOS,
"%s scheduling packet address %d for master %s from "
"priority queue %d\n", __func__, pkt->getAddr(),
- _system->getMasterName(pkt->req->masterId()),
+ _system->getRequestorName(pkt->req->requestorId()),
curr_prio);
break;
}
@@ -270,7 +270,7 @@
DPRINTF(QOS,
"%s scheduled packet address %d for master %s size is %d, "
"corresponds to %d memory packets\n", __func__, pkt->getAddr(),
- _system->getMasterName(pkt->req->masterId()),
+ _system->getRequestorName(pkt->req->requestorId()),
pkt->getSize(), removed_entries);
// Schedule response
@@ -283,7 +283,7 @@
// Log the response
logResponse(pkt->isRead()? READ : WRITE,
- pkt->req->masterId(),
+ pkt->req->requestorId(),
pkt->qosValue(),
pkt->getAddr(),
removed_entries, responseLatency);
diff --git a/src/mem/qos/policy.cc b/src/mem/qos/policy.cc
index b5431d2..93c841d 100644
--- a/src/mem/qos/policy.cc
+++ b/src/mem/qos/policy.cc
@@ -51,7 +51,7 @@
Policy::schedule(const PacketPtr pkt)
{
assert(pkt->req);
- return schedule(pkt->req->masterId(), pkt->getSize());
+ return schedule(pkt->req->requestorId(), pkt->getSize());
}
} // namespace QoS
diff --git a/src/mem/qos/q_policy.cc b/src/mem/qos/q_policy.cc
index 88ce95d..5ebd063 100644
--- a/src/mem/qos/q_policy.cc
+++ b/src/mem/qos/q_policy.cc
@@ -79,7 +79,7 @@
"QoSQPolicy::lrg detected packet without request");
// Get Request MasterID
- MasterID m_id = pkt->req->masterId();
+ MasterID m_id = pkt->req->requestorId();
DPRINTF(QOS, "QoSQPolicy::lrg checking packet "
"from queue with id %d\n", m_id);
@@ -138,8 +138,8 @@
void
LrgQueuePolicy::enqueuePacket(PacketPtr pkt)
{
- MasterID m_id = pkt->masterId();
- if (!memCtrl->hasMaster(m_id)) {
+ MasterID m_id = pkt->requestorId();
+ if (!memCtrl->hasRequestor(m_id)) {
toServe.push_back(m_id);
}
};
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 7b324dc..9cd72cb 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -654,7 +654,7 @@
/** Accesssor for the requestor id. */
MasterID
- masterId() const
+ requestorId() const
{
return _masterId;
}
diff --git a/src/mem/ruby/system/RubyPort.cc
b/src/mem/ruby/system/RubyPort.cc
index 4510e3a..3347f36 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -556,7 +556,7 @@
// We must check device memory first in case it overlaps with the
// system memory range.
if (ruby_port->system->isDeviceMemAddr(pkt)) {
- auto dmem =
ruby_port->system->getDeviceMemory(pkt->masterId());
+ auto dmem =
ruby_port->system->getDeviceMemory(pkt->requestorId());
dmem->access(pkt);
} else if (ruby_port->system->isMemAddr(pkt->getAddr())) {
rs->getPhysMem()->access(pkt);
diff --git a/src/mem/ruby/system/RubySystem.cc
b/src/mem/ruby/system/RubySystem.cc
index c35ab02..f07682b 100644
--- a/src/mem/ruby/system/RubySystem.cc
+++ b/src/mem/ruby/system/RubySystem.cc
@@ -491,8 +491,8 @@
unsigned int num_invalid = 0;
// Only send functional requests within the same network.
- assert(masterToNetwork.count(pkt->masterId()));
- int master_net_id = masterToNetwork[pkt->masterId()];
+ assert(masterToNetwork.count(pkt->requestorId()));
+ int master_net_id = masterToNetwork[pkt->requestorId()];
assert(netCntrls.count(master_net_id));
AbstractController *ctrl_ro = nullptr;
@@ -605,8 +605,8 @@
uint32_t M5_VAR_USED num_functional_writes = 0;
// Only send functional requests within the same network.
- assert(masterToNetwork.count(pkt->masterId()));
- int master_net_id = masterToNetwork[pkt->masterId()];
+ assert(masterToNetwork.count(pkt->requestorId()));
+ int master_net_id = masterToNetwork[pkt->requestorId()];
assert(netCntrls.count(master_net_id));
for (auto& cntrl : netCntrls[master_net_id]) {
diff --git a/src/sim/probe/mem.hh b/src/sim/probe/mem.hh
index fed7bcf..093ea7f 100644
--- a/src/sim/probe/mem.hh
+++ b/src/sim/probe/mem.hh
@@ -64,7 +64,7 @@
size(pkt->getSize()),
flags(pkt->req->getFlags()),
pc(pkt->req->hasPC() ? pkt->req->getPC() : 0),
- master(pkt->req->masterId()) { }
+ master(pkt->req->requestorId()) { }
};
/**
diff --git a/src/sim/system.cc b/src/sim/system.cc
index 8185f13..f80c5db 100644
--- a/src/sim/system.cc
+++ b/src/sim/system.cc
@@ -430,7 +430,7 @@
bool
System::isDeviceMemAddr(PacketPtr pkt) const
{
- const MasterID& mid = pkt->masterId();
+ const MasterID& mid = pkt->requestorId();
return (deviceMemMap.count(mid) &&
deviceMemMap.at(mid)->getAddrRange().contains(pkt->getAddr()));
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/33635
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I5854dc9ccd8f3200b03f71123718f297e2b71869
Gerrit-Change-Number: 33635
Gerrit-PatchSet: 1
Gerrit-Owner: Shivani Parekh <[email protected]>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list -- [email protected]
To unsubscribe send an email to [email protected]
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s