Hoa Nguyen has submitted this change. (
https://gem5-review.googlesource.com/c/public/gem5/+/39975 )
Change subject: mem: Fix/Improve stats in src/mem
......................................................................
mem: Fix/Improve stats in src/mem
* Improved/fixed the descriptions of several stats.
* Fixed the value assigned to the `warmupCycle` stat of cache tags;
it was previously set to curTick().
* Use ADD_STAT in CacheCmdStats.
Change-Id: Iabff7d42318fcc73df79ee7f3c5430f4c45555e5
Signed-off-by: Hoa Nguyen <hoangu...@ucdavis.edu>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/39975
Reviewed-by: Bobby R. Bruce <bbr...@ucdavis.edu>
Maintainer: Bobby R. Bruce <bbr...@ucdavis.edu>
Tested-by: kokoro <noreply+kok...@google.com>
---
M src/mem/cache/base.cc
M src/mem/cache/base.hh
M src/mem/cache/cache.cc
M src/mem/cache/tags/base.cc
M src/mem/cache/tags/base.hh
M src/mem/xbar.cc
6 files changed, 74 insertions(+), 96 deletions(-)
Approvals:
Bobby R. Bruce: Looks good to me, approved; Looks good to me, approved
kokoro: Regressions pass
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 023d0a9..b76b98f 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -280,7 +280,7 @@
pkt->print());
assert(pkt->req->requestorId() < system->maxRequestors());
- stats.cmdStats(pkt).mshr_hits[pkt->req->requestorId()]++;
+ stats.cmdStats(pkt).mshrHits[pkt->req->requestorId()]++;
// We use forward_time here because it is the same
// considering new targets. We have multiple
@@ -304,7 +304,7 @@
} else {
// no MSHR
assert(pkt->req->requestorId() < system->maxRequestors());
- stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
+ stats.cmdStats(pkt).mshrMisses[pkt->req->requestorId()]++;
if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
// We use forward_time here because there is an
@@ -453,11 +453,11 @@
if (pkt->req->isUncacheable()) {
assert(pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(initial_tgt->pkt)
- .mshr_uncacheable_lat[pkt->req->requestorId()] += miss_latency;
+ .mshrUncacheableLatency[pkt->req->requestorId()] +=
miss_latency;
} else {
assert(pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(initial_tgt->pkt)
- .mshr_miss_latency[pkt->req->requestorId()] += miss_latency;
+ .mshrMissLatency[pkt->req->requestorId()] += miss_latency;
}
PacketList writebacks;
@@ -824,7 +824,7 @@
// Update statistic on number of prefetches issued
// (hwpf_mshr_misses)
assert(pkt->req->requestorId() < system->maxRequestors());
- stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
+ stats.cmdStats(pkt).mshrMisses[pkt->req->requestorId()]++;
// allocate an MSHR and return it, note
// that we send the packet straight away, so do not
@@ -1929,50 +1929,28 @@
BaseCache::CacheCmdStats::CacheCmdStats(BaseCache &c,
const std::string &name)
- : Stats::Group(&c), cache(c),
-
- hits(
- this, (name + "_hits").c_str(),
- ("number of " + name + " hits").c_str()),
- misses(
- this, (name + "_misses").c_str(),
- ("number of " + name + " misses").c_str()),
- missLatency(
- this, (name + "_miss_latency").c_str(),
- ("number of " + name + " miss cycles").c_str()),
- accesses(
- this, (name + "_accesses").c_str(),
- ("number of " + name + " accesses(hits+misses)").c_str()),
- missRate(
- this, (name + "_miss_rate").c_str(),
- ("miss rate for " + name + " accesses").c_str()),
- avgMissLatency(
- this, (name + "_avg_miss_latency").c_str(),
- ("average " + name + " miss latency").c_str()),
- mshr_hits(
- this, (name + "_mshr_hits").c_str(),
- ("number of " + name + " MSHR hits").c_str()),
- mshr_misses(
- this, (name + "_mshr_misses").c_str(),
- ("number of " + name + " MSHR misses").c_str()),
- mshr_uncacheable(
- this, (name + "_mshr_uncacheable").c_str(),
- ("number of " + name + " MSHR uncacheable").c_str()),
- mshr_miss_latency(
- this, (name + "_mshr_miss_latency").c_str(),
- ("number of " + name + " MSHR miss cycles").c_str()),
- mshr_uncacheable_lat(
- this, (name + "_mshr_uncacheable_latency").c_str(),
- ("number of " + name + " MSHR uncacheable cycles").c_str()),
- mshrMissRate(
- this, (name + "_mshr_miss_rate").c_str(),
- ("mshr miss rate for " + name + " accesses").c_str()),
- avgMshrMissLatency(
- this, (name + "_avg_mshr_miss_latency").c_str(),
- ("average " + name + " mshr miss latency").c_str()),
- avgMshrUncacheableLatency(
- this, (name + "_avg_mshr_uncacheable_latency").c_str(),
- ("average " + name + " mshr uncacheable latency").c_str())
+ : Stats::Group(&c, name.c_str()), cache(c),
+ ADD_STAT(hits, ("number of " + name + " hits").c_str()),
+ ADD_STAT(misses, ("number of " + name + " misses").c_str()),
+ ADD_STAT(missLatency, ("number of " + name + " miss ticks").c_str()),
+ ADD_STAT(accesses,
+ ("number of " + name + " accesses(hits+misses)").c_str()),
+ ADD_STAT(missRate, ("miss rate for " + name + " accesses").c_str()),
+ ADD_STAT(avgMissLatency, ("average " + name + " miss
latency").c_str()),
+ ADD_STAT(mshrHits, ("number of " + name + " MSHR hits").c_str()),
+ ADD_STAT(mshrMisses, ("number of " + name + " MSHR misses").c_str()),
+ ADD_STAT(mshrUncacheable,
+ ("number of " + name + " MSHR uncacheable").c_str()),
+ ADD_STAT(mshrMissLatency,
+ ("number of " + name + " MSHR miss ticks").c_str()),
+ ADD_STAT(mshrUncacheableLatency,
+ ("number of " + name + " MSHR uncacheable ticks").c_str()),
+ ADD_STAT(mshrMissRate,
+ ("mshr miss rate for " + name + " accesses").c_str()),
+ ADD_STAT(avgMshrMissLatency,
+ ("average " + name + " mshr miss latency").c_str()),
+ ADD_STAT(avgMshrUncacheableLatency,
+ ("average " + name + " mshr uncacheable latency").c_str())
{
}
@@ -2034,53 +2012,53 @@
// MSHR statistics
// MSHR hit statistics
- mshr_hits
+ mshrHits
.init(max_requestors)
.flags(total | nozero | nonan)
;
for (int i = 0; i < max_requestors; i++) {
- mshr_hits.subname(i, system->getRequestorName(i));
+ mshrHits.subname(i, system->getRequestorName(i));
}
// MSHR miss statistics
- mshr_misses
+ mshrMisses
.init(max_requestors)
.flags(total | nozero | nonan)
;
for (int i = 0; i < max_requestors; i++) {
- mshr_misses.subname(i, system->getRequestorName(i));
+ mshrMisses.subname(i, system->getRequestorName(i));
}
// MSHR miss latency statistics
- mshr_miss_latency
+ mshrMissLatency
.init(max_requestors)
.flags(total | nozero | nonan)
;
for (int i = 0; i < max_requestors; i++) {
- mshr_miss_latency.subname(i, system->getRequestorName(i));
+ mshrMissLatency.subname(i, system->getRequestorName(i));
}
// MSHR uncacheable statistics
- mshr_uncacheable
+ mshrUncacheable
.init(max_requestors)
.flags(total | nozero | nonan)
;
for (int i = 0; i < max_requestors; i++) {
- mshr_uncacheable.subname(i, system->getRequestorName(i));
+ mshrUncacheable.subname(i, system->getRequestorName(i));
}
// MSHR miss latency statistics
- mshr_uncacheable_lat
+ mshrUncacheableLatency
.init(max_requestors)
.flags(total | nozero | nonan)
;
for (int i = 0; i < max_requestors; i++) {
- mshr_uncacheable_lat.subname(i, system->getRequestorName(i));
+ mshrUncacheableLatency.subname(i, system->getRequestorName(i));
}
// MSHR miss rate formulas
mshrMissRate.flags(total | nozero | nonan);
- mshrMissRate = mshr_misses / accesses;
+ mshrMissRate = mshrMisses / accesses;
for (int i = 0; i < max_requestors; i++) {
mshrMissRate.subname(i, system->getRequestorName(i));
@@ -2088,14 +2066,14 @@
// mshrMiss latency formulas
avgMshrMissLatency.flags(total | nozero | nonan);
- avgMshrMissLatency = mshr_miss_latency / mshr_misses;
+ avgMshrMissLatency = mshrMissLatency / mshrMisses;
for (int i = 0; i < max_requestors; i++) {
avgMshrMissLatency.subname(i, system->getRequestorName(i));
}
// mshrUncacheable latency formulas
avgMshrUncacheableLatency.flags(total | nozero | nonan);
- avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable;
+ avgMshrUncacheableLatency = mshrUncacheableLatency / mshrUncacheable;
for (int i = 0; i < max_requestors; i++) {
avgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
}
@@ -2108,8 +2086,8 @@
ADD_STAT(overallHits, "number of overall hits"),
ADD_STAT(demandMisses, "number of demand (read+write) misses"),
ADD_STAT(overallMisses, "number of overall misses"),
- ADD_STAT(demandMissLatency, "number of demand (read+write) miss
cycles"),
- ADD_STAT(overallMissLatency, "number of overall miss cycles"),
+ ADD_STAT(demandMissLatency, "number of demand (read+write) miss
ticks"),
+ ADD_STAT(overallMissLatency, "number of overall miss ticks"),
ADD_STAT(demandAccesses, "number of demand (read+write) accesses"),
ADD_STAT(overallAccesses, "number of overall (read+write) accesses"),
ADD_STAT(demandMissRate, "miss rate for demand accesses"),
@@ -2129,12 +2107,12 @@
ADD_STAT(overallMshrUncacheable,
"number of overall MSHR uncacheable misses"),
ADD_STAT(demandMshrMissLatency,
- "number of demand (read+write) MSHR miss cycles"),
- ADD_STAT(overallMshrMissLatency, "number of overall MSHR miss cycles"),
+ "number of demand (read+write) MSHR miss ticks"),
+ ADD_STAT(overallMshrMissLatency, "number of overall MSHR miss ticks"),
ADD_STAT(overallMshrUncacheableLatency,
- "number of overall MSHR uncacheable cycles"),
- ADD_STAT(demandMshrMissRate, "mshr miss rate for demand accesses"),
- ADD_STAT(overallMshrMissRate, "mshr miss rate for overall accesses"),
+ "number of overall MSHR uncacheable ticks"),
+ ADD_STAT(demandMshrMissRate, "mshr miss ratio for demand accesses"),
+ ADD_STAT(overallMshrMissRate, "mshr miss ratio for overall accesses"),
ADD_STAT(demandAvgMshrMissLatency, "average overall mshr miss
latency"),
ADD_STAT(overallAvgMshrMissLatency, "average overall mshr miss
latency"),
ADD_STAT(overallAvgMshrUncacheableLatency,
@@ -2276,45 +2254,45 @@
}
demandMshrHits.flags(total | nozero | nonan);
- demandMshrHits = SUM_DEMAND(mshr_hits);
+ demandMshrHits = SUM_DEMAND(mshrHits);
for (int i = 0; i < max_requestors; i++) {
demandMshrHits.subname(i, system->getRequestorName(i));
}
overallMshrHits.flags(total | nozero | nonan);
- overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
+ overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshrHits);
for (int i = 0; i < max_requestors; i++) {
overallMshrHits.subname(i, system->getRequestorName(i));
}
demandMshrMisses.flags(total | nozero | nonan);
- demandMshrMisses = SUM_DEMAND(mshr_misses);
+ demandMshrMisses = SUM_DEMAND(mshrMisses);
for (int i = 0; i < max_requestors; i++) {
demandMshrMisses.subname(i, system->getRequestorName(i));
}
overallMshrMisses.flags(total | nozero | nonan);
- overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
+ overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshrMisses);
for (int i = 0; i < max_requestors; i++) {
overallMshrMisses.subname(i, system->getRequestorName(i));
}
demandMshrMissLatency.flags(total | nozero | nonan);
- demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
+ demandMshrMissLatency = SUM_DEMAND(mshrMissLatency);
for (int i = 0; i < max_requestors; i++) {
demandMshrMissLatency.subname(i, system->getRequestorName(i));
}
overallMshrMissLatency.flags(total | nozero | nonan);
overallMshrMissLatency =
- demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
+ demandMshrMissLatency + SUM_NON_DEMAND(mshrMissLatency);
for (int i = 0; i < max_requestors; i++) {
overallMshrMissLatency.subname(i, system->getRequestorName(i));
}
overallMshrUncacheable.flags(total | nozero | nonan);
overallMshrUncacheable =
- SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
+ SUM_DEMAND(mshrUncacheable) + SUM_NON_DEMAND(mshrUncacheable);
for (int i = 0; i < max_requestors; i++) {
overallMshrUncacheable.subname(i, system->getRequestorName(i));
}
@@ -2322,8 +2300,8 @@
overallMshrUncacheableLatency.flags(total | nozero | nonan);
overallMshrUncacheableLatency =
- SUM_DEMAND(mshr_uncacheable_lat) +
- SUM_NON_DEMAND(mshr_uncacheable_lat);
+ SUM_DEMAND(mshrUncacheableLatency) +
+ SUM_NON_DEMAND(mshrUncacheableLatency);
for (int i = 0; i < max_requestors; i++) {
overallMshrUncacheableLatency.subname(i,
system->getRequestorName(i));
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 1a47bd7..8951a5e 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -1009,15 +1009,15 @@
/** The average miss latency per command and thread. */
Stats::Formula avgMissLatency;
/** Number of misses that hit in the MSHRs per command and thread.
*/
- Stats::Vector mshr_hits;
+ Stats::Vector mshrHits;
/** Number of misses that miss in the MSHRs, per command and
thread. */
- Stats::Vector mshr_misses;
+ Stats::Vector mshrMisses;
/** Number of misses that miss in the MSHRs, per command and
thread. */
- Stats::Vector mshr_uncacheable;
+ Stats::Vector mshrUncacheable;
/** Total cycle latency of each MSHR miss, per command and thread.
*/
- Stats::Vector mshr_miss_latency;
+ Stats::Vector mshrMissLatency;
/** Total cycle latency of each MSHR miss, per command and thread.
*/
- Stats::Vector mshr_uncacheable_lat;
+ Stats::Vector mshrUncacheableLatency;
/** The miss rate in the MSHRs pre command and thread. */
Stats::Formula mshrMissRate;
/** The average latency of an MSHR miss, per command and thread. */
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index e3e382b..4bec7de 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -328,7 +328,7 @@
// should have flushed and have no valid block
assert(!blk || !blk->isValid());
- stats.cmdStats(pkt).mshr_uncacheable[pkt->req->requestorId()]++;
+ stats.cmdStats(pkt).mshrUncacheable[pkt->req->requestorId()]++;
if (pkt->isWrite()) {
allocateWriteBuffer(pkt, forward_time);
diff --git a/src/mem/cache/tags/base.cc b/src/mem/cache/tags/base.cc
index b12fdf0..97d2831 100644
--- a/src/mem/cache/tags/base.cc
+++ b/src/mem/cache/tags/base.cc
@@ -115,7 +115,7 @@
// Check if cache warm up is done
if (!warmedUp && stats.tagsInUse.value() >= warmupBound) {
warmedUp = true;
- stats.warmupCycle = curTick();
+ stats.warmupCycle = ticksToCycles(curTick());
}
// We only need to write into one tag and one data block.
@@ -216,16 +216,17 @@
: Stats::Group(&_tags),
tags(_tags),
- ADD_STAT(tagsInUse, "Cycle average of tags in use"),
+ ADD_STAT(tagsInUse, "Average ticks per tags in use"),
ADD_STAT(totalRefs, "Total number of references to valid blocks."),
ADD_STAT(sampledRefs, "Sample count of references to valid blocks."),
ADD_STAT(avgRefs, "Average number of references to valid blocks."),
ADD_STAT(warmupCycle, "Cycle when the warmup percentage was hit."),
- ADD_STAT(occupancies, "Average occupied blocks per requestor"),
+ ADD_STAT(occupancies, "Average occupied blocks per tick, per
requestor"),
ADD_STAT(avgOccs, "Average percentage of cache occupancy"),
ADD_STAT(occupanciesTaskId, "Occupied blocks per task id"),
- ADD_STAT(ageTaskId, "Occupied blocks per task id"),
- ADD_STAT(percentOccsTaskId, "Percentage of cache occupancy per task
id"),
+ ADD_STAT(ageTaskId, "Occupied blocks per task id, per block age"),
+ ADD_STAT(ratioOccsTaskId,
+ "Ratio of occupied blocks and all blocks, per task id"),
ADD_STAT(tagAccesses, "Number of tag accesses"),
ADD_STAT(dataAccesses, "Number of data accesses")
{
@@ -267,9 +268,9 @@
.flags(nozero | nonan)
;
- percentOccsTaskId.flags(nozero);
+ ratioOccsTaskId.flags(nozero);
- percentOccsTaskId = occupanciesTaskId /
Stats::constant(tags.numBlocks);
+ ratioOccsTaskId = occupanciesTaskId / Stats::constant(tags.numBlocks);
}
void
diff --git a/src/mem/cache/tags/base.hh b/src/mem/cache/tags/base.hh
index 5a407a6..79494e0 100644
--- a/src/mem/cache/tags/base.hh
+++ b/src/mem/cache/tags/base.hh
@@ -111,7 +111,7 @@
BaseTags &tags;
- /** Per cycle average of the number of tags that hold valid data.
*/
+ /** Per tick average of the number of tags that hold valid data. */
Stats::Average tagsInUse;
/** The total number of references to a block before it is
replaced. */
@@ -145,8 +145,8 @@
/** Occupancy of each context/cpu using the cache */
Stats::Vector2d ageTaskId;
- /** Occ % of each context/cpu using the cache */
- Stats::Formula percentOccsTaskId;
+ /** Occ ratio of each context/cpu using the cache */
+ Stats::Formula ratioOccsTaskId;
/** Number of tags consulted over all accesses. */
Stats::Scalar tagAccesses;
diff --git a/src/mem/xbar.cc b/src/mem/xbar.cc
index b75904c..7423374 100644
--- a/src/mem/xbar.cc
+++ b/src/mem/xbar.cc
@@ -64,8 +64,7 @@
useDefaultRange(p.use_default_range),
ADD_STAT(transDist, "Transaction distribution"),
- ADD_STAT(pktCount,
- "Packet count per connected requestor and responder
(bytes)"),
+ ADD_STAT(pktCount, "Packet count per connected requestor and
responder"),
ADD_STAT(pktSize, "Cumulative packet size per connected requestor
and "
"responder (bytes)")
{
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/39975
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: Iabff7d42318fcc73df79ee7f3c5430f4c45555e5
Gerrit-Change-Number: 39975
Gerrit-PatchSet: 15
Gerrit-Owner: Hoa Nguyen <hoangu...@ucdavis.edu>
Gerrit-Reviewer: Bobby R. Bruce <bbr...@ucdavis.edu>
Gerrit-Reviewer: Daniel Carvalho <oda...@yahoo.com.br>
Gerrit-Reviewer: Hoa Nguyen <hoangu...@ucdavis.edu>
Gerrit-Reviewer: Jason Lowe-Power <power...@gmail.com>
Gerrit-Reviewer: Nikos Nikoleris <nikos.nikole...@arm.com>
Gerrit-Reviewer: kokoro <noreply+kok...@google.com>
Gerrit-MessageType: merged
_______________________________________________
gem5-dev mailing list -- gem5-dev@gem5.org
To unsubscribe send an email to gem5-dev-le...@gem5.org
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s