changeset 929da5a00a10 in /z/repo/gem5
details: http://repo.gem5.org/gem5?cmd=changeset;node=929da5a00a10
description:
        mem: SimpleDRAM variable naming and whitespace fixes

        This patch fixes a number of small cosmetic issues in the SimpleDRAM
        module. The most important change is to move the accounting of
        received packets to after the check of whether the packet should
        be retried. Thus, packets are only counted if they are actually
        accepted.

diffstat:

 src/mem/simple_dram.cc |  354 ++++++++++++++++++++----------------------------
 src/mem/simple_dram.hh |   42 ++--
 2 files changed, 172 insertions(+), 224 deletions(-)

diffs (truncated from 885 to 300 lines):

diff -r b1e1409922ad -r 929da5a00a10 src/mem/simple_dram.cc
--- a/src/mem/simple_dram.cc    Fri Mar 01 13:20:22 2013 -0500
+++ b/src/mem/simple_dram.cc    Fri Mar 01 13:20:24 2013 -0500
@@ -43,7 +43,6 @@
 #include "debug/DRAM.hh"
 #include "debug/DRAMWR.hh"
 #include "mem/simple_dram.hh"
-#include "sim/stat_control.hh"
 
 using namespace std;
 
@@ -67,7 +66,7 @@
     tXAW(p->tXAW), activationLimit(p->activation_limit),
     memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
     pageMgmt(p->page_policy),
-    busBusyUntil(0), prevdramaccess(0), writeStartTime(0),
+    busBusyUntil(0), writeStartTime(0),
     prevArrival(0), numReqs(0)
 {
     // create the bank states based on the dimensions of the ranks and
@@ -91,24 +90,17 @@
         port.sendRangeChange();
     }
 
-    // get the cache line size from the connected port
+    // get the burst size from the connected port as it is currently
+    // assumed to be equal to the cache line size
     bytesPerCacheLine = port.peerBlockSize();
 
     // we could deal with plenty options here, but for now do a quick
     // sanity check
     if (bytesPerCacheLine != 64 && bytesPerCacheLine != 32)
-        panic("Unexpected cache line size %d", bytesPerCacheLine);
+        panic("Unexpected burst size %d", bytesPerCacheLine);
 
     // determine the rows per bank by looking at the total capacity
-    uint64_t capacity = AbstractMemory::size();
-    uint64_t i = 1;
-    while (i < 64 && capacity > ((1 << i))) {
-        ++i;
-    }
-
-    // rounded up to nearest power of two
-    DPRINTF(DRAM, "i is %lld\n", i);
-    capacity = 1 << i;
+    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
 
     DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
             AbstractMemory::size());
@@ -141,10 +133,9 @@
     printParams();
 
     // kick off the refresh
-    schedule(&refreshEvent, curTick() + tREFI);
+    schedule(refreshEvent, curTick() + tREFI);
 }
 
-
 Tick
 SimpleDRAM::recvAtomic(PacketPtr pkt)
 {
@@ -166,20 +157,19 @@
 SimpleDRAM::readQueueFull() const
 {
     DPRINTF(DRAM, "Read queue limit %d current size %d\n",
-            readBufferSize, dramReadQueue.size() + dramRespQueue.size());
+            readBufferSize, readQueue.size() + respQueue.size());
 
-    return (dramReadQueue.size() + dramRespQueue.size()) == readBufferSize;
+    return (readQueue.size() + respQueue.size()) == readBufferSize;
 }
 
 bool
 SimpleDRAM::writeQueueFull() const
 {
     DPRINTF(DRAM, "Write queue limit %d current size %d\n",
-            writeBufferSize, dramWriteQueue.size());
-    return dramWriteQueue.size() == writeBufferSize;
+            writeBufferSize, writeQueue.size());
+    return writeQueue.size() == writeBufferSize;
 }
 
-
 SimpleDRAM::DRAMPacket*
 SimpleDRAM::decodeAddr(PacketPtr pkt)
 {
@@ -193,7 +183,6 @@
     uint16_t row;
 
     Addr addr = pkt->getAddr();
-    Addr temp = addr;
 
     // truncate the address to the access granularity
     addr = addr / bytesPerCacheLine;
@@ -258,14 +247,13 @@
     assert(row < rowsPerBank);
 
     DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
-            temp, rank, bank, row);
+            pkt->getAddr(), rank, bank, row);
 
     // create the corresponding DRAM packet with the entry time and
-    // ready time set to the current tick, they will be updated later
-    DRAMPacket* dram_pkt = new DRAMPacket(pkt, rank, bank, row, temp,
-                                          banks[rank][bank]);
-
-    return dram_pkt;
+    // ready time set to the current tick, the latter will be updated
+    // later
+    return new DRAMPacket(pkt, rank, bank, row, pkt->getAddr(),
+                          banks[rank][bank]);
 }
 
 void
@@ -277,14 +265,14 @@
 
     // First check write buffer to see if the data is already at
     // the controller
-    std::list<DRAMPacket*>::const_iterator i;
+    list<DRAMPacket*>::const_iterator i;
     Addr addr = pkt->getAddr();
 
     // @todo: add size check
-    for (i = dramWriteQueue.begin();  i != dramWriteQueue.end(); ++i) {
+    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
         if ((*i)->addr == addr){
             servicedByWrQ++;
-            DPRINTF(DRAM,"Serviced by write Q\n");
+            DPRINTF(DRAM, "Read to %lld serviced by write queue\n", addr);
             bytesRead += bytesPerCacheLine;
             bytesConsumedRd += pkt->getSize();
             accessAndRespond(pkt);
@@ -294,31 +282,32 @@
 
     DRAMPacket* dram_pkt = decodeAddr(pkt);
 
-    assert(dramReadQueue.size() + dramRespQueue.size() < readBufferSize);
-    rdQLenPdf[dramReadQueue.size() + dramRespQueue.size()]++;
+    assert(readQueue.size() + respQueue.size() < readBufferSize);
+    rdQLenPdf[readQueue.size() + respQueue.size()]++;
 
     DPRINTF(DRAM, "Adding to read queue\n");
 
-    dramReadQueue.push_back(dram_pkt);
+    readQueue.push_back(dram_pkt);
 
     // Update stats
     uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
     assert(bank_id < ranksPerChannel * banksPerRank);
     perBankRdReqs[bank_id]++;
 
-    avgRdQLen = dramReadQueue.size() + dramRespQueue.size();
+    avgRdQLen = readQueue.size() + respQueue.size();
 
-    // Special case where no arbitration is required between requests
+    // If we are not already scheduled to get the read request out of
+    // the queue, do so now
     if (!nextReqEvent.scheduled() && !stopReads) {
-        DPRINTF(DRAM, "Request %lld - need to schedule immediately");
-        schedule(&nextReqEvent, curTick() + 1);
+        DPRINTF(DRAM, "Request scheduled immediately\n");
+        schedule(nextReqEvent, curTick());
     }
 }
 
 void
 SimpleDRAM::processWriteEvent()
 {
-    assert(!dramWriteQueue.empty());
+    assert(!writeQueue.empty());
     uint32_t numWritesThisTime = 0;
 
     DPRINTF(DRAMWR, "Beginning DRAM Writes\n");
@@ -326,13 +315,15 @@
     Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());
 
     // @todo: are there any dangers with the untimed while loop?
-    while (!dramWriteQueue.empty()) {
-        if (numWritesThisTime > writeThreshold)
+    while (!writeQueue.empty()) {
+        if (numWritesThisTime > writeThreshold) {
+            DPRINTF(DRAMWR, "Hit write threshold %d\n", writeThreshold);
             break;
+        }
 
         chooseNextWrite();
-        DRAMPacket* dram_pkt = dramWriteQueue.front();
-        // What's the earlier the request can be put on the bus
+        DRAMPacket* dram_pkt = writeQueue.front();
+        // What's the earliest the request can be put on the bus
         Tick schedTime = std::max(curTick(), busBusyUntil);
 
         DPRINTF(DRAMWR, "Asking for latency estimate at %lld\n",
@@ -342,9 +333,6 @@
         Tick accessLat = lat.second;
 
         // look at the rowHitFlag set by estimateLatency
-
-        // @todo: Race condition here where another packet gives rise
-        // to another call to estimateLatency in the meanwhile?
         if (rowHitFlag)
             writeRowHits++;
 
@@ -372,13 +360,13 @@
         } else
             panic("Unknown page management policy chosen\n");
 
-        DPRINTF(DRAMWR,"Done writing to address %lld\n",dram_pkt->addr);
+        DPRINTF(DRAMWR, "Done writing to address %lld\n", dram_pkt->addr);
 
-        DPRINTF(DRAMWR,"schedtime is %lld, tBURST is %lld, "
+        DPRINTF(DRAMWR, "schedtime is %lld, tBURST is %lld, "
                 "busbusyuntil is %lld\n",
                 schedTime, tBURST, busBusyUntil);
 
-        dramWriteQueue.pop_front();
+        writeQueue.pop_front();
         delete dram_pkt;
 
         numWritesThisTime++;
@@ -389,7 +377,7 @@
             busBusyUntil - temp1, maxBankFreeAt() - temp2);
 
     // Update stats
-    avgWrQLen = dramWriteQueue.size();
+    avgWrQLen = writeQueue.size();
 
     // turn the bus back around for reads again
     busBusyUntil += tWTR;
@@ -401,15 +389,15 @@
     }
 
     // if there is nothing left in any queue, signal a drain
-    if (dramWriteQueue.empty() && dramReadQueue.empty() &&
-        dramRespQueue.empty () && drainManager) {
+    if (writeQueue.empty() && readQueue.empty() &&
+        respQueue.empty () && drainManager) {
         drainManager->signalDrainDone();
         drainManager = NULL;
     }
 
     // Once you're done emptying the write queue, check if there's
     // anything in the read queue, and call schedule if required
-    schedule(&nextReqEvent, busBusyUntil);
+    schedule(nextReqEvent, busBusyUntil);
 }
 
 void
@@ -425,7 +413,7 @@
 
     assert(writeStartTime >= curTick());
     assert(!writeEvent.scheduled());
-    schedule(&writeEvent, writeStartTime);
+    schedule(writeEvent, writeStartTime);
 }
 
 void
@@ -437,19 +425,19 @@
 
     DRAMPacket* dram_pkt = decodeAddr(pkt);
 
-    assert(dramWriteQueue.size() < writeBufferSize);
-    wrQLenPdf[dramWriteQueue.size()]++;
+    assert(writeQueue.size() < writeBufferSize);
+    wrQLenPdf[writeQueue.size()]++;
 
     DPRINTF(DRAM, "Adding to write queue\n");
 
-    dramWriteQueue.push_back(dram_pkt);
+    writeQueue.push_back(dram_pkt);
 
     // Update stats
     uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
     assert(bank_id < ranksPerChannel * banksPerRank);
     perBankWrReqs[bank_id]++;
 
-    avgWrQLen = dramWriteQueue.size();
+    avgWrQLen = writeQueue.size();
 
     // we do not wait for the writes to be send to the actual memory,
     // but instead take responsibility for the consistency here and
@@ -460,7 +448,7 @@
     accessAndRespond(pkt);
 
     // If your write buffer is starting to fill up, drain it!
-    if (dramWriteQueue.size() > writeThreshold  && !stopReads){
+    if (writeQueue.size() > writeThreshold && !stopReads){
         triggerWrites();
     }
 }
@@ -477,7 +465,7 @@
             "Banks per rank       %d\n"                         \
             "Ranks per channel    %d\n"                         \
             "Total mem capacity   %u\n",
-            name(), bytesPerCacheLine ,linesPerRowBuffer, rowsPerBank,
+            name(), bytesPerCacheLine, linesPerRowBuffer, rowsPerBank,
             banksPerRank, ranksPerChannel, bytesPerCacheLine *
             linesPerRowBuffer * rowsPerBank * banksPerRank * ranksPerChannel);
 
@@ -498,14 +486,16 @@
_______________________________________________
gem5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/gem5-dev

Reply via email to