Juan Manuel Cebrián González has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/28053 )

Change subject: mem-ruby: Fix for ruby latency
......................................................................

mem-ruby: Fix for ruby latency

The classic memory model completes L1 hits instantaneously and sets the 'when' to curTick + hit_latency. Ruby models, however, advance curTick instead. Therefore the legacy '+1' adds an additional cycle to Ruby L1 cache hits.

This causes significantly more front-end stalls in the O3 CPU when compared to real Intel hardware. Applying this fix improves both performance and simulation time by around 30%, and front-end stalls look much closer to those of real hardware.

Change-Id: I4c91dc09bff5f45f1d1e42edc13d3c15d6205c46
---
M src/mem/packet_queue.cc
M src/mem/packet_queue.hh
M src/mem/request.hh
M src/mem/ruby/system/Sequencer.cc
4 files changed, 34 insertions(+), 3 deletions(-)



diff --git a/src/mem/packet_queue.cc b/src/mem/packet_queue.cc
index dd1ba3d..60b1889 100644
--- a/src/mem/packet_queue.cc
+++ b/src/mem/packet_queue.cc
@@ -148,12 +148,18 @@
     // either the packet list is empty or this has to be inserted
     // before every other packet
     transmitList.emplace_front(when, pkt);
-    schedSendEvent(when);
+    schedSendEvent(when,pkt);
 }

 void
 PacketQueue::schedSendEvent(Tick when)
 {
+    schedSendEvent(when,NULL);
+}
+
+void
+PacketQueue::schedSendEvent(Tick when, PacketPtr pkt)
+{
     // if we are waiting on a retry just hold off
     if (waitingOnRetry) {
         DPRINTF(PacketQueue, "Not scheduling send as waiting for retry\n");
@@ -162,10 +168,20 @@
     }

     if (when != MaxTick) {
+        // The classic memory model clears L1 hits instantaneously and
+        // sets the 'when' to curTick + hit_latency. Ruby models however
+        // advance the curTick instead. Therefore the legacy '+1' adds an
+        // additional cycle to ruby L1 cache hits.
         // we cannot go back in time, and to be consistent we stick to
         // one tick in the future
-        when = std::max(when, curTick() + 1);
+        Tick initWhen = when;
+        when = std::max(initWhen, curTick() + 1);
         // @todo Revisit the +1
+        if (pkt != NULL) {
+          if (pkt->req->wasHandledByRuby()) {
+            when = std::max(initWhen, curTick());
+          }
+        }

         if (!sendEvent.scheduled()) {
             em.schedule(&sendEvent, when);
diff --git a/src/mem/packet_queue.hh b/src/mem/packet_queue.hh
index b9c5b75..bac4db2 100644
--- a/src/mem/packet_queue.hh
+++ b/src/mem/packet_queue.hh
@@ -194,6 +194,12 @@
      *
      * @param when time to schedule an event
      */
+    void schedSendEvent(Tick when, PacketPtr pkt);
+
+    /**
+     * Wrapper for old schedSendEvent
+     * @param when time to schedule an event
+     */
     void schedSendEvent(Tick when);

     /**
diff --git a/src/mem/request.hh b/src/mem/request.hh
index aca9fe8..b127a4a 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -251,6 +251,9 @@
         ARG_SEGMENT            = 0x00000800,
     };

+    /* Flag set when a packet passes through Ruby */
+    bool handledByRuby;
+
   private:
     typedef uint16_t PrivateFlagsType;
     typedef ::Flags<PrivateFlagsType> PrivateFlags;
@@ -299,6 +302,7 @@
         privateFlags.set(VALID_PADDR|VALID_SIZE);
         depth = 0;
         accessDelta = 0;
+        handledByRuby = false;
         //translateDelta = 0;
     }

@@ -399,7 +403,7 @@
           _extraData(0), _contextId(0), _pc(0),
           _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
           accessDelta(0), depth(0)
-    {}
+    { handledByRuby = false; }

     Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
             InstSeqNum seq_num, ContextID cid)
@@ -540,6 +544,7 @@
         accessDelta = 0;
         translateDelta = 0;
         atomicOpFunctor = std::move(amo_op);
+        handledByRuby = false;
     }

     /**
@@ -908,6 +913,8 @@
     bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
     bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

+    bool wasHandledByRuby() const { return handledByRuby; };
+
     bool
     isAtomic() const
     {
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index a90523e..4f1ba2a 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -520,6 +520,8 @@
         return RequestStatus_BufferFull;
     }

+    pkt->req->handledByRuby = true;
+
     RubyRequestType primary_type = RubyRequestType_NULL;
     RubyRequestType secondary_type = RubyRequestType_NULL;


--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/28053
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: master
Gerrit-Change-Id: I4c91dc09bff5f45f1d1e42edc13d3c15d6205c46
Gerrit-Change-Number: 28053
Gerrit-PatchSet: 1
Gerrit-Owner: Juan Manuel Cebrián González <jm.cebriangonza...@gmail.com>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list -- gem5-dev@gem5.org
To unsubscribe send an email to gem5-dev-le...@gem5.org
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s

Reply via email to