# HG changeset patch
# User Derek Hower <d...@cs.wisc.edu>
# Date 1252638133 18000
# Node ID 1bf54187d685859f3e39942c03429d220eca627c
# Parent  dad8671f8769f10de29445c0a25b572e961697b1
# Parent  38da844de1148e391dc67779e3dcb4fed8fb5400
Automated merge with ssh://h...@m5sim.org/m5

diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -17,6 +17,7 @@
     II, desc="Not Present/Invalid, issued PUT";
     M,  desc="Modified";
     MI,  desc="Modified, issued PUT";
+    MII, desc="Modified, issued PUTX, received nack";
 
     IS,  desc="Issued request for LOAD/IFETCH";
     IM,  desc="Issued request for STORE/ATOMIC";
@@ -388,6 +389,16 @@
     o_popForwardedRequestQueue;
   }
 
+  transition(MI, Writeback_Nack, MII) {
+    o_popForwardedRequestQueue;
+  }
+
+  transition(MII, Fwd_GETX, I) {
+    ee_sendDataFromTBE;
+    w_deallocateTBE;
+    o_popForwardedRequestQueue;
+  }
+
   transition(II, Writeback_Nack, I) {
     w_deallocateTBE;
     o_popForwardedRequestQueue;
diff --git a/src/mem/ruby/config/MI_example-homogeneous.rb b/src/mem/ruby/config/MI_example-homogeneous.rb
--- a/src/mem/ruby/config/MI_example-homogeneous.rb
+++ b/src/mem/ruby/config/MI_example-homogeneous.rb
@@ -34,6 +34,13 @@
   elsif $*[i] == "-m"
     num_memories = $*[i+1].to_i
     i = i+1
+  elsif $*[i] == "-R"
+    if $*[i+1] == "rand"
+      RubySystem.random_seed = "rand"
+    else
+      RubySystem.random_seed = $*[i+1].to_i
+    end
+    i = i+ 1
   elsif $*[i] == "-s"
     memory_size_mb = $*[i+1].to_i
     i = i + 1
diff --git a/src/mem/ruby/config/TwoLevel_SplitL1UnifiedL2.rb b/src/mem/ruby/config/TwoLevel_SplitL1UnifiedL2.rb
--- a/src/mem/ruby/config/TwoLevel_SplitL1UnifiedL2.rb
+++ b/src/mem/ruby/config/TwoLevel_SplitL1UnifiedL2.rb
@@ -40,6 +40,13 @@
   elsif $*[i] == "-p"
     num_cores = $*[i+1].to_i
     i = i+1
+  elsif $*[i] == "-R"
+    if $*[i+1] == "rand"
+      RubySystem.random_seed = "rand"
+    else
+      RubySystem.random_seed = $*[i+1].to_i
+    end
+    i = i+ 1
   elsif $*[i] == "-s"
     memory_size_mb = $*[i+1].to_i
     i = i + 1
@@ -72,6 +79,8 @@
    net_ports << MOESI_CMP_directory_L2CacheController.new("L2CacheController_"+n.to_s,
                                                            "L2Cache",
                                                            cache)
+    net_ports.last.request_latency = l2_cache_latency + 2
+    net_ports.last.response_latency = l2_cache_latency + 2
   end
 }
 num_memories.times { |n|
diff --git a/src/mem/ruby/config/cfg.rb b/src/mem/ruby/config/cfg.rb
--- a/src/mem/ruby/config/cfg.rb
+++ b/src/mem/ruby/config/cfg.rb
@@ -11,7 +11,7 @@
 
 def assert(condition,message)
   unless condition
-    raise AssertionFailure, "\n\nAssertion failed: \n\n   #{message}\n\n"
+    raise AssertionFailure.new(message), "\n\nAssertion failed: \n\n   #{message}\n\n"
   end
 end
 
diff --git a/src/mem/ruby/config/defaults.rb b/src/mem/ruby/config/defaults.rb
--- a/src/mem/ruby/config/defaults.rb
+++ b/src/mem/ruby/config/defaults.rb
@@ -163,8 +163,8 @@
 end
 
 class MOESI_CMP_directory_DMAController < DMAController
-  default_param :request_latency, Integer, 6
-  default_param :response_latency, Integer, 6
+  default_param :request_latency, Integer, 14
+  default_param :response_latency, Integer, 14
 end
 
 class RubySystem
@@ -179,7 +179,7 @@
   # you can still have a non-deterministic simulation if random seed
   # is set to "rand".  This is because the Ruby swtiches use random
   # link priority elevation
-  default_param :randomization, Boolean, false
+  default_param :randomization, Boolean, true
 
   # tech_nm is the device size used to calculate latency and area
   # information about system components
diff --git a/src/mem/ruby/libruby.cc b/src/mem/ruby/libruby.cc
--- a/src/mem/ruby/libruby.cc
+++ b/src/mem/ruby/libruby.cc
@@ -133,6 +133,10 @@
   RubySystem::getMemoryVector()->read(Address(paddr), data, len);
 }
 
+bool libruby_isReady(RubyPortHandle p, struct RubyRequest request) {
+  return static_cast<RubyPort*>(p)->isReady(request, true);
+}
+
 int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request)
 {
   return static_cast<RubyPort*>(p)->makeRequest(request);
diff --git a/src/mem/ruby/libruby.hh b/src/mem/ruby/libruby.hh
--- a/src/mem/ruby/libruby.hh
+++ b/src/mem/ruby/libruby.hh
@@ -34,7 +34,7 @@
   unsigned proc_id;
 
   RubyRequest() {}
-  RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned _proc_id = 0)
+  RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned _proc_id = 100)
    : paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type), access_mode(_access_mode), proc_id(_proc_id)
   {}
 };
@@ -76,6 +76,12 @@
  */
 int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request);
 
+
+/**
+ * Checks whether the port can currently accept the given request, without issuing it or modifying sequencer state.
+ */
+bool libruby_isReady(RubyPortHandle p, struct RubyRequest request);
+
 /**
  * writes data directly into Ruby's data array.  Note that this
  * ignores caches, and should be considered incoherent after
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
--- a/src/mem/ruby/system/CacheMemory.hh
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -156,6 +156,7 @@
 
   // The first index is the # of cache lines.
   // The second index is the the amount associativity.
+  m5::hash_map<Address, int> m_tag_index;
   Vector<Vector<AbstractCacheEntry*> > m_cache;
   Vector<Vector<int> > m_locked;
 
@@ -286,6 +287,12 @@
 {
   assert(tag == line_address(tag));
   // search the set for the tags
+  m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
+  if (it != m_tag_index.end())
+    if (m_cache[cacheSet][it->second]->m_Permission != AccessPermission_NotPresent)
+      return it->second;
+  return -1; // Not found
+  /*
   for (int i=0; i < m_cache_assoc; i++) {
     if ((m_cache[cacheSet][i] != NULL) &&
         (m_cache[cacheSet][i]->m_Address == tag) &&
@@ -294,6 +301,7 @@
     }
   }
   return -1; // Not found
+  */
 }
 
 // Given a cache index: returns the index of the tag in a set.
@@ -303,11 +311,19 @@
 {
   assert(tag == line_address(tag));
   // search the set for the tags
+  m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
+  if (it != m_tag_index.end())
+    return it->second;
+  return -1; // Not found
+  /*
+  assert(tag == line_address(tag));
+  // search the set for the tags
   for (int i=0; i < m_cache_assoc; i++) {
     if (m_cache[cacheSet][i] != NULL && m_cache[cacheSet][i]->m_Address == tag)
       return i;
   }
   return -1; // Not found
+  */
 }
 
 // PUBLIC METHODS
@@ -418,6 +434,7 @@
       m_cache[cacheSet][i]->m_Address = address;
       m_cache[cacheSet][i]->m_Permission = AccessPermission_Invalid;
       m_locked[cacheSet][i] = -1;
+      m_tag_index[address] = i;
 
       m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());
 
@@ -439,6 +456,7 @@
     delete m_cache[cacheSet][location];
     m_cache[cacheSet][location] = NULL;
     m_locked[cacheSet][location] = -1;
+    m_tag_index.erase(address);
   }
 }
 
diff --git a/src/mem/ruby/system/DMASequencer.hh b/src/mem/ruby/system/DMASequencer.hh
--- a/src/mem/ruby/system/DMASequencer.hh
+++ b/src/mem/ruby/system/DMASequencer.hh
@@ -25,6 +25,7 @@
   void init(const vector<string> & argv);
   /* external interface */
   int64_t makeRequest(const RubyRequest & request);
+  bool isReady(const RubyRequest & request, bool dont_set = false) { assert(0); return false;};
   //  void issueRequest(uint64_t paddr, uint8* data, int len, bool rw);
   bool busy() { return m_is_busy;}
 
diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh
--- a/src/mem/ruby/system/RubyPort.hh
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -21,6 +21,8 @@
 
   virtual int64_t makeRequest(const RubyRequest & request) = 0;
 
+  virtual bool isReady(const RubyRequest & request, bool dont_set = false) = 0;
+
   void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
     assert(m_hit_callback == NULL); // can't assign hit_callback twice
     m_hit_callback = hit_callback;
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -61,7 +61,7 @@
   m_instCache_ptr = NULL;
   m_dataCache_ptr = NULL;
   m_controller = NULL;
-  m_servicing_atomic = -1;
+  m_servicing_atomic = 200;
   m_atomics_counter = 0;
   for (size_t i=0; i<argv.size(); i+=2) {
     if ( argv[i] == "controller") {
@@ -108,6 +108,7 @@
       WARN_MSG("Possible Deadlock detected");
       WARN_EXPR(request);
       WARN_EXPR(m_version);
+      WARN_EXPR(request->ruby_request.paddr);
       WARN_EXPR(keys.size());
       WARN_EXPR(current_time);
       WARN_EXPR(request->issue_time);
@@ -344,13 +345,22 @@
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
     }
   }
-
+  if (type == RubyRequestType_RMW_Write) {
+    if (m_servicing_atomic != ruby_request.proc_id) {
+      assert(0);
+    }
+    assert(m_atomics_counter > 0);
+    m_atomics_counter--;
+    if (m_atomics_counter == 0) {
+      m_servicing_atomic = 200;
+    }
+  }
   m_hit_callback(srequest->id);
   delete srequest;
 }
 
 // Returns true if the sequencer already has a load or store outstanding
-bool Sequencer::isReady(const RubyRequest& request) {
+bool Sequencer::isReady(const RubyRequest& request, bool dont_set) {
  // POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
   // to simulate stalling of the front-end
   // Do we stall all the sequencers? If it is atomic instruction - yes!
@@ -365,27 +375,29 @@
     return false;
   }
 
-  if (m_servicing_atomic != -1 && m_servicing_atomic != (int)request.proc_id) {
+  if (m_servicing_atomic != 200 && m_servicing_atomic != request.proc_id) {
     assert(m_atomics_counter > 0);
     return false;
   }
   else {
-    if (request.type == RubyRequestType_RMW_Read) {
-      if (m_servicing_atomic == -1) {
-        assert(m_atomics_counter == 0);
-        m_servicing_atomic = (int)request.proc_id;
+    if (!dont_set) {
+      if (request.type == RubyRequestType_RMW_Read) {
+        if (m_servicing_atomic == 200) {
+          assert(m_atomics_counter == 0);
+          m_servicing_atomic = request.proc_id;
+        }
+        else {
+          assert(m_servicing_atomic == request.proc_id);
+        }
+        m_atomics_counter++;
       }
       else {
-        assert(m_servicing_atomic == (int)request.proc_id);
-      }
-      m_atomics_counter++;
-    }
-    else if (request.type == RubyRequestType_RMW_Write) {
-      assert(m_servicing_atomic == (int)request.proc_id);
-      assert(m_atomics_counter > 0);
-      m_atomics_counter--;
-      if (m_atomics_counter == 0) {
-        m_servicing_atomic = -1;
+        if (m_servicing_atomic == request.proc_id) {
+          if (request.type != RubyRequestType_RMW_Write) {
+            m_servicing_atomic = 200;
+            m_atomics_counter = 0;
+          }
+        }
       }
     }
   }
@@ -405,7 +417,7 @@
     int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
     bool found = insertRequest(srequest);
-    if (!found)
+    if (!found) {
       if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        // ensuring that nothing comes between checking the flag and servicing the store
@@ -423,6 +435,10 @@
 
     // TODO: issue hardware prefetches here
     return id;
+    }
+    else {
+      assert(0);
+    }
   }
   else {
     return -1;
@@ -444,14 +460,8 @@
     ctype = CacheRequestType_ST;
     break;
   case RubyRequestType_Locked_Read:
-    ctype = CacheRequestType_ST;
-    break;
   case RubyRequestType_Locked_Write:
-    ctype = CacheRequestType_ST;
-    break;
   case RubyRequestType_RMW_Read:
-    ctype = CacheRequestType_ATOMIC;
-    break;
   case RubyRequestType_RMW_Write:
     ctype = CacheRequestType_ATOMIC;
     break;
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
--- a/src/mem/ruby/system/Sequencer.hh
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -84,7 +84,7 @@
 
   // called by Tester or Simics
   int64_t makeRequest(const RubyRequest & request);
-  bool isReady(const RubyRequest& request);
+  bool isReady(const RubyRequest& request, bool dont_set = false);
   bool empty() const;
 
   void print(ostream& out) const;
@@ -125,7 +125,7 @@
   // Global outstanding request count, across all request tables
   int m_outstanding_count;
   bool m_deadlock_check_scheduled;
-  int m_servicing_atomic;
+  unsigned m_servicing_atomic;
   int m_atomics_counter;
 };
 
diff --git a/util/style.py b/util/style.py
--- a/util/style.py
+++ b/util/style.py
@@ -65,7 +65,7 @@
     if filename.startswith("SCons"):
         return True
 
-    return False
+    return True 
 
 format_types = ( 'C', 'C++' )
 def format_file(filename):
@@ -77,11 +77,11 @@
 def checkwhite_line(line):
     match = lead.search(line)
     if match and match.group(1).find('\t') != -1:
-        return False
+        return True 
 
     match = trail.search(line)
     if match:
-        return False
+        return True
 
     return True
 