# HG changeset patch
# User Brad Beckmann <brad.beckm...@amd.com>
# Date 1263536248 28800
# Node ID 8f9794901524089b5b55ceb53f65545224228dfe
# Parent  09d89c2eddfe7d505233a4814e1f030917736ca9
ruby: MESI_CMP_directory updated to the new config system

diff -r 09d89c2eddfe -r 8f9794901524 configs/ruby/MESI_CMP_directory.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/configs/ruby/MESI_CMP_directory.py        Thu Jan 14 22:17:28 2010 -0800
@@ -0,0 +1,151 @@
+# Copyright (c) 2006-2007 The Regents of The University of Michigan
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Brad Beckmann
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from m5.util import addToPath
+
+#
+# Note: the L1 Cache latency is only used by the sequencer on fast path hits
+#
+class L1Cache(RubyCache):
+    latency = 3
+
+#
+# Note: the L2 Cache latency is not currently used
+#
+class L2Cache(RubyCache):
+    latency = 15
+
+def create_system(options, phys_mem, piobus, dma_devices):
+    
+    if buildEnv['PROTOCOL'] != 'MESI_CMP_directory':
+        panic("This script requires the MESI_CMP_directory protocol to be 
built.")
+
+    cpu_sequencers = []
+    
+    #
+    # The ruby network creation expects the list of nodes in the system to be
+    # consistent with the NetDest list.  Therefore the l1 controller nodes must be
+    # listed before the directory nodes and directory nodes before dma nodes, etc.
+    #
+    l1_cntrl_nodes = []
+    l2_cntrl_nodes = []
+    dir_cntrl_nodes = []
+    dma_cntrl_nodes = []
+
+    #
+    # Must create the individual controllers before the network to ensure the
+    # controller constructors are called before the network constructor
+    #
+    
+    for i in xrange(options.num_cpus):
+        #
+        # First create the Ruby objects associated with this cpu
+        #
+        l1i_cache = L1Cache(size = options.l1i_size,
+                            assoc = options.l1i_assoc)
+        l1d_cache = L1Cache(size = options.l1d_size,
+                            assoc = options.l1d_assoc)
+
+        cpu_seq = RubySequencer(icache = l1i_cache,
+                                dcache = l1d_cache,
+                                physMemPort = phys_mem.port,
+                                physmem = phys_mem)
+
+        if piobus != None:
+            cpu_seq.pio_port = piobus.port
+
+        l1_cntrl = L1Cache_Controller(version = i,
+                                      sequencer = cpu_seq,
+                                      L1IcacheMemory = l1i_cache,
+                                      L1DcacheMemory = l1d_cache,
+                                      l2_select_num_bits = \
+                                        math.log(options.num_l2caches, 2))
+        #
+        # Add controllers and sequencers to the appropriate lists
+        #
+        cpu_sequencers.append(cpu_seq)
+        l1_cntrl_nodes.append(l1_cntrl)
+
+    for i in xrange(options.num_l2caches):
+        #
+        # First create the Ruby objects associated with this L2 cache
+        #
+        l2_cache = L2Cache(size = options.l2_size,
+                           assoc = options.l2_assoc)
+
+        l2_cntrl = L2Cache_Controller(version = i,
+                                      L2cacheMemory = l2_cache)
+        
+        l2_cntrl_nodes.append(l2_cntrl)
+        
+    phys_mem_size = long(phys_mem.range.second) - long(phys_mem.range.first) + 1
+    mem_module_size = phys_mem_size / options.num_dirs
+
+    for i in xrange(options.num_dirs):
+        #
+        # Create the Ruby objects associated with the directory controller
+        #
+
+        mem_cntrl = RubyMemoryControl(version = i)
+
+        dir_size = MemorySize('0B')
+        dir_size.value = mem_module_size
+
+        dir_cntrl = Directory_Controller(version = i,
+                                         directory = \
+                                         RubyDirectoryMemory(version = i,
+                                                             size = dir_size),
+                                         memBuffer = mem_cntrl)
+
+        dir_cntrl_nodes.append(dir_cntrl)
+
+    for i, dma_device in enumerate(dma_devices):
+        #
+        # Create the Ruby objects associated with the dma controller
+        #
+        dma_seq = DMASequencer(version = i,
+                               physMemPort = phys_mem.port,
+                               physmem = phys_mem)
+        
+        dma_cntrl = DMA_Controller(version = i,
+                                   dma_sequencer = dma_seq)
+
+        dma_cntrl.dma_sequencer.port = dma_device.dma
+        dma_cntrl_nodes.append(dma_cntrl)
+
+    all_cntrls = l1_cntrl_nodes + \
+                 l2_cntrl_nodes + \
+                 dir_cntrl_nodes + \
+                 dma_cntrl_nodes
+
+    return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)
diff -r 09d89c2eddfe -r 8f9794901524 configs/ruby/Ruby.py
--- a/configs/ruby/Ruby.py      Thu Jan 14 22:17:28 2010 -0800
+++ b/configs/ruby/Ruby.py      Thu Jan 14 22:17:28 2010 -0800
@@ -33,6 +33,7 @@
 from m5.util import addToPath
 
 import MOESI_hammer
+import MESI_CMP_directory
 import MOESI_CMP_directory
 import MI_example
 import MOESI_CMP_token
@@ -47,6 +48,12 @@
                                        physmem, \
                                        piobus, \
                                        dma_devices)
+    elif protocol == "MESI_CMP_directory":
+        (cpu_sequencers, dir_cntrls, all_cntrls) = \
+            MESI_CMP_directory.create_system(options, \
+                                             physmem, \
+                                             piobus, \
+                                             dma_devices)
     elif protocol == "MOESI_CMP_directory":
         (cpu_sequencers, dir_cntrls, all_cntrls) = \
             MOESI_CMP_directory.create_system(options, \
diff -r 09d89c2eddfe -r 8f9794901524 src/mem/protocol/MESI_CMP_directory-L1cache.sm
--- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm    Thu Jan 14 22:17:28 2010 -0800
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm    Thu Jan 14 22:17:28 2010 -0800
@@ -34,11 +34,14 @@
 
 
 machine(L1Cache, "MSI Directory L1 Cache CMP")
- : int l1_request_latency,
-   int l1_response_latency,
-   int to_l2_latency,
-   int l2_select_low_bit,
-   int l2_select_num_bits
+ : Sequencer * sequencer,
+   CacheMemory * L1IcacheMemory,
+   CacheMemory * L1DcacheMemory,
+   int l2_select_num_bits,
+   int l1_request_latency = 2,
+   int l1_response_latency = 2,
+   int to_l2_latency = 1,
+   int l2_select_low_bit = 6
 {
 
 
@@ -124,16 +127,6 @@
     int pendingAcks, default="0", desc="number of pending acks";
   }
 
-  external_type(CacheMemory) {
-    bool cacheAvail(Address);
-    Address cacheProbe(Address);
-    void allocate(Address, Entry);
-    void deallocate(Address);
-    Entry lookup(Address);
-    void changePermission(Address, AccessPermission);
-    bool isTagPresent(Address);
-  }
-
   external_type(TBETable) {
     TBE lookup(Address);
     void allocate(Address);
@@ -143,30 +136,16 @@
 
   TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
 
-//  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
-//  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
-
-  CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])';
-
-  CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])';
-
-
-//  MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
-
-//  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
-
   MessageBuffer mandatoryQueue, ordered="false";
-  Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
-
 
   int cache_state_to_int(State state);
 
   // inclusive cache returns L1 entries only
   Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
     if (L1DcacheMemory.isTagPresent(addr)) {
-      return L1DcacheMemory[addr];
+      return static_cast(Entry, L1DcacheMemory[addr]);
     } else {
-      return L1IcacheMemory[addr];
+      return static_cast(Entry, L1IcacheMemory[addr]);
     }
   }
 
diff -r 09d89c2eddfe -r 8f9794901524 src/mem/protocol/MESI_CMP_directory-L2cache.sm
--- a/src/mem/protocol/MESI_CMP_directory-L2cache.sm    Thu Jan 14 22:17:28 2010 -0800
+++ b/src/mem/protocol/MESI_CMP_directory-L2cache.sm    Thu Jan 14 22:17:28 2010 -0800
@@ -33,9 +33,10 @@
  */
 
 machine(L2Cache, "MESI Directory L2 Cache CMP")
- : int l2_request_latency,  
-   int l2_response_latency,
-   int to_l1_latency
+ : CacheMemory * L2cacheMemory,
+   int l2_request_latency = 2,  
+   int l2_response_latency = 2,
+   int to_l1_latency = 1
 {
 
   // L2 BANK QUEUES
@@ -145,17 +146,6 @@
     int pendingAcks,            desc="number of pending acks for invalidates during writeback";
   }
 
-  external_type(CacheMemory) {
-    bool cacheAvail(Address);
-    Address cacheProbe(Address);
-    void allocate(Address, Entry);
-    void deallocate(Address);
-    Entry lookup(Address);
-    void changePermission(Address, AccessPermission);
-    bool isTagPresent(Address);
-    void setMRU(Address);
-  }
-
   external_type(TBETable) {
     TBE lookup(Address);
     void allocate(Address);
@@ -165,14 +155,9 @@
 
   TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
 
-//  CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
-
-
-  CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
-
   // inclusive cache, returns L2 entries only
   Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
-    return L2cacheMemory[addr];
+    return static_cast(Entry, L2cacheMemory[addr]);
   }
 
   void changeL2Permission(Address addr, AccessPermission permission) {
@@ -190,13 +175,13 @@
   }
 
   bool isOneSharerLeft(Address addr, MachineID requestor) {
-    assert(L2cacheMemory[addr].Sharers.isElement(requestor));
-    return (L2cacheMemory[addr].Sharers.count() == 1);
+    assert(getL2CacheEntry(addr).Sharers.isElement(requestor));
+    return (getL2CacheEntry(addr).Sharers.count() == 1);
   }
 
   bool isSharer(Address addr, MachineID requestor) {
     if (L2cacheMemory.isTagPresent(addr)) {
-      return L2cacheMemory[addr].Sharers.isElement(requestor);
+      return getL2CacheEntry(addr).Sharers.isElement(requestor);
     } else {
       return false;
     }
@@ -206,7 +191,7 @@
     //DEBUG_EXPR(machineID);
     //DEBUG_EXPR(requestor);
     //DEBUG_EXPR(addr);
-    L2cacheMemory[addr].Sharers.add(requestor);
+    getL2CacheEntry(addr).Sharers.add(requestor);
   }
 
   State getState(Address addr) {
@@ -361,7 +346,7 @@
             trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
           } else {
            // No room in the L2, so we need to make room before handling the request
-            if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
+            if (getL2CacheEntry( L2cacheMemory.cacheProbe(in_msg.Address) ).Dirty ) {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            } else {
              trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
@@ -393,7 +378,7 @@
         out_msg.Address := address;
         out_msg.Type := in_msg.Type;
         out_msg.Requestor := in_msg.Requestor;
-        out_msg.Destination.add(L2cacheMemory[address].Exclusive);
+        out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
         out_msg.MessageSize := MessageSizeType:Request_Control;
       }
     }
@@ -537,7 +522,7 @@
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:INV;
       out_msg.Requestor := machineID;
-      out_msg.Destination := L2cacheMemory[address].Sharers;
+      out_msg.Destination := getL2CacheEntry(address).Sharers;
       out_msg.MessageSize := MessageSizeType:Request_Control;
     }
   }
@@ -548,7 +533,7 @@
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:INV;
         out_msg.Requestor := in_msg.Requestor;
-        out_msg.Destination := L2cacheMemory[address].Sharers;
+        out_msg.Destination := getL2CacheEntry(address).Sharers;
         out_msg.MessageSize := MessageSizeType:Request_Control;
       }
     }
@@ -561,7 +546,7 @@
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:INV;
         out_msg.Requestor := in_msg.Requestor;
-        out_msg.Destination := L2cacheMemory[address].Sharers;
+        out_msg.Destination := getL2CacheEntry(address).Sharers;
         out_msg.Destination.remove(in_msg.Requestor);
         out_msg.MessageSize := MessageSizeType:Request_Control;
       }
@@ -713,28 +698,28 @@
 
   action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
-      L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
+      getL2CacheEntry(address).Sharers.remove(in_msg.Requestor);
     }
   }
 
   action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
-      L2cacheMemory[address].Sharers.clear();
+      getL2CacheEntry(address).Sharers.clear();
     }
   }
 
   action(mm_markExclusive, "\m", desc="set the exclusive owner") {
     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
-      L2cacheMemory[address].Sharers.clear();
-      L2cacheMemory[address].Exclusive := in_msg.Requestor;
+      getL2CacheEntry(address).Sharers.clear();
+      getL2CacheEntry(address).Exclusive := in_msg.Requestor;
       addSharer(address, in_msg.Requestor);
     }
   }
 
   action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
     peek(L1unblockNetwork_in, ResponseMsg) {
-      L2cacheMemory[address].Sharers.clear();
-      L2cacheMemory[address].Exclusive := in_msg.Sender;
+      getL2CacheEntry(address).Sharers.clear();
+      getL2CacheEntry(address).Exclusive := in_msg.Sender;
       addSharer(address, in_msg.Sender);
     }
   }
diff -r 09d89c2eddfe -r 8f9794901524 src/mem/protocol/MESI_CMP_directory-dma.sm
--- a/src/mem/protocol/MESI_CMP_directory-dma.sm        Thu Jan 14 22:17:28 2010 -0800
+++ b/src/mem/protocol/MESI_CMP_directory-dma.sm        Thu Jan 14 22:17:28 2010 -0800
@@ -1,6 +1,7 @@
 
 machine(DMA, "DMA Controller") 
-: int request_latency
+: DMASequencer * dma_sequencer,
+  int request_latency = 6
 {
 
   MessageBuffer responseFromDir, network="From", virtual_network="6", ordered="true", no_vector="true";
@@ -25,7 +26,6 @@
   }
 
   MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
-  DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])', no_vector="true";
   State cur_state, no_vector="true";
 
   State getState(Address addr) {
diff -r 09d89c2eddfe -r 8f9794901524 src/mem/protocol/MESI_CMP_directory-mem.sm
--- a/src/mem/protocol/MESI_CMP_directory-mem.sm        Thu Jan 14 22:17:28 2010 -0800
+++ b/src/mem/protocol/MESI_CMP_directory-mem.sm        Thu Jan 14 22:17:28 2010 -0800
@@ -36,8 +36,10 @@
 
 
 machine(Directory, "MESI_CMP_filter_directory protocol") 
- : int to_mem_ctrl_latency,
-   int directory_latency
+ : DirectoryMemory * directory,
+   MemoryControl * memBuffer,
+   int to_mem_ctrl_latency = 1,
+   int directory_latency = 6
 {
 
   MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
@@ -80,23 +82,13 @@
   // TYPES
 
   // DirectoryEntry
-  structure(Entry, desc="...") {
+  structure(Entry, desc="...", interface="AbstractEntry") {
     State DirectoryState,          desc="Directory state";
     DataBlock DataBlk,             desc="data for the block";
     NetDest Sharers,                   desc="Sharers for this block";
     NetDest Owner,                     desc="Owner of this block"; 
   }
 
-  external_type(DirectoryMemory) {
-    Entry lookup(Address);
-    bool isPresent(Address);
-  }
-
-  // to simulate detailed DRAM
-  external_type(MemoryControl, inport="yes", outport="yes") {
-
-  }
-
   // TBE entries for DMA requests
   structure(TBE, desc="TBE entries for outstanding DMA requests") {
     Address PhysicalAddress, desc="physical address";
@@ -115,21 +107,17 @@
 
   // ** OBJECTS **
 
-//  DirectoryMemory directory, constructor_hack="i";
-//  MemoryControl memBuffer, constructor_hack="i";
-
-  DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
-
-  MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
-
-
   TBETable TBEs, template_hack="<Directory_TBE>";
       
+  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
+    return static_cast(Entry, directory[addr]);
+  }
+
   State getState(Address addr) {
     if (TBEs.isPresent(addr)) {
       return TBEs[addr].TBEState;   
     } else if (directory.isPresent(addr)) {
-      return directory[addr].DirectoryState;
+      return getDirectoryEntry(addr).DirectoryState;
     } else {
       return State:I;
     }
@@ -145,14 +133,14 @@
     if (directory.isPresent(addr)) {
   
       if (state == State:I)  {
-        assert(directory[addr].Owner.count() == 0);
-        assert(directory[addr].Sharers.count() == 0);
+        assert(getDirectoryEntry(addr).Owner.count() == 0);
+        assert(getDirectoryEntry(addr).Sharers.count() == 0);
       } else if (state == State:M) {
-        assert(directory[addr].Owner.count() == 1);
-        assert(directory[addr].Sharers.count() == 0);
+        assert(getDirectoryEntry(addr).Owner.count() == 1);
+        assert(getDirectoryEntry(addr).Sharers.count() == 0);
       }
       
-      directory[addr].DirectoryState := state;
+      getDirectoryEntry(addr).DirectoryState := state;
     }
   }
 
@@ -296,7 +284,7 @@
         out_msg.OriginalRequestorMachId := in_msg.Requestor;
         out_msg.MessageSize := in_msg.MessageSize;
         out_msg.Prefetch := in_msg.Prefetch;
-        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
 
         DEBUG_EXPR(out_msg);
       }
@@ -321,7 +309,7 @@
 
   action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
     peek(responseNetwork_in, ResponseMsg) {
-      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
       DEBUG_EXPR(in_msg.Address);
       DEBUG_EXPR(in_msg.DataBlk);
     }
@@ -335,7 +323,7 @@
         out_msg.Sender := machineID;
         out_msg.OriginalRequestorMachId := machineID;
         out_msg.MessageSize := in_msg.MessageSize;
-        out_msg.DataBlk := directory[address].DataBlk;
+        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
         DEBUG_EXPR(out_msg);
       }
     }
@@ -361,7 +349,7 @@
     peek(dmaRequestQueue_in, DMARequestMsg) {
       //directory[in_msg.PhysicalAddress].DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);

-      directory[in_msg.PhysicalAddress].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
+      getDirectoryEntry(in_msg.PhysicalAddress).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
     }
   }
 
@@ -403,8 +391,8 @@
 
   action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
     peek(requestNetwork_in, RequestMsg) {
-      directory[address].Owner.clear();
-      directory[address].Owner.add(in_msg.Requestor);
+      getDirectoryEntry(address).Owner.clear();
+      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
     }
   }
 
@@ -415,7 +403,7 @@
       out_msg.Address := address;
       out_msg.Type := CoherenceResponseType:INV;
       out_msg.Sender := machineID;
-      out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
+      out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
       out_msg.MessageSize := MessageSizeType:Response_Control;
       }
     }
@@ -435,7 +423,7 @@
   }
 
   action(c_clearOwner, "c", desc="Clear the owner field") {
-    directory[address].Owner.clear();
+    getDirectoryEntry(address).Owner.clear();
   }
 
   action(v_allocateTBE, "v", desc="Allocate TBE") {
@@ -448,8 +436,8 @@
   }
 
   action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
-    //directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
-    directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
+    //getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
 
 
   }

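A note for anyone poking at the new script by hand: the sketch below shows the option fields that create_system() reads and how the L2 bank-select width is derived from them. FakeOptions and the values in it are made-up stand-ins for the normal command-line options; in a real run Ruby.create_system() in configs/ruby/Ruby.py does the dispatch on buildEnv['PROTOCOL'] as in the hunk above and passes the parsed options through.

    # Rough, self-contained sketch -- FakeOptions and its values are
    # hypothetical stand-ins for the parsed command-line options that
    # create_system(options, phys_mem, piobus, dma_devices) reads.
    import math

    class FakeOptions(object):
        num_cpus = 4           # one sequencer + L1I/L1D pair per cpu
        num_l2caches = 4       # number of L2 banks (L2Cache_Controllers)
        num_dirs = 1           # number of directory/memory controllers
        l1i_size = '32kB'
        l1i_assoc = 2
        l1d_size = '32kB'
        l1d_assoc = 2
        l2_size = '2MB'
        l2_assoc = 16

    options = FakeOptions()

    # Same derivation as in the script: the L1 controllers get
    # log2(num_l2caches) bank-select bits; together with the .sm default
    # l2_select_low_bit = 6, those bits of the block address pick an L2 bank.
    l2_select_num_bits = math.log(options.num_l2caches, 2)
    print("l2_select_num_bits = %d" % l2_select_num_bits)

    # With an M5 binary built for this protocol (PROTOCOL=MESI_CMP_directory),
    # the real call made by Ruby.py is:
    # (cpu_sequencers, dir_cntrls, all_cntrls) = \
    #     MESI_CMP_directory.create_system(options, phys_mem, piobus, dma_devices)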