# HG changeset patch # User Brad Beckmann <brad.beckm...@amd.com> # Date 1263536243 28800 # Node ID 2a756159e6b0844df0d0b198bce8ad654cb58d46 # Parent 207d8bc75eb96dbcb9ee0edd8abaa85ae165c9f4 ruby: Ruby changes required to use the python config system This patch includes the necessary changes to connect ruby objects using the python configuration system. Mainly it consists of removing unnecessary ruby object pointers and connecting the necessary object pointers using the generated param objects. This patch includes the slicc changes necessary to connect generated ruby objects together using the python configuration system.
diff -r 207d8bc75eb9 -r 2a756159e6b0 configs/example/memtest-ruby.py --- a/configs/example/memtest-ruby.py Thu Jan 14 22:17:23 2010 -0800 +++ b/configs/example/memtest-ruby.py Thu Jan 14 22:17:23 2010 -0800 @@ -102,8 +102,6 @@ # consistent with the NetDest list. Therefore the l1 controller nodes must be # listed before the directory nodes and directory nodes before dma nodes, etc. # - -# net_nodes = [] l1_cntrl_nodes = [] dir_cntrl_nodes = [] @@ -112,23 +110,43 @@ # controller constructors are called before the network constructor # for (i, cpu) in enumerate(cpus): - l1_cntrl = L1Cache_Controller() - cpu_seq = RubySequencer(controller = l1_cntrl, - icache = L1Cache(controller = l1_cntrl), - dcache = L1Cache(controller = l1_cntrl)) - cpu.controller = l1_cntrl - cpu.sequencer = cpu_seq + # + # First create the Ruby objects associated with this cpu + # Eventually this code should go in a python file specific to the + # MOESI_hammer protocol + # + + l1i_cache = L1Cache() + l1d_cache = L1Cache() + l2_cache = L2Cache() + + cpu_seq = RubySequencer(icache = l1i_cache, + dcache = l1d_cache, + funcmem_port = system.physmem.port) + + l1_cntrl = L1Cache_Controller(version = i, + sequencer = cpu_seq, + L1IcacheMemory = l1i_cache, + L1DcacheMemory = l1d_cache, + L2cacheMemory = l2_cache) + + dir_cntrl = Directory_Controller(version = i, + directory = RubyDirectoryMemory(), + memBuffer = RubyMemoryControl()) + + # + # As noted above: Two independent list are track to maintain the order of + # nodes/controllers assumed by the ruby network + # + l1_cntrl_nodes.append(l1_cntrl) + dir_cntrl_nodes.append(dir_cntrl) + + # + # Finally tie the memtester ports to the correct system ports + # cpu.test = cpu_seq.port - cpu_seq.funcmem_port = system.physmem.port cpu.functional = system.funcmem.port - dir_cntrl = Directory_Controller(version = i, - directory = RubyDirectoryMemory(), - memory_control = RubyMemoryControl()) - - # net_nodes += [l1_cntrl, dir_cntrl] - 
l1_cntrl_nodes.append(l1_cntrl) - dir_cntrl_nodes.append(dir_cntrl) # # Important: the topology constructor must be called before the network diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/protocol/MOESI_hammer-cache.sm --- a/src/mem/protocol/MOESI_hammer-cache.sm Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/protocol/MOESI_hammer-cache.sm Thu Jan 14 22:17:23 2010 -0800 @@ -34,7 +34,11 @@ */ machine(L1Cache, "AMD Hammer-like protocol") -: int cache_response_latency = 12, +: Sequencer * sequencer, + CacheMemory * L1IcacheMemory, + CacheMemory * L1DcacheMemory, + CacheMemory * L2cacheMemory, + int cache_response_latency = 12, int issue_latency = 2 { @@ -104,7 +108,6 @@ // STRUCTURE DEFINITIONS MessageBuffer mandatoryQueue, ordered="false"; - Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])'; // CacheEntry structure(Entry, desc="...", interface="AbstractCacheEntry") { @@ -122,17 +125,6 @@ bool Sharers, desc="On a GetS, did we find any other sharers in the system"; } - external_type(CacheMemory) { - bool cacheAvail(Address); - Address cacheProbe(Address); - void allocate(Address, Entry); - void deallocate(Address); - Entry lookup(Address); - void changePermission(Address, AccessPermission); - bool isTagPresent(Address); - void profileMiss(CacheMsg); - } - external_type(TBETable) { TBE lookup(Address); void allocate(Address); @@ -141,17 +133,14 @@ } TBETable TBEs, template_hack="<L1Cache_TBE>"; - CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])'; - CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])'; - CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["l2cache"])'; Entry getCacheEntry(Address addr), return_by_ref="yes" { if (L2cacheMemory.isTagPresent(addr)) { - return L2cacheMemory[addr]; + return static_cast(Entry, L2cacheMemory[addr]); } else if (L1DcacheMemory.isTagPresent(addr)) { - return L1DcacheMemory[addr]; + return static_cast(Entry, L1DcacheMemory[addr]); } else { - return 
L1IcacheMemory[addr]; + return static_cast(Entry, L1IcacheMemory[addr]); } } @@ -670,17 +659,21 @@ action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") { if (L1DcacheMemory.isTagPresent(address)) { - L2cacheMemory[address] := L1DcacheMemory[address]; + static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty; + static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk; } else { - L2cacheMemory[address] := L1IcacheMemory[address]; + static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty; + static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk; } } action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") { if (L1DcacheMemory.isTagPresent(address)) { - L1DcacheMemory[address] := L2cacheMemory[address]; + static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty; + static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk; } else { - L1IcacheMemory[address] := L2cacheMemory[address]; + static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty; + static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk; } } diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/protocol/MOESI_hammer-dir.sm --- a/src/mem/protocol/MOESI_hammer-dir.sm Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/protocol/MOESI_hammer-dir.sm Thu Jan 14 22:17:23 2010 -0800 @@ -34,7 +34,9 @@ */ machine(Directory, "AMD Hammer-like protocol") -: int memory_controller_latency = 12 +: DirectoryMemory * directory, + MemoryControl * memBuffer, + int memory_controller_latency = 12 { MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false"; @@ -108,20 +110,11 @@ // TYPES // 
DirectoryEntry - structure(Entry, desc="...") { + structure(Entry, desc="...", interface="AbstractEntry") { State DirectoryState, desc="Directory state"; DataBlock DataBlk, desc="data for the block"; } - external_type(DirectoryMemory) { - Entry lookup(Address); - bool isPresent(Address); - } - - external_type(MemoryControl, inport="yes", outport="yes") { - - } - // TBE entries for DMA requests structure(TBE, desc="TBE entries for outstanding DMA requests") { Address PhysicalAddress, desc="physical address"; @@ -145,17 +138,17 @@ // ** OBJECTS ** - DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])'; + TBETable TBEs, template_hack="<Directory_TBE>"; - MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])'; - - TBETable TBEs, template_hack="<Directory_TBE>"; + Entry getDirectoryEntry(Address addr), return_by_ref="yes" { + return static_cast(Entry, directory[addr]); + } State getState(Address addr) { if (TBEs.isPresent(addr)) { return TBEs[addr].TBEState; } else { - return directory[addr].DirectoryState; + return getDirectoryEntry(addr).DirectoryState; } } @@ -163,7 +156,7 @@ if (TBEs.isPresent(addr)) { TBEs[addr].TBEState := state; } - directory[addr].DirectoryState := state; + getDirectoryEntry(addr).DirectoryState := state; } MessageBuffer triggerQueue, ordered="true"; @@ -454,7 +447,7 @@ out_msg.Sender := machineID; out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; - out_msg.DataBlk := directory[address].DataBlk; + out_msg.DataBlk := getDirectoryEntry(address).DataBlk; DEBUG_EXPR(out_msg); } } @@ -468,7 +461,7 @@ out_msg.Sender := machineID; out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; - out_msg.DataBlk := directory[address].DataBlk; + out_msg.DataBlk := getDirectoryEntry(address).DataBlk; DEBUG_EXPR(out_msg); } } @@ -564,15 +557,15 @@ peek(unblockNetwork_in, ResponseMsg) { 
assert(in_msg.Dirty); assert(in_msg.MessageSize == MessageSizeType:Writeback_Data); - directory[address].DataBlk := in_msg.DataBlk; + getDirectoryEntry(address).DataBlk := in_msg.DataBlk; DEBUG_EXPR(in_msg.Address); DEBUG_EXPR(in_msg.DataBlk); } } action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") { - directory[address].DataBlk := TBEs[address].DataBlk; - directory[address].DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len); + getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk; + getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len); } action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") { @@ -610,7 +603,7 @@ // implementation. We include the data in the "dataless" // message so we can assert the clean data matches the datablock // in memory - assert(directory[address].DataBlk == in_msg.DataBlk); + assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk); } } diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/protocol/RubySlicc_Types.sm --- a/src/mem/protocol/RubySlicc_Types.sm Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/protocol/RubySlicc_Types.sm Thu Jan 14 22:17:23 2010 -0800 @@ -98,6 +98,30 @@ void profileNack(Address, int, int, uint64); } +external_type(AbstractEntry, primitive="yes"); + +external_type(DirectoryMemory) { + AbstractEntry lookup(Address); + bool isPresent(Address); +} + +external_type(AbstractCacheEntry, primitive="yes"); + +external_type(CacheMemory) { + bool cacheAvail(Address); + Address cacheProbe(Address); + void allocate(Address, AbstractCacheEntry); + void deallocate(Address); + AbstractCacheEntry lookup(Address); + void changePermission(Address, AccessPermission); + bool isTagPresent(Address); + void profileMiss(CacheMsg); +} + +external_type(MemoryControl, inport="yes", outport="yes") { + +} + external_type(TimerTable, inport="yes") 
{ bool isReady(); Address readyAddress(); @@ -119,3 +143,5 @@ } + + diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/SConscript --- a/src/mem/ruby/SConscript Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/SConscript Thu Jan 14 22:17:23 2010 -0800 @@ -95,6 +95,7 @@ target = generated_dir.File(basename(source)) env.Command(target, source, MakeIncludeAction) +MakeInclude('slicc_interface/AbstractEntry.hh') MakeInclude('slicc_interface/AbstractCacheEntry.hh') MakeInclude('slicc_interface/AbstractProtocol.hh') MakeInclude('slicc_interface/Message.hh') diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/slicc_interface/AbstractCacheEntry.hh --- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh Thu Jan 14 22:17:23 2010 -0800 @@ -40,10 +40,11 @@ #include "mem/ruby/common/Global.hh" #include "mem/ruby/common/Address.hh" #include "mem/protocol/AccessPermission.hh" +#include "mem/ruby/slicc_interface/AbstractEntry.hh" class DataBlock; -class AbstractCacheEntry { +class AbstractCacheEntry : public AbstractEntry { public: // Constructors AbstractCacheEntry(); @@ -51,15 +52,6 @@ // Destructor, prevent it from instantiation virtual ~AbstractCacheEntry() = 0; - // Public Methods - - // The methods below are those called by ruby runtime, add when it is - // absolutely necessary and should all be virtual function. - virtual DataBlock& getDataBlk() = 0; - - - virtual void print(ostream& out) const = 0; - // Data Members (m_ prefix) Address m_Address; // Address of this block, required by CacheMemory Time m_LastRef; // Last time this block was referenced, required by CacheMemory diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/slicc_interface/AbstractEntry.cc --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/mem/ruby/slicc_interface/AbstractEntry.cc Thu Jan 14 22:17:23 2010 -0800 @@ -0,0 +1,38 @@ + +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "mem/ruby/slicc_interface/AbstractEntry.hh" + +// Must define constructor and destructor in subclasses +AbstractEntry::AbstractEntry() { +} + +AbstractEntry::~AbstractEntry() { +} + diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/slicc_interface/AbstractEntry.hh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/mem/ruby/slicc_interface/AbstractEntry.hh Thu Jan 14 22:17:23 2010 -0800 @@ -0,0 +1,73 @@ + +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. 
Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef AbstractEntry_H +#define AbstractEntry_H + +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/common/Address.hh" +#include "mem/protocol/AccessPermission.hh" + +class DataBlock; + +class AbstractEntry { +public: + // Constructors + AbstractEntry(); + + // Destructor, prevent it from instantiation + virtual ~AbstractEntry() = 0; + + // Public Methods + + // The methods below are those called by ruby runtime, add when it is + // absolutely necessary and should all be virtual function. + virtual DataBlock& getDataBlk() = 0; + + + virtual void print(ostream& out) const = 0; + +}; + +// Output operator declaration +ostream& operator<<(ostream& out, const AbstractEntry& obj); + +// ******************* Definitions ******************* + +// Output operator definition +extern inline +ostream& operator<<(ostream& out, const AbstractEntry& obj) +{ + obj.print(out); + out << flush; + return out; +} + +#endif //AbstractEntry_H + diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/slicc_interface/SConscript --- a/src/mem/ruby/slicc_interface/SConscript Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/slicc_interface/SConscript Thu Jan 14 22:17:23 2010 -0800 @@ -35,6 +35,7 @@ SimObject('Controller.py') +Source('AbstractEntry.cc') Source('AbstractCacheEntry.cc') Source('RubySlicc_Profiler_interface.cc') Source('RubySlicc_ComponentMapping.cc') diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/Cache.py --- a/src/mem/ruby/system/Cache.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/Cache.py Thu Jan 14 22:17:23 2010 -0800 @@ -9,4 +9,3 @@ latency = Param.Int(""); assoc = Param.Int(""); replacement_policy = Param.String("PSEUDO_LRU", ""); - controller = Param.RubyController(""); diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/CacheMemory.cc --- a/src/mem/ruby/system/CacheMemory.cc Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/CacheMemory.cc Thu Jan 14 22:17:23 2010 -0800 @@ -31,9 +31,6 @@ int 
CacheMemory::m_num_last_level_caches = 0; MachineType CacheMemory::m_last_level_machine_type = MachineType_FIRST; -// Output operator declaration -//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj); - // ******************* Definitions ******************* // Output operator definition @@ -57,29 +54,27 @@ CacheMemory::CacheMemory(const Params *p) : SimObject(p) { - int cache_size = p->size; + m_cache_size = p->size; m_latency = p->latency; m_cache_assoc = p->assoc; - string policy = p->replacement_policy; - m_controller = p->controller; - - m_cache_num_sets = (cache_size / m_cache_assoc) / RubySystem::getBlockSizeBytes(); - assert(m_cache_num_sets > 1); - m_cache_num_set_bits = log_int(m_cache_num_sets); - assert(m_cache_num_set_bits > 0); - - if(policy == "PSEUDO_LRU") - m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc); - else if (policy == "LRU") - m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc); - else - assert(false); - + m_policy = p->replacement_policy; } void CacheMemory::init() { + m_cache_num_sets = (m_cache_size / m_cache_assoc) / RubySystem::getBlockSizeBytes(); + assert(m_cache_num_sets > 1); + m_cache_num_set_bits = log_int(m_cache_num_sets); + assert(m_cache_num_set_bits > 0); + + if(m_policy == "PSEUDO_LRU") + m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc); + else if (m_policy == "LRU") + m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc); + else + assert(false); + m_num_last_level_caches = MachineType_base_count(MachineType_FIRST); #if 0 @@ -127,8 +122,6 @@ void CacheMemory::printConfig(ostream& out) { out << "Cache config: " << m_cache_name << endl; - if (m_controller != NULL) - out << " controller: " << m_controller->getName() << endl; out << " cache_associativity: " << m_cache_assoc << endl; out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl; const int cache_num_sets = 1 << m_cache_num_set_bits; diff -r 
207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/CacheMemory.hh --- a/src/mem/ruby/system/CacheMemory.hh Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/CacheMemory.hh Thu Jan 14 22:17:23 2010 -0800 @@ -71,10 +71,6 @@ // Destructor ~CacheMemory(); - // factory - // static CacheMemory* createCache(int level, int num, char split_type, AbstractCacheEntry* (*entry_factory)()); - // static CacheMemory* getCache(int cache_id); - static int numberOfLastLevelCaches(); // Public Methods @@ -154,7 +150,6 @@ private: const string m_cache_name; - AbstractController* m_controller; int m_latency; // Data Members (m_prefix) @@ -170,6 +165,8 @@ CacheProfiler* m_profiler_ptr; + int m_cache_size; + string m_policy; int m_cache_num_sets; int m_cache_num_set_bits; int m_cache_assoc; diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/DirectoryMemory.cc --- a/src/mem/ruby/system/DirectoryMemory.cc Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/DirectoryMemory.cc Thu Jan 14 22:17:23 2010 -0800 @@ -39,7 +39,6 @@ #include "mem/ruby/system/System.hh" #include "mem/ruby/system/DirectoryMemory.hh" #include "mem/ruby/slicc_interface/RubySlicc_Util.hh" -#include "mem/ruby/slicc_interface/AbstractController.hh" #include "mem/gems_common/util.hh" int DirectoryMemory::m_num_directories = 0; @@ -52,7 +51,6 @@ m_version = p->version; m_size_bytes = p->size_mb * static_cast<uint64>(1<<20); m_size_bits = log_int(m_size_bytes); - m_controller = p->controller; } void DirectoryMemory::init() @@ -85,7 +83,6 @@ void DirectoryMemory::printConfig(ostream& out) const { out << "DirectoryMemory module config: " << m_name << endl; - out << " controller: " << m_controller->getName() << endl; out << " version: " << m_version << endl; out << " memory_bits: " << m_size_bits << endl; out << " memory_size_bytes: " << m_size_bytes << endl; diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/DirectoryMemory.hh --- a/src/mem/ruby/system/DirectoryMemory.hh Thu Jan 14 22:17:23 2010 -0800 +++ 
b/src/mem/ruby/system/DirectoryMemory.hh Thu Jan 14 22:17:23 2010 -0800 @@ -46,8 +46,6 @@ #include "sim/sim_object.hh" #include "params/RubyDirectoryMemory.hh" -class AbstractController; - class DirectoryMemory : public SimObject { public: // Constructors @@ -83,7 +81,6 @@ private: const string m_name; - AbstractController* m_controller; // Data Members (m_ prefix) Directory_Entry **m_entries; // int m_size; // # of memory module blocks this directory is responsible for diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/DirectoryMemory.py --- a/src/mem/ruby/system/DirectoryMemory.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/DirectoryMemory.py Thu Jan 14 22:17:23 2010 -0800 @@ -7,4 +7,3 @@ cxx_class = 'DirectoryMemory' version = Param.Int(0, "") size_mb = Param.Int(1024, "") - controller = Param.RubyController(Parent.any, "") diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/RubyPort.cc --- a/src/mem/ruby/system/RubyPort.cc Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/RubyPort.cc Thu Jan 14 22:17:23 2010 -0800 @@ -2,27 +2,290 @@ #include "mem/ruby/system/RubyPort.hh" #include "mem/ruby/slicc_interface/AbstractController.hh" -//void (*RubyPort::m_hit_callback)(int64_t) = NULL; uint16_t RubyPort::m_num_ports = 0; +RubyPort::RequestMap RubyPort::pending_cpu_requests; + RubyPort::RubyPort(const Params *p) - : MemObject(p) + : MemObject(p), + funcMemPort(csprintf("%s-funcmem_port", name()), this) { m_version = p->version; assert(m_version != -1); - m_controller = p->controller; - assert(m_controller != NULL); - m_mandatory_q_ptr = m_controller->getMandatoryQueue(); + m_controller = NULL; + m_mandatory_q_ptr = NULL; m_port_id = m_num_ports++; m_request_cnt = 0; - m_hit_callback = NULL; + m_hit_callback = ruby_hit_callback; + pio_port = NULL; assert(m_num_ports <= 2048); // see below for reason } +void RubyPort::init() +{ + assert(m_controller != NULL); + m_mandatory_q_ptr = m_controller->getMandatoryQueue(); +} + Port * 
RubyPort::getPort(const std::string &if_name, int idx) { + if (if_name == "port") { + return new M5Port(csprintf("%s-port%d", name(), idx), this); + } else if (if_name == "pio_port") { + // + // ensure there is only one pio port + // + assert(pio_port == NULL); + + pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), + this); + + return pio_port; + } else if (if_name == "funcmem_port") { + return &funcMemPort; + } return NULL; } + +RubyPort::PioPort::PioPort(const std::string &_name, + RubyPort *_port) + : SimpleTimingPort(_name, _port) +{ + DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name); + ruby_port = _port; +} + +RubyPort::M5Port::M5Port(const std::string &_name, + RubyPort *_port) + : SimpleTimingPort(_name, _port) +{ + DPRINTF(Ruby, "creating port from ruby sequcner to cpu %s\n", _name); + ruby_port = _port; +} + +Tick +RubyPort::PioPort::recvAtomic(PacketPtr pkt) +{ + panic("RubyPort::PioPort::recvAtomic() not implemented!\n"); + return 0; +} + + +Tick +RubyPort::M5Port::recvAtomic(PacketPtr pkt) +{ + panic("RubyPort::M5Port::recvAtomic() not implemented!\n"); + return 0; +} + + +bool +RubyPort::PioPort::recvTiming(PacketPtr pkt) +{ + // + // In FS mode, ruby memory will receive pio responses from devices and + // it must forward these responses back to the particular CPU. 
+ // + DPRINTF(MemoryAccess, + "Pio response for address %#x\n", + pkt->getAddr()); + + assert(pkt->isResponse()); + + // + // First we must retrieve the request port from the sender State + // + RubyPort::SenderState *senderState = + safe_cast<RubyPort::SenderState *>(pkt->senderState); + M5Port *port = senderState->port; + assert(port != NULL); + + // pop the sender state from the packet + pkt->senderState = senderState->saved; + delete senderState; + + port->sendTiming(pkt); + + return true; +} + +bool +RubyPort::M5Port::recvTiming(PacketPtr pkt) +{ + DPRINTF(MemoryAccess, + "Timing access caught for address %#x\n", + pkt->getAddr()); + + //dsm: based on SimpleTimingPort::recvTiming(pkt); + + // + // After checking for pio responses, the remainder of packets + // received by ruby should only be M5 requests, which should never + // get nacked. There used to be code to hanldle nacks here, but + // I'm pretty sure it didn't work correctly with the drain code, + // so that would need to be fixed if we ever added it back. + // + assert(pkt->isRequest()); + + if (pkt->memInhibitAsserted()) { + warn("memInhibitAsserted???"); + // snooper will supply based on copy of packet + // still target's responsibility to delete packet + delete pkt; + return true; + } + + // + // Check for pio requests and directly send them to the dedicated + // pio port. + // + if (!isPhysMemAddress(pkt->getAddr())) { + assert(ruby_port->pio_port != NULL); + + // + // Save the port in the sender state object to be used later to + // route the response + // + pkt->senderState = new SenderState(this, pkt->senderState); + + return ruby_port->pio_port->sendTiming(pkt); + } + + // + // For DMA and CPU requests, translate them to ruby requests before + // sending them to our assigned ruby port. 
+ // + RubyRequestType type = RubyRequestType_NULL; + Addr pc = 0; + if (pkt->isRead()) { + if (pkt->req->isInstFetch()) { + type = RubyRequestType_IFETCH; + pc = pkt->req->getPC(); + } else { + type = RubyRequestType_LD; + } + } else if (pkt->isWrite()) { + type = RubyRequestType_ST; + } else if (pkt->isReadWrite()) { + // type = RubyRequestType_RMW; + } + + RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(), + pkt->getSize(), pc, type, + RubyAccessMode_Supervisor); + + // Submit the ruby request + int64_t req_id = ruby_port->makeRequest(ruby_request); + if (req_id == -1) { + return false; + } + + // Save the request for the callback + RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this); + + return true; +} + +void +RubyPort::ruby_hit_callback(int64_t req_id) +{ + // + // Note: This single fuction can be called by cpu and dma ports, + // as well as the functional port. + // + RequestMap::iterator i = pending_cpu_requests.find(req_id); + if (i == pending_cpu_requests.end()) + panic("could not find pending request %d\n", req_id); + + RequestCookie *cookie = i->second; + pending_cpu_requests.erase(i); + + Packet *pkt = cookie->pkt; + M5Port *port = cookie->m5Port; + delete cookie; + + port->hitCallback(pkt); +} + +void +RubyPort::M5Port::hitCallback(PacketPtr pkt) +{ + + bool needsResponse = pkt->needsResponse(); + + DPRINTF(MemoryAccess, "Hit callback needs response %d\n", + needsResponse); + + ruby_port->funcMemPort.sendFunctional(pkt); + + // turn packet around to go back to requester if response expected + if (needsResponse) { + // recvAtomic() should already have turned packet into + // atomic response + assert(pkt->isResponse()); + DPRINTF(MemoryAccess, "Sending packet back over port\n"); + sendTiming(pkt); + } else { + delete pkt; + } + DPRINTF(MemoryAccess, "Hit callback done!\n"); +} + +bool +RubyPort::M5Port::sendTiming(PacketPtr pkt) +{ + schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0 + return true; +} + 
+bool +RubyPort::PioPort::sendTiming(PacketPtr pkt) +{ + schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0 + return true; +} + +bool +RubyPort::M5Port::isPhysMemAddress(Addr addr) +{ + AddrRangeList physMemAddrList; + bool snoop = false; + ruby_port->funcMemPort.getPeerAddressRanges(physMemAddrList, snoop); + for(AddrRangeIter iter = physMemAddrList.begin(); + iter != physMemAddrList.end(); + iter++) { + if (addr >= iter->start && addr <= iter->end) { + DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n", + iter->start, iter->end); + return true; + } + } + assert(isPioAddress(addr)); + return false; +} + +bool +RubyPort::M5Port::isPioAddress(Addr addr) +{ + AddrRangeList pioAddrList; + bool snoop = false; + if (ruby_port->pio_port == NULL) { + return false; + } + + ruby_port->pio_port->getPeerAddressRanges(pioAddrList, snoop); + for(AddrRangeIter iter = pioAddrList.begin(); + iter != pioAddrList.end(); + iter++) { + if (addr >= iter->start && addr <= iter->end) { + DPRINTF(MemoryAccess, "Pio request found in %#llx - %#llx range\n", + iter->start, iter->end); + return true; + } + } + return false; +} + diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/RubyPort.hh --- a/src/mem/ruby/system/RubyPort.hh Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/RubyPort.hh Thu Jan 14 22:17:23 2010 -0800 @@ -17,18 +17,81 @@ class RubyPort : public MemObject { public: + + class M5Port : public SimpleTimingPort + { + + RubyPort *ruby_port; + + public: + M5Port(const std::string &_name, + RubyPort *_port); + bool sendTiming(PacketPtr pkt); + void hitCallback(PacketPtr pkt); + + protected: + virtual bool recvTiming(PacketPtr pkt); + virtual Tick recvAtomic(PacketPtr pkt); + + private: + bool isPioAddress(Addr addr); + bool isPhysMemAddress(Addr addr); + }; + + friend class M5Port; + + class PioPort : public SimpleTimingPort + { + + RubyPort *ruby_port; + + public: + PioPort(const std::string &_name, + RubyPort *_port); + bool 
sendTiming(PacketPtr pkt); + + protected: + virtual bool recvTiming(PacketPtr pkt); + virtual Tick recvAtomic(PacketPtr pkt); + }; + + friend class PioPort; + + struct SenderState : public Packet::SenderState + { + M5Port* port; + Packet::SenderState *saved; + + SenderState(M5Port* _port, + Packet::SenderState *sender_state = NULL) + : port(_port), saved(sender_state) + {} + }; + typedef RubyPortParams Params; RubyPort(const Params *p); - virtual ~RubyPort() {} + virtual ~RubyPort() {} + + void init(); Port *getPort(const std::string &if_name, int idx); - virtual int64_t makeRequest(const RubyRequest & request) = 0; + virtual int64_t makeRequest(const RubyRequest & request) = 0; - void registerHitCallback(void (*hit_callback)(int64_t request_id)) { - assert(m_hit_callback == NULL); // can't assign hit_callback twice - m_hit_callback = hit_callback; - } + void registerHitCallback(void (*hit_callback)(int64_t request_id)) { + // + // Can't assign hit_callback twice and by default it is set to the + // RubyPort's default callback function. + // + assert(m_hit_callback == ruby_hit_callback); + m_hit_callback = hit_callback; + } + + // + // Called by the controller to give the sequencer a pointer. + // A pointer to the controller is needed for atomic support. 
+ // + void setController(AbstractController* _cntrl) { m_controller = _cntrl; } protected: const string m_name; @@ -58,11 +121,26 @@ int m_version; AbstractController* m_controller; MessageBuffer* m_mandatory_q_ptr; + PioPort* pio_port; private: static uint16_t m_num_ports; uint16_t m_port_id; uint64_t m_request_cnt; + + struct RequestCookie { + Packet *pkt; + M5Port *m5Port; + RequestCookie(Packet *p, M5Port *m5p) + : pkt(p), m5Port(m5p) + {} + }; + + typedef std::map<int64_t, RequestCookie*> RequestMap; + static RequestMap pending_cpu_requests; + static void ruby_hit_callback(int64_t req_id); + + FunctionalPort funcMemPort; }; #endif diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/Sequencer.hh --- a/src/mem/ruby/system/Sequencer.hh Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/Sequencer.hh Thu Jan 14 22:17:23 2010 -0800 @@ -117,9 +117,6 @@ CacheMemory* m_dataCache_ptr; CacheMemory* m_instCache_ptr; - // indicates what processor on the chip this sequencer is associated with - int m_controller_type; - Map<Address, SequencerRequest*> m_writeRequestTable; Map<Address, SequencerRequest*> m_readRequestTable; // Global outstanding request count, across all request tables diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/ruby/system/Sequencer.py --- a/src/mem/ruby/system/Sequencer.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/ruby/system/Sequencer.py Thu Jan 14 22:17:23 2010 -0800 @@ -1,12 +1,13 @@ from m5.params import * +from m5.proxy import * from MemObject import MemObject class RubyPort(MemObject): type = 'RubyPort' abstract = True port = VectorPort("M5 port") - controller = Param.RubyController("") version = Param.Int(0, "") + pio_port = Port("Ruby_pio_port") class RubySequencer(RubyPort): type = 'RubySequencer' diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/ast/FormalParamAST.py --- a/src/mem/slicc/ast/FormalParamAST.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/slicc/ast/FormalParamAST.py Thu Jan 14 22:17:23 2010 -0800 @@ -29,11 +29,12 @@ 
from slicc.symbols import Var class FormalParamAST(AST): - def __init__(self, slicc, type_ast, ident, default = None): + def __init__(self, slicc, type_ast, ident, default = None, pointer = False): super(FormalParamAST, self).__init__(slicc) self.type_ast = type_ast self.ident = ident self.default = default + self.pointer = pointer def __repr__(self): return "[FormalParamAST: %s]" % self.ident diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/ast/MethodCallExprAST.py --- a/src/mem/slicc/ast/MethodCallExprAST.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/slicc/ast/MethodCallExprAST.py Thu Jan 14 22:17:23 2010 -0800 @@ -68,7 +68,8 @@ for actual_type, expected_type in \ zip(paramTypes, obj_type.methods[methodId].param_types): - if actual_type != expected_type: + if actual_type != expected_type and \ + str(actual_type["interface"]) != str(expected_type): self.error("Type mismatch: expected: %s actual: %s", expected_type, actual_type) @@ -97,9 +98,48 @@ methodId = obj_type.methodId(self.proc_name, paramTypes) prefix = "" + implements_interface = False if methodId not in obj_type.methods: - self.error("Invalid method call: Type '%s' does not have a method '%s'", - obj_type, methodId) + # + # The initial method check has failed, but before generating an + # error we must check whether any of the paramTypes implement + # an interface. If so, we must check if the method ids using + # the inherited types exist. + # + # This code is a temporary fix and only checks for the methodId + # where all paramTypes are converted to their inherited type. The + # right way to do this is to replace slicc's simple string + # comparison for determining the correct overloaded method, with a + # more robust param by param check. 
+ # + implemented_paramTypes = [] + for paramType in paramTypes: + implemented_paramType = paramType + if paramType.isInterface: + implements_interface = True + implemented_paramType.abstract_ident = paramType["interface"] + else: + implemented_paramType.abstract_ident = paramType.c_ident + + implemented_paramTypes.append(implemented_paramType) + + if implements_interface: + implementedMethodId = obj_type.methodIdAbstract(self.proc_name, + implemented_paramTypes) + else: + implementedMethodId = "" + + if implementedMethodId not in obj_type.methods: + self.error("Invalid method call: " \ + "Type '%s' does not have a method '%s' nor '%s'", + obj_type, methodId, implementedMethodId) + else: + # + # Replace the methodId with the implementedMethodId found in + # the method list. + # + methodId = implementedMethodId + return_type = obj_type.methods[methodId].return_type if return_type.isInterface: prefix = "static_cast<%s &>" % return_type.c_ident diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/ast/StaticCastAST.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/mem/slicc/ast/StaticCastAST.py Thu Jan 14 22:17:23 2010 -0800 @@ -0,0 +1,54 @@ +# Copyright (c) 2009 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from slicc.ast.ExprAST import ExprAST + +class StaticCastAST(ExprAST): + def __init__(self, slicc, type_ast, expr_ast): + super(StaticCastAST, self).__init__(slicc) + + self.type_ast = type_ast + self.expr_ast = expr_ast + + def __repr__(self): + return "[StaticCastAST: %r]" % self.expr_ast + + def generate(self, code): + actual_type, ecode = self.expr_ast.inline(True) + code('static_cast<${{self.type_ast.type.c_ident}} &>($ecode)') + + if not "interface" in self.type_ast.type: + self.expr_ast.error("static cast only permitted for those types " \ + "that inherit an interface") + + # The interface type should match + if str(actual_type) != str(self.type_ast.type["interface"]): + self.expr_ast.error("static cast mismatch, type is '%s', " \ + "but inherited type is '%s'", + actual_type, self.type_ast.type["interface"]) + + return self.type_ast.type + diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/ast/__init__.py --- a/src/mem/slicc/ast/__init__.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/slicc/ast/__init__.py Thu Jan 14 22:17:23 2010 -0800 @@ -59,6 +59,7 @@ from slicc.ast.ReturnStatementAST import * from slicc.ast.StatementAST import * from slicc.ast.StatementListAST 
import * +from slicc.ast.StaticCastAST import * from slicc.ast.TransitionDeclAST import * from slicc.ast.TypeAST import * from slicc.ast.TypeDeclAST import * diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/parser.py --- a/src/mem/slicc/parser.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/slicc/parser.py Thu Jan 14 22:17:23 2010 -0800 @@ -154,6 +154,7 @@ 'copy_head' : 'COPY_HEAD', 'check_allocate' : 'CHECK_ALLOCATE', 'check_stop_slots' : 'CHECK_STOP_SLOTS', + 'static_cast' : 'STATIC_CAST', 'if' : 'IF', 'else' : 'ELSE', 'return' : 'RETURN', @@ -416,6 +417,10 @@ "param : type ident" p[0] = ast.FormalParamAST(self, p[1], p[2]) + def p_param__pointer(self, p): + "param : type STAR ident" + p[0] = ast.FormalParamAST(self, p[1], p[3], None, True) + def p_param__default(self, p): "param : type ident '=' NUMBER" p[0] = ast.FormalParamAST(self, p[1], p[2], p[4]) @@ -531,6 +536,10 @@ "statement : CHECK_STOP_SLOTS '(' var ',' STRING ',' STRING ')' SEMI" p[0] = ast.CheckStopStatementAST(self, p[3], p[5], p[7]) + def p_statement__static_cast(self, p): + "aexpr : STATIC_CAST '(' type ',' expr ')'" + p[0] = ast.StaticCastAST(self, p[3], p[5]) + def p_statement__return(self, p): "statement : RETURN expr SEMI" p[0] = ast.ReturnStatementAST(self, p[2]) diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/symbols/StateMachine.py --- a/src/mem/slicc/symbols/StateMachine.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/slicc/symbols/StateMachine.py Thu Jan 14 22:17:23 2010 -0800 @@ -31,14 +31,27 @@ from slicc.symbols.Var import Var import slicc.generate.html as html +python_class_map = {"int": "Int", + "string": "String", + "bool": "Bool", + "CacheMemory": "RubyCache", + "Sequencer": "RubySequencer", + "DirectoryMemory": "RubyDirectoryMemory", + "MemoryControl": "RubyMemoryControl", + } + class StateMachine(Symbol): def __init__(self, symtab, ident, location, pairs, config_parameters): super(StateMachine, self).__init__(symtab, ident, location, pairs) self.table = None 
self.config_parameters = config_parameters for param in config_parameters: - var = Var(symtab, param.name, location, param.type_ast.type, - "m_%s" % param.name, {}, self) + if param.pointer: + var = Var(symtab, param.name, location, param.type_ast.type, + "(*m_%s_ptr)" % param.name, {}, self) + else: + var = Var(symtab, param.name, location, param.type_ast.type, + "m_%s" % param.name, {}, self) self.symtab.registerSym(param.name, var) self.states = orderdict() @@ -153,7 +166,13 @@ dflt_str = '' if param.default is not None: dflt_str = str(param.default) + ', ' - code('${{param.name}} = Param.Int(${dflt_str}"")') + if python_class_map.has_key(param.type_ast.type.c_ident): + python_type = python_class_map[param.type_ast.type.c_ident] + code('${{param.name}} = Param.${{python_type}}(${dflt_str}"")') + else: + self.error("Unknown c++ to python class conversion for c++ " \ + "type: '%s'. Please update the python_class_map " \ + "in StateMachine.py", param.type_ast.type.c_ident) code.dedent() code.write(path, '%s.py' % py_ident) @@ -224,7 +243,10 @@ code.indent() # added by SS for param in self.config_parameters: - code('int m_${{param.ident}};') + if param.pointer: + code('${{param.type_ast.type}}* m_${{param.ident}}_ptr;') + else: + code('${{param.type_ast.type}} m_${{param.ident}};') if self.ident == "L1Cache": code(''' @@ -338,10 +360,33 @@ m_number_of_TBEs = p->number_of_TBEs; ''') code.indent() + + # + # After initializing the universal machine parameters, initialize the + # machine's config parameters. Also determine if these configuration + # params include a sequencer. This information will be used later for + # connecting the sequencer back to the L1 cache controller. 
+ # + contains_sequencer = False for param in self.config_parameters: - code('m_${{param.name}} = p->${{param.name}};') + if param.name == "sequencer": + contains_sequencer = True + if param.pointer: + code('m_${{param.name}}_ptr = p->${{param.name}};') + else: + code('m_${{param.name}} = p->${{param.name}};') + + # + # For the l1 cache controller, add the special atomic support which + # includes passing the sequencer a pointer to the controller. + # if self.ident == "L1Cache": + if not contains_sequencer: + self.error("The L1Cache controller must include the sequencer " \ + "configuration parameter") + code(''' +m_sequencer_ptr->setController(this); servicing_atomic = 0; started_receiving_writes = false; locked_read_request1 = Address(-1); diff -r 207d8bc75eb9 -r 2a756159e6b0 src/mem/slicc/symbols/Type.py --- a/src/mem/slicc/symbols/Type.py Thu Jan 14 22:17:23 2010 -0800 +++ b/src/mem/slicc/symbols/Type.py Thu Jan 14 22:17:23 2010 -0800 @@ -51,6 +51,7 @@ def __init__(self, table, ident, location, pairs, machine=None): super(Type, self).__init__(table, ident, location, pairs) self.c_ident = ident + self.abstract_ident = "" if machine: if self.isExternal or self.isPrimitive: if "external_name" in self: @@ -154,6 +155,9 @@ def methodId(self, name, param_type_vec): return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ]) + def methodIdAbstract(self, name, param_type_vec): + return '_'.join([name] + [ pt.abstract_ident for pt in param_type_vec ]) + def methodAdd(self, name, return_type, param_type_vec): ident = self.methodId(name, param_type_vec) if ident in self.methods: _______________________________________________ m5-dev mailing list m5-dev@m5sim.org http://m5sim.org/mailman/listinfo/m5-dev