It appears that some targets may need more than the two "standard" execution modes. For example, PowerPC 64 has three execution modes: hypervisor, supervisor and problem state. Another example is Alpha, which has four execution modes (even though Linux seems to use only two of them).
My proposal is to increase the number of tlb_table entries (one TLB per execution mode), with the count selected at compile time in order to keep good performance for the other emulated targets that do not need this feature. You'll find attached a short patch implementing it. With this patch, one would just have to properly define CPU_MEM_INDEX and the is_user flags for those targets (a hypothetical sketch of such a definition follows below). Please comment.

--
J. Mayer <[EMAIL PROTECTED]>
Never organized
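For illustration only, a minimal sketch of what such a per-target definition might look like for PowerPC 64, assuming three modes and reusing the msr_* flag style from target-ppc (the index numbering and the msr_hv flag name are assumptions for the example, not part of the patch):

    /* hypothetical target-ppc exec.h fragment, not part of the patch:
       map the three PowerPC 64 execution modes onto TLB table indices.
       0 = hypervisor, 1 = supervisor, 2 = problem state */
    #define NB_MMU_MODES 3
    #define CPU_MEM_INDEX (msr_pr ? 2 : (msr_hv ? 0 : 1))

The softmmu accessors would then index env->tlb_table[CPU_MEM_INDEX] exactly as they do today with the two-mode setup.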
Index: cpu-defs.h
===================================================================
RCS file: /sources/qemu/qemu/cpu-defs.h,v
retrieving revision 1.18
diff -u -d -d -p -r1.18 cpu-defs.h
--- cpu-defs.h	16 Mar 2007 23:58:11 -0000	1.18
+++ cpu-defs.h	26 Mar 2007 06:52:11 -0000
@@ -106,6 +108,13 @@ typedef struct CPUTLBEntry {
     target_phys_addr_t addend;
 } CPUTLBEntry;
 
+/* Alpha has 4 different running levels */
+#if defined(TARGET_ALPHA)
+#define NB_MMU_MODES 4
+#else
+#define NB_MMU_MODES 2
+#endif
+
 #define CPU_COMMON \
     struct TranslationBlock *current_tb; /* currently executing TB */ \
     /* soft mmu support */ \
@@ -117,7 +126,7 @@ typedef struct CPUTLBEntry {
     target_ulong mem_write_vaddr; /* target virtual addr at which the \
                                      memory was written */ \
     /* 0 = kernel, 1 = user */ \
-    CPUTLBEntry tlb_table[2][CPU_TLB_SIZE]; \
+    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
 \
     /* from this point: preserved by CPU reset */ \
Index: exec.c
===================================================================
RCS file: /sources/qemu/qemu/exec.c,v
retrieving revision 1.89
diff -u -d -d -p -r1.89 exec.c
--- exec.c	17 Mar 2007 15:17:58 -0000	1.89
+++ exec.c	26 Mar 2007 06:52:11 -0000
@@ -1300,6 +1307,16 @@ void tlb_flush(CPUState *env, int flush_
         env->tlb_table[1][i].addr_read = -1;
         env->tlb_table[1][i].addr_write = -1;
         env->tlb_table[1][i].addr_code = -1;
+#if (NB_MMU_MODES >= 3)
+        env->tlb_table[2][i].addr_read = -1;
+        env->tlb_table[2][i].addr_write = -1;
+        env->tlb_table[2][i].addr_code = -1;
+#if (NB_MMU_MODES == 4)
+        env->tlb_table[3][i].addr_read = -1;
+        env->tlb_table[3][i].addr_write = -1;
+        env->tlb_table[3][i].addr_code = -1;
+#endif
+#endif
     }
 
     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
@@ -1345,6 +1362,12 @@ void tlb_flush_page(CPUState *env, targe
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     tlb_flush_entry(&env->tlb_table[0][i], addr);
     tlb_flush_entry(&env->tlb_table[1][i], addr);
+#if (NB_MMU_MODES >= 3)
+    tlb_flush_entry(&env->tlb_table[2][i], addr);
+#if (NB_MMU_MODES == 4)
+    tlb_flush_entry(&env->tlb_table[3][i], addr);
+#endif
+#endif
 
     /* Discard jump cache entries for any tb which might potentially
        overlap the flushed page. */
@@ -1434,6 +1457,14 @@ void cpu_physical_memory_reset_dirty(ram
             tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
         for(i = 0; i < CPU_TLB_SIZE; i++)
             tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
+#if (NB_MMU_MODES >= 3)
+        for(i = 0; i < CPU_TLB_SIZE; i++)
+            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
+#if (NB_MMU_MODES == 4)
+        for(i = 0; i < CPU_TLB_SIZE; i++)
+            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
+#endif
+#endif
     }
 
 #if !defined(CONFIG_SOFTMMU)
@@ -1486,6 +1517,14 @@ void cpu_tlb_update_dirty(CPUState *env)
         tlb_update_dirty(&env->tlb_table[0][i]);
     for(i = 0; i < CPU_TLB_SIZE; i++)
         tlb_update_dirty(&env->tlb_table[1][i]);
+#if (NB_MMU_MODES >= 3)
+    for(i = 0; i < CPU_TLB_SIZE; i++)
+        tlb_update_dirty(&env->tlb_table[2][i]);
+#if (NB_MMU_MODES == 4)
+    for(i = 0; i < CPU_TLB_SIZE; i++)
+        tlb_update_dirty(&env->tlb_table[3][i]);
+#endif
+#endif
 }
 
 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
@@ -1511,6 +1550,12 @@ static inline void tlb_set_dirty(CPUStat
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     tlb_set_dirty1(&env->tlb_table[0][i], addr);
     tlb_set_dirty1(&env->tlb_table[1][i], addr);
+#if (NB_MMU_MODES >= 3)
+    tlb_set_dirty1(&env->tlb_table[2][i], addr);
+#if (NB_MMU_MODES == 4)
+    tlb_set_dirty1(&env->tlb_table[3][i], addr);
+#endif
+#endif
 }
 
 /* add a new TLB entry. At most one entry for a given virtual address
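Note that the patch deliberately unrolls the extra modes behind nested #if, so targets with the usual two modes compile exactly the code they have today. A hypothetical loop-based equivalent (not what the patch does, shown only to illustrate the trade-off) for the tlb_flush_page() hunk would be:

    /* illustrative alternative, not part of the patch: iterate over
       all modes instead of unrolling them behind #if */
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

The unrolled form keeps the two-mode fast path free of any loop, which is presumably the point of selecting the count at compile time.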