Add declarations and templates of extended MMU helpers.
An extended helper takes an additional argument: the host address of the
generated code that accesses guest memory. This address differs from the
address of the call site to the helper, because helper call sites are located
at the end of a generated code block.

Signed-off-by: Yeongkyoon Lee <yeongkyoon....@samsung.com>
---
 softmmu_defs.h     |   39 +++++++++++++++++++++++++++++++++++++++
 softmmu_header.h   |   15 +++++++++++++++
 softmmu_template.h |   41 +++++++++++++++++++++++++++++++++--------
 3 files changed, 87 insertions(+), 8 deletions(-)

diff --git a/softmmu_defs.h b/softmmu_defs.h
index 1f25e33..a93adf0 100644
--- a/softmmu_defs.h
+++ b/softmmu_defs.h
@@ -9,6 +9,7 @@
 #ifndef SOFTMMU_DEFS_H
 #define SOFTMMU_DEFS_H

+#ifndef CONFIG_QEMU_LDST_OPTIMIZATION
 uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                     int mmu_idx);
@@ -34,4 +35,42 @@ void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
 uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
                      int mmu_idx);
+#else
+/* Extended versions of MMU helpers for qemu_ld/st optimization.
+   The additional argument is a host code address accessing guest memory */
+uint8_t ext_helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                           uintptr_t ra);
+void ext_helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                        int mmu_idx, uintptr_t ra);
+uint16_t ext_helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                        int mmu_idx, uintptr_t ra);
+uint32_t ext_helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                        int mmu_idx, uintptr_t ra);
+uint64_t ext_helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                        int mmu_idx, uintptr_t ra);
+
+uint8_t ext_helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                         int mmu_idx, uintptr_t ra);
+uint16_t ext_helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                             uintptr_t ra);
+void ext_helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                         int mmu_idx, uintptr_t ra);
+uint32_t ext_helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                             uintptr_t ra);
+void ext_helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                         int mmu_idx, uintptr_t ra);
+uint64_t ext_helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                             uintptr_t ra);
+void ext_helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                         int mmu_idx, uintptr_t ra);
+#endif  /* CONFIG_QEMU_LDST_OPTIMIZATION */
+
 #endif
diff --git a/softmmu_header.h b/softmmu_header.h
index d8d9c81..d18c8f8 100644
--- a/softmmu_header.h
+++ b/softmmu_header.h
@@ -93,7 +93,12 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+        res = glue(glue(ext_helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+                                                           (uintptr_t)NULL);
+#else
         res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+#endif
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
@@ -114,8 +119,13 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+        res = (DATA_STYPE)glue(glue(ext_helper_ld, SUFFIX),
+                               MMUSUFFIX)(env, addr, mmu_idx, (uintptr_t)NULL);
+#else
         res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
                                MMUSUFFIX)(env, addr, mmu_idx);
+#endif
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
@@ -141,7 +151,12 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+        glue(glue(ext_helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx,
+                                                     (uintptr_t)NULL);
+#else
         glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+#endif
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         glue(glue(st, SUFFIX), _raw)(hostaddr, v);
diff --git a/softmmu_template.h b/softmmu_template.h
index e2490f0..e40c060 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -54,6 +54,14 @@
 #define ADDR_READ addr_read
 #endif

+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+/* An extended MMU helper takes one more argument which is
+   a host address of generated code accessing guest memory */
+#define GET_RET_ADDR() ra
+#else
+#define GET_RET_ADDR() GETPC()
+#endif  /* CONFIG_QEMU_LDST_OPTIMIZATION */
+
 static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                         target_ulong addr,
                                                         int mmu_idx,
@@ -91,9 +99,17 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 }

 /* handle all cases except unaligned access which span two pages */
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+DATA_TYPE
+glue(glue(ext_helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                             target_ulong addr,
+                                             int mmu_idx,
+                                             uintptr_t ra)
+#else
 DATA_TYPE
 glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
+#endif
 {
     DATA_TYPE res;
     int index;
@@ -111,13 +127,13 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
             ioaddr = env->iotlb[mmu_idx][index];
             res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
@@ -128,7 +144,7 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC();
+                retaddr = GET_RET_ADDR();
                 do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
             }
 #endif
@@ -138,7 +154,7 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC();
+        retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@@ -240,9 +256,17 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
 #endif /* SHIFT > 2 */
 }

+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+void glue(glue(ext_helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                                  target_ulong addr,
+                                                  DATA_TYPE val,
+                                                  int mmu_idx,
+                                                  uintptr_t ra)
+#else
 void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                               target_ulong addr, DATA_TYPE val,
                                               int mmu_idx)
+#endif
 {
     target_phys_addr_t ioaddr;
     target_ulong tlb_addr;
@@ -257,12 +281,12 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
             ioaddr = env->iotlb[mmu_idx][index];
             glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
 #endif
@@ -273,7 +297,7 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC();
+                retaddr = GET_RET_ADDR();
                 do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
             }
 #endif
@@ -283,7 +307,7 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC();
+        retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
@@ -352,3 +376,4 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
 #undef USUFFIX
 #undef DATA_SIZE
 #undef ADDR_READ
+#undef GET_RET_ADDR
--
1.7.5.4

Reply via email to