On 7/10/24 23:28, Paolo Bonzini wrote:
On 7/10/24 20:40, Paolo Bonzini wrote:


On Wed, Jul 10, 2024 at 18:47 Richard Henderson <richard.hender...@linaro.org> wrote:

    On 7/9/24 23:29, Paolo Bonzini wrote:
     > This takes care of probing the vaddr range in advance, and is also faster
     > because it avoids repeated TLB lookups.  It also matches the Intel manual
     > better, as it says "Checks that the current (old) TSS, new TSS, and all
     > segment descriptors used in the task switch are paged into system memory";
     > note however that it's not clear how the processor checks for segment
     > descriptors, and this check is not included in the AMD manual.
     >
     > Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
     > ---
     >   target/i386/tcg/seg_helper.c | 101 ++++++++++++++++++-----------------
     >   1 file changed, 51 insertions(+), 50 deletions(-)
     >
     > diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
     > index 25af9d4a4ec..77f2c65c3cf 100644
     > --- a/target/i386/tcg/seg_helper.c
     > +++ b/target/i386/tcg/seg_helper.c
     > @@ -311,35 +313,44 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
     >           raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
     >       }
     >
     > +    /* X86Access avoids memory exceptions during the task switch */
     > +    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
     > +                    MMU_DATA_STORE, cpu_mmu_index_kernel(env), retaddr);
     > +
     > +    if (source == SWITCH_TSS_CALL) {
     > +        /* Probe for future write of parent task */
     > +        probe_access(env, tss_base, 2, MMU_DATA_STORE,
     > +                  cpu_mmu_index_kernel(env), retaddr);
     > +    }
     > +    access_prepare_mmu(&new, env, tss_base, tss_limit,
     > +                    MMU_DATA_LOAD, cpu_mmu_index_kernel(env), retaddr);

    You're computing cpu_mmu_index_kernel 3 times.

Squashing this in (easier to review than the whole thing):

Excellent, thanks!


r~


diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index 4123ff1245e..4edfd26135f 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -321,7 +321,7 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,
      uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
      uint32_t old_eflags, eflags_mask;
      SegmentCache *dt;
-    int index;
+    int mmu_index, index;
      target_ulong ptr;
      X86Access old, new;

@@ -378,16 +378,17 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,
      }

      /* X86Access avoids memory exceptions during the task switch */
+    mmu_index = cpu_mmu_index_kernel(env);
      access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
-               MMU_DATA_STORE, cpu_mmu_index_kernel(env), retaddr);
+               MMU_DATA_STORE, mmu_index, retaddr);

      if (source == SWITCH_TSS_CALL) {
          /* Probe for future write of parent task */
          probe_access(env, tss_base, 2, MMU_DATA_STORE,
-             cpu_mmu_index_kernel(env), retaddr);
+             mmu_index, retaddr);
      }
      access_prepare_mmu(&new, env, tss_base, tss_limit,
-               MMU_DATA_LOAD, cpu_mmu_index_kernel(env), retaddr);
+               MMU_DATA_LOAD, mmu_index, retaddr);

      /* read all the registers from the new TSS */
      if (type & 8) {
@@ -468,7 +469,11 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,
         context */

      if (source == SWITCH_TSS_CALL) {
-        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
+        /*
+         * Thanks to the probe_access above, we know the first two
+         * bytes addressed by &new are writable too.
+         */
+        access_stw(&new, tss_base, env->tr.selector);
          new_eflags |= NT_MASK;
      }

Paolo
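
As an aside for readers who don't know the QEMU internals: the "probing the vaddr range in advance" that the commit message describes is a two-phase pattern. Every page the task switch will touch is checked, and any fault raised, before the first byte of guest state is modified; once the probes succeed, the later loads and stores cannot fault, so the switch cannot be left half-done. Below is a minimal, self-contained sketch of that idea in plain C. The helper names (probe_range, switch_state) are hypothetical stand-ins, not the QEMU API (probe_access / access_prepare_mmu / access_stw).

    /* Sketch of "probe first, then commit": nothing is written unless
     * every range the operation will touch has been validated up front. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Hypothetical stand-in for a TLB/page probe: reports "would fault"
     * instead of raising an exception. */
    static bool probe_range(size_t mem_size, size_t addr, size_t len)
    {
        return addr <= mem_size && len <= mem_size - addr;
    }

    /* Update two non-overlapping regions; either both probes succeed and
     * both writes happen, or nothing is modified at all. */
    static bool switch_state(uint8_t *mem, size_t mem_size,
                             size_t old_base, size_t new_base, size_t len)
    {
        /* Phase 1: probe everything the operation will touch. */
        if (!probe_range(mem_size, old_base, len) ||
            !probe_range(mem_size, new_base, len)) {
            return false;   /* "fault" reported before any side effect */
        }
        /* Phase 2: these writes can no longer fail. */
        memset(mem + old_base, 0xaa, len);
        memcpy(mem + new_base, mem + old_base, len);
        return true;
    }

    int main(void)
    {
        static uint8_t mem[4 * PAGE_SIZE];
        /* Both ranges valid: the update is performed. */
        printf("ok=%d\n", switch_state(mem, sizeof(mem), 0, PAGE_SIZE, 104));
        /* Second range out of bounds: rejected with no partial write. */
        printf("ok=%d\n", switch_state(mem, sizeof(mem), 0, sizeof(mem), 104));
        return 0;
    }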
