Hi Julien,

> On 22 May 2024, at 14:25, Julien Grall <jul...@xen.org> wrote:
> 
>> diff --git a/xen/arch/arm/mmu/p2m.c b/xen/arch/arm/mmu/p2m.c
>> index 41fcca011cf4..b496266deef6 100644
>> --- a/xen/arch/arm/mmu/p2m.c
>> +++ b/xen/arch/arm/mmu/p2m.c
>> @@ -753,17 +753,9 @@ static int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
>>      return rc;
>>  }
>> -/*
>> - * Put any references on the single 4K page referenced by pte.
>> - * TODO: Handle superpages, for now we only take special references for leaf
>> - * pages (specifically foreign ones, which can't be super mapped today).
>> - */
>> -static void p2m_put_l3_page(const lpae_t pte)
>> +/* Put any references on the single 4K page referenced by mfn. */
>> +static void p2m_put_l3_page(mfn_t mfn, p2m_type_t type)
>>  {
>> -    mfn_t mfn = lpae_get_mfn(pte);
>> -
>> -    ASSERT(p2m_is_valid(pte));
>> -
>>      /*
>>       * TODO: Handle other p2m types
>>       *
>> @@ -771,16 +763,43 @@ static void p2m_put_l3_page(const lpae_t pte)
>>       * flush the TLBs if the page is reallocated before the end of
>>       * this loop.
>>       */
>> -    if ( p2m_is_foreign(pte.p2m.type) )
>> +    if ( p2m_is_foreign(type) )
>>      {
>>          ASSERT(mfn_valid(mfn));
>>          put_page(mfn_to_page(mfn));
>>      }
>>      /* Detect the xenheap page and mark the stored GFN as invalid. */
>> -    else if ( p2m_is_ram(pte.p2m.type) && is_xen_heap_mfn(mfn) )
>> +    else if ( p2m_is_ram(type) && is_xen_heap_mfn(mfn) )
>>          page_set_xenheap_gfn(mfn_to_page(mfn), INVALID_GFN);
>>  }
> 
> All the pages within a 2MB mapping should be the same type. So...
> 
>> +/* Put any references on the superpage referenced by mfn. */
>> +static void p2m_put_l2_superpage(mfn_t mfn, p2m_type_t type)
>> +{
>> +    unsigned int i;
>> +
>> +    for ( i = 0; i < XEN_PT_LPAE_ENTRIES; i++ )
>> +    {
>> +        p2m_put_l3_page(mfn, type);
>> +
>> +        mfn = mfn_add(mfn, 1);
>> +    }
> 
> ... this solution is a bit wasteful as we will now call p2m_put_l3_page() 512 
> times even though there is nothing to do.
> 
> So instead can we move the checks outside to optimize the path a bit?

You mean this?

diff --git a/xen/arch/arm/mmu/p2m.c b/xen/arch/arm/mmu/p2m.c
index b496266deef6..d40cddda48f3 100644
--- a/xen/arch/arm/mmu/p2m.c
+++ b/xen/arch/arm/mmu/p2m.c
@@ -794,7 +794,8 @@ static void p2m_put_page(const lpae_t pte, unsigned int level)
     ASSERT(p2m_is_valid(pte));
 
     /* We have a second level 2M superpage */
-    if ( p2m_is_superpage(pte, level) && (level == 2) )
+    if ( p2m_is_superpage(pte, level) && (level == 2) &&
+         p2m_is_foreign(pte.p2m.type) )
         return p2m_put_l2_superpage(mfn, pte.p2m.type);
     else if ( level == 3 )
         return p2m_put_l3_page(mfn, pte.p2m.type);

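Or, alternatively, did you mean keeping p2m_put_page() as it is and doing the
filtering inside p2m_put_l2_superpage() itself, so that the loop is skipped
early? A rough, untested sketch, assuming that today only foreign mappings
take extra references at the 2MB level:

static void p2m_put_l2_superpage(mfn_t mfn, p2m_type_t type)
{
    unsigned int i;

    /* Today only foreign mappings take a reference on their pages. */
    if ( !p2m_is_foreign(type) )
        return;

    for ( i = 0; i < XEN_PT_LPAE_ENTRIES; i++ )
    {
        p2m_put_l3_page(mfn, type);

        mfn = mfn_add(mfn, 1);
    }
}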

> Otherwise...
> 
>> +}
>> +
>> +/* Put any references on the page referenced by pte. */
>> +static void p2m_put_page(const lpae_t pte, unsigned int level)
>> +{
>> +    mfn_t mfn = lpae_get_mfn(pte);
>> +
>> +    ASSERT(p2m_is_valid(pte));
>> +
>> +    /* We have a second level 2M superpage */
>> +    if ( p2m_is_superpage(pte, level) && (level == 2) )
>> +        return p2m_put_l2_superpage(mfn, pte.p2m.type);
>> +    else if ( level == 3 )
>> +        return p2m_put_l3_page(mfn, pte.p2m.type);
>> +}
>> +
>>  /* Free lpae sub-tree behind an entry */
>>  static void p2m_free_entry(struct p2m_domain *p2m,
>>                             lpae_t entry, unsigned int level)
>> @@ -809,9 +828,16 @@ static void p2m_free_entry(struct p2m_domain *p2m,
>>  #endif
>>          p2m->stats.mappings[level]--;
>> -        /* Nothing to do if the entry is a super-page. */
>> -        if ( level == 3 )
>> -            p2m_put_l3_page(entry);
>> +        /*
>> +         * TODO: Currently we don't handle 1GB super-page, Xen is not
>> +         * preemptible and therefore some work is needed to handle such
>> +         * superpages, for which at some point Xen might end up freeing memory
>> +         * and therefore for such a big mapping it could end up in a very long
>> +         * operation.
>> +         */
>> +        if ( level >= 2 )
>> +            p2m_put_page(entry, level);
>> +
>>          return;
>>      }
>> @@ -1558,9 +1584,12 @@ int relinquish_p2m_mapping(struct domain *d)
>>          count++;
>>          /*
>> -         * Arbitrarily preempt every 512 iterations.
>> +         * Arbitrarily preempt every 512 iterations or when type is foreign
>> +         * mapping and the order is above 9 (2MB).
>>           */
>> -        if ( !(count % 512) && hypercall_preempt_check() )
>> +        if ( (!(count % 512) ||
>> +              (p2m_is_foreign(t) && (order > XEN_PT_LEVEL_ORDER(2)))) &&
> 
> ... we would need to preempt for every 2MB rather than just for the 
> p2m_is_foreign().

OK. Otherwise, you are suggesting that, if we don't go for the solution above,
we drop p2m_is_foreign(t) from the condition here, am I right?
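
Just to be explicit about my understanding, the check would then become
something like this (untested sketch):

        if ( (!(count % 512) || (order > XEN_PT_LEVEL_ORDER(2))) &&
             hypercall_preempt_check() )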

> 
> BTW, p2m_put_l3_page() has also another case. Should we consider to handle 
> preemption for it too?

You mean adding a preemption check (every 512 iterations, or when the mapping
is foreign and the order is above 9) for the p2m_is_ram(type) &&
is_xen_heap_mfn(mfn) case as well?
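
For reference, the other case in p2m_put_l3_page() from the patch is:

    /* Detect the xenheap page and mark the stored GFN as invalid. */
    else if ( p2m_is_ram(type) && is_xen_heap_mfn(mfn) )
        page_set_xenheap_gfn(mfn_to_page(mfn), INVALID_GFN);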

Just want to be sure I fully understand your comments here.

Cheers,
Luca
