Re: [PATCH v2 02/34] s390: Use _pt_s390_gaddr for gmap address tracking

2023-05-25 Thread Vishal Moola
On Thu, May 25, 2023 at 1:58 AM Mike Rapoport  wrote:
>
> On Mon, May 01, 2023 at 12:27:57PM -0700, Vishal Moola (Oracle) wrote:
> > s390 uses page->index to keep track of page tables for the guest address
> > space. In an attempt to consolidate the usage of page fields in s390,
> > replace _pt_pad_2 with _pt_s390_gaddr to replace page->index in gmap.
> >
> > This will help with the splitting of struct ptdesc from struct page, as
> > well as allow s390 to use _pt_frag_refcount for fragmented page table
> > tracking.
> >
> > Since page->_pt_s390_gaddr aliases with mapping, ensure it's set to NULL
> > before freeing the pages as well.
>
> Wouldn't it be easier to use _pt_pad_1 which is aliased with lru and that
> does not seem to be used by page tables at all?

I initially thought the same, but s390 page tables use lru.


Re: [PATCH v2 02/34] s390: Use _pt_s390_gaddr for gmap address tracking

2023-05-25 Thread Mike Rapoport
On Mon, May 01, 2023 at 12:27:57PM -0700, Vishal Moola (Oracle) wrote:
> s390 uses page->index to keep track of page tables for the guest address
> space. In an attempt to consolidate the usage of page fields in s390,
> replace _pt_pad_2 with _pt_s390_gaddr to replace page->index in gmap.
> 
> This will help with the splitting of struct ptdesc from struct page, as
> well as allow s390 to use _pt_frag_refcount for fragmented page table
> tracking.
> 
> Since page->_pt_s390_gaddr aliases with mapping, ensure it's set to NULL
> before freeing the pages as well.

Wouldn't it be easier to use _pt_pad_1 which is aliased with lru and that
does not seem to be used by page tables at all?
 
> This also reverts commit 7e25de77bc5ea ("s390/mm: use pmd_pgtable_page()
> helper in __gmap_segment_gaddr()") which had s390 use
> pmd_pgtable_page() to get a gmap page table, as pmd_pgtable_page()
> should be used for more generic process page tables.
> 
> Signed-off-by: Vishal Moola (Oracle) 
> ---
>  arch/s390/mm/gmap.c  | 56 +++-
>  include/linux/mm_types.h |  2 +-
>  2 files changed, 39 insertions(+), 19 deletions(-)
> 
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index dfe905c7bd8e..a9e8b1805894 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -70,7 +70,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
>   page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
>   if (!page)
>   goto out_free;
> - page->index = 0;
> + page->_pt_s390_gaddr = 0;
>   list_add(&page->lru, &gmap->crst_list);
>   table = page_to_virt(page);
>   crst_table_init(table, etype);
> @@ -187,16 +187,20 @@ static void gmap_free(struct gmap *gmap)
>   if (!(gmap_is_shadow(gmap) && gmap->removed))
>   gmap_flush_tlb(gmap);
>   /* Free all segment & region tables. */
> - list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
> + list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
> + page->_pt_s390_gaddr = 0;
>   __free_pages(page, CRST_ALLOC_ORDER);
> + }
>   gmap_radix_tree_free(&gmap->guest_to_host);
>   gmap_radix_tree_free(&gmap->host_to_guest);
>  
>   /* Free additional data for a shadow gmap */
>   if (gmap_is_shadow(gmap)) {
>   /* Free all page tables. */
> - list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
> + list_for_each_entry_safe(page, next, &gmap->pt_list, lru) {
> + page->_pt_s390_gaddr = 0;
>   page_table_free_pgste(page);
> + }
>   gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
>   /* Release reference to the parent */
>   gmap_put(gmap->parent);
> @@ -318,12 +322,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned 
> long *table,
>   list_add(&page->lru, &gmap->crst_list);
>   *table = __pa(new) | _REGION_ENTRY_LENGTH |
>   (*table & _REGION_ENTRY_TYPE_MASK);
> - page->index = gaddr;
> + page->_pt_s390_gaddr = gaddr;
>   page = NULL;
>   }
>   spin_unlock(&gmap->guest_table_lock);
> - if (page)
> + if (page) {
> + page->_pt_s390_gaddr = 0;
>   __free_pages(page, CRST_ALLOC_ORDER);
> + }
>   return 0;
>  }
>  
> @@ -336,12 +342,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned 
> long *table,
>  static unsigned long __gmap_segment_gaddr(unsigned long *entry)
>  {
>   struct page *page;
> - unsigned long offset;
> + unsigned long offset, mask;
>  
>   offset = (unsigned long) entry / sizeof(unsigned long);
>   offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
> - page = pmd_pgtable_page((pmd_t *) entry);
> - return page->index + offset;
> + mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
> + page = virt_to_page((void *)((unsigned long) entry & mask));
> +
> + return page->_pt_s390_gaddr + offset;
>  }
>  
>  /**
> @@ -1351,6 +1359,7 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned 
> long raddr)
>   /* Free page table */
>   page = phys_to_page(pgt);
>   list_del(&page->lru);
> + page->_pt_s390_gaddr = 0;
>   page_table_free_pgste(page);
>  }
>  
> @@ -1379,6 +1388,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, 
> unsigned long raddr,
>   /* Free page table */
>   page = phys_to_page(pgt);
>   list_del(&page->lru);
> + page->_pt_s390_gaddr = 0;
>   page_table_free_pgste(page);
>   }
>  }
> @@ -1409,6 +1419,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned 
> long raddr)
>   /* Free segment table */
>   page = phys_to_page(sgt);
>   list_del(&page->lru);
> + page->_pt_s390_gaddr = 0;
>   __free_pages(page, CRST_ALLOC_ORDER);
>  }
>  
> @@ -1437,6 +1448,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, 
> unsigned long raddr,
>   /* Free segment table */
>  

[PATCH v2 02/34] s390: Use _pt_s390_gaddr for gmap address tracking

2023-05-01 Thread Vishal Moola (Oracle)
s390 uses page->index to keep track of page tables for the guest address
space. In an attempt to consolidate the usage of page fields in s390,
replace _pt_pad_2 with _pt_s390_gaddr to replace page->index in gmap.

This will help with the splitting of struct ptdesc from struct page, as
well as allow s390 to use _pt_frag_refcount for fragmented page table
tracking.

Since page->_pt_s390_gaddr aliases with mapping, ensure it's set to NULL
before freeing the pages as well.

This also reverts commit 7e25de77bc5ea ("s390/mm: use pmd_pgtable_page()
helper in __gmap_segment_gaddr()") which had s390 use
pmd_pgtable_page() to get a gmap page table, as pmd_pgtable_page()
should be used for more generic process page tables.

Signed-off-by: Vishal Moola (Oracle) 
---
 arch/s390/mm/gmap.c  | 56 +++-
 include/linux/mm_types.h |  2 +-
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index dfe905c7bd8e..a9e8b1805894 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -70,7 +70,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
goto out_free;
-   page->index = 0;
+   page->_pt_s390_gaddr = 0;
	list_add(&page->lru, &gmap->crst_list);
table = page_to_virt(page);
crst_table_init(table, etype);
@@ -187,16 +187,20 @@ static void gmap_free(struct gmap *gmap)
if (!(gmap_is_shadow(gmap) && gmap->removed))
gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
-   list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
+   list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+   page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
+   }
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);
 
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
/* Free all page tables. */
-   list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
+   list_for_each_entry_safe(page, next, &gmap->pt_list, lru) {
+   page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
+   }
	gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
@@ -318,12 +322,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned 
long *table,
	list_add(&page->lru, &gmap->crst_list);
*table = __pa(new) | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
-   page->index = gaddr;
+   page->_pt_s390_gaddr = gaddr;
page = NULL;
}
	spin_unlock(&gmap->guest_table_lock);
-   if (page)
+   if (page) {
+   page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
+   }
return 0;
 }
 
@@ -336,12 +342,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned 
long *table,
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
struct page *page;
-   unsigned long offset;
+   unsigned long offset, mask;
 
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-   page = pmd_pgtable_page((pmd_t *) entry);
-   return page->index + offset;
+   mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+   page = virt_to_page((void *)((unsigned long) entry & mask));
+
+   return page->_pt_s390_gaddr + offset;
 }
 
 /**
@@ -1351,6 +1359,7 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned 
long raddr)
/* Free page table */
page = phys_to_page(pgt);
	list_del(&page->lru);
+   page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
 }
 
@@ -1379,6 +1388,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned 
long raddr,
/* Free page table */
page = phys_to_page(pgt);
	list_del(&page->lru);
+   page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
}
 }
@@ -1409,6 +1419,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned 
long raddr)
/* Free segment table */
page = phys_to_page(sgt);
	list_del(&page->lru);
+   page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
 }
 
@@ -1437,6 +1448,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned 
long raddr,
/* Free segment table */
page = phys_to_page(sgt);
	list_del(&page->lru);
+   page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
 }
@@ -1467,6 +1479,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned 
long raddr)
/* Free region 3 table */