Re: [PATCH 1/4] mm/hmm: make full use of walk_page_range()

2019-09-13 Thread Christoph Hellwig
> +static int hmm_pfns_fill(unsigned long addr,
> +			 unsigned long end,
> +			 struct hmm_range *range,
> +			 enum hmm_pfn_value_e value)

Nit: can we use the space a little more efficiently, e.g.:

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)

> +static int hmm_vma_walk_test(unsigned long start,
> +			     unsigned long end,
> +			     struct mm_walk *walk)

Same here.
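
That is, something like:

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)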

> +	if (!(vma->vm_flags & VM_READ)) {
> +		(void) hmm_pfns_fill(start, end, range, HMM_PFN_NONE);

There should be no need for the void cast here.
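
That is, the hunk could simply be:

	if (!(vma->vm_flags & VM_READ)) {
		hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
		return -EPERM;
	}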

Re: [PATCH 1/4] mm/hmm: make full use of walk_page_range()

2019-09-12 Thread Ralph Campbell



On 9/12/19 1:26 AM, Christoph Hellwig wrote:
>> +static int hmm_pfns_fill(unsigned long addr,
>> +			 unsigned long end,
>> +			 struct hmm_range *range,
>> +			 enum hmm_pfn_value_e value)
>
> Nit: can we use the space a little more efficiently, e.g.:
>
> static int hmm_pfns_fill(unsigned long addr, unsigned long end,
> 		struct hmm_range *range, enum hmm_pfn_value_e value)
>
>> +static int hmm_vma_walk_test(unsigned long start,
>> +			     unsigned long end,
>> +			     struct mm_walk *walk)
>
> Same here.
>
>> +	if (!(vma->vm_flags & VM_READ)) {
>> +		(void) hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
>
> There should be no need for the void cast here.



OK. I'll post a v2 with these changes.
Thanks for the reviews.


[PATCH 1/4] mm/hmm: make full use of walk_page_range()

2019-09-11 Thread Ralph Campbell
hmm_range_fault() calls find_vma() and walk_page_range() in a loop.
This is unnecessary duplication since walk_page_range() calls find_vma()
in a loop already.
Simplify hmm_range_fault() by defining a walk_test() callback function
to filter unhandled vmas.
This also fixes a bug where hmm_range_fault() was not checking
start >= vma->vm_start before checking vma->vm_flags so hmm_range_fault()
could return an error based on the wrong vma for the requested range.
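
For context: with the recent mm_walk_ops conversion, walk_page_range() itself
iterates over every vma covering [start, end) and, if an optional .test_walk
hook is set, calls it for each vma before walking its page tables. Returning 0
means "walk this vma", a positive value means "skip it", and a negative errno
aborts the walk with that error. A minimal sketch of the pattern, using
hypothetical my_* names (not part of this patch):

#include <linux/pagewalk.h>

static int my_test_walk(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	/* Abort the whole walk for vmas the caller cannot handle. */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
		return -EFAULT;

	return 0;	/* 0: go ahead and walk this vma's page tables */
}

static const struct mm_walk_ops my_walk_ops = {
	.pmd_entry	= my_pmd_entry,	/* per-range handler, defined elsewhere */
	.test_walk	= my_test_walk,
};

static long my_fault_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end, void *private)
{
	/* No explicit find_vma() loop: walk_page_range() visits each vma. */
	return walk_page_range(mm, start, end, &my_walk_ops, private);
}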

Signed-off-by: Ralph Campbell 
Cc: "Jérôme Glisse" 
Cc: Jason Gunthorpe 
Cc: Christoph Hellwig 
---
 mm/hmm.c | 113 ---
 1 file changed, 50 insertions(+), 63 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 902f5fa6bf93..06041d4399ff 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -252,18 +252,17 @@ static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
return -EFAULT;
 }
 
-static int hmm_pfns_bad(unsigned long addr,
-   unsigned long end,
-   struct mm_walk *walk)
+static int hmm_pfns_fill(unsigned long addr,
+			 unsigned long end,
+			 struct hmm_range *range,
+			 enum hmm_pfn_value_e value)
 {
-   struct hmm_vma_walk *hmm_vma_walk = walk->private;
-   struct hmm_range *range = hmm_vma_walk->range;
uint64_t *pfns = range->pfns;
unsigned long i;
 
i = (addr - range->start) >> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, i++)
-   pfns[i] = range->values[HMM_PFN_ERROR];
+   pfns[i] = range->values[value];
 
return 0;
 }
@@ -584,7 +583,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
}
return 0;
} else if (!pmd_present(pmd))
-   return hmm_pfns_bad(start, end, walk);
+   return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 
if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
@@ -612,7 +611,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 * recover.
 */
if (pmd_bad(pmd))
-   return hmm_pfns_bad(start, end, walk);
+   return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 
ptep = pte_offset_map(pmdp, addr);
i = (addr - range->start) >> PAGE_SHIFT;
@@ -770,13 +769,36 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #define hmm_vma_walk_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static void hmm_pfns_clear(struct hmm_range *range,
-  uint64_t *pfns,
-  unsigned long addr,
-  unsigned long end)
+static int hmm_vma_walk_test(unsigned long start,
+			     unsigned long end,
+			     struct mm_walk *walk)
 {
-   for (; addr < end; addr += PAGE_SIZE, pfns++)
-   *pfns = range->values[HMM_PFN_NONE];
+   struct hmm_vma_walk *hmm_vma_walk = walk->private;
+   struct hmm_range *range = hmm_vma_walk->range;
+   struct vm_area_struct *vma = walk->vma;
+
+   /* If range is no longer valid, force retry. */
+   if (!range->valid)
+   return -EBUSY;
+
+   /*
+* Skip vma ranges that don't have struct page backing them or
+* map I/O devices directly.
+*/
+   if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
+   return -EFAULT;
+
+   /*
+* If the vma does not allow read access, then assume that it does not
+* allow write access either. HMM does not support architectures
+* that allow write without read.
+*/
+   if (!(vma->vm_flags & VM_READ)) {
+   (void) hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+   return -EPERM;
+   }
+
+   return 0;
 }
 
 /*
@@ -857,6 +879,7 @@ static const struct mm_walk_ops hmm_walk_ops = {
.pmd_entry  = hmm_vma_walk_pmd,
.pte_hole   = hmm_vma_walk_hole,
.hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
+   .test_walk  = hmm_vma_walk_test,
 };
 
 /**
@@ -889,63 +912,27 @@ static const struct mm_walk_ops hmm_walk_ops = {
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
-   const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
-   unsigned long start = range->start, end;
-   struct hmm_vma_walk hmm_vma_walk;
+   unsigned long start = range->start;
+   struct hmm_vma_walk hmm_vma_walk = {
+   .range = range,
+   .last = start,
+   .flags = flags,
+   };
struct hmm *hmm = range->hmm;
-   struct vm_area_struct *vma;
int ret;
 
lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
 
do {
-   /* If range is no longer valid force retry. */
-   if (!range->valid)
-   return -EBUSY;
-
-   vma = find_vma(hmm->mmu_notifier.mm,