Re: [PATCH v3 03/14] mm/hmm: allow hmm_range to be used with a mmu_interval_notifier or hmm_mirror

2019-11-13 Thread Christoph Hellwig
Looks good,

Reviewed-by: Christoph Hellwig 

[PATCH v3 03/14] mm/hmm: allow hmm_range to be used with a mmu_interval_notifier or hmm_mirror

2019-11-12 Thread Jason Gunthorpe
From: Jason Gunthorpe 

hmm_mirror's handling of ranges does not use a sequence count which
results in this bug:

 CPU0                                CPU1
                                     hmm_range_wait_until_valid(range)
                                         valid == true
                                     hmm_range_fault(range)
 hmm_invalidate_range_start()
     range->valid = false
 hmm_invalidate_range_end()
     range->valid = true
                                     hmm_range_valid(range)
                                         valid == true

Here hmm_range_valid() should not have succeeded, since a complete
invalidation ran between the wait and the check.
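
As a rough sketch (not part of this patch; the names below are invented
for illustration), the difference between the flag-only check and a
sequence-count check is:

    /*
     * Flag only: a complete start/end invalidation pair leaves no trace,
     * so the reader above still sees "valid" afterwards (the bug).
     */
    static bool valid = true;

    static void invalidate_start(void)      { valid = false; }
    static void invalidate_end(void)        { valid = true; }
    static bool still_valid(void)           { return valid; }

    /*
     * Sequence count: invalidate_start() bumps the count, so a reader
     * that snapshotted it earlier notices any intervening invalidation.
     */
    static unsigned long invalidate_seq;

    static void invalidate_start_seq(void)
    {
            invalidate_seq++;
            valid = false;
    }

    static bool still_valid_seq(unsigned long snap)
    {
            return valid && READ_ONCE(invalidate_seq) == snap;
    }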

Adding the required sequence count to hmm_mirror would make it nearly
identical to the new mmu_interval_notifier. Instead, replace the
hmm_mirror machinery with mmu_interval_notifier.

Co-existence of the two APIs is the first step.
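
For context, a driver converted to the mmu_interval_notifier side of the
new API is expected to wrap hmm_range_fault() in a read_begin/read_retry
loop, roughly like the hedged sketch below. struct dev, dev->pt_lock and
dev_update_pagetable() are made-up driver pieces and error handling is
trimmed; only the mmu_interval_* and hmm_range_* calls come from this
series:

    static int dev_fault_range(struct dev *dev,
                               struct mmu_interval_notifier *mni,
                               struct hmm_range *range)
    {
            struct mm_struct *mm = mni->mm;
            long ret;

            range->notifier = mni;
    again:
            /* Snapshot the invalidation sequence before reading CPU tables */
            range->notifier_seq = mmu_interval_read_begin(mni);

            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(range, 0);
            up_read(&mm->mmap_sem);
            if (ret == -EBUSY)
                    goto again;
            if (ret < 0)
                    return ret;

            mutex_lock(&dev->pt_lock);
            if (mmu_interval_read_retry(mni, range->notifier_seq)) {
                    /* Invalidation ran since read_begin(); pfns are stale */
                    mutex_unlock(&dev->pt_lock);
                    goto again;
            }
            dev_update_pagetable(dev, range);       /* consume range->pfns */
            mutex_unlock(&dev->pt_lock);
            return 0;
    }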

Reviewed-by: Jérôme Glisse 
Tested-by: Philip Yang 
Tested-by: Ralph Campbell 
Signed-off-by: Jason Gunthorpe 
---
 include/linux/hmm.h |  5 +++++
 mm/hmm.c            | 25 +++++++++++++++++++------
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 3fec513b9c00f1..fbb35c78637e57 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -145,6 +145,9 @@ enum hmm_pfn_value_e {
 /*
  * struct hmm_range - track invalidation lock on virtual address range
  *
+ * @notifier: an optional mmu_interval_notifier
+ * @notifier_seq: when notifier is used this is the result of
+ *                mmu_interval_read_begin()
  * @hmm: the core HMM structure this range is active against
  * @vma: the vm area struct for the range
  * @list: all range lock are on a list
@@ -159,6 +162,8 @@ enum hmm_pfn_value_e {
  * @valid: pfns array did not change since it has been fill by an HMM function
  */
 struct hmm_range {
+   struct mmu_interval_notifier *notifier;
+   unsigned long   notifier_seq;
struct hmm  *hmm;
struct list_headlist;
unsigned long   start;
diff --git a/mm/hmm.c b/mm/hmm.c
index 6b0136665407a3..8d060c5dabe37b 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -858,6 +858,14 @@ void hmm_range_unregister(struct hmm_range *range)
 }
 EXPORT_SYMBOL(hmm_range_unregister);
 
+static bool needs_retry(struct hmm_range *range)
+{
+   if (range->notifier)
+   return mmu_interval_check_retry(range->notifier,
+   range->notifier_seq);
+   return !range->valid;
+}
+
 static const struct mm_walk_ops hmm_walk_ops = {
.pud_entry  = hmm_vma_walk_pud,
.pmd_entry  = hmm_vma_walk_pmd,
@@ -898,18 +906,23 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
unsigned long start = range->start, end;
struct hmm_vma_walk hmm_vma_walk;
-   struct hmm *hmm = range->hmm;
+   struct mm_struct *mm;
struct vm_area_struct *vma;
int ret;
 
-   lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
+   if (range->notifier)
+   mm = range->notifier->mm;
+   else
+   mm = range->hmm->mmu_notifier.mm;
+
+   lockdep_assert_held(&mm->mmap_sem);
 
do {
/* If range is no longer valid force retry. */
-   if (!range->valid)
+   if (needs_retry(range))
return -EBUSY;
 
-   vma = find_vma(hmm->mmu_notifier.mm, start);
+   vma = find_vma(mm, start);
if (vma == NULL || (vma->vm_flags & device_vma))
return -EFAULT;
 
@@ -939,7 +952,7 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
start = hmm_vma_walk.last;
 
/* Keep trying while the range is valid. */
-   } while (ret == -EBUSY && range->valid);
+   } while (ret == -EBUSY && !needs_retry(range));
 
if (ret) {
unsigned long i;
@@ -997,7 +1010,7 @@ long hmm_range_dma_map(struct hmm_range *range, struct device *device,
continue;
 
/* Check if range is being invalidated */
-   if (!range->valid) {
+   if (needs_retry(range)) {
ret = -EBUSY;
goto unmap;
}
-- 
2.24.0
