Re: [PATCH] drm/amdkfd: remap unaligned svm ranges that have split

2023-10-19 Thread Felix Kuehling

On 2023-10-19 16:51, Alex Sierra wrote:

Split SVM ranges that have been mapped with 2MB page table entries
need to be remapped if the split happened at a non-aligned VA.
[WHY]:
This condition causes the 2MB page table entries to be split into 4KB
PTEs.

Signed-off-by: Alex Sierra 


Reviewed-by: Felix Kuehling 
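
For orientation only (not part of the patch): a minimal standalone sketch of
the alignment condition the change keys on. It assumes 4KB base pages and the
common granularity shift of 9, i.e. a 512-page (2MB) block; is_aligned() is a
local stand-in for the kernel's IS_ALIGNED(), and the page numbers are made up
for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULARITY_SHIFT 9	/* assumed: 1 << 9 = 512 pages = 2MB with 4KB pages */

/* Local stand-in for the kernel's IS_ALIGNED(). */
static bool is_aligned(uint64_t pfn, uint64_t align)
{
	return (pfn & (align - 1)) == 0;
}

int main(void)
{
	uint64_t block = 1ULL << GRANULARITY_SHIFT;

	/* Tail split with new_last = 1023: the boundary new_last + 1 = 1024
	 * is a multiple of 512, so the existing 2MB entries stay usable. */
	printf("boundary 1024: aligned=%d\n", is_aligned(1024, block));

	/* Tail split with new_last = 1000: the boundary 1001 is not a
	 * multiple of 512, so the 2MB entry spanning it ends up split into
	 * 4KB PTEs; this is the case the patch queues on remap_list. */
	printf("boundary 1001: aligned=%d\n", is_aligned(1001, block));
	return 0;
}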



---
  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 43 +---
  1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b81233bc9ae..aa2996d6f818 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1104,26 +1104,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
  }
  
  static int

-svm_range_split_tail(struct svm_range *prange,
-uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+struct list_head *insert_list, struct list_head *remap_list)
  {
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
  
-	if (!r)

+   if (!r) {
list_add(&tail->list, insert_list);
+   if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+   list_add(&tail->update_list, remap_list);
+   }
return r;
  }
  
  static int

-svm_range_split_head(struct svm_range *prange,
-uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+struct list_head *insert_list, struct list_head *remap_list)
  {
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
  
-	if (!r)

+   if (!r) {
list_add(&head->list, insert_list);
+   if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+   list_add(&head->update_list, remap_list);
+   }
return r;
  }
  
@@ -2113,7 +2119,7 @@ static int

  svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
  uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
  struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
+ struct list_head *remove_list, struct list_head *remap_list)
  {
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
@@ -2129,6 +2135,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
INIT_LIST_HEAD(&new_list);
+   INIT_LIST_HEAD(remap_list);
  
  	node = interval_tree_iter_first(&svms->objects, start, last);

while (node) {
@@ -2153,6 +2160,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
struct svm_range *old = prange;
  
  			prange = svm_range_clone(old);

+
if (!prange) {
r = -ENOMEM;
goto out;
@@ -2161,18 +2169,17 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
list_add(&old->update_list, remove_list);
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
-
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
@@ -3565,6 +3572,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct list_head update_list;
struct list_head insert_list;
struct list_head remove_list;
+   struct list_head remap_list;
struct svm_range_list *svms;
struct svm_range *prange;
struct svm_range *next;
@@ -3596,7 +3604,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
  
  	/* Add new range and split existing ranges as needed */

r = svm_range_add(p, start, size, nattr, attrs, &update_list,
- &inser
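
The hunks above put each newly split range on two lists at once through two
different list nodes embedded in the range (tail->list on insert_list and
tail->update_list on remap_list). Below is a minimal standalone sketch of that
intrusive-list pattern; the list helpers are simplified local stand-ins for
the kernel's, and struct range is a hypothetical substitute for struct
svm_range.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's list_head helpers. */
struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Like struct svm_range, the element embeds one list node per list it can be
 * on, so the same range can sit on insert_list and remap_list simultaneously. */
struct range {
	unsigned long start, last;
	struct list_head list;		/* membership in insert_list */
	struct list_head update_list;	/* membership in remap_list  */
};

int main(void)
{
	struct list_head insert_list, remap_list;
	struct range tail = { .start = 1001, .last = 2047 };

	init_list_head(&insert_list);
	init_list_head(&remap_list);

	list_add(&tail.list, &insert_list);		/* always inserted           */
	list_add(&tail.update_list, &remap_list);	/* only when split unaligned */

	struct range *r = container_of(remap_list.next, struct range, update_list);
	printf("remap range [%lu, %lu]\n", r->start, r->last);
	return 0;
}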

Re: [PATCH] drm/amdkfd: remap unaligned svm ranges that have split

2023-10-19 Thread Felix Kuehling



On 2023-10-19 16:53, Philip Yang wrote:



On 2023-10-19 16:05, Felix Kuehling wrote:


On 2023-10-18 18:26, Alex Sierra wrote:

Split SVM ranges that have been mapped with 2MB page table entries
need to be remapped if the split happened at a non-aligned VA.
[WHY]:
This condition causes the 2MB page table entries to be split into 4KB
PTEs.

Signed-off-by: Alex Sierra 
---
  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 45 +---
  1 file changed, 34 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b81233bc9ae..1dd9a1cf2358 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1104,26 +1104,34 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,

  }
    static int
-svm_range_split_tail(struct svm_range *prange,
- uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+ struct list_head *insert_list, struct list_head *remap_list)

  {
  struct svm_range *tail;
  int r = svm_range_split(prange, prange->start, new_last, &tail);
  -    if (!r)
+    if (!r) {
  list_add(&tail->list, insert_list);
+    if (!IS_ALIGNED(tail->last + 1 - tail->start,
+    1UL << tail->granularity))


I'm not sure about this condition. I thought this condition should be 
about the point where the range is split, not the size of it. So my 
understanding is that this should be


    if (!IS_ALIGNED(new_last+1, 1UL << prange->granularity))


I think prange->granularity is not always 9 (512 pages), so we should
check that the original prange size is larger than 512:


  if (!IS_ALIGNED(new_last + 1, 512) && tail->last - prange->start >= 512)


That's if you only want to protect against splitting of 2MB pages. If 
you also want to protect against splitting of fragments (together with 
your WIP patch series for the mapping bitmap), then we should use 
granularity.


Regards,
  Felix
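
To make the variants under discussion concrete, here is a small standalone
sketch (plain C, not driver code). struct range, remap_v1(),
remap_split_point() and remap_512() are hypothetical stand-ins: start/last are
page numbers and granularity is a shift, so granularity 9 means 512 pages,
i.e. one 2MB block with 4KB base pages.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, last; unsigned int granularity; };

static bool is_aligned(uint64_t v, uint64_t a) { return (v & (a - 1)) == 0; }

/* 1) Condition in the posted patch: the size of the new tail is not a
 *    multiple of the granularity block. */
static bool remap_v1(const struct range *tail)
{
	return !is_aligned(tail->last + 1 - tail->start, 1UL << tail->granularity);
}

/* 2) Suggested alternative: test the split point itself, independent of how
 *    large the resulting piece happens to be. */
static bool remap_split_point(const struct range *prange, uint64_t new_last)
{
	return !is_aligned(new_last + 1, 1UL << prange->granularity);
}

/* 3) Variant with a hard-coded 512-page (2MB) block, taken only when the
 *    original range was large enough to have used 2MB entries at all. */
static bool remap_512(const struct range *prange, const struct range *tail,
		      uint64_t new_last)
{
	return !is_aligned(new_last + 1, 512) && tail->last - prange->start >= 512;
}

int main(void)
{
	/* Original range [100, 1123] (page numbers) split at new_last = 611.
	 * The new tail is exactly 512 pages long, so the size-based check (1)
	 * passes even though the split point itself is not 2MB aligned. */
	struct range prange = { 100, 611, 9 };	/* piece kept by the split  */
	struct range tail = { 612, 1123, 9 };	/* new tail after the split */
	uint64_t new_last = 611;

	printf("v1=%d split_point=%d hard512=%d\n",
	       remap_v1(&tail),
	       remap_split_point(&prange, new_last),
	       remap_512(&prange, &tail, new_last));	/* v1=0 split_point=1 hard512=1 */
	return 0;
}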






+ list_add(&tail->update_list, remap_list);
+    }
  return r;
  }
    static int
-svm_range_split_head(struct svm_range *prange,
- uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+ struct list_head *insert_list, struct list_head *remap_list)

  {
  struct svm_range *head;
  int r = svm_range_split(prange, new_start, prange->last, &head);
  -    if (!r)
+    if (!r) {
  list_add(&head->list, insert_list);
+    if (!IS_ALIGNED(head->last + 1 - head->start,
+    1UL << head->granularity))


Similar as above.

    if (!IS_ALIGNED(new_start, 1UL << prange->granularity))


  if (!IS_ALIGNED(new_start, 512) && prange->last - head->start >= 512)




Regards,
  Felix



+ list_add(&head->update_list, remap_list);
+    }
  return r;
  }
  @@ -2113,7 +2121,7 @@ static int
  svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
    uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
    struct list_head *update_list, struct list_head *insert_list,

-  struct list_head *remove_list)
+  struct list_head *remove_list, struct list_head *remap_list)
  {
  unsigned long last = start + size - 1UL;
  struct svm_range_list *svms = &p->svms;
@@ -2129,6 +2137,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,

  INIT_LIST_HEAD(insert_list);
  INIT_LIST_HEAD(remove_list);
  INIT_LIST_HEAD(&new_list);
+    INIT_LIST_HEAD(remap_list);
    node = interval_tree_iter_first(&svms->objects, start, last);
  while (node) {
@@ -2153,6 +2162,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,

  struct svm_range *old = prange;
    prange = svm_range_clone(old);
+


unnecessary change.

Regards,

Philip


  if (!prange) {
  r = -ENOMEM;
  goto out;
@@ -2161,18 +2171,17 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,

  list_add(&old->update_list, remove_list);
  list_add(&prange->list, insert_list);
  list_add(&prange->update_list, update_list);
-
  if (node->start < start) {
  pr_debug("change old range start\n");
  r = svm_range_split_head(prange, start,
- insert_list);
+ insert_list, remap_list);
  if (r)
  goto out;
  }
  if (node->last > last) {
  pr_debug("change old range last\n");
  r = svm_range_split_tail(prange, last,
- insert_list);
+ insert_list, remap_list);
  if (r)
  goto out;
  }
@@ -3565,6 +3574

Re: [PATCH] drm/amdkfd: remap unaligned svm ranges that have split

2023-10-19 Thread Philip Yang

  


On 2023-10-19 16:05, Felix Kuehling wrote:

On 2023-10-18 18:26, Alex Sierra wrote:

Split SVM ranges that have been mapped with 2MB page table entries
need to be remapped if the split happened at a non-aligned VA.
[WHY]:
This condition causes the 2MB page table entries to be split into 4KB
PTEs.

Signed-off-by: Alex Sierra 
---
  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 45 +---
  1 file changed, 34 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b81233bc9ae..1dd9a1cf2358 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1104,26 +1104,34 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
  }

  static int
-svm_range_split_tail(struct svm_range *prange,
- uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+ struct list_head *insert_list, struct list_head *remap_list)
  {
  struct svm_range *tail;
  int r = svm_range_split(prange, prange->start, new_last, &tail);

-    if (!r)
+    if (!r) {
  list_add(&tail->list, insert_list);
+    if (!IS_ALIGNED(tail->last + 1 - tail->start,
+    1UL << tail->granularity))


I'm not sure about this condition. I thought this condition should
be about the point where the range is split, not the size of it.
So my understanding is that this should be

    if (!IS_ALIGNED(new_last+1, 1UL << prange->granularity))


I think prange->granularity is not always 9 (512 pages), so we should
check that the original prange size is larger than 512:

  if (!IS_ALIGNED(new_last + 1, 512) && tail->last - prange->start >= 512)


+    list_add(&tail->update_list, remap_list);
+    }
  return r;
  }

  static int
-svm_range_split_head(struct svm_range *prange,
- uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+ struct list_head *insert_list, struct list_head *remap_list)
  {
  struct svm_range *head;
  int r = svm_range_split(prange, new_start, prange->last, &head);

-    if (!r)
+    if (!r) {
  list_add(&head->list, insert_list);
+    if (!IS_ALIGNED(head->last + 1 - head->start,
+    1UL << head->granularity))


Similar as above.

    if (!IS_ALIGNED(new_start, 1UL << prange->granularity))


  if (!IS_ALIGNED(new_start, 512) && prange->last - head->start >= 512)


Regards,
  Felix


+    list_add(&head->update_list, remap_list);
+    }
  return r;
  }

@@ -2113,7 +2121,7 @@ static int
  svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
    uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
    struct list_head *update_list, struct list_head *insert_list,
-  struct list_head *remove_list)
+  struct list_head *remove_list, struct list_head *remap_list)
  {
  unsigned long last = start + size - 1UL;
  struct svm_range_list *svms = &p->svms;
@@ -2129,6 +2137,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
  INIT_LIST_HEAD(insert_list);
  INIT_LIST_HEAD(remove_list);
  INIT_LIST_HEAD(&new_list);
+    INIT_LIST_HEAD(remap_list);

  node = interval_tree_iter_first(&svms->objects, start, last);
  whi

[PATCH] drm/amdkfd: remap unaligned svm ranges that have split

2023-10-19 Thread Alex Sierra
Split SVM ranges that have been mapped with 2MB page table entries
need to be remapped if the split happened at a non-aligned VA.
[WHY]:
This condition causes the 2MB page table entries to be split into 4KB
PTEs.

Signed-off-by: Alex Sierra 
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 43 +---
 1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b81233bc9ae..aa2996d6f818 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1104,26 +1104,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange,
-uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+struct list_head *insert_list, struct list_head *remap_list)
 {
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
 
-   if (!r)
+   if (!r) {
list_add(&tail->list, insert_list);
+   if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+   list_add(&tail->update_list, remap_list);
+   }
return r;
 }
 
 static int
-svm_range_split_head(struct svm_range *prange,
-uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+struct list_head *insert_list, struct list_head *remap_list)
 {
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
 
-   if (!r)
+   if (!r) {
list_add(&head->list, insert_list);
+   if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+   list_add(&head->update_list, remap_list);
+   }
return r;
 }
 
@@ -2113,7 +2119,7 @@ static int
 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
  uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
  struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
+ struct list_head *remove_list, struct list_head *remap_list)
 {
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
@@ -2129,6 +2135,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
INIT_LIST_HEAD(&new_list);
+   INIT_LIST_HEAD(remap_list);
 
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
@@ -2153,6 +2160,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
struct svm_range *old = prange;
 
prange = svm_range_clone(old);
+
if (!prange) {
r = -ENOMEM;
goto out;
@@ -2161,18 +2169,17 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
list_add(&old->update_list, remove_list);
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
-
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
@@ -3565,6 +3572,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct list_head update_list;
struct list_head insert_list;
struct list_head remove_list;
+   struct list_head remap_list;
struct svm_range_list *svms;
struct svm_range *prange;
struct svm_range *next;
@@ -3596,7 +3604,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
/* Add new range and split existing ranges as needed */
r = svm_range_add(p, start, size, nattr, attrs, &update_list,
- &insert_list, &remove_list);
+ &insert_list,

Re: [PATCH] drm/amdkfd: remap unaligned svm ranges that have split

2023-10-19 Thread Felix Kuehling



On 2023-10-18 18:26, Alex Sierra wrote:

Split SVM ranges that have been mapped with 2MB page table entries
need to be remapped if the split happened at a non-aligned VA.
[WHY]:
This condition causes the 2MB page table entries to be split into 4KB
PTEs.

Signed-off-by: Alex Sierra 
---
  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 45 +---
  1 file changed, 34 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b81233bc9ae..1dd9a1cf2358 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1104,26 +1104,34 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
  }
  
  static int

-svm_range_split_tail(struct svm_range *prange,
-uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+struct list_head *insert_list, struct list_head *remap_list)
  {
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
  
-	if (!r)

+   if (!r) {
list_add(&tail->list, insert_list);
+   if (!IS_ALIGNED(tail->last + 1 - tail->start,
+   1UL << tail->granularity))


I'm not sure about this condition. I thought this condition should be 
about the point where the range is split, not the size of it. So my 
understanding is that this should be


if (!IS_ALIGNED(new_last+1, 1UL << prange->granularity))



+   list_add(&tail->update_list, remap_list);
+   }
return r;
  }
  
  static int

-svm_range_split_head(struct svm_range *prange,
-uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+struct list_head *insert_list, struct list_head *remap_list)
  {
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
  
-	if (!r)

+   if (!r) {
list_add(&head->list, insert_list);
+   if (!IS_ALIGNED(head->last + 1 - head->start,
+   1UL << head->granularity))


Similar as above.

if (!IS_ALIGNED(new_start, 1UL << prange->granularity))

Regards,
  Felix



+   list_add(&head->update_list, remap_list);
+   }
return r;
  }
  
@@ -2113,7 +2121,7 @@ static int

  svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
  uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
  struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
+ struct list_head *remove_list, struct list_head *remap_list)
  {
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
@@ -2129,6 +2137,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
INIT_LIST_HEAD(&new_list);
+   INIT_LIST_HEAD(remap_list);
  
  	node = interval_tree_iter_first(&svms->objects, start, last);

while (node) {
@@ -2153,6 +2162,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
struct svm_range *old = prange;
  
  			prange = svm_range_clone(old);

+
if (!prange) {
r = -ENOMEM;
goto out;
@@ -2161,18 +2171,17 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
list_add(&old->update_list, remove_list);
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
-
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
@@ -3565,6 +3574,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct list_head update_list;
struct list_head insert_li

[PATCH] drm/amdkfd: remap unaligned svm ranges that have split

2023-10-18 Thread Alex Sierra
Split SVM ranges that have been mapped with 2MB page table entries
need to be remapped if the split happened at a non-aligned VA.
[WHY]:
This condition causes the 2MB page table entries to be split into 4KB
PTEs.

Signed-off-by: Alex Sierra 
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 45 +---
 1 file changed, 34 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b81233bc9ae..1dd9a1cf2358 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1104,26 +1104,34 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange,
-uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+struct list_head *insert_list, struct list_head *remap_list)
 {
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
 
-   if (!r)
+   if (!r) {
list_add(&tail->list, insert_list);
+   if (!IS_ALIGNED(tail->last + 1 - tail->start,
+   1UL << tail->granularity))
+   list_add(&tail->update_list, remap_list);
+   }
return r;
 }
 
 static int
-svm_range_split_head(struct svm_range *prange,
-uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+struct list_head *insert_list, struct list_head *remap_list)
 {
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
 
-   if (!r)
+   if (!r) {
list_add(&head->list, insert_list);
+   if (!IS_ALIGNED(head->last + 1 - head->start,
+   1UL << head->granularity))
+   list_add(&head->update_list, remap_list);
+   }
return r;
 }
 
@@ -2113,7 +2121,7 @@ static int
 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
  uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
  struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
+ struct list_head *remove_list, struct list_head *remap_list)
 {
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
@@ -2129,6 +2137,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
INIT_LIST_HEAD(&new_list);
+   INIT_LIST_HEAD(remap_list);
 
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
@@ -2153,6 +2162,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
struct svm_range *old = prange;
 
prange = svm_range_clone(old);
+
if (!prange) {
r = -ENOMEM;
goto out;
@@ -2161,18 +2171,17 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
list_add(&old->update_list, remove_list);
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
-
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
-insert_list);
+insert_list, remap_list);
if (r)
goto out;
}
@@ -3565,6 +3574,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct list_head update_list;
struct list_head insert_list;
struct list_head remove_list;
+   struct list_head remap_list;
struct svm_range_list *svms;
struct svm_range *prange;
struct svm_range *next;
@@ -3596,7 +3606,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
/* Add new range and split existing ranges as needed */
r = svm_range_add(p, start, size, nattr, attrs, &update_list,