Add a device generic type case to the migrate_vma_pages and
migrate_vma_check_page helpers.
Both generic and private device types use the same
conditions to decide whether to migrate pages from/to device
memory.

Signed-off-by: Alex Sierra <alex.sie...@amd.com>
---
 mm/migrate.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 8c2430d3e77b..7bac06ae831e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2602,7 +2602,7 @@ static bool migrate_vma_check_page(struct page *page)
                 * FIXME proper solution is to rework migration_entry_wait() so
                 * it does not need to take a reference on page.
                 */
-               return is_device_private_page(page);
+               return is_device_page(page);
        }
 
        /* For file back page */
@@ -2891,7 +2891,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
  *     handle_pte_fault()
  *       do_anonymous_page()
  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
- * private page.
+ * private or generic page.
  */
 static void migrate_vma_insert_page(struct migrate_vma *migrate,
                                    unsigned long addr,
@@ -2956,13 +2956,11 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
         */
        __SetPageUptodate(page);
 
-       if (is_zone_device_page(page)) {
-               if (is_device_private_page(page)) {
-                       swp_entry_t swp_entry;
+       if (is_device_private_page(page)) {
+               swp_entry_t swp_entry;
 
-                       swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
-                       entry = swp_entry_to_pte(swp_entry);
-               }
+               swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
+               entry = swp_entry_to_pte(swp_entry);
        } else {
                entry = mk_pte(page, vma->vm_page_prot);
                if (vma->vm_flags & VM_WRITE)
@@ -3064,10 +3062,10 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                mapping = page_mapping(page);
 
                if (is_zone_device_page(newpage)) {
-                       if (is_device_private_page(newpage)) {
+                       if (is_device_page(newpage)) {
                                /*
-                                * For now only support private anonymous when
-                                * migrating to un-addressable device memory.
+                                * For now only support private and generic
+                                * anonymous when migrating to device memory.
                                 */
                                if (mapping) {
                                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-- 
2.32.0

Reply via email to