Add the device generic type case to the migrate_vma_pages and
migrate_vma_check_page helpers.
Both generic and private device types have the same conditions
for deciding whether to migrate pages from/to device memory.

Signed-off-by: Alex Sierra <alex.sie...@amd.com>
---
 mm/migrate.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index e3a10e2a1bb3..f9e6bfa2867c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2565,7 +2565,7 @@ static bool migrate_vma_check_page(struct page *page)
                 * FIXME proper solution is to rework migration_entry_wait() so
                 * it does not need to take a reference on page.
                 */
-               return is_device_private_page(page);
+               return is_device_page(page);
        }
 
        /* For file back page */
@@ -2854,7 +2854,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
  *     handle_pte_fault()
  *       do_anonymous_page()
  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
- * private page.
+ * private or generic page.
  */
 static void migrate_vma_insert_page(struct migrate_vma *migrate,
                                    unsigned long addr,
@@ -2925,10 +2925,14 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
                        swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
                        entry = swp_entry_to_pte(swp_entry);
+               } else if (is_device_page(page)) {
+                       entry = mk_pte(page, vma->vm_page_prot);
+                       if (vma->vm_flags & VM_WRITE)
+                               entry = pte_mkwrite(pte_mkdirty(entry));
                } else {
                        /*
-                        * For now we only support migrating to un-addressable
-                        * device memory.
+                        * We support migrating to private and generic
+                        * types for device zone memory.
                         */
                        pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
                        goto abort;
@@ -3034,10 +3038,10 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                mapping = page_mapping(page);
 
                if (is_zone_device_page(newpage)) {
-                       if (is_device_private_page(newpage)) {
+                       if (is_device_page(newpage)) {
                                /*
-                                * For now only support private anonymous when
-                                * migrating to un-addressable device memory.
+                                * For now only support private and generic
+                                * anonymous when migrating to device memory.
                                 */
                                if (mapping) {
                                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-- 
2.32.0

Reply via email to