Hold the deferred-I/O state in struct fb_deferred_io_state. Allocate an
instance as part of initializing deferred I/O and remove it only after
the final mapping has been closed. If the fb_info and the contained
deferred I/O go away in the meantime, clear struct
fb_deferred_io_state.info to invalidate the mapping. Any further access
then results in a SIGBUS signal.
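
The pattern, in rough outline (all names in this sketch are made up for
illustration and are not part of the patch): the refcounted state
object outlives the device, and every access path revalidates the back
pointer under the lock before touching device state.

    #include <linux/kref.h>
    #include <linux/mutex.h>

    struct device_state;                        /* stands in for struct fb_info */
    int device_do_io(struct device_state *dev); /* hypothetical I/O helper */

    struct bridge_state {
            struct kref ref;          /* one ref per mapping, plus one for init */
            struct mutex lock;        /* protects dev */
            struct device_state *dev; /* cleared on hot-unplug */
    };

    static int bridge_access(struct bridge_state *state)
    {
            int ret;

            mutex_lock(&state->lock);
            if (!state->dev)
                    ret = -ENODEV; /* device gone; fault path returns SIGBUS */
            else
                    ret = device_do_io(state->dev);
            mutex_unlock(&state->lock);

            return ret;
    }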

Fixes a long-standing problem where a device hot-unplug happens while
user space still holds an active mapping of the graphics memory. The
hot-unplug frees the instance of struct fb_info, so any later access
to the mapping operates on undefined state.
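
For reference, a rough user-space sketch of the scenario (illustrative
only, not part of the patch; the device path and the unplug timing are
assumptions): map the framebuffer, hot-unplug the device, then touch a
page that has not been faulted in yet. Without this patch the fault
path operates on the freed fb_info; with it, the process receives
SIGBUS.

    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void on_sigbus(int sig)
    {
            (void)sig;
            _exit(0); /* expected outcome with this patch applied */
    }

    int main(void)
    {
            unsigned char *fb;
            int fd = open("/dev/fb0", O_RDWR);

            if (fd < 0)
                    return 1;
            fb = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (fb == MAP_FAILED)
                    return 1;

            signal(SIGBUS, on_sigbus);
            puts("unplug the device now, then press Enter");
            getchar();

            fb[0] = 0xff; /* first touch faults; SIGBUS if the device is gone */
            return 1;
    }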

Signed-off-by: Thomas Zimmermann <[email protected]>
Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support")
Cc: Helge Deller <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: <[email protected]> # v2.6.22+
---
 drivers/video/fbdev/core/fb_defio.c | 178 ++++++++++++++++++++++------
 include/linux/fb.h                  |   4 +-
 2 files changed, 145 insertions(+), 37 deletions(-)

diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 8df2e51e3390..0b099a89a823 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -24,6 +24,75 @@
 #include <linux/rmap.h>
 #include <linux/pagemap.h>
 
+/*
+ * struct fb_deferred_io_state
+ */
+
+struct fb_deferred_io_state {
+       struct kref ref;
+
+       struct mutex lock; /* mutex that protects the pageref list */
+       /* fields protected by lock */
+       struct fb_info *info;
+};
+
+static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
+{
+       struct fb_deferred_io_state *fbdefio_state;
+
+       fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL);
+       if (!fbdefio_state)
+               return NULL;
+
+       kref_init(&fbdefio_state->ref);
+       mutex_init(&fbdefio_state->lock);
+
+       return fbdefio_state;
+}
+
+static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
+{
+       mutex_destroy(&fbdefio_state->lock);
+
+       kfree(fbdefio_state);
+}
+
+static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
+{
+       kref_get(&fbdefio_state->ref);
+}
+
+static void __fb_deferred_io_state_release(struct kref *ref)
+{
+       struct fb_deferred_io_state *fbdefio_state =
+               container_of(ref, struct fb_deferred_io_state, ref);
+
+       fb_deferred_io_state_release(fbdefio_state);
+}
+
+static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
+{
+       kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
+}
+
+/*
+ * struct vm_operations_struct
+ */
+
+static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
+{
+       struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
+
+       fb_deferred_io_state_get(fbdefio_state);
+}
+
+static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
+{
+       struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
+
+       fb_deferred_io_state_put(fbdefio_state);
+}
+
 static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
@@ -121,25 +190,46 @@ static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
 /* this is to find and return the vmalloc-ed fb pages */
 static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 {
+       struct fb_info *info;
        unsigned long offset;
        struct page *page;
-       struct fb_info *info = vmf->vma->vm_private_data;
+       vm_fault_t ret;
+       struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
+
+       mutex_lock(&fbdefio_state->lock);
+
+       info = fbdefio_state->info;
+       if (!info) {
+               ret = VM_FAULT_SIGBUS; /* our device is gone */
+               goto err_mutex_unlock;
+       }
 
        offset = vmf->pgoff << PAGE_SHIFT;
-       if (offset >= info->fix.smem_len)
-               return VM_FAULT_SIGBUS;
+       if (offset >= info->fix.smem_len) {
+               ret = VM_FAULT_SIGBUS;
+               goto err_mutex_unlock;
+       }
 
        page = fb_deferred_io_get_page(info, offset);
-       if (!page)
-               return VM_FAULT_SIGBUS;
+       if (!page) {
+               ret = VM_FAULT_SIGBUS;
+               goto err_mutex_unlock;
+       }
 
        if (!vmf->vma->vm_file)
                fb_err(info, "no mapping available\n");
 
        BUG_ON(!info->fbdefio->mapping);
 
+       mutex_unlock(&fbdefio_state->lock);
+
        vmf->page = page;
+
        return 0;
+
+err_mutex_unlock:
+       mutex_unlock(&fbdefio_state->lock);
+       return ret;
 }
 
 int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -166,15 +256,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
  * Adds a page to the dirty list. Call this from struct
  * vm_operations_struct.page_mkwrite.
  */
-static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
-                                           struct page *page)
+static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
+                                           unsigned long offset, struct page *page)
 {
-       struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct fb_info *info;
+       struct fb_deferred_io *fbdefio;
        struct fb_deferred_io_pageref *pageref;
        vm_fault_t ret;
 
        /* protect against the workqueue changing the page list */
-       mutex_lock(&fbdefio->lock);
+       mutex_lock(&fbdefio_state->lock);
+
+       info = fbdefio_state->info;
+       if (!info) {
+               ret = VM_FAULT_SIGBUS; /* our device is gone */
+               goto err_mutex_unlock;
+       }
+
+       fbdefio = info->fbdefio;
 
        pageref = fb_deferred_io_pageref_get(info, offset, page);
        if (WARN_ON_ONCE(!pageref)) {
@@ -192,50 +291,38 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
         */
        lock_page(pageref->page);
 
-       mutex_unlock(&fbdefio->lock);
+       mutex_unlock(&fbdefio_state->lock);
 
        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return VM_FAULT_LOCKED;
 
 err_mutex_unlock:
-       mutex_unlock(&fbdefio->lock);
+       mutex_unlock(&fbdefio_state->lock);
        return ret;
 }
 
-/*
- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
- * @fb_info: The fbdev info structure
- * @vmf: The VM fault
- *
- * This is a callback we get when userspace first tries to
- * write to the page. We schedule a workqueue. That workqueue
- * will eventually mkclean the touched pages and execute the
- * deferred framebuffer IO. Then if userspace touches a page
- * again, we repeat the same scheme.
- *
- * Returns:
- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
- */
-static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
+                                             struct vm_fault *vmf)
 {
        unsigned long offset = vmf->pgoff << PAGE_SHIFT;
        struct page *page = vmf->page;
 
        file_update_time(vmf->vma->vm_file);
 
-       return fb_deferred_io_track_page(info, offset, page);
+       return fb_deferred_io_track_page(fbdefio_state, offset, page);
 }
 
-/* vm_ops->page_mkwrite handler */
 static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 {
-       struct fb_info *info = vmf->vma->vm_private_data;
+       struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
 
-       return fb_deferred_io_page_mkwrite(info, vmf);
+       return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
 }
 
 static const struct vm_operations_struct fb_deferred_io_vm_ops = {
+       .open           = fb_deferred_io_vm_open,
+       .close          = fb_deferred_io_vm_close,
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
 };
@@ -252,7 +339,10 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
        vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        if (!(info->flags & FBINFO_VIRTFB))
                vm_flags_set(vma, VM_IO);
-       vma->vm_private_data = info;
+       vma->vm_private_data = info->fbdefio_state;
+
+       fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
@@ -263,9 +353,10 @@ static void fb_deferred_io_work(struct work_struct *work)
        struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
        struct fb_deferred_io_pageref *pageref, *next;
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
 
        /* here we wrprotect the page's mappings, then do all deferred IO. */
-       mutex_lock(&fbdefio->lock);
+       mutex_lock(&fbdefio_state->lock);
 #ifdef CONFIG_MMU
        list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
                struct page *page = pageref->page;
@@ -283,12 +374,13 @@ static void fb_deferred_io_work(struct work_struct *work)
        list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
                fb_deferred_io_pageref_put(pageref, info);
 
-       mutex_unlock(&fbdefio->lock);
+       mutex_unlock(&fbdefio_state->lock);
 }
 
 int fb_deferred_io_init(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct fb_deferred_io_state *fbdefio_state;
        struct fb_deferred_io_pageref *pagerefs;
        unsigned long npagerefs;
        int ret;
@@ -298,7 +390,11 @@ int fb_deferred_io_init(struct fb_info *info)
        if (WARN_ON(!info->fix.smem_len))
                return -EINVAL;
 
-       mutex_init(&fbdefio->lock);
+       fbdefio_state = fb_deferred_io_state_alloc();
+       if (!fbdefio_state)
+               return -ENOMEM;
+       fbdefio_state->info = info;
+
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagereflist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -315,10 +411,12 @@ int fb_deferred_io_init(struct fb_info *info)
        info->npagerefs = npagerefs;
        info->pagerefs = pagerefs;
 
+       info->fbdefio_state = fbdefio_state;
+
        return 0;
 
 err:
-       mutex_destroy(&fbdefio->lock);
+       fb_deferred_io_state_release(fbdefio_state);
        return ret;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
@@ -352,11 +450,19 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
 
        fb_deferred_io_lastclose(info);
 
+       info->fbdefio_state = NULL;
+
+       mutex_lock(&fbdefio_state->lock);
+       fbdefio_state->info = NULL;
+       mutex_unlock(&fbdefio_state->lock);
+
+       fb_deferred_io_state_put(fbdefio_state);
+
        kvfree(info->pagerefs);
-       mutex_destroy(&fbdefio->lock);
        fbdefio->mapping = NULL;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 65fb70382675..0bf3f1a5cf1e 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -217,13 +217,14 @@ struct fb_deferred_io {
        unsigned long delay;
        bool sort_pagereflist; /* sort pagelist by offset */
        int open_count; /* number of opened files; protected by fb_info lock */
-       struct mutex lock; /* mutex that protects the pageref list */
        struct list_head pagereflist; /* list of pagerefs for touched pages */
        struct address_space *mapping; /* page cache object for fb device */
        /* callback */
        struct page *(*get_page)(struct fb_info *info, unsigned long offset);
        void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
 };
+
+struct fb_deferred_io_state;
 #endif
 
 /*
@@ -486,6 +487,7 @@ struct fb_info {
        unsigned long npagerefs;
        struct fb_deferred_io_pageref *pagerefs;
        struct fb_deferred_io *fbdefio;
+       struct fb_deferred_io_state *fbdefio_state;
 #endif
 
        const struct fb_ops *fbops;
-- 
2.52.0