[PATCH 5/7] HMM: add per mirror page table.

2014-12-22 Thread j.glisse
From: Jérôme Glisse <jgli...@redhat.com>

This patch adds the per-mirror page table. It also propagates CPU page table
updates to this per-mirror page table using the mmu_notifier callbacks. All
updates are contextualized with an HMM event structure that conveys all the
information the device driver needs to take proper action (update its own mmu
to reflect the changes and schedule the proper flushing).

Core HMM is responsible for updating the per-mirror page table once the device
driver is done with its update. Most importantly, HMM will properly propagate
the HMM page table dirty bit to the underlying page.
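
As a rough sketch of that dirty-bit propagation (illustrative only, not this
patch's actual mm/hmm.c code; hmm_pte_test_dirty() is assumed to come from
the <linux/hmm_pt.h> helpers):

    /* Illustrative only: dirty the backing page before tearing down a
     * mirror pte, so a device write is never lost.
     */
    static void example_mirror_clear_pte(dma_addr_t *hmm_pte,
                                         struct page *page)
    {
        if (hmm_pte_test_dirty(hmm_pte))    /* assumed hmm_pt.h helper */
            set_page_dirty(page);           /* regular mm dirty tracking */
        *hmm_pte = 0;                       /* invalidate the mirror entry */
    }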

Signed-off-by: Jérôme Glisse <jgli...@redhat.com>
Signed-off-by: Sherry Cheung <sche...@nvidia.com>
Signed-off-by: Subhash Gutti <sgu...@nvidia.com>
Signed-off-by: Mark Hairgrove <mhairgr...@nvidia.com>
Signed-off-by: John Hubbard <jhubb...@nvidia.com>
Signed-off-by: Jatin Kumar <jaku...@nvidia.com>
---
 include/linux/hmm.h | 136 +++
 mm/hmm.c            | 263 +++
 2 files changed, 399 insertions(+)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 8eddc15..dd34572 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -46,12 +46,65 @@
 #include <linux/mmu_notifier.h>
 #include <linux/workqueue.h>
 #include <linux/mman.h>
+#include <linux/hmm_pt.h>
 
 
 struct hmm_device;
 struct hmm_mirror;
+struct hmm_fence;
 struct hmm;
 
+/* hmm_fence - Device driver fence allowing to batch updates and delay waits.
+ *
+ * @mirror: The HMM mirror this fence is associated with.
+ * @list: List of fences.
+ *
+ * Each time HMM calls into a device driver for an update, the device driver
+ * can return a fence which core HMM will wait on. This allows HMM to batch
+ * updates to several different device drivers and then wait for each of them
+ * to complete.
+ *
+ * The hmm_fence structure is intended to be embedded inside a device driver
+ * specific fence structure.
+ */
+struct hmm_fence {
+   struct hmm_mirror   *mirror;
+   struct list_head    list;
+};
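
To illustrate the embedding the comment above describes, a driver-private
fence might look like the following (a hedged sketch; struct my_driver_fence
and its fields are hypothetical):

    /* Hypothetical driver fence embedding struct hmm_fence. */
    struct my_driver_fence {
        struct hmm_fence    base;   /* handed to core HMM */
        u32                 seqno;  /* driver hardware sequence number */
    };

    /* Recover the driver fence from the hmm_fence core HMM passes back. */
    static inline struct my_driver_fence *to_my_fence(struct hmm_fence *f)
    {
        return container_of(f, struct my_driver_fence, base);
    }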
+
+
+/*
+ * hmm_etype - each event is described by a type (enum hmm_etype) associated
+ * with a struct (struct hmm_event).
+ */
+enum hmm_etype {
+   HMM_NONE = 0,
+   HMM_ISDIRTY,
+   HMM_MIGRATE,
+   HMM_MUNMAP,
+   HMM_DEVICE_RFAULT,
+   HMM_DEVICE_WFAULT,
+   HMM_WRITE_PROTECT,
+};
+
+/* struct hmm_event - memory event information.
+ *
+ * @list: So HMM can keep track of all active events.
+ * @start: First address (inclusive).
+ * @end: Last address (exclusive).
+ * @fences: List of device fences associated with this event.
+ * @pte_mask: HMM pte update mask (bit(s) that are still valid).
+ * @etype: Event type (munmap, migrate, truncate, ...).
+ * @backoff: Only meaningful for device page fault.
+ */
+struct hmm_event {
+   struct list_head    list;
+   unsigned long       start;
+   unsigned long       end;
+   struct list_head    fences;
+   dma_addr_t          pte_mask;
+   enum hmm_etype      etype;
+   bool                backoff;
+};
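
The pte_mask field is worth a concrete example: on an update the driver only
keeps the bits the event leaves valid. A minimal sketch, assuming hypothetical
struct my_mirror and my_mirror_pte() driver helpers (not part of this patch):

    /* Sketch: apply an event's pte_mask over the affected range. */
    static void my_apply_event(struct my_mirror *m,
                               const struct hmm_event *event)
    {
        unsigned long addr;

        for (addr = event->start; addr < event->end; addr += PAGE_SIZE) {
            dma_addr_t *pte = my_mirror_pte(m, addr);  /* hypothetical */

            *pte &= event->pte_mask;   /* keep only still-valid bits */
        }
    }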
+
 
 /* hmm_device - Each device must register one and only one hmm_device.
  *
@@ -72,6 +125,87 @@ struct hmm_device_ops {
 * from the mirror page table.
 */
void (*release)(struct hmm_mirror *mirror);
+
+   /* fence_wait() - wait on a device driver fence.
+    *
+    * @fence: The device driver fence struct.
+    * Returns: 0 on success, -EIO on error, -EAGAIN to wait again.
+    *
+    * Called when HMM wants to wait for all operations associated with a
+    * fence to complete (including a device cache flush if the event
+    * mandates it).
+    *
+    * The device driver must free the fence and associated resources if it
+    * returns anything other than -EAGAIN. On -EAGAIN the fence must not be
+    * freed as HMM will call back again.
+    *
+    * Returns an error if the scheduled operation failed or if HMM needs to
+    * wait again:
+    * -EIO    Some input/output error with the device.
+    * -EAGAIN The fence is not yet signaled; HMM reschedules the waiting
+    *         thread.
+    *
+    * All other return values trigger a warning and are transformed to -EIO.
+    */
+   int (*fence_wait)(struct hmm_fence *fence);
+
+   /* fence_ref() - take a reference on a fence structure.
+    *
+    * @fence: Fence structure HMM is referencing.
+    */
+   void (*fence_ref)(struct hmm_fence *fence);
+
+   /* fence_unref() - drop a reference on a fence structure.
+    *
+    * @fence: Fence structure HMM is dropping its reference on.
+    */
+   void (*fence_unref)(struct hmm_fence *fence);
+
+   /* update() - update device mmu following an event.
+    *
+    * @mirror: The mirror that links the process address space with the
+    *          device.
+    * @event: The event that triggered the update.
+    * Returns: Valid fence ptr or NULL on success, otherwise ERR_PTR.
+    *
+    * Called to update the device page table for a range of addresses.
+    * The event type provides the nature of the update:
+    *   - Range is no longer valid (munmap).
+    *   - Range protection changes (mprotect, COW, ...).
+    *   - Range is unmapped (swap, ...).
+    */
+   struct hmm_fence *(*update)(struct hmm_mirror *mirror,
+                               struct hmm_event *event);
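
To make the callback contract above concrete, here is a hedged driver-side
sketch of fence_wait() and update(), reusing the hypothetical
my_driver_fence/to_my_fence() from the earlier sketch; every my_hw_*() helper
is an assumption, only the hmm_* types come from this patch:

    static int my_fence_wait(struct hmm_fence *fence)
    {
        struct my_driver_fence *f = to_my_fence(fence);

        if (!my_hw_seqno_passed(f->seqno)) {
            if (my_hw_channel_dead(f)) {
                my_fence_free(f);   /* anything but -EAGAIN: free it */
                return -EIO;
            }
            return -EAGAIN;         /* not signaled yet; HMM calls back */
        }
        my_fence_free(f);           /* done: free fence and resources */
        return 0;
    }

    static struct hmm_fence *my_update(struct hmm_mirror *mirror,
                                       struct hmm_event *event)
    {
        struct my_driver_fence *f;

        switch (event->etype) {
        case HMM_MUNMAP:
        case HMM_MIGRATE:
            /* Range goes away: queue invalidation of the device ptes. */
            f = my_hw_invalidate(mirror, event->start, event->end);
            break;
        case HMM_WRITE_PROTECT:
            /* Keep the mapping but drop write permission. */
            f = my_hw_write_protect(mirror, event->start, event->end);
            break;
        default:
            return NULL;            /* nothing queued, nothing to wait on */
        }
        if (IS_ERR(f))
            return ERR_CAST(f);     /* propagate as ERR_PTR per the doc */
        return f ? &f->base : NULL;
    }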
