The PowerNV-specific table_group_ops are defined in powernv/pci-ioda.c.
The pSeries-specific table_group_ops, however, sit in the generic
arch/powerpc/kernel/iommu.c. Move them to where they actually belong
(pseries/iommu.c).

Until now these functions were built even when only CONFIG_PPC_POWERNV
is enabled, despite being unused on PowerNV.

Only code movement, no functional changes intended.

Signed-off-by: Shivaprasad G Bhat <sb...@linux.ibm.com>
---
 arch/powerpc/include/asm/iommu.h       |    4 +
 arch/powerpc/kernel/iommu.c            |  149 --------------------------------
 arch/powerpc/platforms/pseries/iommu.c |  144 +++++++++++++++++++++++++++++++
 3 files changed, 149 insertions(+), 148 deletions(-)

diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 026695943550..744cc5fc22d3 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -156,6 +156,9 @@ extern int iommu_tce_table_put(struct iommu_table *tbl);
 extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
                int nid, unsigned long res_start, unsigned long res_end);
 bool iommu_table_in_use(struct iommu_table *tbl);
+extern void iommu_table_reserve_pages(struct iommu_table *tbl,
+               unsigned long res_start, unsigned long res_end);
+extern void iommu_table_clear(struct iommu_table *tbl);
 
 #define IOMMU_TABLE_GROUP_MAX_TABLES   2
 
@@ -218,7 +221,6 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
 extern void iommu_tce_kill(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages);
 
-extern struct iommu_table_group_ops spapr_tce_table_group_ops;
 #else
 static inline void iommu_register_group(struct iommu_table_group *table_group,
                                        int pci_domain_number,
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index b70b4f93561f..b5febc6c7a5e 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -643,7 +643,7 @@ void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                tbl->it_ops->flush(tbl);
 }
 
-static void iommu_table_clear(struct iommu_table *tbl)
+void iommu_table_clear(struct iommu_table *tbl)
 {
        /*
         * In case of firmware assisted dump system goes through clean
@@ -684,7 +684,7 @@ static void iommu_table_clear(struct iommu_table *tbl)
 #endif
 }
 
-static void iommu_table_reserve_pages(struct iommu_table *tbl,
+void iommu_table_reserve_pages(struct iommu_table *tbl,
                unsigned long res_start, unsigned long res_end)
 {
        int i;
@@ -1102,59 +1102,6 @@ void iommu_tce_kill(struct iommu_table *tbl,
 }
 EXPORT_SYMBOL_GPL(iommu_tce_kill);
 
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
-static int iommu_take_ownership(struct iommu_table *tbl)
-{
-       unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
-       int ret = 0;
-
-       /*
-        * VFIO does not control TCE entries allocation and the guest
-        * can write new TCEs on top of existing ones so iommu_tce_build()
-        * must be able to release old pages. This functionality
-        * requires exchange() callback defined so if it is not
-        * implemented, we disallow taking ownership over the table.
-        */
-       if (!tbl->it_ops->xchg_no_kill)
-               return -EINVAL;
-
-       spin_lock_irqsave(&tbl->large_pool.lock, flags);
-       for (i = 0; i < tbl->nr_pools; i++)
-               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
-
-       if (iommu_table_in_use(tbl)) {
-               pr_err("iommu_tce: it_map is not empty");
-               ret = -EBUSY;
-       } else {
-               memset(tbl->it_map, 0xff, sz);
-       }
-
-       for (i = 0; i < tbl->nr_pools; i++)
-               spin_unlock(&tbl->pools[i].lock);
-       spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
-
-       return ret;
-}
-
-static void iommu_release_ownership(struct iommu_table *tbl)
-{
-       unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
-
-       spin_lock_irqsave(&tbl->large_pool.lock, flags);
-       for (i = 0; i < tbl->nr_pools; i++)
-               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
-
-       memset(tbl->it_map, 0, sz);
-
-       iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
-                       tbl->it_reserved_end);
-
-       for (i = 0; i < tbl->nr_pools; i++)
-               spin_unlock(&tbl->pools[i].lock);
-       spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
-}
-#endif
-
 int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
 {
        /*
@@ -1186,98 +1133,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
 EXPORT_SYMBOL_GPL(iommu_add_device);
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
-/*
- * A simple iommu_table_group_ops which only allows reusing the existing
- * iommu_table. This handles VFIO for POWER7 or the nested KVM.
- * The ops does not allow creating windows and only allows reusing the existing
- * one if it matches table_group->tce32_start/tce32_size/page_shift.
- */
-static unsigned long spapr_tce_get_table_size(__u32 page_shift,
-                                             __u64 window_size, __u32 levels)
-{
-       unsigned long size;
-
-       if (levels > 1)
-               return ~0U;
-       size = window_size >> (page_shift - 3);
-       return size;
-}
-
-static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
-                                  __u32 page_shift, __u64 window_size, __u32 levels,
-                                  struct iommu_table **ptbl)
-{
-       struct iommu_table *tbl = table_group->tables[0];
-
-       if (num > 0)
-               return -EPERM;
-
-       if (tbl->it_page_shift != page_shift ||
-           tbl->it_size != (window_size >> page_shift) ||
-           tbl->it_indirect_levels != levels - 1)
-               return -EINVAL;
-
-       *ptbl = iommu_tce_table_get(tbl);
-       return 0;
-}
-
-static long spapr_tce_set_window(struct iommu_table_group *table_group,
-                                int num, struct iommu_table *tbl)
-{
-       return tbl == table_group->tables[num] ? 0 : -EPERM;
-}
-
-static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
-{
-       return 0;
-}
-
-static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
-{
-       int i, j, rc = 0;
-
-       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
-               struct iommu_table *tbl = table_group->tables[i];
-
-               if (!tbl || !tbl->it_map)
-                       continue;
-
-               rc = iommu_take_ownership(tbl);
-               if (!rc)
-                       continue;
-
-               for (j = 0; j < i; ++j)
-                       iommu_release_ownership(table_group->tables[j]);
-               return rc;
-       }
-       return 0;
-}
-
-static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
-{
-       int i;
-
-       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
-               struct iommu_table *tbl = table_group->tables[i];
-
-               if (!tbl)
-                       continue;
-
-               iommu_table_clear(tbl);
-               if (tbl->it_map)
-                       iommu_release_ownership(tbl);
-       }
-}
-
-struct iommu_table_group_ops spapr_tce_table_group_ops = {
-       .get_table_size = spapr_tce_get_table_size,
-       .create_table = spapr_tce_create_table,
-       .set_window = spapr_tce_set_window,
-       .unset_window = spapr_tce_unset_window,
-       .take_ownership = spapr_tce_take_ownership,
-       .release_ownership = spapr_tce_release_ownership,
-};
-
 /*
  * A simple iommu_ops to allow less cruft in generic VFIO code.
  */
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b1e6d275cda9..bbe7eaacd829 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -54,6 +54,57 @@ enum {
        DDW_EXT_QUERY_OUT_SIZE = 2
 };
 
+static int iommu_take_ownership(struct iommu_table *tbl)
+{
+       unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
+       int ret = 0;
+
+       /*
+        * VFIO does not control TCE entries allocation and the guest
+        * can write new TCEs on top of existing ones so iommu_tce_build()
+        * must be able to release old pages. This functionality
+        * requires exchange() callback defined so if it is not
+        * implemented, we disallow taking ownership over the table.
+        */
+       if (!tbl->it_ops->xchg_no_kill)
+               return -EINVAL;
+
+       spin_lock_irqsave(&tbl->large_pool.lock, flags);
+       for (i = 0; i < tbl->nr_pools; i++)
+               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
+
+       if (iommu_table_in_use(tbl)) {
+               pr_err("iommu_tce: it_map is not empty");
+               ret = -EBUSY;
+       } else {
+               memset(tbl->it_map, 0xff, sz);
+       }
+
+       for (i = 0; i < tbl->nr_pools; i++)
+               spin_unlock(&tbl->pools[i].lock);
+       spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
+
+       return ret;
+}
+
+static void iommu_release_ownership(struct iommu_table *tbl)
+{
+       unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
+
+       spin_lock_irqsave(&tbl->large_pool.lock, flags);
+       for (i = 0; i < tbl->nr_pools; i++)
+               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
+
+       memset(tbl->it_map, 0, sz);
+
+       iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
+                       tbl->it_reserved_end);
+
+       for (i = 0; i < tbl->nr_pools; i++)
+               spin_unlock(&tbl->pools[i].lock);
+       spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
+}
+
 static struct iommu_table *iommu_pseries_alloc_table(int node)
 {
        struct iommu_table *tbl;
@@ -67,6 +118,8 @@ static struct iommu_table *iommu_pseries_alloc_table(int node)
        return tbl;
 }
 
+static struct iommu_table_group_ops spapr_tce_table_group_ops;
+
 static struct iommu_table_group *iommu_pseries_alloc_group(int node)
 {
        struct iommu_table_group *table_group;
@@ -1651,6 +1704,97 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
        return false;
 }
 
+/*
+ * A simple iommu_table_group_ops which only allows reusing the existing
+ * iommu_table. This handles VFIO for POWER7 or the nested KVM.
+ * The ops does not allow creating windows and only allows reusing the existing
+ * one if it matches table_group->tce32_start/tce32_size/page_shift.
+ */
+static unsigned long spapr_tce_get_table_size(__u32 page_shift,
+                                             __u64 window_size, __u32 levels)
+{
+       unsigned long size;
+
+       if (levels > 1)
+               return ~0U;
+       size = window_size >> (page_shift - 3);
+       return size;
+}
+
+static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
+                                  __u32 page_shift, __u64 window_size, __u32 levels,
+                                  struct iommu_table **ptbl)
+{
+       struct iommu_table *tbl = table_group->tables[0];
+
+       if (num > 0)
+               return -EPERM;
+
+       if (tbl->it_page_shift != page_shift ||
+           tbl->it_size != (window_size >> page_shift) ||
+           tbl->it_indirect_levels != levels - 1)
+               return -EINVAL;
+
+       *ptbl = iommu_tce_table_get(tbl);
+       return 0;
+}
+
+static long spapr_tce_set_window(struct iommu_table_group *table_group,
+                                int num, struct iommu_table *tbl)
+{
+       return tbl == table_group->tables[num] ? 0 : -EPERM;
+}
+
+static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
+{
+       return 0;
+}
+
+static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
+{
+       int i, j, rc = 0;
+
+       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+               struct iommu_table *tbl = table_group->tables[i];
+
+               if (!tbl || !tbl->it_map)
+                       continue;
+
+               rc = iommu_take_ownership(tbl);
+               if (!rc)
+                       continue;
+
+               for (j = 0; j < i; ++j)
+                       iommu_release_ownership(table_group->tables[j]);
+               return rc;
+       }
+       return 0;
+}
+
+static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
+{
+       int i;
+
+       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+               struct iommu_table *tbl = table_group->tables[i];
+
+               if (!tbl)
+                       continue;
+
+               if (tbl->it_map)
+                       iommu_release_ownership(tbl);
+       }
+}
+
+static struct iommu_table_group_ops spapr_tce_table_group_ops = {
+       .get_table_size = spapr_tce_get_table_size,
+       .create_table = spapr_tce_create_table,
+       .set_window = spapr_tce_set_window,
+       .unset_window = spapr_tce_unset_window,
+       .take_ownership = spapr_tce_take_ownership,
+       .release_ownership = spapr_tce_release_ownership,
+};
+
 static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
                void *data)
 {

