Having a function to check whether the iommu table has any allocation helps decide if a tbl can be reset in order to use a new DMA window.
It should be enough to replace all instances of !bitmap_empty(tbl...). iommu_table_in_use() skips reserved memory, so we don't need to worry about releasing it before testing. This causes iommu_table_release_pages() to become unnecessary, given it is only used to remove reserved memory for testing. Signed-off-by: Leonardo Bras <leobra...@gmail.com> --- arch/powerpc/include/asm/iommu.h | 1 + arch/powerpc/kernel/iommu.c | 62 ++++++++++++++++++-------------- 2 files changed, 37 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 5032f1593299..2913e5c8b1f8 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -154,6 +154,7 @@ extern int iommu_tce_table_put(struct iommu_table *tbl); */ extern struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, unsigned long res_start, unsigned long res_end); +bool iommu_table_in_use(struct iommu_table *tbl); #define IOMMU_TABLE_GROUP_MAX_TABLES 2 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 7f603d4e62d4..c5d5d36ab65e 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -668,21 +668,6 @@ static void iommu_table_reserve_pages(struct iommu_table *tbl, set_bit(i - tbl->it_offset, tbl->it_map); } -static void iommu_table_release_pages(struct iommu_table *tbl) -{ - int i; - - /* - * In case we have reserved the first bit, we should not emit - * the warning below. - */ - if (tbl->it_offset == 0) - clear_bit(0, tbl->it_map); - - for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) - clear_bit(i - tbl->it_offset, tbl->it_map); -} - /* * Build a iommu_table structure. This contains a bit map which * is used to manage allocation of the tce space. 
@@ -743,6 +728,38 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, return tbl; } +bool iommu_table_in_use(struct iommu_table *tbl) +{ + bool in_use; + unsigned long p1_start = 0, p1_end, p2_start, p2_end; + + /*ignore reserved bit0*/ + if (tbl->it_offset == 0) + p1_start = 1; + + /* Check if reserved memory is valid*/ + if (tbl->it_reserved_start >= tbl->it_offset && + tbl->it_reserved_start <= (tbl->it_offset + tbl->it_size) && + tbl->it_reserved_end >= tbl->it_offset && + tbl->it_reserved_end <= (tbl->it_offset + tbl->it_size)) { + p1_end = tbl->it_reserved_start - tbl->it_offset; + p2_start = tbl->it_reserved_end - tbl->it_offset + 1; + p2_end = tbl->it_size; + } else { + p1_end = tbl->it_size; + p2_start = 0; + p2_end = 0; + } + + in_use = (find_next_bit(tbl->it_map, p1_end, p1_start) != p1_end); + if (in_use || p2_start == 0) + return in_use; + + in_use = (find_next_bit(tbl->it_map, p2_end, p2_start) != p2_end); + + return in_use; +} + static void iommu_table_free(struct kref *kref) { unsigned long bitmap_sz; @@ -759,10 +776,8 @@ static void iommu_table_free(struct kref *kref) return; } - iommu_table_release_pages(tbl); - /* verify that table contains no entries */ - if (!bitmap_empty(tbl->it_map, tbl->it_size)) + if (iommu_table_in_use(tbl)) pr_warn("%s: Unexpected TCEs\n", __func__); /* calculate bitmap size in bytes */ @@ -1069,18 +1084,13 @@ int iommu_take_ownership(struct iommu_table *tbl) for (i = 0; i < tbl->nr_pools; i++) spin_lock(&tbl->pools[i].lock); - iommu_table_release_pages(tbl); - - if (!bitmap_empty(tbl->it_map, tbl->it_size)) { + if (iommu_table_in_use(tbl)) { pr_err("iommu_tce: it_map is not empty"); ret = -EBUSY; - /* Undo iommu_table_release_pages, i.e. 
restore bit#0, etc */ - iommu_table_reserve_pages(tbl, tbl->it_reserved_start, - tbl->it_reserved_end); - } else { - memset(tbl->it_map, 0xff, sz); } + memset(tbl->it_map, 0xff, sz); + for (i = 0; i < tbl->nr_pools; i++) spin_unlock(&tbl->pools[i].lock); spin_unlock_irqrestore(&tbl->large_pool.lock, flags); -- 2.25.4