Currently the Intel interrupt remapping driver uses the "present" flag bit
in the remapping entry to track whether an entry is allocated.
It works as follows:
1) allocate a remapping entry and set its "present" flag bit to 1
2) compose the other fields of the entry
3) update the remapping entry with the composed value

The remapping hardware may access the entry between step 1 and step 3
and thus observe an entry with the "present" flag set but random values
in all other fields.
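
To make the window concrete, a rough sketch of the old split between
alloc_irte() and modify_irte() (the comments are illustrative, not the
exact driver code):

	/* Old alloc_irte(): the slot was claimed by flipping the very bit
	 * the remapping hardware checks, so from this point on the IOMMU
	 * could walk an entry whose other fields were still stale.
	 */
	table->base[i].present = 1;

	/* The fully composed entry is only written later, from
	 * modify_irte(), via set_64bit() on the low/high halves.
	 */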

This patch introduces a dedicated bitmap to track remapping entry
allocation status instead of sharing the "present" flag with the hardware,
thus eliminating the race window. It also simplifies the implementation.
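
A minimal userspace sketch of the idea behind the bitmap; find_free_region()
and release_region() are illustrative stand-ins for the kernel's
bitmap_find_free_region()/bitmap_release_region(), and the sizes and names
are assumptions, not the driver code:

	#include <stdio.h>

	#define TABLE_ENTRIES 64	/* real table: INTR_REMAP_TABLE_ENTRIES */

	static unsigned long alloc_map;	/* one bit per IRTE, never seen by HW */

	/*
	 * Find a naturally aligned, free block of (1 << order) entries,
	 * mark it allocated and return its start index, or -1 if none is
	 * available (toy version, handles order 0..5 only).
	 */
	static int find_free_region(int order)
	{
		int count = 1 << order;

		for (int i = 0; i + count <= TABLE_ENTRIES; i += count) {
			unsigned long mask = ((1UL << count) - 1) << i;

			if (!(alloc_map & mask)) {
				alloc_map |= mask;
				return i;
			}
		}
		return -1;
	}

	/* Counterpart of bitmap_release_region(): hand the block back. */
	static void release_region(int index, int order)
	{
		alloc_map &= ~(((1UL << (1 << order)) - 1) << index);
	}

	int main(void)
	{
		int a = find_free_region(2);	/* block of 4 -> index 0 */
		int b = find_free_region(0);	/* single entry -> index 4 */

		printf("a=%d b=%d\n", a, b);
		release_region(a, 2);
		return 0;
	}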

Tested-and-reviewed-by: Yijing Wang <wangyij...@huawei.com>
Signed-off-by: Jiang Liu <jiang....@linux.intel.com>
---
 drivers/iommu/intel_irq_remapping.c |   51 +++++++++++++++++------------------
 include/linux/intel-iommu.h         |    1 +
 2 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index bab10b1..282d392 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -72,7 +72,6 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
-       int i;
 
        if (!count || !irq_iommu)
                return -1;
@@ -96,32 +95,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        }
 
        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-       do {
-               for (i = index; i < index + count; i++)
-                       if  (table->base[i].present)
-                               break;
-               /* empty index found */
-               if (i == index + count)
-                       break;
-
-               index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-               if (index == start_index) {
-                       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-                       printk(KERN_ERR "can't allocate an IRTE\n");
-                       return -1;
-               }
-       } while (1);
-
-       for (i = index; i < index + count; i++)
-               table->base[i].present = 1;
-
-       cfg->remapped = 1;
-       irq_iommu->iommu = iommu;
-       irq_iommu->irte_index =  index;
-       irq_iommu->sub_handle = 0;
-       irq_iommu->irte_mask = mask;
-
+       index = bitmap_find_free_region(table->bitmap,
+                                       INTR_REMAP_TABLE_ENTRIES, mask);
+       if (index < 0) {
+               printk(KERN_ERR "can't allocate an IRTE\n");
+       } else {
+               cfg->remapped = 1;
+               irq_iommu->iommu = iommu;
+               irq_iommu->irte_index =  index;
+               irq_iommu->sub_handle = 0;
+               irq_iommu->irte_mask = mask;
+       }
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return index;
@@ -254,6 +238,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }
+       bitmap_release_region(iommu->ir_table->bitmap, index,
+                             irq_iommu->irte_mask);
 
        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -453,6 +439,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 {
        struct ir_table *ir_table;
        struct page *pages;
+       unsigned long *bitmap;
 
        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);
@@ -470,7 +457,17 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
                return -ENOMEM;
        }
 
+       bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+                        sizeof(long), GFP_ATOMIC);
+       if (bitmap == NULL) {
+               printk(KERN_ERR "failed to allocate bitmap\n");
+               __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+               kfree(ir_table);
+               return -ENOMEM;
+       }
+
        ir_table->base = page_address(pages);
+       ir_table->bitmap = bitmap;
 
        iommu_set_irq_remapping(iommu, mode);
        return 0;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d380c5e..de1e5e9 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -288,6 +288,7 @@ struct q_inval {
 
 struct ir_table {
        struct irte *base;
+       unsigned long *bitmap;
 };
 #endif
 
-- 
1.7.10.4
