To support Cooperative Memory Overcommitment (CMO), we need to check
for failure from some of the TCE hcalls.
The changes for the pSeries platform touch the shared powerpc
architecture code, so the matching updates for the other affected
platforms are included in this patch as well.
pSeries platform IOMMU code changes:
* platform TCE functions must handle H_NOT_ENOUGH_RESOURCES returns from
the TCE hcalls and report the failure to the caller (sketched below).
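
A minimal sketch of that error path, assuming the existing pSeries
helpers (plpar_tce_put(), the TCE_* constants, H_NOT_ENOUGH_RESOURCES);
this is not the literal hunk, and the cleanup of partially-built
entries is omitted here:

static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction)
{
	/* Sketch only, not the literal patch hunk. */
	u64 proto_tce = TCE_PCI_READ;
	u64 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
	long rc;

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		u64 tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		rc = plpar_tce_put((u64)tbl->it_index,
				   (u64)tcenum << 12, tce);
		if (rc == H_NOT_ENOUGH_RESOURCES)
			return (int)rc;	/* transient; caller backs out */

		tcenum++;
		rpn++;
	}
	return 0;
}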
Architecture IOMMU code changes:
* Calls to ppc_md.tce_build need to check the return value and return
DMA_ERROR_CODE for transient errors.
Architecture changes:
* the tce_build entry in struct machdep_calls changes to return int so
the tce_build*_pSeriesLP functions can indicate failure (see the sketch
after this list).
* all other platforms need their iommu functions updated to match the new
calling semantics; they now return 0 on success. The other platforms'
default configs have been built, but no further testing was performed.
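
For reference, the calling-convention change boils down to the one-line
machdep.h hunk; a sketch of the updated hook (zero now means success):

/* include/asm-powerpc/machdep.h: tce_build returns int instead of void,
 * non-zero signalling a transient failure the caller must recover from. */
int	(*tce_build)(struct iommu_table *tbl,
		     long index,
		     long npages,
		     unsigned long uaddr,
		     enum dma_data_direction direction);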
Signed-off-by: Robert Jennings <[EMAIL PROTECTED]>
---
 arch/powerpc/kernel/iommu.c            |   26 +
 arch/powerpc/platforms/cell/iommu.c    |    3 ++-
 arch/powerpc/platforms/iseries/iommu.c |    3 ++-
 arch/powerpc/platforms/pasemi/iommu.c  |    3 ++-
 arch/powerpc/platforms/pseries/iommu.c |   34 -
 arch/powerpc/sysdev/dart_iommu.c       |    3 ++-
 include/asm-powerpc/machdep.h          |    2 +-
 7 files changed, 60 insertions(+), 14 deletions(-)
Index: b/arch/powerpc/kernel/iommu.c
===================================================================
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -49,6 +49,8 @@ static int novmerge = 1;
static int protect4gb = 1;
+static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
+
static inline unsigned long iommu_num_pages(unsigned long vaddr,
unsigned long slen)
{
@@ -190,6 +192,7 @@ static dma_addr_t iommu_alloc(struct dev
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
+	int build_fail;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
@@ -204,9 +207,21 @@ static dma_addr_t iommu_alloc(struct dev
 	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction);
+	build_fail = ppc_md.tce_build(tbl, entry, npages,
+				      (unsigned long)page & IOMMU_PAGE_MASK,
+				      direction);
+
+	/* ppc_md.tce_build() only returns non-zero for transient errors.
+	 * Clean up the table bitmap in this case and return
+	 * DMA_ERROR_CODE. For all other errors the functionality is
+	 * not altered.
+	 */
+	if (unlikely(build_fail)) {
+		__iommu_free(tbl, ret, npages);
+		spin_unlock_irqrestore(&(tbl->it_lock), flags);
+		return DMA_ERROR_CODE;
+	}
/* Flush/invalidate TLB caches if necessary */
if (ppc_md.tce_flush)
@@ -275,7 +290,7 @@ int iommu_map_sg(struct device *dev, str
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
-	int outcount, incount, i;
+	int outcount, incount, i, build_fail = 0;
unsigned int align;
unsigned long handle;
unsigned int max_seg_size;
@@ -336,7 +351,10 @@ int iommu_map_sg(struct device *dev, str
npages, entry, dma_addr);
/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
-				 direction);
+		build_fail = ppc_md.tce_build(tbl, entry, npages,
+					      vaddr & IOMMU_PAGE_MASK,
+					      direction);
+		if (unlikely(build_fail))
+			goto failure;
/* If we are in an open segment, try merging */
if (segstart != s) {
Index: b/arch/powerpc/platforms/cell/iommu.c
===================================================================
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -172,7 +172,7 @@ static void invalidate_tce_cache(struct
}
}
-static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
+static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction)
{
int i;
@@ -210,6 +210,7 @@ static void tce_build_cell(struct iommu_
pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
index, npages, direction, base_pte);
+	return 0;
}
static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
Index: b/arch/powerpc/platforms/iseries/iommu.c
===================================================================
--- a/arch/powerpc/platforms/iseries/iommu.c