Add an input parameter (update_glob_count) to ttm_dma_unpopulate so
callers can control whether the global memory accounting is updated.

When ttm_dma_pool_get_pages or ttm_mem_global_alloc_page fails, don't
call ttm_mem_global_free_page to update the global memory count.
Signed-off-by: Roger He <hongbo...@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +-
drivers/gpu/drm/radeon/radeon_ttm.c | 2 +-
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 19 +++++++++++--------
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2 +-
include/drm/ttm/ttm_page_alloc.h | 5 +++--
6 files changed, 18 insertions(+), 14 deletions(-)
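
For reviewers, a minimal usage sketch of the new flag. The two wrapper
functions below are hypothetical and only illustrate the intended call
pattern; ttm_dma_unpopulate() itself is the function changed by this
patch.

#include <drm/ttm/ttm_page_alloc.h>

/* Normal driver teardown: the pages were charged against the global
 * accounting during ttm_dma_populate(), so release that accounting
 * again. */
static void example_tt_unpopulate(struct ttm_dma_tt *ttm_dma,
				  struct device *dev)
{
	ttm_dma_unpopulate(ttm_dma, dev, true);
}

/* Populate error path: ttm_dma_pool_get_pages() or
 * ttm_mem_global_alloc_page() just failed, so the global count was not
 * raised for the failing page; skip ttm_mem_global_free_page() while
 * cleaning up. */
static int example_populate_error(struct ttm_dma_tt *ttm_dma,
				  struct device *dev)
{
	ttm_dma_unpopulate(ttm_dma, dev, false);
	return -ENOMEM;
}
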
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e4bb435..723ccf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1046,7 +1046,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, adev->dev);
+ ttm_dma_unpopulate(&gtt->ttm, adev->dev, true);
return;
}
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ce328ed..3f7c30f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1632,7 +1632,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate((void *)ttm, dev);
+ ttm_dma_unpopulate((void *)ttm, dev, true);
return;
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a0a839b..449cc65 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -789,7 +789,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+ ttm_dma_unpopulate(&gtt->ttm, rdev->dev, true);
return;
}
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index c7f01a4..4cda764 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -969,7 +969,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
pool->size, ctx);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm_dma, dev, false);
return -ENOMEM;
}
@@ -998,14 +998,14 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
while (num_pages) {
ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
if (ret != 0) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm_dma, dev, false);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
pool->size, ctx);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm_dma, dev, false);
return -ENOMEM;
}
@@ -1016,7 +1016,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm_dma, dev, true);
return ret;
}
}
@@ -1027,7 +1027,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ bool update_glob_count)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_pool *pool;
@@ -1049,7 +1050,8 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
continue;
count++;
- ttm_mem_global_free_page(ttm->glob->mem_glob,
+ if (update_glob_count)
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
d_page->p, pool->size);
ttm_dma_page_put(pool, d_page);
}
@@ -1094,11 +1096,12 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
if (is_cached) {
list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
page_list) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
+ if (update_glob_count)
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
d_page->p, pool->size);
ttm_dma_page_put(pool, d_page);
}
- } else {
+ } else if (update_glob_count) {
for (i = 0; i < count; i++) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i], pool->size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 22231bc..38df05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -681,7 +681,7 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
size_t size =
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
- ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+ ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev, true);
ttm_mem_global_free(glob, size);
} else
ttm_pool_unpopulate(ttm);
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 4d9b019..8518cd5 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -92,7 +92,8 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ bool update_glob_count);
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
@@ -114,7 +115,7 @@ static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
return -ENOMEM;
}
static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
+ struct device *dev, bool update_glob_count)
{
}
#endif