ttm_page_alloc_debugfs can be registered to output the state
of pools.

Debugfs file will output number of pages freed from the pool,
number of pages in pool now and the lowest number of pages in
pool since previous shrink.

Signed-off-by: Pauli Nieminen <suok...@gmail.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c |   46 +++++++++++++++++++++++++++++----
 include/drm/ttm/ttm_page_alloc.h     |    4 +++
 2 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 768d479..206bee9 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -34,6 +34,7 @@
 #include <linux/spinlock.h>
 #include <linux/highmem.h>
 #include <linux/mm_types.h>
+#include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
@@ -73,6 +74,9 @@ struct ttm_page_pool {
        unsigned                npages;
        unsigned                nlowpages;
        unsigned                alloc_size;
+       char                    *name;
+       unsigned long           nfrees;
+       unsigned long           nrefills;
 };
 
 #define NUM_POOLS 4
@@ -240,6 +244,7 @@ static bool ttm_page_pool_free_pages_locked(struct ttm_page_pool *pool,
 {
        unsigned tmp;
        pool->npages -= freed_pages;
+       pool->nfrees += freed_pages;
        /* Calculate number of pages taken from nlowpages
         * npages_to_free = 1/2*nlowpages =>
         * nlowpages_delta = 2*freed_pages
@@ -320,6 +325,7 @@ restart:
        }
 
        pool->npages -= freed_pages;
+       pool->nfrees += freed_pages;
        /* set nlowpages to zero to prevent extra freeing in thsi patch.
         * nlowpages is reseted later after all work has been finnished.
         **/
@@ -537,6 +543,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
                if (!r) {
                        list_splice(&new_pages, &pool->list);
+                       ++pool->nrefills;
                        pool->npages += pool->alloc_size;
                        /* Have to remmber to update the low number of pages
                         * too */
@@ -724,14 +731,16 @@ void ttm_put_pages(struct list_head *pages, int flags,
                                round_jiffies(_manager.free_interval));
 }
 
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags)
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+               char *name)
 {
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
-       pool->npages = pool->nlowpages = 0;
+       pool->npages = pool->nlowpages = pool->nfrees = pool->nrefills = 0;
        pool->alloc_size = NUM_PAGES_TO_ALLOC;
        pool->gfp_flags = flags;
+       pool->name = name;
 }
 
 int ttm_page_alloc_init(struct ttm_mem_global *glob)
@@ -741,13 +750,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob)
 
        printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
 
-       ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER);
+       ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
-       ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER);
+       ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
 
-       ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32);
+       ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+                       "wc dma");
 
-       ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32);
+       ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+                       "uc dma");
 
        _manager.free_interval = msecs_to_jiffies(PAGE_FREE_INTERVAL);
        _manager.small_allocation = SMALL_ALLOCATION;
@@ -773,3 +784,26 @@ void ttm_page_alloc_fini()
        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
 }
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+       struct ttm_page_pool *p;
+       unsigned i;
+       static const char *h[] = {"pool", "refills", "pages freed", "size", "min size"};
+       if (atomic_read(&_manager.page_alloc_inited) == 0) {
+               seq_printf(m, "No pool allocator running.\n");
+               return 0;
+       }
+       seq_printf(m, "%6s %12s %13s %8s %8s\n",
+                       h[0], h[1], h[2], h[3], h[4]);
+       for (i = 0; i < NUM_POOLS; ++i) {
+               p = &_manager.pools[i];
+
+               seq_printf(m, "%6s %12lu %13lu %8u %8u\n",
+                               p->name, p->nrefills,
+                               p->nfrees, p->npages,
+                               p->nlowpages);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 485514a..2df0caa 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -61,4 +61,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob);
  */
 void ttm_page_alloc_fini(void);
 
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 #endif
-- 
1.6.3.3


------------------------------------------------------------------------------
Download Intel® Parallel Studio Eval
Try the new software tools for yourself. Speed compiling, find bugs
proactively, and fine-tune applications for parallel performance.
See why Intel Parallel Studio got high marks during beta.
http://p.sf.net/sfu/intel-sw-dev
--
_______________________________________________
Dri-devel mailing list
Dri-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/dri-devel

Reply via email to