On Wed, Nov 16, 2011 at 10:50:01AM -0500, Konrad Rzeszutek Wilk wrote:
> > +int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
> > +{
> .. snip..
> 
> > +   for (i = 0; i < ttm->num_pages; ++i) {
> > +           ret = ttm_dma_pool_get_pages(pool, ttm, i);
> > +           if (ret != 0) {
> > +                   ttm_dma_unpopulate(ttm, dev);
> > +                   return -ENOMEM;
> > +           }
> > +
> > +           ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> > +                                           false, false);
> 
> Ok, so we increment it here..
> 
> .. snip..
> > +void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
> > +{
> > +   struct dma_pool *pool;
> > +   struct dma_page *d_page;
> > +   enum pool_type type;
> > +   bool is_cached = false;
> > +   unsigned count = 0, i;
> > +   unsigned long irq_flags;
> > +
> > +   type = ttm_to_type(ttm->page_flags, ttm->caching_state);
> > +   pool = ttm_dma_find_pool(dev, type);
> > +   if (!pool) {
> > +           WARN_ON(!pool);
> > +           return;
> > +   }
> > +   is_cached = (ttm_dma_find_pool(pool->dev,
> > +                ttm_to_type(ttm->page_flags, tt_cached)) == pool);
> > +
> > +   /* make sure pages array match list and count number of pages */
> > +   list_for_each_entry(d_page, &ttm->alloc_list, page_list) {
> > +           ttm->pages[count] = d_page->p;
> > +           count++;
> > +   }
> > +
> > +   spin_lock_irqsave(&pool->lock, irq_flags);
> > +   pool->npages_in_use -= count;
> > +   if (is_cached) {
> > +           pool->nfrees += count;
> > +   } else {
> > +           pool->npages_free += count;
> > +           list_splice(&ttm->alloc_list, &pool->free_list);
> > +           if (pool->npages_free > _manager->options.max_size) {
> > +                   count = pool->npages_free - _manager->options.max_size;
> > +           }
> > +   }
> > +   spin_unlock_irqrestore(&pool->lock, irq_flags);
> 
> But we don't do it here.  I think you need:
> 
>       for (i = 0; i < ttm->num_pages; i++) {
>               ttm_mem_global_free_page(mem_glob, ttm->pages[i]);
>       }
> 
> And it has to be done before ttm_dma_pages_put(), since at that point
> the pages are truly gone.
> 
> Could actually be done in the '/* make sure pages array match list ... */' loop.

Actually, to avoid the race you need to do it one page at a time. Will
respin with that.
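
Roughly something like this (untested sketch; I am assuming the
ttm_mem_global pointer can be fetched the same way ttm_dma_populate()
gets its mem_glob, e.g. via ttm->glob->mem_glob):

	/* assumed to match how ttm_dma_populate() gets mem_glob */
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	/* make sure pages array match list and count number of pages;
	 * also undo the ttm_mem_global_alloc_page() accounting one page
	 * at a time, while the page is still owned by this ttm and
	 * before anything is handed back to the pool or freed. */
	list_for_each_entry(d_page, &ttm->alloc_list, page_list) {
		ttm->pages[count] = d_page->p;
		ttm_mem_global_free_page(mem_glob, d_page->p);
		count++;
	}

Doing it inside that walk keeps the accounting strictly per page and
happens before the list_splice()/ttm_dma_pages_put() paths, so the
pages are still ours when the accounting is dropped.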

> 
> > +
> > +   if (is_cached) {
> > +           ttm_dma_pages_put(pool, &ttm->alloc_list,
> > +                             ttm->pages, count);
> > +   }
> > +
> > +   INIT_LIST_HEAD(&ttm->alloc_list);
> > +   for (i = 0; i < ttm->num_pages; i++) {
> > +           ttm->pages[i] = NULL;
> > +           ttm->dma_address[i] = 0;
> > +   }