[PATCH 07/14] drm/ttm: page allocation use page array instead of list
Thomas Hellstrom
thellstrom at vmware.com
Thu Nov 17 05:27:53 PST 2011
On 11/16/2011 05:57 PM, j.glisse at gmail.com wrote:
> From: Jerome Glisse <jglisse at redhat.com>
>
Reviewed-by: Thomas Hellstrom <thellstrom at vmware.com>
> Use the ttm_tt pages array for pages allocations, move the list
> unwinding into the page allocation functions.
>
> Signed-off-by: Jerome Glisse <jglisse at redhat.com>
> ---
> drivers/gpu/drm/ttm/ttm_page_alloc.c | 85 +++++++++++++++++++++-------------
> drivers/gpu/drm/ttm/ttm_tt.c | 36 +++------------
> include/drm/ttm/ttm_page_alloc.h | 8 ++--
> 3 files changed, 63 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 727e93d..0f3e6d2 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
> * @return count of pages still required to fulfill the request.
> */
> static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
> - struct list_head *pages, int ttm_flags,
> - enum ttm_caching_state cstate, unsigned count)
> + struct list_head *pages,
> + int ttm_flags,
> + enum ttm_caching_state cstate,
> + unsigned count)
> {
> unsigned long irq_flags;
> struct list_head *p;
> @@ -664,13 +666,15 @@ out:
> * On success pages list will hold count number of correctly
> * cached pages.
> */
> -int ttm_get_pages(struct list_head *pages, int flags,
> - enum ttm_caching_state cstate, unsigned count,
> +int ttm_get_pages(struct page **pages, int flags,
> + enum ttm_caching_state cstate, unsigned npages,
> dma_addr_t *dma_address)
> {
> struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
> + struct list_head plist;
> struct page *p = NULL;
> gfp_t gfp_flags = GFP_USER;
> + unsigned count;
> int r;
>
> /* set zero flag for page allocation if required */
> @@ -684,7 +688,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
> else
> gfp_flags |= GFP_HIGHUSER;
>
> -	for (r = 0; r < count; ++r) {
> +	for (r = 0; r < npages; ++r) {
> p = alloc_page(gfp_flags);
> if (!p) {
>
> @@ -693,85 +697,100 @@ int ttm_get_pages(struct list_head *pages, int flags,
> return -ENOMEM;
> }
>
> - list_add(&p->lru, pages);
> + pages[r] = p;
> }
> return 0;
> }
>
> -
> /* combine zero flag to pool flags */
> gfp_flags |= pool->gfp_flags;
>
> /* First we take pages from the pool */
> - count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
> + INIT_LIST_HEAD(&plist);
> +	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
> + count = 0;
> +	list_for_each_entry(p, &plist, lru) {
> + pages[count++] = p;
> + }
>
> /* clear the pages coming from the pool if requested */
> 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
> -		list_for_each_entry(p, pages, lru) {
> +		list_for_each_entry(p, &plist, lru) {
> clear_page(page_address(p));
> }
> }
>
> /* If pool didn't have enough pages allocate new one. */
> -	if (count > 0) {
> +	if (npages > 0) {
> /* ttm_alloc_new_pages doesn't reference pool so we can run
> * multiple requests in parallel.
> **/
> - r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
> + INIT_LIST_HEAD(&plist);
> + r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
> +		list_for_each_entry(p, &plist, lru) {
> + pages[count++] = p;
> + }
> if (r) {
> /* If there is any pages in the list put them back to
> * the pool. */
> printk(KERN_ERR TTM_PFX
> "Failed to allocate extra pages "
> "for large request.");
> - ttm_put_pages(pages, 0, flags, cstate, NULL);
> + ttm_put_pages(pages, count, flags, cstate, NULL);
> return r;
> }
> }
>
> -
> return 0;
> }
>
> /* Put all pages in pages list to correct pool to wait for reuse */
> -void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
> +void ttm_put_pages(struct page **pages, unsigned npages, int flags,
> enum ttm_caching_state cstate, dma_addr_t *dma_address)
> {
> unsigned long irq_flags;
> struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
> - struct page *p, *tmp;
> + unsigned i;
>
> if (pool == NULL) {
> /* No pool for this memory type so free the pages */
> -
> - list_for_each_entry_safe(p, tmp, pages, lru) {
> - __free_page(p);
> +		for (i = 0; i < npages; i++) {
> + if (pages[i]) {
> + if (page_count(pages[i]) != 1)
> + printk(KERN_ERR TTM_PFX
> + "Erroneous page count. "
> + "Leaking pages.\n");
> + __free_page(pages[i]);
> + pages[i] = NULL;
> + }
> }
> - /* Make the pages list empty */
> - INIT_LIST_HEAD(pages);
> return;
> }
> - if (page_count == 0) {
> - list_for_each_entry_safe(p, tmp, pages, lru) {
> - ++page_count;
> - }
> - }
>
> spin_lock_irqsave(&pool->lock, irq_flags);
> -	list_splice_init(pages, &pool->list);
> - pool->npages += page_count;
> +	for (i = 0; i < npages; i++) {
> + if (pages[i]) {
> + if (page_count(pages[i]) != 1)
> + printk(KERN_ERR TTM_PFX
> + "Erroneous page count. "
> + "Leaking pages.\n");
> +			list_add_tail(&pages[i]->lru, &pool->list);
> + pages[i] = NULL;
> + pool->npages++;
> + }
> + }
> /* Check that we don't go over the pool limit */
> - page_count = 0;
> + npages = 0;
> 	if (pool->npages > _manager->options.max_size) {
> - page_count = pool->npages - _manager->options.max_size;
> + npages = pool->npages - _manager->options.max_size;
> /* free at least NUM_PAGES_TO_ALLOC number of pages
> * to reduce calls to set_memory_wb */
> -		if (page_count < NUM_PAGES_TO_ALLOC)
> - page_count = NUM_PAGES_TO_ALLOC;
> +		if (npages < NUM_PAGES_TO_ALLOC)
> + npages = NUM_PAGES_TO_ALLOC;
> }
> spin_unlock_irqrestore(&pool->lock, irq_flags);
> - if (page_count)
> - ttm_page_pool_free(pool, page_count);
> + if (npages)
> + ttm_page_pool_free(pool, npages);
> }
>
> static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
> index 54bbbad..6e079de 100644
> --- a/drivers/gpu/drm/ttm/ttm_tt.c
> +++ b/drivers/gpu/drm/ttm/ttm_tt.c
> @@ -66,22 +66,16 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
> static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
> {
> struct page *p;
> - struct list_head h;
> struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
> int ret;
>
> if (NULL == (p = ttm->pages[index])) {
>
> - INIT_LIST_HEAD(&h);
> -
> - ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
> + ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
> &ttm->dma_address[index]);
> -
> if (ret != 0)
> return NULL;
>
> - p = list_first_entry(&h, struct page, lru);
> -
> ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
> if (unlikely(ret != 0))
> goto out_err;
> @@ -90,9 +84,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
> }
> return p;
> out_err:
> - INIT_LIST_HEAD(&h);
> -	list_add(&p->lru, &h);
> -	ttm_put_pages(&h, 1, ttm->page_flags,
> +	ttm_put_pages(&p, 1, ttm->page_flags,
> 		      ttm->caching_state, &ttm->dma_address[index]);
> return NULL;
> }
> @@ -243,33 +235,19 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
>
> static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
> {
> - int i;
> - unsigned count = 0;
> - struct list_head h;
> - struct page *cur_page;
> struct ttm_backend *be = ttm->be;
> -
> - INIT_LIST_HEAD(&h);
> + unsigned i;
>
> if (be)
> be->func->clear(be);
> 	for (i = 0; i < ttm->num_pages; ++i) {
> -
> - cur_page = ttm->pages[i];
> - ttm->pages[i] = NULL;
> - if (cur_page) {
> - if (page_count(cur_page) != 1)
> - printk(KERN_ERR TTM_PFX
> - "Erroneous page count. "
> - "Leaking pages.\n");
> + if (ttm->pages[i]) {
> ttm_mem_global_free_page(ttm->glob->mem_glob,
> - cur_page);
> -			list_add(&cur_page->lru, &h);
> - count++;
> + ttm->pages[i]);
> + ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
> +				      ttm->caching_state, &ttm->dma_address[i]);
> }
> }
> - ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
> - ttm->dma_address);
> ttm->state = tt_unpopulated;
> }
>
> diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
> index 129de12..fe61c8d 100644
> --- a/include/drm/ttm/ttm_page_alloc.h
> +++ b/include/drm/ttm/ttm_page_alloc.h
> @@ -38,10 +38,10 @@
> * @count: number of pages to allocate.
> * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
> */
> -int ttm_get_pages(struct list_head *pages,
> +int ttm_get_pages(struct page **pages,
> int flags,
> enum ttm_caching_state cstate,
> - unsigned count,
> + unsigned npages,
> dma_addr_t *dma_address);
> /**
> * Put linked list of pages to pool.
> @@ -53,8 +53,8 @@ int ttm_get_pages(struct list_head *pages,
> * @cstate: ttm caching state.
> * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
> */
> -void ttm_put_pages(struct list_head *pages,
> - unsigned page_count,
> +void ttm_put_pages(struct page **pages,
> + unsigned npages,
> int flags,
> enum ttm_caching_state cstate,
> dma_addr_t *dma_address);
>
More information about the dri-devel
mailing list