[PATCH] drm/ttm: pass buffer object for bind/unbind callback

Ben Skeggs skeggsb at gmail.com
Fri Nov 18 05:15:28 PST 2011


On Fri, 2011-11-18 at 08:57 +0100, Thomas Hellstrom wrote:
> Jerome,
> 
> I don't like this change for the following reasons
-snip-

> >
> > One can take advantage of the move_notify callback, but there are
> > corner cases where bind/unbind might be called without move_notify
> > being called (mostly in error paths).  So, to make sure that each
> > virtual address space has a consistent view of whether a buffer
> > object is backed by system pages or not, it's better to pass the
> > buffer object to bind/unbind.
As I discussed previously with Jerome on IRC, I believe the move_notify
hook is sufficient.  I fixed a couple of issues with it back when I
implemented support for this in nouveau.

Jerome did point out two issues, however, which I believe can be solved
easily enough.

The first was that the error path taken after move_notify() has been
called doesn't reverse the move_notify(), leaving stale page table
entries.  This, I think, could easily be solved by swapping bo->mem and
mem and re-calling move_notify() so the driver reverses whatever it did;
a rough sketch of that idea follows.
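
Roughly what I have in mind (just a sketch against the error path of
ttm_bo_handle_move_mem(); the helper name and the exact restore order
are mine, not from any posted patch):

/* Sketch: undo a move_notify() on the error path by pretending the
 * move went the other way, then restoring the original state.
 * ttm_bo_move_notify_rollback() is a hypothetical helper. */
static void ttm_bo_move_notify_rollback(struct ttm_buffer_object *bo,
					struct ttm_mem_reg *mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg tmp = *mem;

	if (!bdev->driver->move_notify)
		return;

	/* Swap bo->mem and *mem so the driver sees the reverse move and
	 * tears down whatever page table entries it set up for *mem. */
	*mem = bo->mem;
	bo->mem = tmp;
	bdev->driver->move_notify(bo, mem);

	/* Swap back so the caller's error handling sees the original
	 * state again. */
	bo->mem = *mem;
	*mem = tmp;
}

ttm_bo_handle_move_mem() could then call this from its out_err: label,
before it unbinds and destroys the ttm.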

The second issue is that apparently move_notify() doesn't get called in
the destroy path.  Nouveau's GEM layer takes care of this for our
userspace bos, and we don't use any kernel bos that are mapped into a
channel's address space, so we don't hit it.  This can probably be
solved easily enough too; see the sketch below.
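
One possible way to handle that (again just a sketch, against the
current unpatched TTM; treating a NULL new placement as "bo is being
destroyed" is an assumption drivers would have to agree on):

/* Sketch: let drivers tear down per-bo page table mappings on destroy
 * by calling move_notify() with a NULL new placement. */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, NULL);	/* "moved to nowhere" */

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);
}

Drivers that don't care could simply ignore a NULL placement in their
move_notify() implementation.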

Ben.

> >
> > The patch is straightforward; it tries to disturb the code as little
> > as possible.
> >
> > Signed-off-by: Jerome Glisse <jglisse at redhat.com>
> > ---
> >   drivers/gpu/drm/nouveau/nouveau_bo.c    |    2 +-
> >   drivers/gpu/drm/nouveau/nouveau_sgdma.c |   23 +++++++++++++++--------
> >   drivers/gpu/drm/radeon/radeon_ttm.c     |    8 +++++---
> >   drivers/gpu/drm/ttm/ttm_agp_backend.c   |    8 ++++----
> >   drivers/gpu/drm/ttm/ttm_bo.c            |   12 ++++++------
> >   drivers/gpu/drm/ttm/ttm_bo_util.c       |   12 ++++++------
> >   drivers/gpu/drm/ttm/ttm_tt.c            |   30 ++++++++++++++++--------------
> >   drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  |    6 ++++--
> >   include/drm/ttm/ttm_bo_driver.h         |   15 ++++++++-------
> >   9 files changed, 65 insertions(+), 51 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> > index 857bca4..2c8474e 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> > @@ -760,7 +760,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
> >   	if (ret)
> >   		return ret;
> >
> > -	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
> > +	ret = ttm_tt_bind(bo, &tmp_mem);
> >   	if (ret)
> >   		goto out;
> >
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
> > index 47f245e..4ca0f79 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
> > @@ -29,8 +29,9 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
> >   }
> >
> >   static int
> > -nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> > +nv04_sgdma_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct drm_device *dev = nvbe->dev;
> >   	struct drm_nouveau_private *dev_priv = dev->dev_private;
> > @@ -55,8 +56,9 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> >   }
> >
> >   static int
> > -nv04_sgdma_unbind(struct ttm_tt *ttm)
> > +nv04_sgdma_unbind(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct drm_device *dev = nvbe->dev;
> >   	struct drm_nouveau_private *dev_priv = dev->dev_private;
> > @@ -96,8 +98,9 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
> >   }
> >
> >   static int
> > -nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> > +nv41_sgdma_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
> >   	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
> > @@ -117,8 +120,9 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> >   }
> >
> >   static int
> > -nv41_sgdma_unbind(struct ttm_tt *ttm)
> > +nv41_sgdma_unbind(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
> >   	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
> > @@ -205,8 +209,9 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
> >   }
> >
> >   static int
> > -nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> > +nv44_sgdma_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
> >   	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
> > @@ -245,8 +250,9 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> >   }
> >
> >   static int
> > -nv44_sgdma_unbind(struct ttm_tt *ttm)
> > +nv44_sgdma_unbind(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
> >   	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
> > @@ -284,8 +290,9 @@ static struct ttm_backend_func nv44_sgdma_backend = {
> >   };
> >
> >   static int
> > -nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> > +nv50_sgdma_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
> >   	struct nouveau_mem *node = mem->mm_node;
> >
> > @@ -295,7 +302,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
> >   }
> >
> >   static int
> > -nv50_sgdma_unbind(struct ttm_tt *ttm)
> > +nv50_sgdma_unbind(struct ttm_buffer_object *bo)
> >   {
> >   	/* noop: unbound in move_notify() */
> >   	return 0;
> > diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
> > index b0ebaf8..584db4c 100644
> > --- a/drivers/gpu/drm/radeon/radeon_ttm.c
> > +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
> > @@ -305,7 +305,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
> >   		goto out_cleanup;
> >   	}
> >
> > -	r = ttm_tt_bind(bo->ttm, &tmp_mem);
> > +	r = ttm_tt_bind(bo, &tmp_mem);
> >   	if (unlikely(r)) {
> >   		goto out_cleanup;
> >   	}
> > @@ -506,9 +506,10 @@ struct radeon_ttm_tt {
> >   	u64				offset;
> >   };
> >
> > -static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
> > +static int radeon_ttm_backend_bind(struct ttm_buffer_object *bo,
> >   				   struct ttm_mem_reg *bo_mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct radeon_ttm_tt *gtt = (void*)ttm;
> >   	int r;
> >
> > @@ -527,8 +528,9 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
> >   	return 0;
> >   }
> >
> > -static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
> > +static int radeon_ttm_backend_unbind(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct radeon_ttm_tt *gtt = (void *)ttm;
> >
> >   	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
> > diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
> > index 14ebd36..a842530 100644
> > --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
> > +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
> > @@ -45,8 +45,9 @@ struct ttm_agp_backend {
> >   	struct agp_bridge_data *bridge;
> >   };
> >
> > -static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> > +static int ttm_agp_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
> >   	struct drm_mm_node *node = bo_mem->mm_node;
> >   	struct agp_memory *mem;
> > @@ -78,8 +79,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> >   	return ret;
> >   }
> >
> > -static int ttm_agp_unbind(struct ttm_tt *ttm)
> > +static int ttm_agp_unbind(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
> >
> >   	if (agp_be->mem) {
> > @@ -95,8 +97,6 @@ static void ttm_agp_destroy(struct ttm_tt *ttm)
> >   {
> >   	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
> >
> > -	if (agp_be->mem)
> > -		ttm_agp_unbind(ttm);
> >   	kfree(agp_be);
> >   }
> >
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> > index de7ad99..bffaf5d6 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> > @@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
> >   	BUG_ON(!list_empty(&bo->ddestroy));
> >
> >   	if (bo->ttm)
> > -		ttm_tt_destroy(bo->ttm);
> > +		ttm_tt_destroy(bo);
> >   	atomic_dec(&bo->glob->bo_count);
> >   	if (bo->destroy)
> >   		bo->destroy(bo);
> > @@ -390,7 +390,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
> >   			goto out_err;
> >
> >   		if (mem->mem_type != TTM_PL_SYSTEM) {
> > -			ret = ttm_tt_bind(bo->ttm, mem);
> > +			ret = ttm_tt_bind(bo, mem);
> >   			if (ret)
> >   				goto out_err;
> >   		}
> > @@ -439,8 +439,8 @@ moved:
> >   out_err:
> >   	new_man = &bdev->man[bo->mem.mem_type];
> >   	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
> > -		ttm_tt_unbind(bo->ttm);
> > -		ttm_tt_destroy(bo->ttm);
> > +		ttm_tt_unbind(bo);
> > +		ttm_tt_destroy(bo);
> >   		bo->ttm = NULL;
> >   	}
> >
> > @@ -458,8 +458,8 @@ out_err:
> >   static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
> >   {
> >   	if (bo->ttm) {
> > -		ttm_tt_unbind(bo->ttm);
> > -		ttm_tt_destroy(bo->ttm);
> > +		ttm_tt_unbind(bo);
> > +		ttm_tt_destroy(bo);
> >   		bo->ttm = NULL;
> >   	}
> >   	ttm_bo_mem_put(bo, &bo->mem);
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> > index 60f204d..aa09837 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> > @@ -51,7 +51,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
> >   	int ret;
> >
> >   	if (old_mem->mem_type != TTM_PL_SYSTEM) {
> > -		ttm_tt_unbind(ttm);
> > +		ttm_tt_unbind(bo);
> >   		ttm_bo_free_old_node(bo);
> >   		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
> >   				TTM_PL_MASK_MEM);
> > @@ -63,7 +63,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
> >   		return ret;
> >
> >   	if (new_mem->mem_type != TTM_PL_SYSTEM) {
> > -		ret = ttm_tt_bind(ttm, new_mem);
> > +		ret = ttm_tt_bind(bo, new_mem);
> >   		if (unlikely(ret != 0))
> >   			return ret;
> >   	}
> > @@ -381,8 +381,8 @@ out2:
> >   	new_mem->mm_node = NULL;
> >
> >   	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
> > -		ttm_tt_unbind(ttm);
> > -		ttm_tt_destroy(ttm);
> > +		ttm_tt_unbind(bo);
> > +		ttm_tt_destroy(bo);
> >   		bo->ttm = NULL;
> >   	}
> >
> > @@ -640,8 +640,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
> >
> >   		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
> >   		(bo->ttm != NULL)) {
> > -			ttm_tt_unbind(bo->ttm);
> > -			ttm_tt_destroy(bo->ttm);
> > +			ttm_tt_unbind(bo);
> > +			ttm_tt_destroy(bo);
> >   			bo->ttm = NULL;
> >   		}
> >   		ttm_bo_free_old_node(bo);
> > diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
> > index 58e1fa1..ceb8ef8 100644
> > --- a/drivers/gpu/drm/ttm/ttm_tt.c
> > +++ b/drivers/gpu/drm/ttm/ttm_tt.c
> > @@ -159,13 +159,15 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
> >   }
> >   EXPORT_SYMBOL(ttm_tt_set_placement_caching);
> >
> > -void ttm_tt_destroy(struct ttm_tt *ttm)
> > +void ttm_tt_destroy(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> > +
> >   	if (unlikely(ttm == NULL))
> >   		return;
> >
> >   	if (ttm->state == tt_bound) {
> > -		ttm_tt_unbind(ttm);
> > +		ttm_tt_unbind(bo);
> >   	}
> >
> >   	if (likely(ttm->pages != NULL)) {
> > @@ -194,7 +196,7 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
> >
> >   	ttm_tt_alloc_page_directory(ttm);
> >   	if (!ttm->pages) {
> > -		ttm_tt_destroy(ttm);
> > +		ttm->func->destroy(ttm);
> >   		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
> >   		return -ENOMEM;
> >   	}
> > @@ -226,7 +228,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
> >   	INIT_LIST_HEAD(&ttm_dma->pages_list);
> >   	ttm_dma_tt_alloc_page_directory(ttm_dma);
> >   	if (!ttm->pages || !ttm_dma->dma_address) {
> > -		ttm_tt_destroy(ttm);
> > +		ttm->func->destroy(ttm);
> >   		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
> >   		return -ENOMEM;
> >   	}
> > @@ -245,36 +247,36 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
> >   }
> >   EXPORT_SYMBOL(ttm_dma_tt_fini);
> >
> > -void ttm_tt_unbind(struct ttm_tt *ttm)
> > +void ttm_tt_unbind(struct ttm_buffer_object *bo)
> >   {
> >   	int ret;
> >
> > -	if (ttm->state == tt_bound) {
> > -		ret = ttm->func->unbind(ttm);
> > +	if (bo->ttm->state == tt_bound) {
> > +		ret = bo->ttm->func->unbind(bo);
> >   		BUG_ON(ret);
> > -		ttm->state = tt_unbound;
> > +		bo->ttm->state = tt_unbound;
> >   	}
> >   }
> >
> > -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> > +int ttm_tt_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
> >   {
> >   	int ret = 0;
> >
> > -	if (!ttm)
> > +	if (!bo->ttm)
> >   		return -EINVAL;
> >
> > -	if (ttm->state == tt_bound)
> > +	if (bo->ttm->state == tt_bound)
> >   		return 0;
> >
> > -	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
> > +	ret = bo->ttm->bdev->driver->ttm_tt_populate(bo->ttm);
> >   	if (ret)
> >   		return ret;
> >
> > -	ret = ttm->func->bind(ttm, bo_mem);
> > +	ret = bo->ttm->func->bind(bo, bo_mem);
> >   	if (unlikely(ret != 0))
> >   		return ret;
> >
> > -	ttm->state = tt_bound;
> > +	bo->ttm->state = tt_bound;
> >
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> > index 1e2c0fb..68ae2d9 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> > @@ -146,8 +146,9 @@ struct vmw_ttm_tt {
> >   	int gmr_id;
> >   };
> >
> > -static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> > +static int vmw_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
> >
> >   	vmw_be->gmr_id = bo_mem->start;
> > @@ -156,8 +157,9 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> >   			    ttm->num_pages, vmw_be->gmr_id);
> >   }
> >
> > -static int vmw_ttm_unbind(struct ttm_tt *ttm)
> > +static int vmw_ttm_unbind(struct ttm_buffer_object *bo)
> >   {
> > +	struct ttm_tt *ttm = bo->ttm;
> >   	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
> >
> >   	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
> > diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> > index 2be8891..4ae39aa 100644
> > --- a/include/drm/ttm/ttm_bo_driver.h
> > +++ b/include/drm/ttm/ttm_bo_driver.h
> > @@ -45,7 +45,7 @@ struct ttm_backend_func {
> >   	/**
> >   	 * struct ttm_backend_func member bind
> >   	 *
> > -	 * @ttm: Pointer to a struct ttm_tt.
> > +	 * @bo: buffer object holding the ttm_tt struct we are binding.
> >   	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
> >   	 * memory type and location for binding.
> >   	 *
> > @@ -53,7 +53,7 @@ struct ttm_backend_func {
> >   	 * indicated by @bo_mem. This function should be able to handle
> >   	 * differences between aperture and system page sizes.
> >   	 */
> > -	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
> > +	int (*bind) (struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
> >
> >   	/**
> >   	 * struct ttm_backend_func member unbind
> > @@ -63,7 +63,7 @@ struct ttm_backend_func {
> >   	 * Unbind previously bound backend pages. This function should be
> >   	 * able to handle differences between aperture and system page sizes.
> >   	 */
> > -	int (*unbind) (struct ttm_tt *ttm);
> > +	int (*unbind) (struct ttm_buffer_object *bo);
> >
> >   	/**
> >   	 * struct ttm_backend_func member destroy
> > @@ -625,16 +625,17 @@ extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
> >    *
> >    * Bind the pages of @ttm to an aperture location identified by @bo_mem
> >    */
> > -extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
> > +extern int ttm_tt_bind(struct ttm_buffer_object *bo,
> > +		       struct ttm_mem_reg *bo_mem);
> >
> >   /**
> >    * ttm_ttm_destroy:
> >    *
> > - * @ttm: The struct ttm_tt.
> > + * @bo: buffer object holding the struct ttm_tt.
> >    *
> >    * Unbind, unpopulate and destroy common struct ttm_tt.
> >    */
> > -extern void ttm_tt_destroy(struct ttm_tt *ttm);
> > +extern void ttm_tt_destroy(struct ttm_buffer_object *bo);
> >
> >   /**
> >    * ttm_ttm_unbind:
> > @@ -643,7 +644,7 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
> >    *
> >    * Unbind a struct ttm_tt.
> >    */
> > -extern void ttm_tt_unbind(struct ttm_tt *ttm);
> > +extern void ttm_tt_unbind(struct ttm_buffer_object *bo);
> >
> >   /**
> >    * ttm_tt_swapin:
> >    
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel



