[PATCH 4/7] drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver
Christian König
ckoenig.leichtzumerken at gmail.com
Wed Dec 20 14:20:27 UTC 2017
On 20.12.2017 at 11:34, Roger He wrote:
> Change-Id: I803ea52d11e5c06add0dffab836c3aecc00b56dd
> Signed-off-by: Roger He <Hongbo.He at amd.com>
Please add a proper commit message! And please double check the coding
style of ast_ttm_tt_populate.
With that fixed, the patch is Reviewed-by: Christian König
<christian.koenig at amd.com>.
Regards,
Christian.
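
The interface change itself is mechanical: every driver's ttm_tt_populate
callback gains a struct ttm_operation_ctx * parameter and forwards it to
ttm_pool_populate()/ttm_dma_populate(), while internal TTM paths that have
no caller-provided context (kmap, bind, VM fault) now build a default one
on the stack. A minimal sketch of the pattern, mirroring the hunks quoted
below; the example_* names and exact header list are illustrative, not part
of the patch:

#include <drm/ttm/ttm_bo_api.h>     /* struct ttm_operation_ctx */
#include <drm/ttm/ttm_bo_driver.h>  /* struct ttm_tt, struct ttm_bo_driver */
#include <drm/ttm/ttm_page_alloc.h> /* ttm_pool_populate() */

/* Driver side: accept the caller's context and pass it to the allocator. */
static int example_ttm_tt_populate(struct ttm_tt *ttm,
				   struct ttm_operation_ctx *ctx)
{
	if (ttm->state != tt_unpopulated)
		return 0;

	return ttm_pool_populate(ttm, ctx);
}

/* Caller side: internal paths supply the previously implicit defaults. */
static int example_populate_default(struct ttm_tt *ttm)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	return ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
}
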
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ++++---
> drivers/gpu/drm/ast/ast_ttm.c | 5 +++--
> drivers/gpu/drm/cirrus/cirrus_ttm.c | 5 +++--
> drivers/gpu/drm/nouveau/nouveau_bo.c | 8 ++++----
> drivers/gpu/drm/qxl/qxl_ttm.c | 5 +++--
> drivers/gpu/drm/radeon/radeon_ttm.c | 9 +++++----
> drivers/gpu/drm/ttm/ttm_agp_backend.c | 4 ++--
> drivers/gpu/drm/ttm/ttm_bo_util.c | 11 ++++++++---
> drivers/gpu/drm/ttm/ttm_bo_vm.c | 7 ++++++-
> drivers/gpu/drm/ttm/ttm_page_alloc.c | 13 +++++--------
> drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 11 ++++-------
> drivers/gpu/drm/ttm/ttm_tt.c | 6 +++++-
> drivers/gpu/drm/virtio/virtgpu_object.c | 6 +++++-
> drivers/gpu/drm/virtio/virtgpu_ttm.c | 5 +++--
> drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 13 +++++--------
> drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 13 +++++++++++--
> include/drm/ttm/ttm_bo_driver.h | 5 +++--
> include/drm/ttm/ttm_page_alloc.h | 11 +++++++----
> 18 files changed, 86 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index f1b7d98..52aab9d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -990,7 +990,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
> return &gtt->ttm.ttm;
> }
>
> -static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
> +static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx)
> {
> struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
> struct amdgpu_ttm_tt *gtt = (void *)ttm;
> @@ -1018,11 +1019,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
>
> #ifdef CONFIG_SWIOTLB
> if (swiotlb_nr_tbl()) {
> - return ttm_dma_populate(&gtt->ttm, adev->dev);
> + return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
> }
> #endif
>
> - return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
> + return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
> }
>
> static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
> diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
> index 28da7c2..1413e94 100644
> --- a/drivers/gpu/drm/ast/ast_ttm.c
> +++ b/drivers/gpu/drm/ast/ast_ttm.c
> @@ -216,9 +216,10 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
> return tt;
> }
>
> -static int ast_ttm_tt_populate(struct ttm_tt *ttm)
> +static int ast_ttm_tt_populate(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx)
> {
> - return ttm_pool_populate(ttm);
> + return ttm_pool_populate(ttm, ctx);
> }
>
> static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
> diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
> index 2a5b54d..95e2d40 100644
> --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
> +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
> @@ -216,9 +216,10 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
> return tt;
> }
>
> -static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
> +static int cirrus_ttm_tt_populate(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx)
> {
> - return ttm_pool_populate(ttm);
> + return ttm_pool_populate(ttm, ctx);
> }
>
> static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index 6b6fb20..b141c27 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -1547,7 +1547,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
> }
>
> static int
> -nouveau_ttm_tt_populate(struct ttm_tt *ttm)
> +nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
> {
> struct ttm_dma_tt *ttm_dma = (void *)ttm;
> struct nouveau_drm *drm;
> @@ -1572,17 +1572,17 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
>
> #if IS_ENABLED(CONFIG_AGP)
> if (drm->agp.bridge) {
> - return ttm_agp_tt_populate(ttm);
> + return ttm_agp_tt_populate(ttm, ctx);
> }
> #endif
>
> #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
> if (swiotlb_nr_tbl()) {
> - return ttm_dma_populate((void *)ttm, dev);
> + return ttm_dma_populate((void *)ttm, dev, ctx);
> }
> #endif
>
> - r = ttm_pool_populate(ttm);
> + r = ttm_pool_populate(ttm, ctx);
> if (r) {
> return r;
> }
> diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
> index 78ce118..989645c 100644
> --- a/drivers/gpu/drm/qxl/qxl_ttm.c
> +++ b/drivers/gpu/drm/qxl/qxl_ttm.c
> @@ -291,14 +291,15 @@ static struct ttm_backend_func qxl_backend_func = {
> .destroy = &qxl_ttm_backend_destroy,
> };
>
> -static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
> +static int qxl_ttm_tt_populate(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx)
> {
> int r;
>
> if (ttm->state != tt_unpopulated)
> return 0;
>
> - r = ttm_pool_populate(ttm);
> + r = ttm_pool_populate(ttm, ctx);
> if (r)
> return r;
>
> diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
> index 557fd79..634fc6f 100644
> --- a/drivers/gpu/drm/radeon/radeon_ttm.c
> +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
> @@ -721,7 +721,8 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
> return (struct radeon_ttm_tt *)ttm;
> }
>
> -static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
> +static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx)
> {
> struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
> struct radeon_device *rdev;
> @@ -750,17 +751,17 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
> rdev = radeon_get_rdev(ttm->bdev);
> #if IS_ENABLED(CONFIG_AGP)
> if (rdev->flags & RADEON_IS_AGP) {
> - return ttm_agp_tt_populate(ttm);
> + return ttm_agp_tt_populate(ttm, ctx);
> }
> #endif
>
> #ifdef CONFIG_SWIOTLB
> if (swiotlb_nr_tbl()) {
> - return ttm_dma_populate(&gtt->ttm, rdev->dev);
> + return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
> }
> #endif
>
> - return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
> + return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
> }
>
> static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
> diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
> index 028ab60..3e795a0 100644
> --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
> +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
> @@ -133,12 +133,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
> }
> EXPORT_SYMBOL(ttm_agp_tt_create);
>
> -int ttm_agp_tt_populate(struct ttm_tt *ttm)
> +int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
> {
> if (ttm->state != tt_unpopulated)
> return 0;
>
> - return ttm_pool_populate(ttm);
> + return ttm_pool_populate(ttm, ctx);
> }
> EXPORT_SYMBOL(ttm_agp_tt_populate);
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 6e353df..b7eb507 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -376,7 +376,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
> * TTM might be null for moves within the same region.
> */
> if (ttm && ttm->state == tt_unpopulated) {
> - ret = ttm->bdev->driver->ttm_tt_populate(ttm);
> + ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
> if (ret)
> goto out1;
> }
> @@ -545,14 +545,19 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
> unsigned long num_pages,
> struct ttm_bo_kmap_obj *map)
> {
> - struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
> + struct ttm_mem_reg *mem = &bo->mem;
> + struct ttm_operation_ctx ctx = {
> + .interruptible = false,
> + .no_wait_gpu = false
> + };
> struct ttm_tt *ttm = bo->ttm;
> + pgprot_t prot;
> int ret;
>
> BUG_ON(!ttm);
>
> if (ttm->state == tt_unpopulated) {
> - ret = ttm->bdev->driver->ttm_tt_populate(ttm);
> + ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
> if (ret)
> return ret;
> }
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index c8ebb75..65dfcdd 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -215,12 +215,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
> cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
> cvma.vm_page_prot);
> } else {
> + struct ttm_operation_ctx ctx = {
> + .interruptible = false,
> + .no_wait_gpu = false
> + };
> +
> ttm = bo->ttm;
> cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
> cvma.vm_page_prot);
>
> /* Allocate all page at once, most common usage */
> - if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
> + if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
> retval = VM_FAULT_OOM;
> goto out_io_unlock;
> }
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 8f93ff3..f1a3d55 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -1058,13 +1058,9 @@ void ttm_page_alloc_fini(void)
> _manager = NULL;
> }
>
> -int ttm_pool_populate(struct ttm_tt *ttm)
> +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
> {
> struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
> - struct ttm_operation_ctx ctx = {
> - .interruptible = false,
> - .no_wait_gpu = false
> - };
> unsigned i;
> int ret;
>
> @@ -1080,7 +1076,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
>
> for (i = 0; i < ttm->num_pages; ++i) {
> ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> - PAGE_SIZE, &ctx);
> + PAGE_SIZE, ctx);
> if (unlikely(ret != 0)) {
> ttm_pool_unpopulate(ttm);
> return -ENOMEM;
> @@ -1117,12 +1113,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
> }
> EXPORT_SYMBOL(ttm_pool_unpopulate);
>
> -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
> +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
> + struct ttm_operation_ctx *ctx)
> {
> unsigned i, j;
> int r;
>
> - r = ttm_pool_populate(&tt->ttm);
> + r = ttm_pool_populate(&tt->ttm, ctx);
> if (r)
> return r;
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> index 8aac86a..3ac5391 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> @@ -923,14 +923,11 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
> * On success pages list will hold count number of correctly
> * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
> */
> -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
> +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
> + struct ttm_operation_ctx *ctx)
> {
> struct ttm_tt *ttm = &ttm_dma->ttm;
> struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
> - struct ttm_operation_ctx ctx = {
> - .interruptible = false,
> - .no_wait_gpu = false
> - };
> unsigned long num_pages = ttm->num_pages;
> struct dma_pool *pool;
> enum pool_type type;
> @@ -966,7 +963,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
> break;
>
> ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> - pool->size, &ctx);
> + pool->size, ctx);
> if (unlikely(ret != 0)) {
> ttm_dma_unpopulate(ttm_dma, dev);
> return -ENOMEM;
> @@ -1002,7 +999,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
> }
>
> ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> - pool->size, &ctx);
> + pool->size, ctx);
> if (unlikely(ret != 0)) {
> ttm_dma_unpopulate(ttm_dma, dev);
> return -ENOMEM;
> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
> index 8ebc8d3..b48d7a0 100644
> --- a/drivers/gpu/drm/ttm/ttm_tt.c
> +++ b/drivers/gpu/drm/ttm/ttm_tt.c
> @@ -263,6 +263,10 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
>
> int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> {
> + struct ttm_operation_ctx ctx = {
> + .interruptible = false,
> + .no_wait_gpu = false
> + };
> int ret = 0;
>
> if (!ttm)
> @@ -271,7 +275,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> if (ttm->state == tt_bound)
> return 0;
>
> - ret = ttm->bdev->driver->ttm_tt_populate(ttm);
> + ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
> if (ret)
> return ret;
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
> index 6f66b73..0b90cdb 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -124,13 +124,17 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
> int ret;
> struct page **pages = bo->tbo.ttm->pages;
> int nr_pages = bo->tbo.num_pages;
> + struct ttm_operation_ctx ctx = {
> + .interruptible = false,
> + .no_wait_gpu = false
> + };
>
> /* wtf swapping */
> if (bo->pages)
> return 0;
>
> if (bo->tbo.ttm->state == tt_unpopulated)
> - bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
> + bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
> bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> if (!bo->pages)
> goto out;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
> index 488c6bd..72eb417 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
> @@ -324,12 +324,13 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
> .destroy = &virtio_gpu_ttm_backend_destroy,
> };
>
> -static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
> +static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx)
> {
> if (ttm->state != tt_unpopulated)
> return 0;
>
> - return ttm_pool_populate(ttm);
> + return ttm_pool_populate(ttm, ctx);
> }
>
> static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> index ef97542..90b0d6b 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
> @@ -635,16 +635,12 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
> }
>
>
> -static int vmw_ttm_populate(struct ttm_tt *ttm)
> +static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
> {
> struct vmw_ttm_tt *vmw_tt =
> container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
> struct vmw_private *dev_priv = vmw_tt->dev_priv;
> struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
> - struct ttm_operation_ctx ctx = {
> - .interruptible = true,
> - .no_wait_gpu = false
> - };
> int ret;
>
> if (ttm->state != tt_unpopulated)
> @@ -653,15 +649,16 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
> if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
> size_t size =
> ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
> - ret = ttm_mem_global_alloc(glob, size, &ctx);
> + ret = ttm_mem_global_alloc(glob, size, ctx);
> if (unlikely(ret != 0))
> return ret;
>
> - ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
> + ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
> + ctx);
> if (unlikely(ret != 0))
> ttm_mem_global_free(glob, size);
> } else
> - ret = ttm_pool_populate(ttm);
> + ret = ttm_pool_populate(ttm, ctx);
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
> index b17f08f..736ca47 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
> @@ -240,6 +240,10 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
> unsigned long offset;
> unsigned long bo_size;
> struct vmw_otable *otables = batch->otables;
> + struct ttm_operation_ctx ctx = {
> + .interruptible = false,
> + .no_wait_gpu = false
> + };
> SVGAOTableType i;
> int ret;
>
> @@ -264,7 +268,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
>
> ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
> BUG_ON(ret != 0);
> - ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
> + ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
> if (unlikely(ret != 0))
> goto out_unreserve;
> ret = vmw_bo_map_dma(batch->otable_bo);
> @@ -430,6 +434,11 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
> struct vmw_mob *mob)
> {
> int ret;
> + struct ttm_operation_ctx ctx = {
> + .interruptible = false,
> + .no_wait_gpu = false
> + };
> +
> BUG_ON(mob->pt_bo != NULL);
>
> ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
> @@ -442,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
> ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
>
> BUG_ON(ret != 0);
> - ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
> + ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
> if (unlikely(ret != 0))
> goto out_unreserve;
> ret = vmw_bo_map_dma(mob->pt_bo);
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> index 934fecf..84860ec 100644
> --- a/include/drm/ttm/ttm_bo_driver.h
> +++ b/include/drm/ttm/ttm_bo_driver.h
> @@ -352,7 +352,8 @@ struct ttm_bo_driver {
> * Returns:
> * -ENOMEM: Out of memory.
> */
> - int (*ttm_tt_populate)(struct ttm_tt *ttm);
> + int (*ttm_tt_populate)(struct ttm_tt *ttm,
> + struct ttm_operation_ctx *ctx);
>
> /**
> * ttm_tt_unpopulate
> @@ -1077,7 +1078,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
> struct agp_bridge_data *bridge,
> unsigned long size, uint32_t page_flags,
> struct page *dummy_read_page);
> -int ttm_agp_tt_populate(struct ttm_tt *ttm);
> +int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
> void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
> #endif
>
> diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
> index 5938113..f8395dd 100644
> --- a/include/drm/ttm/ttm_page_alloc.h
> +++ b/include/drm/ttm/ttm_page_alloc.h
> @@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
> *
> * Add backing pages to all of @ttm
> */
> -int ttm_pool_populate(struct ttm_tt *ttm);
> +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
>
> /**
> * ttm_pool_unpopulate:
> @@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
> /**
> * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
> */
> -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
> +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
> + struct ttm_operation_ctx *ctx);
>
> /**
> * Unpopulates and DMA unmaps pages as part of a
> @@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void);
> */
> int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
>
> -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
> +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
> + struct ttm_operation_ctx *ctx);
> void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
>
> #else
> @@ -106,7 +108,8 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
> return 0;
> }
> static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
> - struct device *dev)
> + struct device *dev,
> + struct ttm_operation_ctx *ctx)
> {
> return -ENOMEM;
> }