[PATCH 4/5] drm/amdgpu: remove adev pointer from struct amdgpu_bo
Alex Deucher
alexdeucher at gmail.com
Thu Sep 29 16:39:25 UTC 2016
On Thu, Sep 29, 2016 at 4:05 AM, Christian König
<deathsimple at vodafone.de> wrote:
> Am 15.09.2016 um 20:30 schrieb Alex Deucher:
>>
>> On Thu, Sep 15, 2016 at 9:10 AM, Christian König
>> <deathsimple at vodafone.de> wrote:
>>>
>>> From: Christian König <christian.koenig at amd.com>
>>>
>>> It's completely pointless to have two pointers to the
>>> device in the same structure.
>>>
>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +++-
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 +--
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4 +--
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 50 ++++++++++++++++--------------
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 +-
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++-------
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
>>> 9 files changed, 50 insertions(+), 48 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> index 3b3981d..ed91f08 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> @@ -465,7 +465,6 @@ struct amdgpu_bo {
>>> */
>>> struct list_head va;
>>> /* Constant after initialization */
>>> - struct amdgpu_device *adev;
>>> struct drm_gem_object gem_base;
>>> struct amdgpu_bo *parent;
>>> struct amdgpu_bo *shadow;
>>> @@ -2144,6 +2143,11 @@ struct amdgpu_device {
>>>
>>> };
>>>
>>> +static inline struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
>>> +{
>>> + return container_of(bdev, struct amdgpu_device, mman.bdev);
>>> +}
>>
>> Maybe call this amdgpu_ttm_get_adev() to make it clear that it gets the
>> adev from a ttm bo device?
>
>
> Do you have a strong opinion on this? I thought about that as well, but
> found it a bit long. A few lines would then become longer than 80
> characters again.
My main concern is that we might eventually add another accessor for an
adev embedded in something else. How about amdgpu_ttm_to_adev() or
amdgpu_ttm_adev()?
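
E.g., just as a sketch of the rename (the container_of body stays exactly
as in your patch, only the name differs):

    static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
    {
            return container_of(bdev, struct amdgpu_device, mman.bdev);
    }

    /* callers would then look like: */
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);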
Alex
>
> I just sent the patch out with a bunch of other cleanups as well; please
> take another look.
>
> Christian.
>
>
>>
>> Alex
>>
>>> +
>>> bool amdgpu_device_is_px(struct drm_device *dev);
>>> int amdgpu_device_init(struct amdgpu_device *adev,
>>> struct drm_device *ddev,
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>> index 187c366..5beab71 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>> @@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
>>> static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
>>> struct amdgpu_bo *bo)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> u64 initial_bytes_moved;
>>> uint32_t domain;
>>> int r;
>>> @@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
>>>
>>> retry:
>>> amdgpu_ttm_placement_from_domain(bo, domain);
>>> - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
>>> + initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
>>> r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
>>> - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
>>> + p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
>>> initial_bytes_moved;
>>>
>>> if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
>>> @@ -400,6 +401,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
>>>
>>> struct amdgpu_bo_list_entry *candidate = p->evictable;
>>> struct amdgpu_bo *bo = candidate->robj;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> u64 initial_bytes_moved;
>>> uint32_t other;
>>>
>>> @@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
>>>
>>> /* Good we can try to move this BO somewhere else */
>>> amdgpu_ttm_placement_from_domain(bo, other);
>>> - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
>>> + initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
>>> r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
>>> false);
>>> - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
>>> + p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
>>> initial_bytes_moved;
>>>
>>> if (unlikely(r))
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> index 70e294b..480ae7e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> @@ -121,7 +121,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
>>> struct drm_file *file_priv)
>>> {
>>> struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
>>> - struct amdgpu_device *adev = rbo->adev;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(rbo->tbo.bdev);
>>> struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
>>> struct amdgpu_vm *vm = &fpriv->vm;
>>> struct amdgpu_bo_va *bo_va;
>>> @@ -144,7 +144,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
>>> struct drm_file *file_priv)
>>> {
>>> struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
>>> - struct amdgpu_device *adev = bo->adev;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
>>> struct amdgpu_vm *vm = &fpriv->vm;
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
>>> index 32fa7b7..4731231 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
>>> @@ -285,7 +285,7 @@ free_rmn:
>>> int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
>>> {
>>> unsigned long end = addr + amdgpu_bo_size(bo) - 1;
>>> - struct amdgpu_device *adev = bo->adev;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> struct amdgpu_mn *rmn;
>>> struct amdgpu_mn_node *node = NULL;
>>> struct list_head bos;
>>> @@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
>>> */
>>> void amdgpu_mn_unregister(struct amdgpu_bo *bo)
>>> {
>>> - struct amdgpu_device *adev = bo->adev;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> struct amdgpu_mn *rmn;
>>> struct list_head *head;
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>>> index 4289c31..5cbe803 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>>> @@ -90,18 +90,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
>>>
>>> static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
>>> struct amdgpu_bo *bo;
>>>
>>> bo = container_of(tbo, struct amdgpu_bo, tbo);
>>>
>>> - amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
>>> + amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
>>>
>>> drm_gem_object_release(&bo->gem_base);
>>> amdgpu_bo_unref(&bo->parent);
>>> if (!list_empty(&bo->shadow_list)) {
>>> - mutex_lock(&bo->adev->shadow_list_lock);
>>> + mutex_lock(&adev->shadow_list_lock);
>>> list_del_init(&bo->shadow_list);
>>> - mutex_unlock(&bo->adev->shadow_list_lock);
>>> + mutex_unlock(&adev->shadow_list_lock);
>>> }
>>> kfree(bo->metadata);
>>> kfree(bo);
>>> @@ -212,8 +213,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
>>>
>>> void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
>>> {
>>> - amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
>>> - rbo->placements, domain, rbo->flags);
>>> + struct amdgpu_device *adev = amdgpu_get_adev(rbo->tbo.bdev);
>>> +
>>> + amdgpu_ttm_placement_init(adev, &rbo->placement, rbo->placements,
>>> + domain, rbo->flags);
>>> }
>>>
>>> static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
>>> @@ -359,7 +362,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
>>> kfree(bo);
>>> return r;
>>> }
>>> - bo->adev = adev;
>>> INIT_LIST_HEAD(&bo->shadow_list);
>>> INIT_LIST_HEAD(&bo->va);
>>> bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
>>> @@ -624,6 +626,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
>>> u64 min_offset, u64 max_offset,
>>> u64 *gpu_addr)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> int r, i;
>>> unsigned fpfn, lpfn;
>>>
>>> @@ -659,12 +662,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
>>> if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
>>> !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
>>> (!max_offset || max_offset >
>>> - bo->adev->mc.visible_vram_size)) {
>>> + adev->mc.visible_vram_size)) {
>>> if (WARN_ON_ONCE(min_offset >
>>> - bo->adev->mc.visible_vram_size))
>>> + adev->mc.visible_vram_size))
>>> return -EINVAL;
>>> fpfn = min_offset >> PAGE_SHIFT;
>>> - lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
>>> + lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
>>> } else {
>>> fpfn = min_offset >> PAGE_SHIFT;
>>> lpfn = max_offset >> PAGE_SHIFT;
>>> @@ -679,12 +682,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
>>>
>>> r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
>>> if (unlikely(r)) {
>>> - dev_err(bo->adev->dev, "%p pin failed\n", bo);
>>> + dev_err(adev->dev, "%p pin failed\n", bo);
>>> goto error;
>>> }
>>> r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
>>> if (unlikely(r)) {
>>> - dev_err(bo->adev->dev, "%p bind failed\n", bo);
>>> + dev_err(adev->dev, "%p bind failed\n", bo);
>>> goto error;
>>> }
>>>
>>> @@ -692,11 +695,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
>>> if (gpu_addr != NULL)
>>> *gpu_addr = amdgpu_bo_gpu_offset(bo);
>>> if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
>>> - bo->adev->vram_pin_size += amdgpu_bo_size(bo);
>>> + adev->vram_pin_size += amdgpu_bo_size(bo);
>>> if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
>>> - bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
>>> + adev->invisible_pin_size += amdgpu_bo_size(bo);
>>> } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
>>> - bo->adev->gart_pin_size += amdgpu_bo_size(bo);
>>> + adev->gart_pin_size += amdgpu_bo_size(bo);
>>> }
>>>
>>> error:
>>> @@ -710,10 +713,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
>>>
>>> int amdgpu_bo_unpin(struct amdgpu_bo *bo)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> int r, i;
>>>
>>> if (!bo->pin_count) {
>>> - dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
>>> + dev_warn(adev->dev, "%p unpin not necessary\n", bo);
>>> return 0;
>>> }
>>> bo->pin_count--;
>>> @@ -725,16 +729,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
>>> }
>>> r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
>>> if (unlikely(r)) {
>>> - dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
>>> + dev_err(adev->dev, "%p validate failed for unpin\n", bo);
>>> goto error;
>>> }
>>>
>>> if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
>>> - bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
>>> + adev->vram_pin_size -= amdgpu_bo_size(bo);
>>> if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
>>> - bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
>>> + adev->invisible_pin_size -= amdgpu_bo_size(bo);
>>> } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
>>> - bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
>>> + adev->gart_pin_size -= amdgpu_bo_size(bo);
>>> }
>>>
>>> error:
>>> @@ -859,6 +863,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
>>> void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
>>> struct ttm_mem_reg *new_mem)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
>>> struct amdgpu_bo *rbo;
>>> struct ttm_mem_reg *old_mem = &bo->mem;
>>>
>>> @@ -866,21 +871,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
>>> return;
>>>
>>> rbo = container_of(bo, struct amdgpu_bo, tbo);
>>> - amdgpu_vm_bo_invalidate(rbo->adev, rbo);
>>> + amdgpu_vm_bo_invalidate(adev, rbo);
>>>
>>> /* update statistics */
>>> if (!new_mem)
>>> return;
>>>
>>> /* move_notify is called before move happens */
>>> - amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
>>> + amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
>>>
>>> trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
>>> }
>>>
>>> int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
>>> {
>>> - struct amdgpu_device *adev;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
>>> struct amdgpu_bo *abo;
>>> unsigned long offset, size, lpfn;
>>> int i, r;
>>> @@ -889,7 +894,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
>>> return 0;
>>>
>>> abo = container_of(bo, struct amdgpu_bo, tbo);
>>> - adev = abo->adev;
>>> if (bo->mem.mem_type != TTM_PL_VRAM)
>>> return 0;
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>>> index 8255034..c9b2c01 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>>> @@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
>>> */
>>> static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> int r;
>>>
>>> r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
>>> if (unlikely(r != 0)) {
>>> if (r != -ERESTARTSYS)
>>> - dev_err(bo->adev->dev, "%p reserve failed\n", bo);
>>> + dev_err(adev->dev, "%p reserve failed\n", bo);
>>> return r;
>>> }
>>> return 0;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>> index 1965209..d246a1f 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>> @@ -51,16 +51,6 @@
>>> static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
>>> static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
>>>
>>> -static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
>>> -{
>>> - struct amdgpu_mman *mman;
>>> - struct amdgpu_device *adev;
>>> -
>>> - mman = container_of(bdev, struct amdgpu_mman, bdev);
>>> - adev = container_of(mman, struct amdgpu_device, mman);
>>> - return adev;
>>> -}
>>> -
>>>
>>> /*
>>> * Global memory.
>>> @@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
>>> static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>>> struct ttm_placement *placement)
>>> {
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
>>> struct amdgpu_bo *rbo;
>>> static struct ttm_place placements = {
>>> .fpfn = 0,
>>> @@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>>> rbo = container_of(bo, struct amdgpu_bo, tbo);
>>> switch (bo->mem.mem_type) {
>>> case TTM_PL_VRAM:
>>> - if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
>>> + if (adev->mman.buffer_funcs_ring->ready == false) {
>>> amdgpu_ttm_placement_from_domain(rbo,
>>> AMDGPU_GEM_DOMAIN_CPU);
>>> } else {
>>> @@ -231,7 +222,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>>> * allocating address space for the BO.
>>> */
>>> rbo->placements[i].lpfn =
>>> - rbo->adev->mc.gtt_size >> PAGE_SHIFT;
>>> + adev->mc.gtt_size >> PAGE_SHIFT;
>>> }
>>> }
>>> break;
>>> @@ -1384,7 +1375,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>>> uint32_t src_data,
>>> struct reservation_object *resv,
>>> struct fence **fence)
>>> {
>>> - struct amdgpu_device *adev = bo->adev;
>>> + struct amdgpu_device *adev = amdgpu_get_adev(bo->tbo.bdev);
>>> struct amdgpu_job *job;
>>> struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> index e6842b0..87bcac3 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> @@ -931,7 +931,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> if (r)
>>> return r;
>>>
>>> - if (!bo->adev->uvd.address_64_bit) {
>>> + if (!ring->adev->uvd.address_64_bit) {
>>> amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>>> amdgpu_uvd_force_into_uvd_segment(bo);
>>> }
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> index d3a2d1f..5dba9f7 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> @@ -1175,7 +1175,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>>
>>> flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
>>> gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>>> - adev == bo_va->bo->adev) ? flags : 0;
>>> + adev == amdgpu_get_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
>>>
>>> spin_lock(&vm->status_lock);
>>> if (!list_empty(&bo_va->vm_status))
>>> --
>>> 2.5.0
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx at lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>
>