[PATCH 4/7] drm/amdgpu: use the AGP aperture for system memory access v2
Zhang, Jerry (Junwei)
Jerry.Zhang at amd.com
Fri Aug 31 01:39:38 UTC 2018
On 08/30/2018 08:15 PM, Christian König wrote:
> Am 30.08.2018 um 05:20 schrieb Zhang, Jerry (Junwei):
>> On 08/29/2018 10:08 PM, Christian König wrote:
>>> Start to use the old AGP aperture for system memory access.
>>>
>>> v2: Move that to amdgpu_ttm_alloc_gart
>>>
>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 23 ++++++++++
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 +
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 58 ++++++++++++++-----------
>>> 3 files changed, 57 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
>>> index 1d201fd3f4af..65aee57b35fe 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
>>> @@ -79,6 +79,29 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
>>> return pd_addr;
>>> }
>>>
>>> +/**
>>> + * amdgpu_gmc_agp_addr - return the address in the AGP address space
>>> + *
>>> + * @tbo: TTM BO which needs the address, must be in GTT domain
>>> + *
>>> + * Tries to figure out how to access the BO through the AGP aperture. Returns
>>> + * AMDGPU_BO_INVALID_OFFSET if that is not possible.
>>> + */
>>> +uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
>>> +{
>>> + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
>>> + struct ttm_dma_tt *ttm;
>>> +
>>> + if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
>>> + return AMDGPU_BO_INVALID_OFFSET;
>>
>> If the GTT BO size is 1 page, will it also be accessed through the AGP address space?
>
> Yes, that is the idea here.
>
> We basically can avoid GART mappings for BOs in the GTT domain which are only one page in size.
Thanks for explaining that, got the intention.
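
For my own reference, a minimal standalone sketch of the decision this patch adds. The struct and constants below are simplified stand-ins for the real amdgpu/TTM types (not the kernel API); it only mirrors the checks in amdgpu_gmc_agp_addr() and the fallback in amdgpu_ttm_alloc_gart():

/* Model of the AGP-aperture shortcut: single-page, uncached GTT BOs
 * whose system page fits in the AGP window get an AGP address and
 * need no GART entry; everything else falls back to GART space.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE          4096ULL
#define BO_INVALID_OFFSET  (~0ULL)      /* stands in for AMDGPU_BO_INVALID_OFFSET */

struct model_bo {
	unsigned long num_pages;        /* BO size in pages */
	bool          cached;           /* models ttm->caching_state == tt_cached */
	uint64_t      dma_addr0;        /* DMA address of the first (only) page */
};

static uint64_t model_agp_addr(const struct model_bo *bo,
			       uint64_t agp_start, uint64_t agp_size)
{
	if (bo->num_pages != 1 || bo->cached)
		return BO_INVALID_OFFSET;

	if (bo->dma_addr0 + PAGE_SIZE >= agp_size)
		return BO_INVALID_OFFSET;

	return agp_start + bo->dma_addr0;
}

int main(void)
{
	struct model_bo bo = { .num_pages = 1, .cached = false,
			       .dma_addr0 = 0x100000 };
	uint64_t addr = model_agp_addr(&bo, 0xF400000000ULL, 1ULL << 30);

	if (addr != BO_INVALID_OFFSET)
		printf("use AGP aperture at 0x%llx, no GART mapping needed\n",
		       (unsigned long long)addr);
	else
		printf("fall back to allocating GART space\n");
	return 0;
}
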
Jerry
>
> Christian.
>
>>
>> Jerry
>>> +
>>> + ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
>>> + if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
>>> + return AMDGPU_BO_INVALID_OFFSET;
>>> +
>>> + return adev->gmc.agp_start + ttm->dma_address[0];
>>> +}
>>> +
>>> /**
>>> * amdgpu_gmc_vram_location - try to find VRAM location
>>> *
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
>>> index c9985e7dc9e5..265ca415c64c 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
>>> @@ -163,6 +163,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
>>> void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
>>> uint64_t *addr, uint64_t *flags);
>>> uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
>>> +uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
>>> void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
>>> u64 base);
>>> void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>> index d9f3201c9e5c..8a158ee922f7 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>>> @@ -1081,41 +1081,49 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
>>> struct ttm_mem_reg tmp;
>>> struct ttm_placement placement;
>>> struct ttm_place placements;
>>> - uint64_t flags;
>>> + uint64_t addr, flags;
>>> int r;
>>>
>>> if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
>>> return 0;
>>>
>>> - /* allocate GART space */
>>> - tmp = bo->mem;
>>> - tmp.mm_node = NULL;
>>> - placement.num_placement = 1;
>>> - placement.placement = &placements;
>>> - placement.num_busy_placement = 1;
>>> - placement.busy_placement = &placements;
>>> - placements.fpfn = 0;
>>> - placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
>>> - placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
>>> - TTM_PL_FLAG_TT;
>>> + addr = amdgpu_gmc_agp_addr(bo);
>>> + if (addr != AMDGPU_BO_INVALID_OFFSET) {
>>> + bo->mem.start = addr >> PAGE_SHIFT;
>>> + } else {
>>>
>>> - r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
>>> - if (unlikely(r))
>>> - return r;
>>> + /* allocate GART space */
>>> + tmp = bo->mem;
>>> + tmp.mm_node = NULL;
>>> + placement.num_placement = 1;
>>> + placement.placement = &placements;
>>> + placement.num_busy_placement = 1;
>>> + placement.busy_placement = &placements;
>>> + placements.fpfn = 0;
>>> + placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
>>> + placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
>>> + TTM_PL_FLAG_TT;
>>> +
>>> + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
>>> + if (unlikely(r))
>>> + return r;
>>>
>>> - /* compute PTE flags for this buffer object */
>>> - flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
>>> + /* compute PTE flags for this buffer object */
>>> + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
>>>
>>> - /* Bind pages */
>>> - gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start;
>>> - r = amdgpu_ttm_gart_bind(adev, bo, flags);
>>> - if (unlikely(r)) {
>>> - ttm_bo_mem_put(bo, &tmp);
>>> - return r;
>>> + /* Bind pages */
>>> + gtt->offset = ((u64)tmp.start << PAGE_SHIFT) -
>>> + adev->gmc.gart_start;
>>> + r = amdgpu_ttm_gart_bind(adev, bo, flags);
>>> + if (unlikely(r)) {
>>> + ttm_bo_mem_put(bo, &tmp);
>>> + return r;
>>> + }
>>> +
>>> + ttm_bo_mem_put(bo, &bo->mem);
>>> + bo->mem = tmp;
>>> }
>>>
>>> - ttm_bo_mem_put(bo, &bo->mem);
>>> - bo->mem = tmp;
>>> bo->offset = (bo->mem.start << PAGE_SHIFT) +
>>> bo->bdev->man[bo->mem.mem_type].gpu_offset;
>>>
>>>
>