[Intel-xe] [PATCH 3/4] drm/xe: Size GT device memory correctly

Ruhl, Michael J michael.j.ruhl at intel.com
Fri May 5 14:15:52 UTC 2023


>-----Original Message-----
>From: Auld, Matthew <matthew.auld at intel.com>
>Sent: Friday, May 5, 2023 6:39 AM
>To: Ruhl, Michael J <michael.j.ruhl at intel.com>; intel-xe at lists.freedesktop.org
>Cc: Brost, Matthew <matthew.brost at intel.com>; Kershner, David <david.kershner at intel.com>; Ghimiray, Himal Prasad <himal.prasad.ghimiray at intel.com>; Upadhyay, Tejas <tejas.upadhyay at intel.com>
>Subject: Re: [PATCH 3/4] drm/xe: Size GT device memory correctly
>
>On 04/05/2023 21:52, Michael J. Ruhl wrote:
>> The current method of sizing GT device memory is not quite right.
>>
>> Update the algorithm to use the relevant HW information and offsets
>> to set up the sizing correctly.
>>
>> Update the stolen memory sizing to reflect the changes, and to be
>> GT specific.
>>
>> Signed-off-by: Michael J. Ruhl <michael.j.ruhl at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_device_types.h   |  4 +-
>>   drivers/gpu/drm/xe/xe_gt_types.h       | 14 ++--
>>   drivers/gpu/drm/xe/xe_mmio.c           | 89 ++++++++++++--------------
>>   drivers/gpu/drm/xe/xe_mmio.h           |  2 +-
>>   drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c |  2 +
>>   5 files changed, 55 insertions(+), 56 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
>> index 2eeb10e97381..8898aea4bc2b 100644
>> --- a/drivers/gpu/drm/xe/xe_device_types.h
>> +++ b/drivers/gpu/drm/xe/xe_device_types.h
>> @@ -1,6 +1,6 @@
>>   /* SPDX-License-Identifier: MIT */
>>   /*
>> - * Copyright © 2022 Intel Corporation
>> + * Copyright © 2022-2023 Intel Corporation
>>    */
>>
>>   #ifndef _XE_DEVICE_TYPES_H_
>> @@ -202,6 +202,8 @@ struct xe_device {
>>   			 * known as small-bar.
>>   			 */
>>   			resource_size_t io_size;
>> +			/** @base: Offset to apply for Device Physical Address control */
>> +			resource_size_t base;
>>   			/** @mapping: pointer to VRAM mappable space */
>>   			void *__iomem mapping;
>>   		} vram;
>> diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
>> index 7c47d67aa8be..47f059bb8c6d 100644
>> --- a/drivers/gpu/drm/xe/xe_gt_types.h
>> +++ b/drivers/gpu/drm/xe/xe_gt_types.h
>> @@ -1,6 +1,6 @@
>>   /* SPDX-License-Identifier: MIT */
>>   /*
>> - * Copyright © 2022 Intel Corporation
>> + * Copyright © 2022-2023 Intel Corporation
>>    */
>>
>>   #ifndef _XE_GT_TYPES_H_
>> @@ -148,13 +148,15 @@ struct xe_gt {
>>   			/**
>>   			 * @io_size: IO size of this VRAM instance
>>   			 *
>> -			 * This represents how much of this VRAM we can access
>> -			 * via the CPU through the VRAM BAR. This can be smaller
>> -			 * than @size, in which case only part of VRAM is CPU
>> -			 * accessible (typically the first 256M). This
>> -			 * configuration is known as small-bar.
>> +			 * This represents how much of the VRAM the CPU can access
>> +			 * via the VRAM BAR.
>> +			 * This can be smaller than the actual @size, in which
>> +			 * case only part of VRAM is CPU accessible (typically
>> +			 * the first 256M). This configuration is known as small-bar.
>>   			 */
>>   			resource_size_t io_size;
>> +			/** @base: offset of VRAM starting base */
>> +			resource_size_t base;
>>   			/** @size: size of VRAM. */
>>   			resource_size_t size;
>>   			/** @mapping: pointer to VRAM mappable space */
>> diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
>> index 8ec044473886..739e04d066e4 100644
>> --- a/drivers/gpu/drm/xe/xe_mmio.c
>> +++ b/drivers/gpu/drm/xe/xe_mmio.c
>> @@ -185,6 +185,8 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
>>   	if (!xe->mem.vram.io_size)
>>   		return -EIO;
>>
>> +	xe->mem.vram.base = 0; /* DPA offset */
>> +
>>   	/* set up a map to the total memory area. */
>>   	xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
>>
>> @@ -246,6 +248,9 @@ int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64 *tile_offset)
>>
>>   int xe_mmio_probe_vram(struct xe_device *xe)
>>   {
>> +	resource_size_t io_size;
>> +	u64 available_size = 0;
>> +	u64 total_size = 0;
>>   	struct xe_gt *gt;
>>   	u64 tile_offset;
>>   	u64 tile_size;
>> @@ -271,63 +276,54 @@ int xe_mmio_probe_vram(struct xe_device *xe)
>>   		drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (0x%llx->0x%llx)\n",
>>   			 vram_size, (u64)xe->mem.vram.io_size);
>>
>> -	/* Limit size to available memory to account for the current memory algorithm */
>> -	xe->mem.vram.io_size = min_t(u64, xe->mem.vram.io_size, vram_size);
>> -
>>   	drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
>>   		 &xe->mem.vram.io_size);
>>
>> -	/* FIXME: Assuming equally partitioned VRAM, incorrect */
>> -	if (xe->info.tile_count > 1) {
>> -		u8 adj_tile_count = xe->info.tile_count;
>> -		resource_size_t size, io_start, io_size;
>> +	io_size = xe->mem.vram.io_size;
>>
>> -		for_each_gt(gt, xe, id)
>> -			if (xe_gt_is_media_type(gt))
>> -				--adj_tile_count;
>> +	/* gt specific ranges */
>> +	for_each_gt(gt, xe, id) {
>> +		if (xe_gt_is_media_type(gt))
>> +			continue;
>>
>> -		XE_BUG_ON(!adj_tile_count);
>> +		err = xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset);
>> +		if (err)
>> +			return err;
>>
>> -		size = xe->mem.vram.io_size / adj_tile_count;
>> -		io_start = xe->mem.vram.io_start;
>> -		io_size = xe->mem.vram.io_size;
>> +		gt->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
>> +		gt->mem.vram.io_size = min_t(u64, vram_size, io_size);
>>
>> -		for_each_gt(gt, xe, id) {
>> -			if (id && !xe_gt_is_media_type(gt)) {
>> -				io_size -= min(io_size, size);
>> -				io_start += io_size;
>> -			}
>> +		if (!gt->mem.vram.io_size) {
>> +			drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
>> +			return -ENODEV;
>> +		}
>>
>> -			gt->mem.vram.size = size;
>> -
>> -			/*
>> -			 * XXX: multi-tile small-bar might be wild. Hopefully
>> -			 * full tile without any mappable vram is not something
>> -			 * we care about.
>> -			 */
>> -
>> -			gt->mem.vram.io_size = min(size, io_size);
>> -			if (io_size) {
>> -				gt->mem.vram.io_start = io_start;
>> -				gt->mem.vram.mapping = xe->mem.vram.mapping +
>> -					(io_start - xe->mem.vram.io_start);
>> -			} else {
>> -				drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
>> -				return -ENODEV;
>> -			}
>> +		gt->mem.vram.base = xe->mem.vram.base + tile_offset;
>> +		gt->mem.vram.size = vram_size;
>> +		gt->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
>>
>> -			drm_info(&xe->drm, "VRAM[%u, %u]: %pa, %pa\n",
>> -				 id, gt->info.vram_id, &gt->mem.vram.io_start,
>> -				 &gt->mem.vram.size);
>> +		drm_info(&xe->drm, "VRAM[%u, %u]: %pa, %pa\n", id, gt->info.vram_id,
>> +			 &gt->mem.vram.io_start, &gt->mem.vram.size);
>> +
>> +		if (gt->mem.vram.io_size < gt->mem.vram.size)
>> +			drm_info(&xe->drm, "VRAM[%u, %u]: CPU access limited to %pa\n", id,
>> +				 gt->info.vram_id, &gt->mem.vram.io_size);
>> +
>> +		/* calculate total size using tile size to get the correct HW sizing */
>> +		total_size += tile_size;
>> +		available_size += vram_size;
>> +
>> +		if (total_size > xe->mem.vram.io_size) {
>> +			drm_warn(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
>> +				 &total_size, &xe->mem.vram.io_size);
>>   		}
>> -	} else {
>> -		gt->mem.vram.size = xe->mem.vram.io_size;
>> -		gt->mem.vram.io_start = xe->mem.vram.io_start;
>> -		gt->mem.vram.io_size = xe->mem.vram.io_size;
>> -		gt->mem.vram.mapping = xe->mem.vram.mapping;
>>
>> -		drm_info(&xe->drm, "VRAM: %pa\n", &gt->mem.vram.size);
>> +		io_size -= min_t(u64, tile_size, io_size);
>>   	}
>> +
>> +	drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
>> +		 &available_size);
>> +
>>   	return 0;
>>   }
>>
>> @@ -347,9 +343,6 @@ static void xe_mmio_probe_tiles(struct xe_device *xe)
>>   	if (xe->info.media_verx100 >= 1300)
>>   		xe->info.tile_count *= 2;
>>
>> -	drm_info(&xe->drm, "tile_count: %d, adj_tile_count %d\n",
>> -		 xe->info.tile_count, adj_tile_count);
>> -
>>   	if (xe->info.tile_count > 1) {
>>   		const int mmio_bar = 0;
>>   		size_t size;
>> diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
>> index 9bc00b0dbc4a..424c129ae0f3 100644
>> --- a/drivers/gpu/drm/xe/xe_mmio.h
>> +++ b/drivers/gpu/drm/xe/xe_mmio.h
>> @@ -1,6 +1,6 @@
>>   /* SPDX-License-Identifier: MIT */
>>   /*
>> - * Copyright © 2021 Intel Corporation
>> + * Copyright © 2021-2023 Intel Corporation
>>    */
>>
>>   #ifndef _XE_MMIO_H_
>> diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
>> index 7cd0c98ab8cb..55fa728f1bd3 100644
>> --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
>> +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
>> @@ -12,9 +12,11 @@
>>   #include <drm/ttm/ttm_range_manager.h>
>>
>>   #include "regs/xe_regs.h"
>> +#include "regs/xe_gt_regs.h"
>>   #include "xe_bo.h"
>>   #include "xe_device.h"
>>   #include "xe_gt.h"
>> +#include "xe_gt_mcr.h"
>
>Wrong patch maybe?

Umm... not sure, I will clean this up.

>Otherwise,
>Reviewed-by: Matthew Auld <matthew.auld at intel.com>

Thanks!

M

>>   #include "xe_mmio.h"
>>   #include "xe_res_cursor.h"
>>   #include "xe_ttm_stolen_mgr.h"
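
P.S. For anyone following along in the archives: below is a small standalone
sketch (plain C, not driver code) that models the per-tile loop above. The
tile sizes and offsets are made-up stand-ins for the values that
xe_mmio_tile_vram_size() reads from the hardware, usable VRAM size and tile
size are treated as equal for simplicity, and min_t() is replaced with a
plain macro.

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical two-tile device: 4G of VRAM per tile. */
	uint64_t tile_size[] = { 4ULL << 30, 4ULL << 30 };
	uint64_t tile_offset[] = { 0ULL, 4ULL << 30 };
	/* Remaining CPU-visible window; start with the full BAR size.
	 * Try 256ULL << 20 here to see the small-bar error path. */
	uint64_t io_size = 8ULL << 30;

	for (int id = 0; id < 2; id++) {
		/* mirrors: gt->mem.vram.io_size = min_t(u64, vram_size, io_size); */
		uint64_t gt_io_size = MIN(tile_size[id], io_size);

		if (!gt_io_size) {
			printf("tile%d: no CPU visible VRAM -> -ENODEV\n", id);
			return 1;
		}

		printf("tile%d: io_offset=0x%llx io_size=0x%llx\n", id,
		       (unsigned long long)tile_offset[id],
		       (unsigned long long)gt_io_size);

		/* mirrors: io_size -= min_t(u64, tile_size, io_size); */
		io_size -= MIN(tile_size[id], io_size);
	}

	return 0;
}

With an 8G BAR both tiles get a full 4G window. With a 256M BAR, tile 0 is
clamped to 256M and tile 1 computes a zero io_size, hitting the "Tile
without any CPU visible VRAM" error path in the loop above.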

