[Intel-xe] [PATCH 3/4] drm/xe: Size GT device memory correctly
Michael J. Ruhl
michael.j.ruhl at intel.com
Fri Apr 28 21:19:47 UTC 2023
The current method of sizing GT device memory is not quite right.
Update the algorithm to use the relevant HW information and offsets
to set up the sizing correctly.
Update the stolen memory sizing to reflect the changes, and to be
GT specific.
Signed-off-by: Michael J. Ruhl <michael.j.ruhl at intel.com>
---
drivers/gpu/drm/xe/xe_device_types.h | 4 +-
drivers/gpu/drm/xe/xe_gt_types.h | 14 ++--
drivers/gpu/drm/xe/xe_mmio.c | 102 ++++++++++++++-----------
drivers/gpu/drm/xe/xe_mmio.h | 2 +-
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 2 +
5 files changed, 71 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 2eeb10e97381..8898aea4bc2b 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2022 Intel Corporation
+ * Copyright © 2022-2023 Intel Corporation
*/
#ifndef _XE_DEVICE_TYPES_H_
@@ -202,6 +202,8 @@ struct xe_device {
* known as small-bar.
*/
resource_size_t io_size;
+ /** @base: Offset to apply for Device Physical Address control */
+ resource_size_t base;
/** @mapping: pointer to VRAM mappable space */
void *__iomem mapping;
} vram;
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 7c47d67aa8be..47f059bb8c6d 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2022 Intel Corporation
+ * Copyright © 2022-2023 Intel Corporation
*/
#ifndef _XE_GT_TYPES_H_
@@ -148,13 +148,15 @@ struct xe_gt {
/**
* @io_size: IO size of this VRAM instance
*
- * This represents how much of this VRAM we can access
- * via the CPU through the VRAM BAR. This can be smaller
- * than @size, in which case only part of VRAM is CPU
- * accessible (typically the first 256M). This
- * configuration is known as small-bar.
+ * This represents how much of the VRAM the CPU can access
+ * via the VRAM BAR.
+ * This can be smaller than the actual @size, in which
+ * case only part of VRAM is CPU accessible (typically
+ * the first 256M). This configuration is known as small-bar.
*/
resource_size_t io_size;
+ /** @base: offset of VRAM starting base */
+ resource_size_t base;
/** @size: size of VRAM. */
resource_size_t size;
/** @mapping: pointer to VRAM mappable space */
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index d259c11e7677..be1cd3cac661 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -3,6 +3,8 @@
* Copyright © 2021-2023 Intel Corporation
*/
+#include <linux/minmax.h>
+
#include "xe_mmio.h"
#include <drm/drm_managed.h>
@@ -184,6 +186,8 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
if (!xe->mem.vram.io_size)
return -EIO;
+ xe->mem.vram.base = 0; /* DPA offset */
+
/* set up a map to the total memory area. */
xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
@@ -245,13 +249,25 @@ int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64
int xe_mmio_probe_vram(struct xe_device *xe)
{
+ resource_size_t io_size;
+ u64 available_size = 0;
+ u64 total_size = 0;
struct xe_gt *gt;
+ u64 tile_offset;
+ u64 tile_size;
+ u64 vram_size;
int err;
u8 id;
if (!IS_DGFX(xe))
return 0;
+ /* Get the size of the gt0 vram for later accessibility comparison */
+ gt = xe_device_get_gt(xe, 0);
+ err = xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset);
+ if (err)
+ return err;
+
err = xe_determine_lmem_bar_size(xe);
if (err)
return err;
@@ -259,57 +275,56 @@ int xe_mmio_probe_vram(struct xe_device *xe)
drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
&xe->mem.vram.io_size);
- /* FIXME: Assuming equally partitioned VRAM, incorrect */
- if (xe->info.tile_count > 1) {
- u8 adj_tile_count = xe->info.tile_count;
- resource_size_t size, io_start, io_size;
+ /* small bar issues will only cover gt0 sizes */
+ if (xe->mem.vram.io_size < vram_size)
+ drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (0x%llx->0x%llx)\n",
+ vram_size, (u64)xe->mem.vram.io_size);
- for_each_gt(gt, xe, id)
- if (xe_gt_is_media_type(gt))
- --adj_tile_count;
+ io_size = xe->mem.vram.io_size;
- XE_BUG_ON(!adj_tile_count);
+ /* gt specific ranges */
+ for_each_gt(gt, xe, id) {
+ if (xe_gt_is_media_type(gt))
+ continue;
- size = xe->mem.vram.io_size / adj_tile_count;
- io_start = xe->mem.vram.io_start;
- io_size = xe->mem.vram.io_size;
+ err = xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset);
+ if (err)
+ return err;
- for_each_gt(gt, xe, id) {
- if (id && !xe_gt_is_media_type(gt)) {
- io_size -= min(io_size, size);
- io_start += io_size;
- }
+ gt->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
+ gt->mem.vram.io_size = min_t(u64, vram_size, io_size);
- gt->mem.vram.size = size;
-
- /*
- * XXX: multi-tile small-bar might be wild. Hopefully
- * full tile without any mappable vram is not something
- * we care about.
- */
-
- gt->mem.vram.io_size = min(size, io_size);
- if (io_size) {
- gt->mem.vram.io_start = io_start;
- gt->mem.vram.mapping = xe->mem.vram.mapping +
- (io_start - xe->mem.vram.io_start);
- } else {
- drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
- return -ENODEV;
- }
+ if (!gt->mem.vram.io_size) {
+ drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
+ return -ENODEV;
+ }
+
+ gt->mem.vram.base = xe->mem.vram.base + tile_offset;
+ gt->mem.vram.size = vram_size;
+ gt->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
+
+ drm_info(&xe->drm, "VRAM[%u, %u]: %pa, %pa\n", id, gt->info.vram_id,
+ &gt->mem.vram.io_start, &gt->mem.vram.size);
+
+ if (gt->mem.vram.io_size < gt->mem.vram.size)
+ drm_info(&xe->drm, "VRAM[%u, %u]: CPU access limited to %pa\n", id,
+ gt->info.vram_id, &gt->mem.vram.io_size);
+
+ /* calculate total size using tile size to get the correct HW sizing */
+ total_size += tile_size;
+ available_size += vram_size;
- drm_info(&xe->drm, "VRAM[%u, %u]: %pa, %pa\n",
- id, gt->info.vram_id, &gt->mem.vram.io_start,
- &gt->mem.vram.size);
+ if (total_size > xe->mem.vram.io_size) {
+ drm_warn(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
+ &total_size, &xe->mem.vram.io_size);
}
- } else {
- gt->mem.vram.size = xe->mem.vram.io_size;
- gt->mem.vram.io_start = xe->mem.vram.io_start;
- gt->mem.vram.io_size = xe->mem.vram.io_size;
- gt->mem.vram.mapping = xe->mem.vram.mapping;
- drm_info(&xe->drm, "VRAM: %pa\n", &gt->mem.vram.size);
+ io_size -= min_t(u64, tile_size, io_size);
}
+
+ drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
+ &available_size);
+
return 0;
}
@@ -329,9 +344,6 @@ static void xe_mmio_probe_tiles(struct xe_device *xe)
if (xe->info.media_verx100 >= 1300)
xe->info.tile_count *= 2;
- drm_info(&xe->drm, "tile_count: %d, adj_tile_count %d\n",
- xe->info.tile_count, adj_tile_count);
-
if (xe->info.tile_count > 1) {
const int mmio_bar = 0;
size_t size;
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 556cf3d9e4f5..d284f84a2e60 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2021-2023 Intel Corporation
*/
#ifndef _XE_MMIO_H_
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index a329f12f14fe..72e0a65b1d71 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -12,9 +12,11 @@
#include <drm/ttm/ttm_range_manager.h>
#include "regs/xe_regs.h"
+#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
#include "xe_ttm_stolen_mgr.h"
--
2.39.2
More information about the Intel-xe
mailing list