[PATCH 05/13] drm/amdgpu: add doorbell support to amdgpu_bar_mgr

Shashank Sharma shashank.sharma at amd.com
Fri Feb 3 19:08:22 UTC 2023


From: Alex Deucher <alexander.deucher at amd.com>

Adjust amdgpu_bar_mgr to handle the doorbell BAR as well as VRAM:
record the placement domain in the manager, select the matching
manager (vram_mgr or doorbell_mgr) and size at init/fini time, skip
visible-VRAM accounting for the doorbell domain, and expose doorbell
totals and usage through sysfs.
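
For reference, a minimal sketch of how the shared init path is meant
to be used for both domains (the actual call sites are not part of
this patch):

	/* sketch only: amdgpu_bar_mgr_init() now dispatches on the domain */
	r = amdgpu_bar_mgr_init(adev, TTM_PL_VRAM);
	if (r)
		return r;

	r = amdgpu_bar_mgr_init(adev, AMDGPU_PL_DOORBELL);
	if (r)
		return r;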

Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
Signed-off-by: Shashank Sharma <shashank.sharma at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.c | 101 ++++++++++++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.h |   1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |   2 +
 3 files changed, 85 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.c
index 30d68e3a2469..0e0f212bd71c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.c
@@ -47,7 +47,10 @@ to_bar_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_device *
 to_amdgpu_device(struct amdgpu_bar_mgr *mgr)
 {
-	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
+	if (mgr->domain == TTM_PL_VRAM)
+		return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
+	else
+		return container_of(mgr, struct amdgpu_device, mman.doorbell_mgr);
 }
 
 static inline struct drm_buddy_block *
@@ -100,7 +103,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
  * DOC: mem_info_vis_vram_total
  *
  * The amdgpu driver provides a sysfs API for reporting current total
- * visible BAR available on the device
+ * visible VRAM available on the device
  * The file mem_info_vis_vram_total is used for this and returns the total
  * amount of visible VRAM in bytes
  */
@@ -192,16 +195,56 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
 	}
 }
 
+/**
+ * DOC: mem_info_doorbell_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total DOORBELL
+ * available on the device
+ * The file mem_info_doorbell_total is used for this and returns the total
+ * amount of DOORBELL in bytes
+ */
+static ssize_t amdgpu_mem_info_doorbell_total_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	return sysfs_emit(buf, "%llu\n", adev->doorbell.size);
+}
+
+/**
+ * DOC: mem_info_doorbell_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total DOORBELL
+ * available on the device
+ * The file mem_info_doorbell_used is used for this and returns the total
+ * amount of currently used DOORBELL in bytes
+ */
+static ssize_t amdgpu_mem_info_doorbell_used_show(struct device *dev,
+						  struct device_attribute *attr,
+						  char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct ttm_resource_manager *man = &adev->mman.doorbell_mgr.manager;
+
+	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
+}
+
 static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
 		   amdgpu_mem_info_vram_total_show, NULL);
 static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
-		   amdgpu_mem_info_vis_vram_total_show,NULL);
+		   amdgpu_mem_info_vis_vram_total_show, NULL);
 static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
 		   amdgpu_mem_info_vram_used_show, NULL);
 static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
 		   amdgpu_mem_info_vis_vram_used_show, NULL);
 static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
 		   amdgpu_mem_info_vram_vendor, NULL);
+static DEVICE_ATTR(mem_info_doorbell_total, S_IRUGO,
+		   amdgpu_mem_info_doorbell_total_show, NULL);
+static DEVICE_ATTR(mem_info_doorbell_used, S_IRUGO,
+		   amdgpu_mem_info_doorbell_used_show, NULL);
 
 static struct attribute *amdgpu_bar_mgr_attributes[] = {
 	&dev_attr_mem_info_vram_total.attr,
@@ -209,6 +252,8 @@ static struct attribute *amdgpu_bar_mgr_attributes[] = {
 	&dev_attr_mem_info_vram_used.attr,
 	&dev_attr_mem_info_vis_vram_used.attr,
 	&dev_attr_mem_info_vram_vendor.attr,
+	&dev_attr_mem_info_doorbell_total.attr,
+	&dev_attr_mem_info_doorbell_used.attr,
 	NULL
 };
 
@@ -265,7 +310,7 @@ u64 amdgpu_bar_mgr_bo_visible_size(struct amdgpu_bo *bo)
 	return usage;
 }
 
-/* Commit the reservation of VRAM pages */
+/* Commit the reservation of BAR pages */
 static void amdgpu_bar_mgr_do_reserve(struct ttm_resource_manager *man)
 {
 	struct amdgpu_bar_mgr *mgr = to_bar_mgr(man);
@@ -288,8 +333,10 @@ static void amdgpu_bar_mgr_do_reserve(struct ttm_resource_manager *man)
 		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
 			rsv->start, rsv->size);
 
-		vis_usage = amdgpu_bar_mgr_vis_size(adev, block);
-		atomic64_add(vis_usage, &mgr->vis_usage);
+		if (mgr->domain == TTM_PL_VRAM) {
+			vis_usage = amdgpu_bar_mgr_vis_size(adev, block);
+			atomic64_add(vis_usage, &mgr->vis_usage);
+		}
 		spin_lock(&man->bdev->lru_lock);
 		man->usage += rsv->size;
 		spin_unlock(&man->bdev->lru_lock);
@@ -539,7 +586,8 @@ static int amdgpu_bar_mgr_new(struct ttm_resource_manager *man,
 			start = 0;
 		vres->base.start = max(vres->base.start, start);
 
-		vis_usage += amdgpu_bar_mgr_vis_size(adev, block);
+		if (mgr->domain == TTM_PL_VRAM)
+			vis_usage += amdgpu_bar_mgr_vis_size(adev, block);
 	}
 
 	if (amdgpu_is_bar_mgr_blocks_contiguous(&vres->blocks))
@@ -550,7 +598,8 @@ static int amdgpu_bar_mgr_new(struct ttm_resource_manager *man,
 	else
 		vres->base.bus.caching = ttm_write_combined;
 
-	atomic64_add(vis_usage, &mgr->vis_usage);
+	if (mgr->domain == TTM_PL_VRAM)
+		atomic64_add(vis_usage, &mgr->vis_usage);
 	*res = &vres->base;
 	return 0;
 
@@ -583,15 +632,18 @@ static void amdgpu_bar_mgr_del(struct ttm_resource_manager *man,
 	uint64_t vis_usage = 0;
 
 	mutex_lock(&mgr->lock);
-	list_for_each_entry(block, &vres->blocks, link)
-		vis_usage += amdgpu_bar_mgr_vis_size(adev, block);
+	if (mgr->domain == TTM_PL_VRAM) {
+		list_for_each_entry(block, &vres->blocks, link)
+			vis_usage += amdgpu_bar_mgr_vis_size(adev, block);
+	}
 
 	amdgpu_bar_mgr_do_reserve(man);
 
 	drm_buddy_free_list(mm, &vres->blocks);
 	mutex_unlock(&mgr->lock);
 
-	atomic64_sub(vis_usage, &mgr->vis_usage);
+	if (mgr->domain == TTM_PL_VRAM)
+		atomic64_sub(vis_usage, &mgr->vis_usage);
 
 	ttm_resource_fini(man, res);
 	kfree(vres);
@@ -801,8 +853,9 @@ static void amdgpu_bar_mgr_debug(struct ttm_resource_manager *man,
 	struct drm_buddy *mm = &mgr->mm;
 	struct drm_buddy_block *block;
 
-	drm_printf(printer, "  vis usage:%llu\n",
-		   amdgpu_bar_mgr_vis_usage(mgr));
+	if (mgr->domain == TTM_PL_VRAM)
+		drm_printf(printer, "  vis usage:%llu\n",
+			   amdgpu_bar_mgr_vis_usage(mgr));
 
 	mutex_lock(&mgr->lock);
 	drm_printf(printer, "default_page_size: %lluKiB\n",
@@ -837,15 +890,22 @@ int amdgpu_bar_mgr_init(struct amdgpu_device *adev, u32 domain)
 	struct amdgpu_bar_mgr *mgr;
 	struct ttm_resource_manager *man;
 	int err;
+	u64 size;
 
-	if (domain != TTM_PL_VRAM)
+	if (domain == TTM_PL_VRAM) {
+		mgr = &adev->mman.vram_mgr;
+		size = adev->gmc.real_vram_size;
+	} else if (domain == AMDGPU_PL_DOORBELL) {
+		mgr = &adev->mman.doorbell_mgr;
+		size = adev->doorbell.size;
+	} else {
 		return -EINVAL;
+	}
 
-	mgr = &adev->mman.vram_mgr;
+	mgr->domain = domain;
 	man = &mgr->manager;
 
-	ttm_resource_manager_init(man, &adev->mman.bdev,
-				  adev->gmc.real_vram_size);
+	ttm_resource_manager_init(man, &adev->mman.bdev, size);
 
 	man->func = &amdgpu_bar_mgr_func;
 
@@ -879,10 +939,13 @@ void amdgpu_bar_mgr_fini(struct amdgpu_device *adev, u32 domain)
 	int ret;
 	struct amdgpu_bar_reservation *rsv, *temp;
 
-	if (domain != TTM_PL_VRAM)
+	if (domain == TTM_PL_VRAM)
+		mgr = &adev->mman.vram_mgr;
+	else if (domain == AMDGPU_PL_DOORBELL)
+		mgr = &adev->mman.doorbell_mgr;
+	else
 		return;
 
-	mgr = &adev->mman.vram_mgr;
 	man = &mgr->manager;
 
 	ttm_resource_manager_set_used(man, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.h
index 241faba5ae55..f989a6b918d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bar_mgr.h
@@ -35,6 +35,7 @@ struct amdgpu_bar_mgr {
 	struct list_head reserved_pages;
 	atomic64_t vis_usage;
 	u64 default_page_size;
+	u32 domain;
 };
 
 struct amdgpu_bar_mgr_resource {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 020ebba5a51a..ea53aae3ee0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -33,6 +33,7 @@
 #define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
 #define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
 #define AMDGPU_PL_PREEMPT	(TTM_PL_PRIV + 3)
+#define AMDGPU_PL_DOORBELL	(TTM_PL_PRIV + 4)
 
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
@@ -64,6 +65,7 @@ struct amdgpu_mman {
 	struct amdgpu_bar_mgr vram_mgr;
 	struct amdgpu_gtt_mgr gtt_mgr;
 	struct ttm_resource_manager preempt_mgr;
+	struct amdgpu_bar_mgr doorbell_mgr;
 
 	uint64_t		stolen_vga_size;
 	struct amdgpu_bo	*stolen_vga_memory;
-- 
2.34.1
