[PATCH 04/10] drm/amdgpu: Convert update_partition_sched_list into a common helper v3

Hawking Zhang Hawking.Zhang at amd.com
Mon Jun 16 09:37:50 UTC 2025


The update_partition_sched_list function does not need to
remain an SoC-specific callback; it can be reused for future
products.

v2: bypass the function if xcp_mgr is not available (Likun)

v3: Let the caller check the availability of xcp_mgr (Lijo)
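
For clarity, a minimal sketch of the call-site change (the old
macro, removed below in amdgpu_xcp.h, folded the NULL checks into
its expansion; with v3 the guard moves to the caller):

	/* before: macro dispatch through the per-SoC funcs table,
	 * NULL checks hidden inside the macro expansion
	 */
	amdgpu_xcp_update_partition_sched_list(adev);

	/* after (v3): direct call into the common helper; the caller
	 * verifies xcp_mgr availability first
	 */
	if (adev->xcp_mgr)
		amdgpu_xcp_update_partition_sched_list(adev);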

Signed-off-by: Hawking Zhang <Hawking.Zhang at amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c    | 111 +++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h    |   7 +-
 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 119 ---------------------
 4 files changed, 114 insertions(+), 126 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7c15bd2c8a9c..c401f27a06fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3012,7 +3012,8 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 		}
 	}
 
-	amdgpu_xcp_update_partition_sched_list(adev);
+	if (adev->xcp_mgr)
+		amdgpu_xcp_update_partition_sched_list(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 5530166f1b23..f056c6cf7faf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -483,6 +483,117 @@ int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+			      uint32_t inst_idx,
+			      struct amdgpu_ring *ring)
+{
+	int xcp_id;
+	enum AMDGPU_XCP_IP_BLOCK ip_blk;
+	uint32_t inst_mask;
+
+	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+		return;
+
+	inst_mask = 1 << inst_idx;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_HW_IP_GFX:
+	case AMDGPU_RING_TYPE_COMPUTE:
+	case AMDGPU_RING_TYPE_KIQ:
+		ip_blk = AMDGPU_XCP_GFX;
+		break;
+	case AMDGPU_RING_TYPE_SDMA:
+		ip_blk = AMDGPU_XCP_SDMA;
+		break;
+	case AMDGPU_RING_TYPE_VCN_ENC:
+	case AMDGPU_RING_TYPE_VCN_JPEG:
+		ip_blk = AMDGPU_XCP_VCN;
+		break;
+	default:
+		dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+		return;
+	}
+
+	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+			ring->xcp_id = xcp_id;
+			dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+				ring->xcp_id);
+			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+			break;
+		}
+	}
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+					struct amdgpu_ring *ring,
+					unsigned int sel_xcp_id)
+{
+	unsigned int *num_gpu_sched;
+
+	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+			.sched[(*num_gpu_sched)++] = &ring->sched;
+	dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+		ring->name, sel_xcp_id, ring->funcs->type,
+		ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	int i;
+
+	for (i = 0; i < MAX_XCP; i++) {
+		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+	}
+
+	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+		return 0;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		ring = adev->rings[i];
+		if (!ring || !ring->sched.ready || ring->no_scheduler)
+			continue;
+
+		amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+		/* VCN may be shared by two partitions under CPX MODE in certain
+		 * configs.
+		 */
+		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+		    (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+			amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+	}
+
+	return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->num_rings; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+			amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+		else
+			amdgpu_set_xcp_id(adev, ring->me, ring);
+	}
+
+	return amdgpu_xcp_sched_list_update(adev);
+}
+
 /*====================== xcp sysfs - configuration ======================*/
 #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
 	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 0ae4f14293bb..5845dce6b804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -144,7 +144,6 @@ struct amdgpu_xcp_mgr_funcs {
 	int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
 	int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
 	int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
-	int (*update_partition_sched_list)(struct amdgpu_device *adev);
 };
 
 int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -178,14 +177,10 @@ int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
                              struct amdgpu_fpriv *fpriv,
                              unsigned int *num_scheds,
                              struct drm_gpu_scheduler ***scheds);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
 void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
 void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
 
-#define amdgpu_xcp_update_partition_sched_list(adev) \
-	((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
-	(adev)->xcp_mgr->funcs->update_partition_sched_list ? \
-	(adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
-
 static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
 {
 	if (!xcp_mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 8a74b954855e..86f0eaae5274 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -63,123 +63,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
 }
 
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
-	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
-			     uint32_t inst_idx, struct amdgpu_ring *ring)
-{
-	int xcp_id;
-	enum AMDGPU_XCP_IP_BLOCK ip_blk;
-	uint32_t inst_mask;
-
-	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
-	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
-	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
-		return;
-
-	inst_mask = 1 << inst_idx;
-
-	switch (ring->funcs->type) {
-	case AMDGPU_HW_IP_GFX:
-	case AMDGPU_RING_TYPE_COMPUTE:
-	case AMDGPU_RING_TYPE_KIQ:
-		ip_blk = AMDGPU_XCP_GFX;
-		break;
-	case AMDGPU_RING_TYPE_SDMA:
-		ip_blk = AMDGPU_XCP_SDMA;
-		break;
-	case AMDGPU_RING_TYPE_VCN_ENC:
-	case AMDGPU_RING_TYPE_VCN_JPEG:
-		ip_blk = AMDGPU_XCP_VCN;
-		break;
-	default:
-		DRM_ERROR("Not support ring type %d!", ring->funcs->type);
-		return;
-	}
-
-	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
-		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
-			ring->xcp_id = xcp_id;
-			dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
-				ring->xcp_id);
-			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
-			break;
-		}
-	}
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
-		struct amdgpu_device *adev,
-		struct amdgpu_ring *ring,
-		unsigned int sel_xcp_id)
-{
-	unsigned int *num_gpu_sched;
-
-	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
-			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
-	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
-			.sched[(*num_gpu_sched)++] = &ring->sched;
-	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
-			sel_xcp_id, ring->funcs->type,
-			ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
-		struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *ring;
-	int i;
-
-	for (i = 0; i < MAX_XCP; i++) {
-		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
-		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
-	}
-
-	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
-		return 0;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		ring = adev->rings[i];
-		if (!ring || !ring->sched.ready || ring->no_scheduler)
-			continue;
-
-		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
-		/* VCN may be shared by two partitions under CPX MODE in certain
-		 * configs.
-		 */
-		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
-		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
-		    aqua_vanjaram_xcp_vcn_shared(adev))
-			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
-	}
-
-	return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
-	int i;
-
-	for (i = 0; i < adev->num_rings; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-
-		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
-			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
-			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
-		else
-			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
-	}
-
-	return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
 /* Fixed pattern for smn addressing on different AIDs:
  *   bit[34]: indicate cross AID access
  *   bit[33:32]: indicate target AID id
@@ -691,8 +574,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
 	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
 	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
 	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
-	.update_partition_sched_list =
-		&aqua_vanjaram_update_partition_sched_list
 };
 
 static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
-- 
2.17.1


