[PATCH 3/3] drm/amdgpu/jpeg: sriov support for jpeg_v5_0_1
Alex Deucher
alexdeucher at gmail.com
Mon May 12 14:41:33 UTC 2025
On Sun, May 11, 2025 at 11:44 PM fanhuang <FangSheng.Huang at amd.com> wrote:
>
> Implement the initialization table handshake with MMSCH for SR-IOV:
> build a per-instance init table describing the JPEG decode rings and
> submit it to the scheduler through the VF mailbox.
>
> Signed-off-by: fanhuang <FangSheng.Huang at amd.com>
Series is:
Acked-by: Alex Deucher <alexander.deucher at amd.com>
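
A bit of context for anyone reading along: the table the VF hands to
MMSCH is a small header followed by per-ring command lists. mmsch_v5_0.h
is not part of this diff, so the following is the shape inferred from
the code below, not a verbatim copy of the header file:

struct mmsch_v5_0_table_info {
	uint32_t init_status;
	uint32_t table_offset;
	uint32_t table_size;
};

struct mmsch_v5_0_init_header {
	uint32_t version;		/* MMSCH_VERSION */
	uint32_t total_size;		/* in dwords */
	/* ... other engine slots ... */
	struct mmsch_v5_0_table_info mjpegdec0[5];	/* rings 0-4 */
	struct mmsch_v5_0_table_info mjpegdec1[5];	/* rings 5-9 */
};

Each ring's command list is just direct register writes for the JRBC
ring buffer base and size, and the whole table is terminated with an
MMSCH_COMMAND__END marker before being handed over through the mailbox.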
> ---
> drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 142 +++++++++++++++++++++--
> 1 file changed, 131 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
> index 6f73033d78b5..cb94bd71300f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
> @@ -28,11 +28,13 @@
> #include "soc15d.h"
> #include "jpeg_v4_0_3.h"
> #include "jpeg_v5_0_1.h"
> +#include "mmsch_v5_0.h"
>
> #include "vcn/vcn_5_0_0_offset.h"
> #include "vcn/vcn_5_0_0_sh_mask.h"
> #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
>
> +static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
> static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
> static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
> static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
> @@ -163,14 +165,9 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
> (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
> 1 + j + 11 * jpeg_inst;
> } else {
> - if (j < 4)
> - ring->doorbell_index =
> - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
> - 4 + j + 32 * jpeg_inst;
> - else
> - ring->doorbell_index =
> - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
> - 8 + j + 32 * jpeg_inst;
> + ring->doorbell_index =
> + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
> + 2 + j + 32 * jpeg_inst;
> }
> sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
> r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
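
One note on the doorbell change while I'm here: the old two-branch
mapping placed rings 0-3 at offsets 4-7 and rings 4 and up at 12 and
up, leaving a hole at 8-11; the new single expression packs all rings
contiguously starting at offset 2. A throwaway userspace sketch to
compare the two mappings (assumes ten decode rings per instance):

#include <stdio.h>

int main(void)
{
	int base = 0;	/* stands in for (vcn_ring0_1 << 1) + 32 * jpeg_inst */
	int j;

	for (j = 0; j < 10; j++) {
		int old = base + (j < 4 ? 4 + j : 8 + j);
		int new = base + 2 + j;

		printf("ring %d: old doorbell %2d, new doorbell %2d\n",
		       j, old, new);
	}
	return 0;
}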
> @@ -237,7 +234,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
> int i, j, r, jpeg_inst;
>
> if (amdgpu_sriov_vf(adev)) {
> - /* jpeg_v5_0_1_start_sriov(adev); */
> + r = jpeg_v5_0_1_start_sriov(adev);
> + if (r)
> + return r;
> +
> for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
> for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
> ring = &adev->jpeg.inst[i].ring_dec[j];
> @@ -291,8 +291,10 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
>
> cancel_delayed_work_sync(&adev->jpeg.idle_work);
>
> - if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
> - ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
> + if (!amdgpu_sriov_vf(adev)) {
> + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
> + ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
> + }
>
> return ret;
> }
> @@ -422,6 +424,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
> reg_offset);
> }
>
> +static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
> +{
> + struct amdgpu_ring *ring;
> + uint64_t ctx_addr;
> + uint32_t param, resp, expected;
> + uint32_t tmp, timeout;
> +
> + struct amdgpu_mm_table *table = &adev->virt.mm_table;
> + uint32_t *table_loc;
> + uint32_t table_size;
> + uint32_t size, size_dw, item_offset;
> + uint32_t init_status;
> + int i, j, jpeg_inst;
> +
> + struct mmsch_v5_0_cmd_direct_write
> + direct_wt = { {0} };
> + struct mmsch_v5_0_cmd_end end = { {0} };
> + struct mmsch_v5_0_init_header header;
> +
> + direct_wt.cmd_header.command_type =
> + MMSCH_COMMAND__DIRECT_REG_WRITE;
> + end.cmd_header.command_type =
> + MMSCH_COMMAND__END;
> +
> + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
> + jpeg_inst = GET_INST(JPEG, i);
> +
> + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
> + header.version = MMSCH_VERSION;
> + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
> +
> + table_loc = (uint32_t *)table->cpu_addr;
> + table_loc += header.total_size;
> +
> + item_offset = header.total_size;
> +
> + for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
> + ring = &adev->jpeg.inst[i].ring_dec[j];
> + table_size = 0;
> +
> + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
> + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
> + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
> + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
> + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
> + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
> +
> + if (j < 5) {
> + header.mjpegdec0[j].table_offset = item_offset;
> + header.mjpegdec0[j].init_status = 0;
> + header.mjpegdec0[j].table_size = table_size;
> + } else {
> + header.mjpegdec1[j - 5].table_offset = item_offset;
> + header.mjpegdec1[j - 5].init_status = 0;
> + header.mjpegdec1[j - 5].table_size = table_size;
> + }
> + header.total_size += table_size;
> + item_offset += table_size;
> + }
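
For readers without mmsch_v5_0.h handy: assuming
MMSCH_V5_0_INSERT_DIRECT_WT follows the same pattern as the mmsch_v4_0.h
macro, it serializes one direct-register-write command into the table
and advances the running cursor and size, which is also what the size
and size_dw locals declared above are for. Roughly:

#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
	size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
	size_dw = size / 4; \
	direct_wt.cmd_header.reg_offset = reg; \
	direct_wt.reg_value = value; \
	memcpy((void *)table_loc, &direct_wt, size); \
	table_loc += size_dw; \
	table_size += size_dw; \
}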
> +
> + MMSCH_V5_0_INSERT_END();
> +
> + /* send init table to MMSCH */
> + size = sizeof(struct mmsch_v5_0_init_header);
> + table_loc = (uint32_t *)table->cpu_addr;
> + memcpy((void *)table_loc, &header, size);
> +
> + ctx_addr = table->gpu_addr;
> + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
> + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
> +
> + tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
> + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
> + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
> + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
> +
> + size = header.total_size;
> + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
> +
> + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
> +
> + param = 0x00000001;
> + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
> + tmp = 0;
> + timeout = 1000;
> + resp = 0;
> + expected = MMSCH_VF_MAILBOX_RESP__OK;
> + init_status =
> + ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
> + while (resp != expected) {
> + resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
> +
> + if (resp != 0)
> + break;
> + udelay(10);
> + tmp = tmp + 10;
> + if (tmp >= timeout) {
> + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
> + " waiting for regMMSCH_VF_MAILBOX_RESP "\
> + "(expected=0x%08x, readback=0x%08x)\n",
> + tmp, expected, resp);
> + return -EBUSY;
> + }
> + }
> + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
> + init_status != MMSCH_VF_ENGINE_STATUS__PASS)
> + DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
> + resp, init_status);
> +
> + }
> + return 0;
> +}
> +
> /**
> * jpeg_v5_0_1_start - start JPEG block
> *
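
The handshake at the bottom is the usual MMSCH sequence: publish the
table address and size, clear the response mailbox, write the host
mailbox, then poll until the scheduler reports back. Condensed into a
hypothetical helper purely for illustration (not part of this patch):

static int mmsch_v5_0_wait_for_resp(struct amdgpu_device *adev, int inst)
{
	uint32_t resp = 0, waited = 0;

	/* MMSCH writes a nonzero status once it has parsed the table */
	while (waited < 1000) {
		resp = RREG32_SOC15(VCN, inst, regMMSCH_VF_MAILBOX_RESP);
		if (resp)
			break;
		udelay(10);
		waited += 10;
	}

	return resp == MMSCH_VF_MAILBOX_RESP__OK ? 0 : -EBUSY;
}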
> @@ -581,6 +696,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
> struct amdgpu_device *adev = ip_block->adev;
> int ret;
>
> + if (amdgpu_sriov_vf(adev)) {
> + adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
> + return 0;
> + }
> +
> if (state == adev->jpeg.cur_state)
> return 0;
>
> --
> 2.34.1
>