[PATCH] drm/amdgpu: implement UVD VM mode for Stoney v2
Alex Deucher
alexdeucher at gmail.com
Wed Jul 13 18:45:46 UTC 2016
On Wed, Jul 13, 2016 at 9:59 AM, Christian König
<deathsimple at vodafone.de> wrote:
> From: Christian König <christian.koenig at amd.com>
>
> Starting with Stoney we support running UVD in VM mode as well.
>
> v2: rebased, only enable on Polaris for now.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
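
A note for anyone following the packet streams below: with parse_cs left NULL
in the new vm_funcs table, the CS ioctl skips the UVD command stream parser,
so the IB executes with its virtual addresses resolved through the process VM
instead of being patched to GART addresses. That is why the IB now has to be
tagged with a VMID and why the ring needs its own VM flush. Everything added
here is plain type-0 register writes on the UVD ring; a minimal sketch of what
a PACKET0() header expands to, assuming the usual VI layout (register dword
offset in bits 15:0, extra payload dword count in bits 29:16 -- see vid.h for
the real macro):

/* Illustrative only -- a stand-in for PACKET0() from vid.h; the encoding
 * here is an assumption, the real macro is authoritative. */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_packet0(uint32_t reg_dw_offset, uint32_t extra_dwords)
{
	/* type-0 packet: type in bits 31:30 (0), count in 29:16, register in 15:0 */
	return ((extra_dwords & 0x3fff) << 16) | (reg_dw_offset & 0xffff);
}

int main(void)
{
	/* e.g. the new "tag the IB with a VMID" write: header + one payload dword */
	uint32_t hdr = example_packet0(0x1234 /* hypothetical offset for mmUVD_LMI_RBC_IB_VMID */, 0);

	printf("ring[n]   = 0x%08x  (PACKET0 header)\n", hdr);
	printf("ring[n+1] = vm_id\n");
	return 0;
}
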
> ---
> drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 82 +++++++++++++++++++++-
> .../gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 2 +
> 2 files changed, 82 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 69439ab..9ab7797 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -34,6 +34,7 @@
> #include "smu/smu_7_1_3_d.h"
> #include "smu/smu_7_1_3_sh_mask.h"
> #include "bif/bif_5_1_d.h"
> +#include "gmc/gmc_8_1_d.h"
> #include "vi.h"
>
> static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
> @@ -674,6 +675,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
> struct amdgpu_ib *ib,
> unsigned vm_id, bool ctx_switch)
> {
> + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
> + amdgpu_ring_write(ring, vm_id);
> +
> amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
> amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
> amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
> @@ -682,6 +686,57 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
> amdgpu_ring_write(ring, ib->length_dw);
> }
>
> +static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
> + unsigned vm_id, uint64_t pd_addr)
> +{
> + uint32_t reg;
> +
> + if (vm_id < 8)
> + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
> + else
> + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
> +
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
> + amdgpu_ring_write(ring, reg << 2);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
> + amdgpu_ring_write(ring, pd_addr >> 12);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
> + amdgpu_ring_write(ring, 0x8);
> +
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
> + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
> + amdgpu_ring_write(ring, 1 << vm_id);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
> + amdgpu_ring_write(ring, 0x8);
> +
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
> + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
> + amdgpu_ring_write(ring, 0);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
> + amdgpu_ring_write(ring, 1 << vm_id); /* mask */
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
> + amdgpu_ring_write(ring, 0xC);
> +}
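
The flush sequence reads nicely: the first DATA0/DATA1/CMD triple programs
VM_CONTEXTn_PAGE_TABLE_BASE_ADDR for this VMID with the page directory's page
frame number, the second kicks VM_INVALIDATE_REQUEST for that VMID, and the
third (cmd 0xC with the 1 << vm_id mask in GP_SCRATCH8) appears to poll the
request register until the bit for this VMID clears again. The 0-7 / 8-15
split matches the two VM_CONTEXT register banks the GMC code uses. A
standalone sketch of just the address math, with made-up register offsets
purely for illustration (the real ones come from gmc_8_1_d.h):

/* Illustrative only: the offset/address arithmetic used above. */
#include <stdint.h>
#include <stdio.h>

#define EX_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x0500 /* hypothetical dword offset */
#define EX_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x0520 /* hypothetical dword offset */

int main(void)
{
	unsigned vm_id = 11;                      /* VMIDs 8..15 live in the second bank */
	uint64_t pd_addr = 0x0000000087654000ULL; /* 4 KiB aligned page directory address */
	uint32_t reg;

	if (vm_id < 8)
		reg = EX_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = EX_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	/* DATA0 carries the register byte address (dword offset << 2),
	 * DATA1 carries the page frame number (pd_addr >> 12). */
	printf("DATA0 = 0x%08x\n", reg << 2);
	printf("DATA1 = 0x%08x\n", (uint32_t)(pd_addr >> 12));
	printf("invalidate mask = 0x%08x\n", 1u << vm_id);
	return 0;
}
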
> +
> +static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
> +{
> + uint32_t seq = ring->fence_drv.sync_seq;
> + uint64_t addr = ring->fence_drv.gpu_addr;
> +
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
> + amdgpu_ring_write(ring, lower_32_bits(addr));
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
> + amdgpu_ring_write(ring, upper_32_bits(addr));
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
> + amdgpu_ring_write(ring, 0xffffffff); /* mask */
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
> + amdgpu_ring_write(ring, seq);
> + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
> + amdgpu_ring_write(ring, 0xE);
> +}
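
And the pipeline sync has the engine wait on the ring's own fence location:
DATA0/DATA1 carry the fence GPU address, GP_SCRATCH8 the mask, GP_SCRATCH9 the
sequence number, and cmd 0xE presumably means "poll that memory until the
masked value reaches seq" so a VM switch can't overtake work still in flight.
A rough CPU-side analogue of the intended semantics (whether the firmware
compares with a wrap-safe >= or with == is my assumption, not something the
patch spells out):

/* Illustrative only: CPU-side analogue of the wait set up above. */
#include <stdint.h>
#include <stdio.h>

static int fence_reached(uint32_t fence_value, uint32_t mask, uint32_t seq)
{
	/* wrap-safe "has the fence passed seq" check, assuming >= semantics */
	return (int32_t)((fence_value & mask) - seq) >= 0;
}

int main(void)
{
	uint32_t seq = 42;       /* ring->fence_drv.sync_seq */
	uint32_t fence_mem = 41; /* value the engine last wrote at fence_drv.gpu_addr */

	printf("fence 41 vs seq 42: %s\n", fence_reached(fence_mem, 0xffffffff, seq) ? "done" : "wait");
	fence_mem = 42;
	printf("fence 42 vs seq 42: %s\n", fence_reached(fence_mem, 0xffffffff, seq) ? "done" : "wait");
	return 0;
}
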
> +
> static bool uvd_v6_0_is_idle(void *handle)
> {
> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> @@ -918,7 +973,7 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
> .set_powergating_state = uvd_v6_0_set_powergating_state,
> };
>
> -static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
> +static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
> .get_rptr = uvd_v6_0_ring_get_rptr,
> .get_wptr = uvd_v6_0_ring_get_wptr,
> .set_wptr = uvd_v6_0_ring_set_wptr,
> @@ -933,9 +988,32 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
> .pad_ib = amdgpu_ring_generic_pad_ib,
> };
>
> +static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
> + .get_rptr = uvd_v6_0_ring_get_rptr,
> + .get_wptr = uvd_v6_0_ring_get_wptr,
> + .set_wptr = uvd_v6_0_ring_set_wptr,
> + .parse_cs = NULL,
> + .emit_ib = uvd_v6_0_ring_emit_ib,
> + .emit_fence = uvd_v6_0_ring_emit_fence,
> + .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
> + .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
> + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
> + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
> + .test_ring = uvd_v6_0_ring_test_ring,
> + .test_ib = amdgpu_uvd_ring_test_ib,
> + .insert_nop = amdgpu_ring_insert_nop,
> + .pad_ib = amdgpu_ring_generic_pad_ib,
> +};
> +
> static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
> {
> - adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
> + if (adev->asic_type >= CHIP_STONEY) {
> + adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
> + DRM_INFO("UVD is enabled in VM mode\n");
> + } else {
> + adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
> + DRM_INFO("UVD is enabled in physical mode\n");
> + }
> }
>
> static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
> diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
> index 6f6fb34..ec69869 100644
> --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
> +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
> @@ -111,6 +111,8 @@
> #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
> #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
> #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
> +#define mmUVD_GP_SCRATCH8 0x3c0a
> +#define mmUVD_GP_SCRATCH9 0x3c0b
> #define mmUVD_GP_SCRATCH4 0x3d38
>
> #endif /* UVD_6_0_D_H */
> --
> 2.5.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx