[PATCH 051/100] drm/amdgpu: add initial vce 4.0 support for vega10

Alex Deucher <alexdeucher@gmail.com>
Mon Mar 20 20:29:46 UTC 2017


From: Leo Liu <leo.liu@amd.com>
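
Add initial VCE 4.0 support for Vega10: request and load the vega10
VCE firmware (handing it to the PSP when PSP firmware loading is in
use), set up the three encode rings, program the memory controller,
and emit IBs, fences and VM flushes in VM mode.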

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile     |   3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c |   7 +
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c   | 894 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.h   |  29 ++
 4 files changed, 932 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vce_v4_0.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 65829fa..2ba5671 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -90,7 +90,8 @@ amdgpu-y += \
 # add VCE block
 amdgpu-y += \
 	amdgpu_vce.o \
-	vce_v3_0.o
+	vce_v3_0.o \
+	vce_v4_0.o
 
 # add amdkfd interfaces
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c46116c..647944b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -54,6 +54,8 @@
 #define FIRMWARE_POLARIS11         "amdgpu/polaris11_vce.bin"
 #define FIRMWARE_POLARIS12         "amdgpu/polaris12_vce.bin"
 
+#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
+
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
 MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -69,6 +71,8 @@ MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
+MODULE_FIRMWARE(FIRMWARE_VEGA10);
+
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
 /**
@@ -123,6 +127,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	case CHIP_POLARIS11:
 		fw_name = FIRMWARE_POLARIS11;
 		break;
+	case CHIP_VEGA10:
+		fw_name = FIRMWARE_VEGA10;
+		break;
 	case CHIP_POLARIS12:
 		fw_name = FIRMWARE_POLARIS12;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
new file mode 100644
index 0000000..74146be
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+
+#include "vega10/soc15ip.h"
+#include "vega10/VCE/vce_4_0_offset.h"
+#include "vega10/VCE/vce_4_0_default.h"
+#include "vega10/VCE/vce_4_0_sh_mask.h"
+#include "vega10/MMHUB/mmhub_1_0_offset.h"
+#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
+
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
+
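+/* sizes of the firmware, stack and data segments laid out in the VCPU cache */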
+#define VCE_V4_0_FW_SIZE	(384 * 1024)
+#define VCE_V4_0_STACK_SIZE	(64 * 1024)
+#define VCE_V4_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
+
+static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
+static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
+
+/**
+ * vce_v4_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->vce.ring[0])
+		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
+	else if (ring == &adev->vce.ring[1])
+		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
+	else
+		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
+}
+
+/**
+ * vce_v4_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->vce.ring[0])
+		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
+	else if (ring == &adev->vce.ring[1])
+		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
+	else
+		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
+}
+
+/**
+ * vce_v4_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->vce.ring[0])
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
+			lower_32_bits(ring->wptr));
+	else if (ring == &adev->vce.ring[1])
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
+			lower_32_bits(ring->wptr));
+	else
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
+			lower_32_bits(ring->wptr));
+}
+
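+/**
+ * vce_v4_0_firmware_loaded - wait for the VCE firmware to boot
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Poll VCE_STATUS for the firmware-loaded bit, soft-resetting the
+ * ECPU between retries.  Returns 0 once the firmware reports in,
+ * -ETIMEDOUT otherwise.
+ */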
+static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			uint32_t status =
+				RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));
+
+			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+				return 0;
+			mdelay(10);
+		}
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
+				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
+				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+
+	}
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * vce_v4_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v4_0_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	int r;
+
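+	/* program the ring buffer registers (rptr, wptr, base, size) for all three rings */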
+	ring = &adev->vce.ring[0];
+
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
+
+	ring = &adev->vce.ring[1];
+
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);
+
+	ring = &adev->vce.ring[2];
+
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);
+
+	vce_v4_0_mc_resume(adev);
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
+			~VCE_STATUS__JOB_BUSY_MASK);
+
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);
+
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+	mdelay(100);
+
+	r = vce_v4_0_firmware_loaded(adev);
+
+	/* clear BUSY flag */
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+	if (r) {
+		DRM_ERROR("VCE not responding, giving up!!!\n");
+		return r;
+	}
+
+	return 0;
+}
+
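+/**
+ * vce_v4_0_stop - stop VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the VCPU, hold the ECPU in soft reset and clear the
+ * busy flag
+ */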
+static int vce_v4_0_stop(struct amdgpu_device *adev)
+{
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
+
+	/* hold on ECPU */
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+	/* clear BUSY flag */
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+	/* Set Clock-Gating off */
+	/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+		vce_v4_0_set_vce_sw_clock_gating(adev, false);
+	*/
+
+	return 0;
+}
+
+static int vce_v4_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->vce.num_rings = 3;
+
+	vce_v4_0_set_ring_funcs(adev);
+	vce_v4_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int vce_v4_0_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	unsigned size;
+	int r, i;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
+	if (r)
+		return r;
+
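+	/* BO holds the stack and data segments (x2), plus the firmware
+	 * image when the driver rather than the PSP loads it */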
+	size  = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		size += VCE_V4_0_FW_SIZE;
+
+	r = amdgpu_vce_sw_init(adev, size);
+	if (r)
+		return r;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		const struct common_firmware_header *hdr;
+		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
+		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+		DRM_INFO("PSP loading VCE firmware\n");
+	}
+
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+		r = amdgpu_vce_resume(adev);
+		if (r)
+			return r;
+	}
+
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		ring = &adev->vce.ring[i];
+		sprintf(ring->name, "vce%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+static int vce_v4_0_sw_fini(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_vce_suspend(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_vce_sw_fini(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
+static int vce_v4_0_hw_init(void *handle)
+{
+	int r, i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vce_v4_0_start(adev);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].ready = false;
+
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		if (r)
+			return r;
+		adev->vce.ring[i].ready = true;
+	}
+
+	DRM_INFO("VCE initialized successfully.\n");
+
+	return 0;
+}
+
+static int vce_v4_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
+
+	/* vce_v4_0_wait_for_idle(handle); */
+	vce_v4_0_stop(adev);
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].ready = false;
+
+	return 0;
+}
+
+static int vce_v4_0_suspend(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vce_v4_0_hw_fini(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_vce_suspend(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
+static int vce_v4_0_resume(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_vce_resume(adev);
+	if (r)
+		return r;
+
+	r = vce_v4_0_hw_init(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
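+/**
+ * vce_v4_0_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the VCE memory controller know the locations of the
+ * firmware, stack and data segments
+ */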
+static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
+{
+	uint32_t offset, size;
+
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
+
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+
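+	/* the 40-bit BAR registers take the address shifted right by 8,
+	 * the 64-bit BAR registers take address bits 47:40 */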
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8));
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+	} else {
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+			(adev->vce.gpu_addr >> 8));
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+			(adev->vce.gpu_addr >> 40) & 0xff);
+	}
+
+	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
+	size = VCE_V4_0_FW_SIZE;
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
+
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
+	offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
+	size = VCE_V4_0_STACK_SIZE;
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
+
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
+	offset += size;
+	size = VCE_V4_0_DATA_SIZE;
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
+
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
+			VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+}
+
+static int vce_v4_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	/* needed for driver unload */
+	return 0;
+}
+
+#if 0
+static bool vce_v4_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+
+	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
+	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
+
+	return !(RREG32(mmSRBM_STATUS2) & mask);
+}
+
+static int vce_v4_0_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++)
+		if (vce_v4_0_is_idle(handle))
+			return 0;
+
+	return -ETIMEDOUT;
+}
+
+#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
+#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static bool vce_v4_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	/* According to the VCE team, we should use VCE_STATUS instead of
+	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
+	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
+	 * instance's registers are accessed
+	 * (0 for the 1st instance, 0x10 for the 2nd instance).
+	 *
+	 * VCE_STATUS
+	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
+	 * |----+----+-----------+----+----+----+----------+---------+----|
+	 * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
+	 *
+	 * The VCE team suggests using bits 3-6 for the busy status check.
+	 */
+	mutex_lock(&adev->grbm_idx_mutex);
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+	}
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+	}
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	if (srbm_soft_reset) {
+		adev->vce.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->vce.srbm_soft_reset = 0;
+		return false;
+	}
+}
+
+static int vce_v4_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->vce.srbm_soft_reset)
+		return 0;
+	srbm_soft_reset = adev->vce.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
+		u32 tmp;
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int vce_v4_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->vce.srbm_soft_reset)
+		return 0;
+
+	mdelay(5);
+
+	return vce_v4_0_suspend(adev);
+}
+
+
+static int vce_v4_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->vce.srbm_soft_reset)
+		return 0;
+
+	mdelay(5);
+
+	return vce_v4_0_resume(adev);
+}
+
+static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
+{
+	u32 tmp, data;
+
+	tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
+	if (override)
+		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
+	else
+		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
+
+	if (tmp != data)
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
+}
+
+static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
+					     bool gated)
+{
+	u32 data;
+
+	/* Set Override to disable Clock Gating */
+	vce_v4_0_override_vce_clock_gating(adev, true);
+
+	/* This function enables MGCG which is controlled by firmware.
+	 * With the clocks in the gated state the core is still
+	 * accessible but the firmware will throttle the clocks on the
+	 * fly as necessary.
+	 */
+	if (gated) {
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
+		data |= 0x1ff;
+		data &= ~0xef0000;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
+		data |= 0x3ff000;
+		data &= ~0xffc00000;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
+		data |= 0x2;
+		data &= ~0x00010000;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
+		data |= 0x37f;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
+		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
+			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+			0x8;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
+	} else {
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
+		data &= ~0x80010;
+		data |= 0xe70008;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
+		data |= 0xffc00000;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
+		data |= 0x10000;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
+		data &= ~0xffc00000;
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
+
+		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
+		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
+			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+			  0x8);
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
+	}
+	vce_v4_0_override_vce_clock_gating(adev, false);
+}
+
+static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+	else
+		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
+static int vce_v4_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
+	int i;
+
+	if ((adev->asic_type == CHIP_POLARIS10) ||
+		(adev->asic_type == CHIP_TONGA) ||
+		(adev->asic_type == CHIP_FIJI))
+		vce_v4_0_set_bypass_mode(adev, enable);
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
+		return 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < 2; i++) {
+		/* Program VCE Instance 0 or 1 if not harvested */
+		if (adev->vce.harvest_config & (1 << i))
+			continue;
+
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+
+		if (enable) {
+			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
+			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
+			data &= ~(0xf | 0xff0);
+			data |= ((0x0 << 0) | (0x04 << 4));
+			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);
+
+			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
+			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
+			data &= ~(0xf | 0xff0);
+			data |= ((0x0 << 0) | (0x04 << 4));
+			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
+		}
+
+		vce_v4_0_set_vce_sw_clock_gating(adev, enable);
+	}
+
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static int vce_v4_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	/* This doesn't actually powergate the VCE block.
+	 * That's done in the dpm code via the SMC.  This
+	 * just re-inits the block as necessary.  The actual
+	 * gating still happens in the dpm code.  We should
+	 * revisit this when there is a cleaner line between
+	 * the smc and the hw blocks
+	 */
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		/* XXX do we need a vce_v4_0_stop()? */
+		return 0;
+	else
+		return vce_v4_0_start(adev);
+}
+#endif
+
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
+		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, ib->length_dw);
+}
+
+static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+			u64 seq, unsigned flags)
+{
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+	amdgpu_ring_write(ring, VCE_CMD_FENCE);
+	amdgpu_ring_write(ring, addr);
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, VCE_CMD_TRAP);
+}
+
+static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
+			 unsigned int vm_id, uint64_t pd_addr)
+{
+	unsigned eng = ring->idx;
+	unsigned i;
+
+	pd_addr = pd_addr | 0x1; /* valid bit */
+	/* now only use physical base address of PDE and valid */
+	BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+
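+	/* write the page directory address to every VM hub, wait for the
+	 * write to land, then flush the hub's TLB and wait for the ack */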
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
+		uint32_t req = hub->get_invalidate_req(vm_id);
+
+		amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+		amdgpu_ring_write(ring,
+			(hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+		amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+		amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+		amdgpu_ring_write(ring,
+			(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+		amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+		amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+		amdgpu_ring_write(ring,
+			(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+		amdgpu_ring_write(ring, 0xffffffff);
+		amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+		/* flush TLB */
+		amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+		amdgpu_ring_write(ring,	(hub->vm_inv_eng0_req + eng) << 2);
+		amdgpu_ring_write(ring, req);
+
+		/* wait for flush */
+		amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+		amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+		amdgpu_ring_write(ring, 1 << vm_id);
+		amdgpu_ring_write(ring, 1 << vm_id);
+	}
+}
+
+static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	uint32_t val = 0;
+
+	if (state == AMDGPU_IRQ_STATE_ENABLE)
+		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
+			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	return 0;
+}
+
+static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: VCE\n");
+
+	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
+			VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
+			~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+
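+	/* src_data[0] holds the index of the ring that signalled the trap */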
+	switch (entry->src_data[0]) {
+	case 0:
+	case 1:
+	case 2:
+		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+const struct amd_ip_funcs vce_v4_0_ip_funcs = {
+	.name = "vce_v4_0",
+	.early_init = vce_v4_0_early_init,
+	.late_init = NULL,
+	.sw_init = vce_v4_0_sw_init,
+	.sw_fini = vce_v4_0_sw_fini,
+	.hw_init = vce_v4_0_hw_init,
+	.hw_fini = vce_v4_0_hw_fini,
+	.suspend = vce_v4_0_suspend,
+	.resume = vce_v4_0_resume,
+	.is_idle = NULL /* vce_v4_0_is_idle */,
+	.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
+	.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
+	.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
+	.soft_reset = NULL /* vce_v4_0_soft_reset */,
+	.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
+	.set_clockgating_state = vce_v4_0_set_clockgating_state,
+	.set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
+};
+
+static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0x3f,
+	.nop = VCE_CMD_NO_OP,
+	.support_64bit_ptrs = false,
+	.get_rptr = vce_v4_0_ring_get_rptr,
+	.get_wptr = vce_v4_0_ring_get_wptr,
+	.set_wptr = vce_v4_0_ring_set_wptr,
+	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
+	.emit_frame_size =
+		17 * AMDGPU_MAX_VMHUBS + /* vce_v4_0_emit_vm_flush */
+		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
+		1, /* vce_v4_0_ring_insert_end */
+	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
+	.emit_ib = vce_v4_0_ring_emit_ib,
+	.emit_vm_flush = vce_v4_0_emit_vm_flush,
+	.emit_fence = vce_v4_0_ring_emit_fence,
+	.test_ring = amdgpu_vce_ring_test_ring,
+	.test_ib = amdgpu_vce_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_end = vce_v4_0_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_vce_ring_begin_use,
+	.end_use = amdgpu_vce_ring_end_use,
+};
+
+static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+	DRM_INFO("VCE enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
+	.set = vce_v4_0_set_interrupt_state,
+	.process = vce_v4_0_process_interrupt,
+};
+
+static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->vce.irq.num_types = 1;
+	adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version vce_v4_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 4,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vce_v4_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.h
new file mode 100644
index 0000000..a32beda
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCE_V4_0_H__
+#define __VCE_V4_0_H__
+
+extern const struct amdgpu_ip_block_version vce_v4_0_ip_block;
+
+#endif
-- 
2.5.5


