[PATCH 01/23] drm/amdgpu: add support for kernel interface queue (KIQ)

Xiangliang Yu Xiangliang.Yu at amd.com
Sat Dec 17 16:16:23 UTC 2016


KIQ (Kernel Interface Queue) is a queue-memory based initialization
method: first set up the KIQ ring itself, then submit packets on it to
set up all other queues, without accessing their registers directly.

For virtualization, the driver needs KIQ to access virtual function
registers when running in guest mode.
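
A minimal sketch of how a gfx IP backend might chain these helpers
together at hw_init time (the kiq_hw_init() wrapper and its arguments
are hypothetical glue, and kiq_ring is assumed to have been created
earlier with amdgpu_kiq_init_ring(); only the amdgpu_kiq_*/
amdgpu_mqd_init calls are what this patch actually adds):

	/* hypothetical caller; mqd points at a pinned, CPU-mapped MQD BO */
	static int kiq_hw_init(struct amdgpu_device *adev,
			       struct amdgpu_ring *kiq_ring,
			       struct vi_mqd *mqd,
			       uint64_t mqd_gpu_addr, uint64_t eop_gpu_addr)
	{
		int r;

		/* fill the MQD in memory ... */
		r = amdgpu_mqd_init(adev, mqd, mqd_gpu_addr, eop_gpu_addr,
				    kiq_ring);
		if (r)
			return r;

		/* ... and load it into the HQD; the KIQ itself is the one
		 * queue that must still be brought up via MMIO */
		r = amdgpu_kiq_init(adev, mqd, kiq_ring);
		if (r)
			return r;

		/* tell the RLC about the KIQ, then claim queue resources */
		amdgpu_kiq_enable(kiq_ring);
		amdgpu_kiq_start(kiq_ring);

		/* from here on, other compute queues can be mapped with
		 * packets instead of register writes */
		amdgpu_kiq_map_queue(kiq_ring, &adev->gfx.compute_ring[0]);
		return 0;
	}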

Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_kiq.c | 442 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/vid.h        |   2 +
 2 files changed, 444 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_kiq.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kiq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kiq.c
new file mode 100644
index 0000000..e9cdc28
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kiq.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Xiangliang.Yu@amd.com
+ *	    Monk.Liu@amd.com
+ */
+#include "amdgpu.h"
+#include "vid.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+enum amdgpu_kiq_irq {
+	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
+	AMDGPU_CP_KIQ_IRQ_LAST
+};
+
+int amdgpu_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+			 struct amdgpu_irq_src *irq)
+{
+	int r = 0;
+
+	ring->adev = NULL;
+	ring->ring_obj = NULL;
+	ring->use_doorbell = true;
+	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
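+	/* prefer the second MEC if its firmware is loaded, else MEC1 pipe 1 */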
+	if (adev->gfx.mec2_fw) {
+		ring->me = 2;
+		ring->pipe = 0;
+	} else {
+		ring->me = 1;
+		ring->pipe = 1;
+	}
+
+	irq->data = ring;
+	ring->queue = 0;
+	sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
+	r = amdgpu_ring_init(adev, ring, 1024,
+			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+	if (r)
+		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+	return r;
+}
+
+void amdgpu_kiq_free_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq)
+{
+	amdgpu_ring_fini(ring);
+	irq->data = NULL;
+}
+
+void amdgpu_kiq_enable(struct amdgpu_ring *ring)
+{
+	uint32_t tmp;
+	struct amdgpu_device *adev = ring->adev;
+
+	/* tell the RLC which queue is the KIQ */
+	tmp = RREG32(mmRLC_CP_SCHEDULERS);
+	tmp &= 0xffffff00;
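+	/* low byte selects the KIQ: queue in bits [2:0], pipe [4:3], me [6:5] */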
+	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
+	WREG32(mmRLC_CP_SCHEDULERS, tmp);
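+	/* write again with bit 7 set; this appears to be the activate bit */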
+	tmp |= 0x80;
+	WREG32(mmRLC_CP_SCHEDULERS, tmp);
+}
+
+void amdgpu_kiq_start(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_alloc(ring, 8);
+	/* set resources */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+	amdgpu_ring_write(ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
+	amdgpu_ring_write(ring, 0x000000FF);	/* queue mask lo */
+	amdgpu_ring_write(ring, 0);	/* queue mask hi */
+	amdgpu_ring_write(ring, 0);	/* gws mask lo */
+	amdgpu_ring_write(ring, 0);	/* gws mask hi */
+	amdgpu_ring_write(ring, 0);	/* oac mask */
+	amdgpu_ring_write(ring, 0);	/* gds heap base:0, gds heap size:0 */
+	amdgpu_ring_commit(ring);
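+	/* no fence on the KIQ yet, so briefly wait for CP to consume this */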
+	udelay(50);
+}
+
+void amdgpu_kiq_map_queue(struct amdgpu_ring *kiq_ring,
+			  struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = kiq_ring->adev;
+	uint64_t mqd_addr, wptr_addr;
+
+	mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+	wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	amdgpu_ring_alloc(kiq_ring, 8);
+
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
+	amdgpu_ring_write(kiq_ring, 0x21010000);
+	amdgpu_ring_write(kiq_ring, (ring->doorbell_index << 2) |
+			(ring->queue << 26) |
+			(ring->pipe << 29) |
+			((ring->me == 1 ? 0 : 1) << 31)); /* doorbell */
+	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+	amdgpu_ring_commit(kiq_ring);
+	udelay(50);
+}
+
+#define MEC_HPD_SIZE	2048
+
+int amdgpu_mqd_init(struct amdgpu_device *adev,
+		    struct vi_mqd *mqd,
+		    uint64_t mqd_gpu_addr,
+		    uint64_t eop_gpu_addr,
+		    struct amdgpu_ring *ring)
+{
+	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
+	uint32_t tmp;
+
+	mqd->header = 0xC0310800;
+	mqd->compute_pipelinestat_enable = 0x00000001;
+	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
+	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
+	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
+	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+	mqd->compute_misc_reserved = 0x00000003;
+
+	eop_base_addr = eop_gpu_addr >> 8;
+	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
+	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
+
+	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+			(order_base_2(MEC_HPD_SIZE / 4) - 1));
+
+	mqd->cp_hqd_eop_control = tmp;
+
+	/* enable doorbell? */
+	tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+
+	if (ring->use_doorbell)
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+					 DOORBELL_EN, 1);
+	else
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+					 DOORBELL_EN, 0);
+
+	mqd->cp_hqd_pq_doorbell_control = tmp;
+
+	/* disable the queue if it's active */
+	mqd->cp_hqd_dequeue_request = 0;
+	mqd->cp_hqd_pq_rptr = 0;
+	mqd->cp_hqd_pq_wptr = 0;
+
+	/* set the pointer to the MQD */
+	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
+	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+
+	/* set MQD vmid to 0 */
+	tmp = RREG32(mmCP_MQD_CONTROL);
+	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+	mqd->cp_mqd_control = tmp;
+
+	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
+	hqd_gpu_addr = ring->gpu_addr >> 8;
+	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+
+	/* set up the HQD, this is similar to CP_RB0_CNTL */
+	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+			    (order_base_2(ring->ring_size / 4) - 1));
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+	mqd->cp_hqd_pq_control = tmp;
+
+	/* set the wb address whether it's enabled or not */
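+	/* the _hi half is only a 16-bit field, hence the 0xffff masks below */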
+	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_rptr_report_addr_hi =
+		upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	tmp = 0;
+	/* enable the doorbell if requested */
+	if (ring->use_doorbell) {
+		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				DOORBELL_OFFSET, ring->doorbell_index);
+
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+					 DOORBELL_EN, 1);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+					 DOORBELL_SOURCE, 0);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+					 DOORBELL_HIT, 0);
+	}
+
+	mqd->cp_hqd_pq_doorbell_control = tmp;
+
+	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+	ring->wptr = 0;
+	mqd->cp_hqd_pq_wptr = ring->wptr;
+	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+
+	/* set the vmid for the queue */
+	mqd->cp_hqd_vmid = 0;
+
+	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
+	mqd->cp_hqd_persistent_state = tmp;
+
+	/* activate the queue */
+	mqd->cp_hqd_active = 1;
+
+	return 0;
+}
+
+int amdgpu_kiq_init(struct amdgpu_device *adev,
+		    struct vi_mqd *mqd,
+		    struct amdgpu_ring *ring)
+{
+	uint32_t tmp;
+	int j;
+
+	/* disable wptr polling */
+	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
+	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
+	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+
+	WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
+	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
+
+	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+	WREG32(mmCP_HQD_EOP_CONTROL, mqd->cp_hqd_eop_control);
+
+	/* enable doorbell? */
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
+
+	/* disable the queue if it's active */
+	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
+		for (j = 0; j < adev->usec_timeout; j++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+				break;
+			udelay(1);
+		}
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
+		WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
+		WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+	}
+
+	/* set the pointer to the MQD */
+	WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
+
+	/* set MQD vmid to 0 */
+	WREG32(mmCP_MQD_CONTROL, mqd->cp_mqd_control);
+
+	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
+	WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
+
+	/* set up the HQD, this is similar to CP_RB0_CNTL */
+	WREG32(mmCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
+				mqd->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+				mqd->cp_hqd_pq_rptr_report_addr_hi);
+
+	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo);
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi);
+
+	/* enable the doorbell if requested */
+	if (ring->use_doorbell) {
+		if ((adev->asic_type == CHIP_CARRIZO) ||
+				(adev->asic_type == CHIP_FIJI) ||
+				(adev->asic_type == CHIP_STONEY)) {
+			WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
+						AMDGPU_DOORBELL_KIQ << 2);
+			WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
+						AMDGPU_DOORBELL_MEC_RING7 << 2);
+		}
+	}
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
+
+	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+	WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+
+	/* set the vmid for the queue */
+	WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
+
+	WREG32(mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
+
+	/* activate the queue */
+	WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
+
+	if (ring->use_doorbell) {
+		tmp = RREG32(mmCP_PQ_STATUS);
+		tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
+		WREG32(mmCP_PQ_STATUS, tmp);
+	}
+
+	return 0;
+}
+
+void amdgpu_kiq_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+				u64 seq, unsigned int flags)
+{
+	/* we only allocate 32 bits for each fence seq wb address */
+	if (flags & AMDGPU_FENCE_FLAG_64BIT)
+		BUG();
+
+	/* write fence seq to the "addr" */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, lower_32_bits(seq));
+
+	if (flags & AMDGPU_FENCE_FLAG_INT) {
+		/* set register to trigger INT */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
+		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
+	}
+}
+
+static int amdgpu_kiq_set_interrupt_state(struct amdgpu_device *adev,
+					  struct amdgpu_irq_src *src,
+					  unsigned int type,
+					  enum amdgpu_interrupt_state state)
+{
+	uint32_t tmp, target;
+	struct amdgpu_ring *ring = (struct amdgpu_ring *)src->data;
+
+	BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+
+	target = (ring->me == 1) ? mmCP_ME1_PIPE0_INT_CNTL : mmCP_ME2_PIPE0_INT_CNTL;
+	target += ring->pipe;
+
+	switch (type) {
+	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
+		if (state == AMDGPU_IRQ_STATE_DISABLE) {
+			tmp = RREG32(mmCPC_INT_CNTL);
+			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+						 GENERIC2_INT_ENABLE, 0);
+			WREG32(mmCPC_INT_CNTL, tmp);
+
+			tmp = RREG32(target);
+			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+						 GENERIC2_INT_ENABLE, 0);
+			WREG32(target, tmp);
+		} else {
+			tmp = RREG32(mmCPC_INT_CNTL);
+			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+						 GENERIC2_INT_ENABLE, 1);
+			WREG32(mmCPC_INT_CNTL, tmp);
+
+			tmp = RREG32(target);
+			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+						 GENERIC2_INT_ENABLE, 1);
+			WREG32(target, tmp);
+		}
+		break;
+	default:
+		BUG(); /* KIQ only supports GENERIC2_INT for now */
+		break;
+	}
+	return 0;
+}
+
+static int amdgpu_kiq_irq(struct amdgpu_device *adev,
+			  struct amdgpu_irq_src *source,
+			  struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring = (struct amdgpu_ring *)source->data;
+
+	BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+
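+	/* ring_id encodes pipe in bits [1:0], me in [3:2], queue in [6:4] */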
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
+		   me_id, pipe_id, queue_id);
+
+	amdgpu_fence_process(ring);
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs amdgpu_kiq_irq_funcs = {
+	.set = amdgpu_kiq_set_interrupt_state,
+	.process = amdgpu_kiq_irq,
+};
+
+void amdgpu_kiq_set_irq_funcs(struct amdgpu_irq_src *irq)
+{
+	irq->num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+	irq->funcs = &amdgpu_kiq_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 11746f2..7a3863a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -360,6 +360,8 @@
 #define	PACKET3_WAIT_ON_CE_COUNTER			0x86
 #define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
 #define	PACKET3_SWITCH_BUFFER				0x8B
+#define	PACKET3_SET_RESOURCES				0xA0
+#define	PACKET3_MAP_QUEUES				0xA2
 
 #define VCE_CMD_NO_OP		0x00000000
 #define VCE_CMD_END		0x00000001
-- 
2.7.4


