[Libva] [Libva-intel-driver PATCH 4/6] Allow extra ring flags to be passed when submitting the batchbuffer
Zhao, Yakui
yakui.zhao at intel.com
Sun Aug 17 19:50:53 PDT 2014
From: Zhao Yakui <yakui.zhao at intel.com>
Signed-off-by: Zhao Yakui <yakui.zhao at intel.com>
---
src/intel_batchbuffer.c | 49 ++++++++++++++++++++++++++++++-----------------
src/intel_batchbuffer.h | 5 +++--
2 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index c6d3769..f76c133 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -33,16 +33,20 @@
#define MAX_BATCH_SIZE 0x400000
+
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch, int buffer_size)
{
struct intel_driver_data *intel = batch->intel;
int batch_size = buffer_size;
+ int ring_flag;
+
+ ring_flag = batch->flag & I915_EXEC_RING_MASK;
- assert(batch->flag == I915_EXEC_RENDER ||
- batch->flag == I915_EXEC_BLT ||
- batch->flag == I915_EXEC_BSD ||
- batch->flag == I915_EXEC_VEBOX);
+ assert(ring_flag == I915_EXEC_RENDER ||
+ ring_flag == I915_EXEC_BLT ||
+ ring_flag == I915_EXEC_BSD ||
+ ring_flag == I915_EXEC_VEBOX);
dri_bo_unreference(batch->buffer);
batch->buffer = dri_bo_alloc(intel->bufmgr,
@@ -69,10 +73,13 @@ struct intel_batchbuffer *
intel_batchbuffer_new(struct intel_driver_data *intel, int flag, int buffer_size)
{
struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));
- assert(flag == I915_EXEC_RENDER ||
- flag == I915_EXEC_BSD ||
- flag == I915_EXEC_BLT ||
- flag == I915_EXEC_VEBOX);
+ int ring_flag;
+
+ ring_flag = flag & I915_EXEC_RING_MASK;
+ assert(ring_flag == I915_EXEC_RENDER ||
+ ring_flag == I915_EXEC_BSD ||
+ ring_flag == I915_EXEC_BLT ||
+ ring_flag == I915_EXEC_VEBOX);
if (!buffer_size || buffer_size < BATCH_SIZE) {
buffer_size = BATCH_SIZE;
@@ -182,11 +189,13 @@ void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
struct intel_driver_data *intel = batch->intel;
+ int ring_flag;
+ ring_flag = batch->flag & I915_EXEC_RING_MASK;
if (IS_GEN6(intel->device_info) ||
IS_GEN7(intel->device_info) ||
IS_GEN8(intel->device_info)) {
- if (batch->flag == I915_EXEC_RENDER) {
+ if (ring_flag == I915_EXEC_RENDER) {
if (IS_GEN8(intel->device_info)) {
BEGIN_BATCH(batch, 6);
OUT_BATCH(batch, CMD_PIPE_CONTROL | (6 - 2));
@@ -247,14 +256,14 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
}
} else {
- if (batch->flag == I915_EXEC_BLT) {
+ if (ring_flag == I915_EXEC_BLT) {
BEGIN_BLT_BATCH(batch, 4);
OUT_BLT_BATCH(batch, MI_FLUSH_DW);
OUT_BLT_BATCH(batch, 0);
OUT_BLT_BATCH(batch, 0);
OUT_BLT_BATCH(batch, 0);
ADVANCE_BLT_BATCH(batch);
- }else if (batch->flag == I915_EXEC_VEBOX) {
+ }else if (ring_flag == I915_EXEC_VEBOX) {
BEGIN_VEB_BATCH(batch, 4);
OUT_VEB_BATCH(batch, MI_FLUSH_DW);
OUT_VEB_BATCH(batch, 0);
@@ -262,7 +271,7 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
OUT_VEB_BATCH(batch, 0);
ADVANCE_VEB_BATCH(batch);
} else {
- assert(batch->flag == I915_EXEC_BSD);
+ assert(ring_flag == I915_EXEC_BSD);
BEGIN_BCS_BATCH(batch, 4);
OUT_BCS_BATCH(batch, MI_FLUSH_DW | MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE);
OUT_BCS_BATCH(batch, 0);
@@ -272,12 +281,12 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
}
}
} else {
- if (batch->flag == I915_EXEC_RENDER) {
+ if (ring_flag == I915_EXEC_RENDER) {
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
ADVANCE_BATCH(batch);
} else {
- assert(batch->flag == I915_EXEC_BSD);
+ assert(ring_flag == I915_EXEC_BSD);
BEGIN_BCS_BATCH(batch, 1);
OUT_BCS_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
ADVANCE_BCS_BATCH(batch);
@@ -301,10 +310,14 @@ intel_batchbuffer_advance_batch(struct intel_batchbuffer *batch)
void
intel_batchbuffer_check_batchbuffer_flag(struct intel_batchbuffer *batch, int flag)
{
- if (flag != I915_EXEC_RENDER &&
- flag != I915_EXEC_BLT &&
- flag != I915_EXEC_BSD &&
- flag != I915_EXEC_VEBOX)
+ int ring_flag;
+
+ ring_flag = flag & I915_EXEC_RING_MASK;
+
+ if (ring_flag != I915_EXEC_RENDER &&
+ ring_flag != I915_EXEC_BLT &&
+ ring_flag != I915_EXEC_BSD &&
+ ring_flag != I915_EXEC_VEBOX)
return;
if (batch->flag == flag)
diff --git a/src/intel_batchbuffer.h b/src/intel_batchbuffer.h
index 34ff66d..e4d0994 100644
--- a/src/intel_batchbuffer.h
+++ b/src/intel_batchbuffer.h
@@ -51,9 +51,10 @@ int intel_batchbuffer_check_free_space(struct intel_batchbuffer *batch, int size
int intel_batchbuffer_used_size(struct intel_batchbuffer *batch);
void intel_batchbuffer_align(struct intel_batchbuffer *batch, unsigned int alignedment);
+
#define __BEGIN_BATCH(batch, n, f) do { \
- assert(f == batch->flag); \
- intel_batchbuffer_check_batchbuffer_flag(batch, f); \
+ assert(f == (batch->flag & I915_EXEC_RING_MASK)); \
+ intel_batchbuffer_check_batchbuffer_flag(batch, batch->flag); \
intel_batchbuffer_require_space(batch, (n) * 4); \
intel_batchbuffer_begin_batch(batch, (n)); \
} while (0)
--
1.7.10.1
More information about the Libva
mailing list