[Intel-gfx] [PATCH 21/33] drm/i915: Use VMA for scratch page tracking
Chris Wilson
chris at chris-wilson.co.uk
Sun Aug 7 14:45:29 UTC 2016
Instead of tracking the scratch page with a bare GEM object plus a cached
gtt_offset on each engine, hold a pinned i915_vma and read the GGTT address
from vma->node.start at the emit sites. The allocation and teardown move into
intel_engine_cs.c as intel_engine_create_scratch() and
intel_engine_cleanup_scratch(), replacing intel_init_pipe_control() /
intel_fini_pipe_control() and sharing the code between the legacy and
execlists paths.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
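Purely for illustration, the shape of the change reduced to a standalone
sketch (the i915 types are mocked so it compiles on its own; this is not
driver code): the engine used to carry a bare GEM object plus a cached
gtt_offset, and now carries a pinned VMA whose GGTT address is read back
from vma->node.start at the emit sites.

/* Standalone sketch of the tracking change -- mocked types, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64	/* matches the driver's definition */

struct drm_i915_gem_object { int placeholder; };
struct drm_mm_node { uint64_t start; };	/* GGTT placement of the binding */
struct i915_vma {
	struct drm_i915_gem_object *obj;
	struct drm_mm_node node;
};

/* Before: object and cached offset tracked separately on the engine. */
struct engine_scratch_old {
	struct drm_i915_gem_object *obj;
	uint32_t gtt_offset;
};

/* After: one pinned VMA carries both the object and its GGTT address. */
struct engine_scratch_new {
	struct i915_vma *scratch;
};

int main(void)
{
	struct drm_i915_gem_object obj = { 0 };
	struct i915_vma vma = { .obj = &obj, .node = { .start = 0x1000 } };
	struct engine_scratch_new engine = { .scratch = &vma };

	/* What the PIPE_CONTROL emit sites now compute instead of reading
	 * the cached scratch.gtt_offset (128 bytes into the scratch page). */
	uint32_t scratch_addr =
		(uint32_t)(engine.scratch->node.start + 2 * CACHELINE_BYTES);

	printf("scratch page at 0x%08llx, pipe-control write at 0x%08x\n",
	       (unsigned long long)engine.scratch->node.start, scratch_addr);
	return 0;
}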
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/i915_gpu_error.c | 2 +-
drivers/gpu/drm/i915/intel_display.c | 2 +-
drivers/gpu/drm/i915/intel_engine_cs.c | 50 ++++++++++++++++++++++++++++
drivers/gpu/drm/i915/intel_lrc.c | 17 +++++-----
drivers/gpu/drm/i915/intel_ringbuffer.c | 59 +++++----------------------------
drivers/gpu/drm/i915/intel_ringbuffer.h | 10 ++----
7 files changed, 71 insertions(+), 71 deletions(-)
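Likewise, the teardown ordering that the new intel_engine_cleanup_scratch()
relies on, as a standalone sketch (the fetch_and_zero-style helper and the
unpin/put calls are mocked here): clear the engine's pointer first, then drop
the pin before the final reference on the backing object.

/* Standalone sketch of the cleanup ordering -- everything i915 is mocked. */
#include <stdio.h>

struct i915_vma { int pin_count; int obj_refs; };

/* Mock of the driver's read-then-clear idiom for the engine->scratch slot. */
#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})

static void mock_vma_unpin(struct i915_vma *vma) { vma->pin_count--; }
static void mock_object_put(struct i915_vma *vma) { vma->obj_refs--; }

static void cleanup_scratch(struct i915_vma **slot)
{
	struct i915_vma *vma = fetch_and_zero(slot);

	if (!vma)		/* nothing allocated for this engine */
		return;

	mock_vma_unpin(vma);	/* drop the PIN_GLOBAL | PIN_HIGH pin first */
	mock_object_put(vma);	/* ...then the reference on the backing object */
}

int main(void)
{
	struct i915_vma vma = { .pin_count = 1, .obj_refs = 1 };
	struct i915_vma *scratch = &vma;

	cleanup_scratch(&scratch);
	printf("slot=%p pin_count=%d obj_refs=%d\n",
	       (void *)scratch, vma.pin_count, vma.obj_refs);
	return 0;
}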
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5d42fee75464..15eed897b498 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -660,7 +660,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, last_reg);
- intel_ring_emit(ring, engine->scratch.gtt_offset);
+ intel_ring_emit(ring, engine->scratch->node.start);
intel_ring_emit(ring, MI_NOOP);
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 09c3ae0c282a..2d93af0bb793 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1075,7 +1075,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
if (HAS_BROKEN_CS_TLB(dev_priv))
ee->wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
- engine->scratch.obj);
+ engine->scratch->obj);
if (request->ctx->engine[i].state) {
ee->ctx = i915_error_ggtt_object_create(dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cbf5431c1e3..3deee0306e82 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11325,7 +11325,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
+ intel_ring_emit(ring, req->engine->scratch->node.start + 256);
if (IS_GEN8(dev)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 0dd3d1de18aa..1dec35441ab5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -195,6 +195,54 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(engine, &engine->batch_pool);
}
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ WARN_ON(engine->scratch);
+
+ obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+ if (!obj)
+ obj = i915_gem_object_create(&engine->i915->drm, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ engine->scratch = vma;
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08llx\n",
+ engine->name, vma->node.start);
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&engine->scratch);
+ if (!vma)
+ return;
+
+ i915_vma_unpin(vma);
+ i915_gem_object_put(vma->obj);
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -226,6 +274,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ intel_engine_cleanup_scratch(engine);
+
intel_engine_cleanup_cmd_parser(engine);
intel_engine_fini_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 198d59b272b2..096eb8c2da17 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -914,7 +914,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -932,7 +932,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
wa_ctx_emit(batch, index, 0);
return index;
@@ -993,7 +993,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at 128 bytes offset */
- scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+ scratch_addr = engine->scratch->node.start + 2*CACHELINE_BYTES;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1072,8 +1072,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at 128 bytes offset */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
- uint32_t scratch_addr
- = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+ uint32_t scratch_addr =
+ engine->scratch->node.start + 2*CACHELINE_BYTES;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1215,7 +1215,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
}
/* some WA perform writes to scratch page, ensure it is valid */
- if (engine->scratch.obj == NULL) {
+ if (!engine->scratch) {
DRM_ERROR("scratch page not allocated for %s\n", engine->name);
return -EINVAL;
}
@@ -1483,7 +1483,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
{
struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch->node.start + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
@@ -1844,11 +1844,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
else
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
- engine->cleanup = intel_fini_pipe_control;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
- ret = intel_init_pipe_control(engine, 4096);
+ ret = intel_engine_create_scratch(engine, 4096);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cff9935fe36f..af2d81ae3e7d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -176,7 +176,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
u32 scratch_addr =
- req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
int ret;
ret = intel_ring_begin(req, 6);
@@ -212,7 +212,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
u32 scratch_addr =
- req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
@@ -286,7 +286,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
u32 scratch_addr =
- req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
@@ -370,7 +370,8 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req,
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
@@ -612,48 +613,6 @@ out:
return ret;
}
-void intel_fini_pipe_control(struct intel_engine_cs *engine)
-{
- if (engine->scratch.obj == NULL)
- return;
-
- i915_gem_object_ggtt_unpin(engine->scratch.obj);
- i915_gem_object_put(engine->scratch.obj);
- engine->scratch.obj = NULL;
-}
-
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
-{
- struct drm_i915_gem_object *obj;
- int ret;
-
- WARN_ON(engine->scratch.obj);
-
- obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
- if (!obj)
- obj = i915_gem_object_create(&engine->i915->drm, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- ret = PTR_ERR(obj);
- goto err;
- }
-
- ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch.obj = obj;
- engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
- DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- engine->name, engine->scratch.gtt_offset);
- return 0;
-
-err_unref:
- i915_gem_object_put(engine->scratch.obj);
-err:
- return ret;
-}
-
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
@@ -1304,8 +1263,6 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_gem_object_put(dev_priv->semaphore_obj);
dev_priv->semaphore_obj = NULL;
}
-
- intel_fini_pipe_control(engine);
}
static int gen8_rcs_signal(struct drm_i915_gem_request *req)
@@ -1763,7 +1720,7 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
- u32 cs_offset = req->engine->scratch.gtt_offset;
+ u32 cs_offset = req->engine->scratch->node.start;
int ret;
ret = intel_ring_begin(req, 6);
@@ -2790,11 +2747,11 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_init_pipe_control(engine, 4096);
+ ret = intel_engine_create_scratch(engine, 4096);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
- ret = intel_init_pipe_control(engine, I830_WA_SIZE);
+ ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 35e2b87ab17a..9e3ab8129734 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -198,6 +198,7 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
+ struct i915_vma *scratch;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -320,11 +321,6 @@ struct intel_engine_cs {
struct intel_engine_hangcheck hangcheck;
- struct {
- struct drm_i915_gem_object *obj;
- u32 gtt_offset;
- } scratch;
-
bool needs_cmd_parser;
/*
@@ -476,11 +472,9 @@ void intel_ring_update_space(struct intel_ring *ring);
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
-void intel_fini_pipe_control(struct intel_engine_cs *engine);
-
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
static inline int intel_engine_idle(struct intel_engine_cs *engine,
--
2.8.1