[Intel-gfx] [PATCH 18/49] drm/i915/bdw: Allocate ringbuffer for LR contexts
oscar.mateo at intel.com
Thu Mar 27 18:59:47 CET 2014
From: Ben Widawsky <benjamin.widawsky at intel.com>
With our setup in the previous patches, we have allocated one default
context per ring. Now each of those contexts holds a pointer to its
engine's default ringbuffer and makes its own allocation of the backing
object.
To reiterate the TODO in the patch: the ringbuffer objects are pinned in
the CPU-mappable region of the GGTT. This will likely need to change at
some point.
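For illustration only (not part of this patch, and
i915_gem_object_get_page() is assumed here to return the nth backing
page of the object), the "kmapish" direction could look roughly like
the sketch below: map the ringbuffer through the kernel page tables
with vmap(), so the object no longer has to live in the mappable
aperture at all.

	/* Sketch: needs <linux/vmalloc.h>; release with vunmap(addr). */
	static void *sketch_map_ringbuf(struct drm_i915_gem_object *obj,
					int npages)
	{
		struct page **pages;
		void *addr;
		int i;

		pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;

		/* Gather the object's backing pages (assumed helper) */
		for (i = 0; i < npages; i++)
			pages[i] = i915_gem_object_get_page(obj, i);

		/* Map through the kernel page tables, not the aperture */
		addr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
		kfree(pages);

		return addr;
	}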
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
v2: Place a ringbuffer pointer inside the context; in the global default
context it simply points to the engine's default ringbuffer. Also, update
the ringbuffer backing object early, instead of waiting for the alloc &
destroy ringbuffer calls during ring initialization.
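To make the v2 change concrete (the first half is what this patch does;
the second half is a hypothetical follow-up, not in this patch): the
global default context aliases the engine's embedded ringbuffer, whereas
a later per-context ringbuffer would get its own allocation:

	/* This patch: default context aliases the engine's ringbuffer. */
	ctx->ringbuf = &ring->default_ringbuf;
	ctx->ringbuf->obj = ring_obj;

	/* Hypothetical follow-up: each new context owns its ringbuffer. */
	ctx->ringbuf = kzalloc(sizeof(*ctx->ringbuf), GFP_KERNEL);
	if (!ctx->ringbuf)
		return ERR_PTR(-ENOMEM);
	ctx->ringbuf->size = 32 * PAGE_SIZE;
	ctx->ringbuf->obj = ring_obj;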
Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 4 ++
drivers/gpu/drm/i915/i915_lrc.c | 65 +++++++++++++++++++++++++++++++--
drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++++
3 files changed, 73 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ff6a33c..3a36e28 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -597,6 +597,7 @@ struct i915_hw_context {
struct drm_i915_file_private *file_priv;
struct intel_engine *last_ring;
struct drm_i915_gem_object *obj;
+ struct intel_ringbuffer *ringbuf;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
@@ -2317,6 +2318,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
/* i915_lrc.c */
int gen8_gem_context_init(struct drm_device *dev);
void gen8_gem_context_fini(struct drm_device *dev);
+struct i915_hw_context *gen8_gem_create_context(struct drm_device *dev,
+ struct intel_engine *ring,
+ struct drm_i915_file_private *file_priv, bool create_vm);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_lrc.c b/drivers/gpu/drm/i915/i915_lrc.c
index 10e6dbc..40dfa95 100644
--- a/drivers/gpu/drm/i915/i915_lrc.c
+++ b/drivers/gpu/drm/i915/i915_lrc.c
@@ -43,6 +43,56 @@
#define GEN8_LR_CONTEXT_SIZE (21 * PAGE_SIZE)
+struct i915_hw_context *
+gen8_gem_create_context(struct drm_device *dev,
+ struct intel_engine *ring,
+ struct drm_i915_file_private *file_priv,
+ bool create_vm)
+{
+ struct i915_hw_context *ctx = NULL;
+ struct drm_i915_gem_object *ring_obj = NULL;
+ int ret;
+
+ ctx = i915_gem_create_context(dev, file_priv, create_vm);
+ if (IS_ERR_OR_NULL(ctx))
+ return ctx;
+
+ ring_obj = i915_gem_alloc_object(dev, 32 * PAGE_SIZE);
+ if (!ring_obj) {
+ i915_gem_object_ggtt_unpin(ctx->obj);
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* TODO: For now we put this in the mappable region so that we can reuse
+ * the existing ringbuffer code which ioremaps it. When we start
+ * creating many contexts, this will no longer work and we must switch
+ * to a kmapish interface.
+ */
+ ret = i915_gem_obj_ggtt_pin(ring_obj, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret) {
+ drm_gem_object_unreference(&ring_obj->base);
+ i915_gem_object_ggtt_unpin(ctx->obj);
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(ret);
+ }
+
+ /* Failure at this point is almost impossible */
+ ret = i915_gem_object_set_to_gtt_domain(ring_obj, true);
+ if (ret) {
+ i915_gem_object_ggtt_unpin(ring_obj);
+ drm_gem_object_unreference(&ring_obj->base);
+ i915_gem_object_ggtt_unpin(ctx->obj);
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(ret);
+ }
+
+ ctx->ringbuf = &ring->default_ringbuf;
+ ctx->ringbuf->obj = ring_obj;
+
+ return ctx;
+}
+
void gen8_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -50,9 +100,16 @@ void gen8_gem_context_fini(struct drm_device *dev)
int unused;
for_each_ring(ring, dev_priv, unused) {
- if (ring->default_context) {
- i915_gem_object_ggtt_unpin(ring->default_context->obj);
- i915_gem_context_unreference(ring->default_context);
+ struct i915_hw_context *ctx = ring->default_context;
+ if (ctx) {
+ struct drm_i915_gem_object *ring_obj = ctx->ringbuf->obj;
+ if (ring_obj) {
+ i915_gem_object_ggtt_unpin(ring_obj);
+ drm_gem_object_unreference(&ring_obj->base);
+ ctx->ringbuf->obj = NULL;
+ }
+ i915_gem_object_ggtt_unpin(ctx->obj);
+ i915_gem_context_unreference(ctx);
ring->default_context = NULL;
}
}
@@ -69,7 +126,7 @@ int gen8_gem_context_init(struct drm_device *dev)
dev_priv->hw_context_size = round_up(GEN8_LR_CONTEXT_SIZE, 4096);
for_each_ring(ring, dev_priv, ring_id) {
- ring->default_context = i915_gem_create_context(dev,
+ ring->default_context = gen8_gem_create_context(dev, ring,
NULL, (ring_id == RCS));
if (IS_ERR_OR_NULL(ring->default_context)) {
ret = PTR_ERR(ring->default_context);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 240e86a..a552c48 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1380,8 +1380,12 @@ static int init_phys_status_page(struct intel_engine *ring)
static void destroy_ring_buffer(struct intel_engine *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ if (dev_priv->lrc_enabled)
+ return;
+
i915_gem_object_ggtt_unpin(ringbuf->obj);
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
@@ -1390,10 +1394,14 @@ static void destroy_ring_buffer(struct intel_engine *ring)
static int alloc_ring_buffer(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = NULL;
struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
int ret;
+ if (dev_priv->lrc_enabled)
+ return 0;
+
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
--
1.9.0