[Intel-gfx] [PATCH v2] drm/i915/guc: Keep the ctx_pool_vaddr mapped, for easy access
Oscar Mateo
oscar.mateo at intel.com
Thu Feb 16 14:18:09 UTC 2017
The onion teardown is in a separate patch (since it addresses a separate problem).
On 02/16/2017 06:15 AM, Oscar Mateo wrote:
> The GuC context descriptor is large. If we keep a local copy of it on the
> stack we risk overflowing the stack, so avoid that by writing directly
> into the permanently mapped context pool instead.
>
> Also, Chris abhors scatterlists :)
>
> v2: Rebased, helper function to retrieve the context descriptor,
> s/ctx_pool_vma/ctx_pool/
>
> Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
> ---
> drivers/gpu/drm/i915/i915_guc_submission.c | 94 ++++++++++++++----------------
> drivers/gpu/drm/i915/intel_guc_loader.c | 2 +-
> drivers/gpu/drm/i915/intel_uc.h | 3 +-
> 3 files changed, 48 insertions(+), 51 deletions(-)
>
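To make the stack-overflow point in the commit message concrete, here is a
condensed before/after of the doorbell-update path. This is illustrative
only and not part of the patch: the update_db_id_old/new names are made up,
error handling is trimmed, and the real changes are in the hunks below.

/* Before: round-trip a large guc_context_desc through the kernel stack. */
static int update_db_id_old(struct i915_guc_client *client, u16 new_id)
{
	struct sg_table *sg = client->guc->ctx_pool_vma->pages;
	struct guc_context_desc desc;	/* big object on the stack */
	size_t len;

	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				 sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;

	desc.db_id = new_id;

	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				   sizeof(desc) * client->ctx_index);
	return len == sizeof(desc) ? 0 : -EFAULT;
}

/* After: the pool stays pinned and mapped, so poke the field in place. */
static int update_db_id_new(struct i915_guc_client *client, u16 new_id)
{
	struct guc_context_desc *desc = client->guc->ctx_pool_vaddr +
			sizeof(struct guc_context_desc) * client->ctx_index;

	desc->db_id = new_id;
	return 0;
}

The cost is keeping one extra CPU mapping of the pool object alive for the
lifetime of GuC submission, which is what i915_guc_submission_init()/fini()
manage below.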
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 3ea2c71..f7d9897 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -134,6 +134,12 @@ static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 ctx_index)
> return intel_guc_send(guc, action, ARRAY_SIZE(action));
> }
>
> +static struct guc_context_desc *__get_context_desc(struct i915_guc_client *client)
> +{
> + return client->guc->ctx_pool_vaddr +
> + sizeof(struct guc_context_desc) * client->ctx_index;
> +}
> +
> /*
> * Initialise, update, or clear doorbell data shared with the GuC
> *
> @@ -143,21 +149,11 @@ static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 ctx_index)
>
> static int __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
> {
> - struct sg_table *sg = client->guc->ctx_pool_vma->pages;
> - struct guc_context_desc desc;
> - size_t len;
> + struct guc_context_desc *desc;
>
> /* Update the GuC's idea of the doorbell ID */
> - len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
> - sizeof(desc) * client->ctx_index);
> - if (len != sizeof(desc))
> - return -EFAULT;
> -
> - desc.db_id = new_id;
> - len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
> - sizeof(desc) * client->ctx_index);
> - if (len != sizeof(desc))
> - return -EFAULT;
> + desc = __get_context_desc(client);
> + desc->db_id = new_id;
>
> return 0;
> }
> @@ -270,29 +266,27 @@ static void guc_proc_desc_init(struct intel_guc *guc,
> * data structures relating to this client (doorbell, process descriptor,
> * write queue, etc).
> */
> -
> static void guc_ctx_desc_init(struct intel_guc *guc,
> struct i915_guc_client *client)
> {
> struct drm_i915_private *dev_priv = guc_to_i915(guc);
> struct intel_engine_cs *engine;
> struct i915_gem_context *ctx = client->owner;
> - struct guc_context_desc desc;
> - struct sg_table *sg;
> + struct guc_context_desc *desc;
> unsigned int tmp;
> u32 gfx_addr;
>
> - memset(&desc, 0, sizeof(desc));
> + desc = __get_context_desc(client);
>
> - desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
> - desc.context_id = client->ctx_index;
> - desc.priority = client->priority;
> - desc.db_id = client->doorbell_id;
> + desc->attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
> + desc->context_id = client->ctx_index;
> + desc->priority = client->priority;
> + desc->db_id = client->doorbell_id;
>
> for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
> struct intel_context *ce = &ctx->engine[engine->id];
> uint32_t guc_engine_id = engine->guc_id;
> - struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
> + struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
>
> /* TODO: We have a design issue to be solved here. Only when we
> * receive the first batch, we know which engine is used by the
> @@ -317,50 +311,41 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
> lrc->ring_next_free_location = lrc->ring_begin;
> lrc->ring_current_tail_pointer_value = 0;
>
> - desc.engines_used |= (1 << guc_engine_id);
> + desc->engines_used |= (1 << guc_engine_id);
> }
>
> DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
> - client->engines, desc.engines_used);
> - WARN_ON(desc.engines_used == 0);
> + client->engines, desc->engines_used);
> + WARN_ON(desc->engines_used == 0);
>
> /*
> * The doorbell, process descriptor, and workqueue are all parts
> * of the client object, which the GuC will reference via the GGTT
> */
> gfx_addr = guc_ggtt_offset(client->vma);
> - desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
> + desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
> + client->doorbell_offset;
> + desc->db_trigger_cpu = (uintptr_t)client->vaddr +
> client->doorbell_offset;
> - desc.db_trigger_cpu =
> - (uintptr_t)client->vaddr + client->doorbell_offset;
> - desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
> - desc.process_desc = gfx_addr + client->proc_desc_offset;
> - desc.wq_addr = gfx_addr + client->wq_offset;
> - desc.wq_size = client->wq_size;
> + desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
> + desc->process_desc = gfx_addr + client->proc_desc_offset;
> + desc->wq_addr = gfx_addr + client->wq_offset;
> + desc->wq_size = client->wq_size;
>
> /*
> * XXX: Take LRCs from an existing context if this is not an
> * IsKMDCreatedContext client
> */
> - desc.desc_private = (uintptr_t)client;
> -
> - /* Pool context is pinned already */
> - sg = guc->ctx_pool_vma->pages;
> - sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
> - sizeof(desc) * client->ctx_index);
> + desc->desc_private = (uintptr_t)client;
> }
>
> static void guc_ctx_desc_fini(struct intel_guc *guc,
> struct i915_guc_client *client)
> {
> - struct guc_context_desc desc;
> - struct sg_table *sg;
> -
> - memset(&desc, 0, sizeof(desc));
> + struct guc_context_desc *desc;
>
> - sg = guc->ctx_pool_vma->pages;
> - sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
> - sizeof(desc) * client->ctx_index);
> + desc = __get_context_desc(client);
> + memset(desc, 0, sizeof(*desc));
> }
>
> /**
> @@ -913,6 +898,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
> const size_t gemsize = round_up(poolsize, PAGE_SIZE);
> struct intel_guc *guc = &dev_priv->guc;
> struct i915_vma *vma;
> + void *vaddr;
>
> if (!HAS_GUC_SCHED(dev_priv))
> return 0;
> @@ -924,14 +910,21 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
> if (!i915.enable_guc_submission)
> return 0; /* not enabled */
>
> - if (guc->ctx_pool_vma)
> + if (guc->ctx_pool)
> return 0; /* already allocated */
>
> vma = intel_guc_allocate_vma(guc, gemsize);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> - guc->ctx_pool_vma = vma;
> + guc->ctx_pool = vma;
> +
> + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
> + if (IS_ERR(vaddr))
> + goto err;
> +
> + guc->ctx_pool_vaddr = vaddr;
> +
> ida_init(&guc->ctx_ids);
> intel_guc_log_create(guc);
> guc_addon_create(guc);
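(A note on the hunk above: the target of the new "goto err" is outside the
quoted context. Presumably the error path has to release the pool vma that
was just allocated so a failed pin_map does not leak it; a minimal sketch
under that assumption, not the patch's actual unwind:

err:
	/* Don't leak the pool object if the CPU mapping failed. */
	i915_vma_unpin_and_release(&guc->ctx_pool);
	return PTR_ERR(vaddr);

This ties in with moving the full onion teardown to a separate patch, as
mentioned at the top.)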
> @@ -1023,9 +1016,12 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
> i915_vma_unpin_and_release(&guc->ads_vma);
> i915_vma_unpin_and_release(&guc->log.vma);
>
> - if (guc->ctx_pool_vma)
> + if (guc->ctx_pool_vaddr) {
> ida_destroy(&guc->ctx_ids);
> - i915_vma_unpin_and_release(&guc->ctx_pool_vma);
> + i915_gem_object_unpin_map(guc->ctx_pool->obj);
> + }
> +
> + i915_vma_unpin_and_release(&guc->ctx_pool);
> }
>
> /**
> diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
> index 9885f76..22f882d 100644
> --- a/drivers/gpu/drm/i915/intel_guc_loader.c
> +++ b/drivers/gpu/drm/i915/intel_guc_loader.c
> @@ -221,7 +221,7 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
>
> /* If GuC submission is enabled, set up additional parameters here */
> if (i915.enable_guc_submission) {
> - u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
> + u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool);
> u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
>
> pgs >>= PAGE_SHIFT;
> diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
> index c80f470..511b96b 100644
> --- a/drivers/gpu/drm/i915/intel_uc.h
> +++ b/drivers/gpu/drm/i915/intel_uc.h
> @@ -153,7 +153,8 @@ struct intel_guc {
> bool interrupts_enabled;
>
> struct i915_vma *ads_vma;
> - struct i915_vma *ctx_pool_vma;
> + struct i915_vma *ctx_pool;
> + void *ctx_pool_vaddr;
> struct ida ctx_ids;
>
> struct i915_guc_client *execbuf_client;