[Intel-gfx] [PATCH 09/15] drm/i915: GuC submission setup, phase 1
Chris Wilson
chris at chris-wilson.co.uk
Mon Jun 15 14:32:38 PDT 2015
On Mon, Jun 15, 2015 at 07:36:27PM +0100, Dave Gordon wrote:
> +static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
> +                                                         u32 size)
> +{
> +        struct drm_i915_gem_object *obj;
> +
> +        obj = i915_gem_alloc_object(dev, size);
> +        if (!obj)
> +                return NULL;
Does it need to be a shmemfs object?
> +        if (i915_gem_object_get_pages(obj)) {
> +                drm_gem_object_unreference(&obj->base);
> +                return NULL;
> +        }
This is a random function call; the ggtt_pin below acquires the backing
pages anyway, so why call get_pages() here at all?
> +        if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
> +                                  PIN_OFFSET_BIAS | GUC_WOPCM_SIZE_VALUE)) {
> +                drm_gem_object_unreference(&obj->base);
> +                return NULL;
How about reporting the right error code?
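Something along these lines, perhaps (untested sketch, with an int ret
declared up top; callers would then check IS_ERR() rather than NULL):

        /* propagate the actual errno rather than squashing it to NULL */
        ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
                                    PIN_OFFSET_BIAS | GUC_WOPCM_SIZE_VALUE);
        if (ret) {
                drm_gem_object_unreference(&obj->base);
                return ERR_PTR(ret);
        }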
> +        }
> +
> +        return obj;
> +}
> +
> +/**
> + * gem_release_guc_obj() - Release gem object allocated for GuC usage
> + * @obj: gem obj to be released
> + */
> +static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
> +{
> +        if (!obj)
> +                return;
> +
> +        if (i915_gem_obj_is_pinned(obj))
> +                i915_gem_object_ggtt_unpin(obj);
What? Only unpinning if the object happens to be pinned just papers over
an unbalanced pin count; the teardown should unconditionally mirror the
setup.
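If the allocation path always pins (as above), the release should simply
undo it, e.g. (sketch):

        /* every object from gem_allocate_guc_obj() is pinned, so unpin it */
        i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);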
> +        drm_gem_object_unreference(&obj->base);
> +}
> +
> +/*
> + * Set up the memory resources to be shared with the GuC. At this point,
> + * we require just one object that can be mapped through the GGTT.
> + */
> +int i915_guc_submission_init(struct drm_device *dev)
> +{
> +        struct drm_i915_private *dev_priv = dev->dev_private;
Bleh.
> +        const size_t ctxsize = sizeof(struct guc_context_desc);
> +        const size_t poolsize = MAX_GUC_GPU_CONTEXTS * ctxsize;
> +        const size_t gemsize = round_up(poolsize, PAGE_SIZE);
> +        struct intel_guc *guc = &dev_priv->guc;
> +
> +        if (!i915.enable_guc_submission)
> +                return 0; /* not enabled */
> +
> +        if (guc->ctx_pool_obj)
> +                return 0; /* already allocated */
Eh? Where have you hooked into... So looking at that, it looks like you
want to move this into the device initialisation rather than the GuC
firmware load. To me at least they are conceptually separate stages, and,
judging by the above, combining them has resulted in very clumsy code.
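Roughly speaking (sketch only, exact call site debatable), the one-off
allocation would live with the rest of driver initialisation and the
loader would only load:

        /* once, during driver/GEM initialisation */
        ret = i915_guc_submission_init(dev);
        if (ret)
                return ret;

        /* later, and possibly again across reset/resume */
        ret = intel_guc_ucode_load(dev, true);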
> +        guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
> +        if (!guc->ctx_pool_obj)
> +                return -ENOMEM;
> +
> +        spin_lock_init(&dev_priv->guc.host2guc_lock);
> +
> +        ida_init(&guc->ctx_ids);
> +
> +        memset(guc->doorbell_bitmap, 0, sizeof(guc->doorbell_bitmap));
> +        guc->db_cacheline = 0;
Before you relied on guc being zeroed, and now you memset it again.
> +
> +        return 0;
> +}
> +
> +void i915_guc_submission_fini(struct drm_device *dev)
> +{
> +        struct drm_i915_private *dev_priv = dev->dev_private;
> +        struct intel_guc *guc = &dev_priv->guc;
> +
> +        gem_release_guc_obj(dev_priv->guc.log_obj);
> +        guc->log_obj = NULL;
> +
> +        if (guc->ctx_pool_obj)
> +                ida_destroy(&guc->ctx_ids);
Interesting guard. Maybe just make the GuC controller a pointer from
i915 and then you can do a more natural if (i915->guc == NULL) return;
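i.e. something along these lines (sketch; assumes the guc struct is
kmalloc'ed during init and drm_i915_private grows a "struct intel_guc *guc"
member):

        void i915_guc_submission_fini(struct drm_i915_private *i915)
        {
                struct intel_guc *guc = i915->guc;

                /* the single pointer tells us whether there is anything to do */
                if (guc == NULL)
                        return;

                gem_release_guc_obj(guc->log_obj);
                ida_destroy(&guc->ctx_ids);
                gem_release_guc_obj(guc->ctx_pool_obj);

                i915->guc = NULL;
                kfree(guc);
        }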
> +        gem_release_guc_obj(guc->ctx_pool_obj);
> +        guc->ctx_pool_obj = NULL;
> +}
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 0b44265..06b68c2 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -171,4 +171,8 @@ extern void intel_guc_ucode_init(struct drm_device *dev);
> extern int intel_guc_ucode_load(struct drm_device *dev, bool wait);
> extern void intel_guc_ucode_fini(struct drm_device *dev);
>
> +/* i915_guc_submission.c */
> +int i915_guc_submission_init(struct drm_device *dev);
> +void i915_guc_submission_fini(struct drm_device *dev);
> +
> #endif
> diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
> index 16eef4c..0f74876 100644
> --- a/drivers/gpu/drm/i915/intel_guc_loader.c
> +++ b/drivers/gpu/drm/i915/intel_guc_loader.c
> @@ -111,6 +111,21 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
>                         i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
>         }
>
> +        /* If GuC scheduling is enabled, setup params here. */
> +        if (i915.enable_guc_submission) {
> +                u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
> +                u32 ctx_in_16 = MAX_GUC_GPU_CONTEXTS / 16;
So really you didn't need to pin the ctx_pool_obj until this point?
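If so, the pin (with the WOPCM bias) could simply be deferred to this
point, where the GGTT offset is first needed, e.g. (sketch; handling of
ret is left out since this function currently returns void):

        /* sketch: pin only when the GuC actually needs the GGTT address */
        ret = i915_gem_obj_ggtt_pin(dev_priv->guc.ctx_pool_obj, PAGE_SIZE,
                                    PIN_OFFSET_BIAS | GUC_WOPCM_SIZE_VALUE);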
> +
> +                pgs >>= PAGE_SHIFT;
> +                params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
> +                                          (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
> +
> +                params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
> +
> +                /* Unmask this bit to enable GuC scheduler */
> +                params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
        /* Enable multiple context submission through the GuC */
        params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
        params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
Try to keep comments that explain why rather than what. Most of the
comments here fall into the "i++; // postincrement i" category.
-Chris
--
Chris Wilson, Intel Open Source Technology Centre