[Intel-gfx] [PATCH 2/3] drm/i915: Complete kerneldoc for struct i915_gem_context

Daniel Vetter daniel at ffwll.ch
Fri Dec 30 19:13:44 UTC 2016


On Fri, Dec 30, 2016 at 09:27:20AM +0000, Chris Wilson wrote:
> The existing kerneldoc was outdated, so time for a refresh.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

A few bikesheds below; feel free to pick what you like and ignore the
others. Otherwise:

Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>

> ---
>  drivers/gpu/drm/i915/i915_drv.h            | 214 ++++++++++++++++++++++++-----
>  drivers/gpu/drm/i915/i915_gem.c            |  26 ++--
>  drivers/gpu/drm/i915/i915_gem_context.c    |  33 ++---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2 +-
>  drivers/gpu/drm/i915/i915_gpu_error.c      |   2 +-
>  drivers/gpu/drm/i915/intel_lrc.c           |   2 +-
>  6 files changed, 211 insertions(+), 68 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 00ecbb4da25e..c8617360c912 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1051,44 +1051,118 @@ enum i915_cache_level {
>  #define DEFAULT_CONTEXT_HANDLE 0
>  
>  /**
> - * struct i915_gem_context - as the name implies, represents a context.
> - * @ref: reference count.
> - * @user_handle: userspace tracking identity for this context.
> - * @remap_slice: l3 row remapping information.
> - * @flags: context specific flags:
> - *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
> - * @file_priv: filp associated with this context (NULL for global default
> - *	       context).
> - * @hang_stats: information about the role of this context in possible GPU
> - *		hangs.
> - * @ppgtt: virtual memory space used by this context.
> - * @legacy_hw_ctx: render context backing object and whether it is correctly
> - *                initialized (legacy ring submission mechanism only).
> - * @link: link in the global list of contexts.
> + * struct i915_gem_context - client state
>   *
> - * Contexts are memory images used by the hardware to store copies of their
> - * internal state.
> + * The struct i915_gem_context represents the combined view of the driver and
> + * logical hardware state for a particular client.
>   */
>  struct i915_gem_context {
> -	struct kref ref;
> +	/**
> +	 * @i915: i915 device backpointer
> +	 */
>  	struct drm_i915_private *i915;

Hot off the press, but 4.10 has a new one-line inline kerneldoc format:

	/** @i915: i915 device backpointer */

Only works if it's really just one line, but perfect for small stuff in
big structs.
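
Just to illustrate (a rough sketch only, member descriptions lifted from
your patch, not run through kernel-doc), the small members would then
shrink to:

	struct i915_gem_context {
		/** @i915: i915 device backpointer */
		struct drm_i915_private *i915;

		/** @file_priv: owning file descriptor */
		struct drm_i915_file_private *file_priv;

		/**
		 * @ppgtt: unique address space (GTT)
		 *
		 * Anything needing more than one line keeps the block form.
		 */
		struct i915_hw_ppgtt *ppgtt;

		/* ... */
	};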

Feel free to bikeshed while applying, or not.

> +
> +	/**
> +	 * @file_priv: owning file descriptor
> +	 */
>  	struct drm_i915_file_private *file_priv;
> +
> +	/**
> +	 * @ppgtt: unique address space (GTT)
> +	 *
> +	 * In full-ppgtt mode, each context has its own address space, ensuring
> +	 * complete separation of one client from all others.
> +	 */
>  	struct i915_hw_ppgtt *ppgtt;
> +
> +	/**
> +	 * @pid: process id of creator
> +	 *
> +	 * Note that whoever created the context may not be the principal user,
> +	 * as the context may be shared across a local socket. However,
> +	 * that should only affect the default context; all contexts created
> +	 * explicitly by the client are expected to be isolated.
> +	 */
>  	struct pid *pid;
> +
> +	/**
> +	 * @name: arbitrary name
> +	 *
> +	 * A name is constructed for the context from the creator's process
> +	 * name, pid and user handle in order to uniquely identify the
> +	 * context in messages.

... debug messages.

> +	 */
>  	const char *name;
>  
> +	/**
> +	 * @link: place within &drm_i915_private.context_list
> +	 */
> +	struct list_head link;
> +
> +	/**
> +	 * @ref: reference count
> +	 *
> +	 * A reference to a context is held by both the client who created it
> +	 * and on each request submitted to the hardware using the request
> +	 * (to ensure the hardware has access to the state until it has
> +	 * finished all pending writes).
> +	 */

For refcounts I like to point at the functions meant to manipulate them,
here i915_gem_context_get() and i915_gem_context_put(). Seems good
practice in general to mention the funcs used to manipulate a bit of
data.
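
Something like the below, maybe (exact wording entirely up to you):

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held by both the client who created it
	 * and on each request submitted to the hardware using the request
	 * (to ensure the hardware has access to the state until it has
	 * finished all pending writes). Take and release references with
	 * i915_gem_context_get() and i915_gem_context_put().
	 */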

> +	struct kref ref;
> +
> +	/**
> +	 * @flags: small set of booleans
> +	 */
>  	unsigned long flags;
>  #define CONTEXT_NO_ZEROMAP		BIT(0)
> -#define CONTEXT_NO_ERROR_CAPTURE	BIT(1)
> +#define CONTEXT_NO_ERROR_CAPTURE	1
> +#define CONTEXT_CLOSED			2
> +#define CONTEXT_BANNABLE		3
> +#define CONTEXT_BANNED			4
> +#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
>  
> -	/* Unique identifier for this context, used by the hw for tracking */
> +	/**
> +	 * @hw_id: unique identifier for the context
> +	 *
> +	 * The hardware needs to uniquely identify the context for a few
> +	 * functions like fault reporting, PASID, scheduling. The
> +	 * &drm_i915_private.context_hw_ida is used to assign a unique
> +	 * id for the lifetime of the context.
> +	 */
>  	unsigned int hw_id;
> +
> +	/**
> +	 * @user_handle: userspace identifier
> +	 *
> +	 * A unique per-file identifier is generated from
> +	 * &drm_i915_file_private.contexts.
> +	 */
>  	u32 user_handle;
> -	int priority; /* greater priorities are serviced first */
>  
> +	/**
> +	 * @priority: execution and service priority
> +	 *
> +	 * All clients are equal, but some are more equal than others!
> +	 *
> +	 * Requests from a context with a greater (more positive) value of
> +	 * @priority will be executed before those with a lower @priority
> +	 * value, forming a simple QoS.
> +	 *
> +	 * The kernel_context is assigned the minimum priority.
> +	 */
> +	int priority;
> +
> +	/**
> +	 * @ggtt_alignment: alignment restriction for context objects
> +	 */
>  	u32 ggtt_alignment;
> +	/**
> +	 * @ggtt_offset_bias: placement restriction for context objects
> +	 */
>  	u32 ggtt_offset_bias;
>  
> +	/**
> +	 * @engine: per-engine logical HW state
> +	 */
>  	struct intel_context {
>  		struct i915_vma *state;
>  		struct intel_ring *ring;
> @@ -1097,27 +1171,105 @@ struct i915_gem_context {
>  		int pin_count;
>  		bool initialised;
>  	} engine[I915_NUM_ENGINES];
> +
> +	/**
> +	 * @ring_size: size for allocating the per-engine ring buffer
> +	 */
>  	u32 ring_size;
> +	/**
> +	 * @desc_template: common/invariant fields for the HW context descriptor
> +	 */
>  	u32 desc_template;
> -	struct atomic_notifier_head status_notifier;
> -	bool execlists_force_single_submission;
> -
> -	struct list_head link;
>  
> -	u8 remap_slice;
> -	bool closed:1;
> -	bool bannable:1;
> -	bool banned:1;
> +	/**
> +	 * @status_notifier: notifier chain for context status change callbacks
> +	 */
> +	struct atomic_notifier_head status_notifier;
>  
> -	unsigned int guilty_count; /* guilty of a hang */
> -	unsigned int active_count; /* active during hang */
> +	/**
> +	 * @guilty_count: How many times this context has caused a GPU hang.
> +	 */
> +	unsigned int guilty_count;
> +	/**
> +	 * @active_count: How many times this context was active during a GPU
> +	 * hang, but did not cause it.
> +	 */
> +	unsigned int active_count;
>  
>  #define CONTEXT_SCORE_GUILTY		10
>  #define CONTEXT_SCORE_BAN_THRESHOLD	40
> -	/* Accumulated score of hangs caused by this context */
> +	/**
> +	 * @ban_score: Accumulated score of all hangs caused by this context.
> +	 */
>  	int ban_score;
> +
> +	/**
> +	 * @remap_slice: Bitmask of L3 slices that need remapping
> +	 */
> +	u8 remap_slice;
>  };
>  
> +static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
> +{
> +	return test_bit(CONTEXT_CLOSED, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
> +{
> +	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
> +	set_bit(CONTEXT_CLOSED, &ctx->flags);
> +}
> +
> +static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
> +{
> +	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
> +{
> +	set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_unset_no_error_capture(struct i915_gem_context *ctx)
> +{
> +	clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
> +}
> +
> +static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
> +{
> +	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
> +{
> +	set_bit(CONTEXT_BANNABLE, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_unset_bannable(struct i915_gem_context *ctx)
> +{
> +	clear_bit(CONTEXT_BANNABLE, &ctx->flags);
> +}
> +
> +static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
> +{
> +	return test_bit(CONTEXT_BANNED, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
> +{
> +	set_bit(CONTEXT_BANNED, &ctx->flags);
> +}
> +
> +static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
> +{
> +	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
> +}
> +
> +static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
> +{
> +	set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
> +}
> +
>  enum fb_op_origin {
>  	ORIGIN_GTT,
>  	ORIGIN_CPU,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 92560a776e17..d8760acf8001 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2656,34 +2656,24 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
>  	goto out_unlock;
>  }
>  
> -static bool i915_context_is_banned(const struct i915_gem_context *ctx)
> +static bool ban_context(const struct i915_gem_context *ctx)
>  {
> -	if (ctx->banned)
> -		return true;
> -
> -	if (!ctx->bannable)
> -		return false;
> -
> -	if (ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) {
> -		DRM_DEBUG("context hanging too often, banning!\n");
> -		return true;
> -	}
> -
> -	return false;
> +	return (i915_gem_context_is_bannable(ctx) &&
> +		ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
>  }
>  
>  static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
>  {
> -	ctx->ban_score += CONTEXT_SCORE_GUILTY;
> -
> -	ctx->banned = i915_context_is_banned(ctx);
>  	ctx->guilty_count++;
> +	ctx->ban_score += CONTEXT_SCORE_GUILTY;
> +	if (ban_context(ctx))
> +		i915_gem_context_set_banned(ctx);
>  
>  	DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
>  			 ctx->name, ctx->ban_score,
> -			 yesno(ctx->banned));
> +			 yesno(i915_gem_context_is_banned(ctx)));
>  
> -	if (!ctx->banned || IS_ERR_OR_NULL(ctx->file_priv))
> +	if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
>  		return;
>  
>  	ctx->file_priv->context_bans++;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 48e4ed5bb209..5c0c7064d532 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -141,7 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
>  
>  	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
>  	trace_i915_context_free(ctx);
> -	GEM_BUG_ON(!ctx->closed);
> +	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
>  
>  	i915_ppgtt_put(ctx->ppgtt);
>  
> @@ -228,8 +228,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm)
>  
>  static void context_close(struct i915_gem_context *ctx)
>  {
> -	GEM_BUG_ON(ctx->closed);
> -	ctx->closed = true;
> +	i915_gem_context_set_closed(ctx);
>  	if (ctx->ppgtt)
>  		i915_ppgtt_close(&ctx->ppgtt->base);
>  	ctx->file_priv = ERR_PTR(-EBADF);
> @@ -329,7 +328,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
>  	 * is no remap info, it will be a NOP. */
>  	ctx->remap_slice = ALL_L3_SLICES(dev_priv);
>  
> -	ctx->bannable = true;
> +	i915_gem_context_set_bannable(ctx);
>  	ctx->ring_size = 4 * PAGE_SIZE;
>  	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
>  			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
> @@ -418,8 +417,9 @@ i915_gem_context_create_gvt(struct drm_device *dev)
>  	if (IS_ERR(ctx))
>  		goto out;
>  
> -	ctx->closed = true; /* not user accessible */
> -	ctx->execlists_force_single_submission = true;
> +	i915_gem_context_set_closed(ctx); /* not user accessible */
> +	i915_gem_context_unset_bannable(ctx);
> +	i915_gem_context_set_force_single_submission(ctx);
>  	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
>  out:
>  	mutex_unlock(&dev->struct_mutex);
> @@ -468,6 +468,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
>  		return PTR_ERR(ctx);
>  	}
>  
> +	i915_gem_context_unset_bannable(ctx);
>  	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
>  	dev_priv->kernel_context = ctx;
>  
> @@ -1040,10 +1041,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>  			args->value = to_i915(dev)->ggtt.base.total;
>  		break;
>  	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
> -		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
> +		args->value = i915_gem_context_no_error_capture(ctx);
>  		break;
>  	case I915_CONTEXT_PARAM_BANNABLE:
> -		args->value = ctx->bannable;
> +		args->value = i915_gem_context_is_bannable(ctx);
>  		break;
>  	default:
>  		ret = -EINVAL;
> @@ -1085,22 +1086,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
>  		}
>  		break;
>  	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
> -		if (args->size) {
> +		if (args->size)
>  			ret = -EINVAL;
> -		} else {
> -			if (args->value)
> -				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
> -			else
> -				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
> -		}
> +		else if (args->value)
> +			i915_gem_context_set_no_error_capture(ctx);
> +		else
> +			i915_gem_context_unset_no_error_capture(ctx);
>  		break;
>  	case I915_CONTEXT_PARAM_BANNABLE:
>  		if (args->size)
>  			ret = -EINVAL;
>  		else if (!capable(CAP_SYS_ADMIN) && !args->value)
>  			ret = -EPERM;
> +		else if (args->value)
> +			i915_gem_context_set_bannable(ctx);
>  		else
> -			ctx->bannable = args->value;
> +			i915_gem_context_unset_bannable(ctx);
>  		break;
>  	default:
>  		ret = -EINVAL;
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index c64438f8171c..a5fe299da1d3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1238,7 +1238,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
>  	if (IS_ERR(ctx))
>  		return ctx;
>  
> -	if (ctx->banned) {
> +	if (i915_gem_context_is_banned(ctx)) {
>  		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
>  		return ERR_PTR(-EIO);
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index e16037d1b0ba..e34532d98004 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1331,7 +1331,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>  			}
>  
>  			error->simulated |=
> -				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
> +				i915_gem_context_no_error_capture(request->ctx);
>  
>  			ee->rq_head = request->head;
>  			ee->rq_post = request->postfix;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index fc64be1bdea7..227978820320 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -413,7 +413,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
>  static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
>  {
>  	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
> -		ctx->execlists_force_single_submission);
> +		i915_gem_context_force_single_submission(ctx));
>  }
>  
>  static bool can_merge_ctx(const struct i915_gem_context *prev,
> -- 
> 2.11.0
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

