[Intel-gfx] [PATCH 27/38] drm/i915: Split struct intel_context definition to its own header

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Mar 5 16:19:12 UTC 2019


On 01/03/2019 14:03, Chris Wilson wrote:
> This complex struct pulling in half the driver deserves its own
> isolation in preparation for intel_context becoming an outright
> complicated class of its own.
> 
> Splitting this beast into its own header also requires splitting
> several of its dependent types and their dependencies into their
> own headers.

I don't feel like I need to read this one in detail. If it compiles, it
should be good.
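
For reference, the *_types.h split being applied here is the usual
pattern for breaking include cycles: the types header carries only the
struct definitions and the minimal includes they need, while the main
header layers the API on top. A generic sketch of the shape (purely
illustrative, the names are made up and not from this patch):

  /* foo_types.h: bare definitions, minimal includes */
  #ifndef __FOO_TYPES_H__
  #define __FOO_TYPES_H__

  #include <linux/types.h>

  struct foo {
          u32 bar;
  };

  #endif /* __FOO_TYPES_H__ */

  /* foo.h: the API, built on top of the bare type */
  #include "foo_types.h"

  void foo_init(struct foo *f);

Headers that only need the type can then include foo_types.h (or just
forward declare struct foo) without dragging in the whole API.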

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Regards,

Tvrtko

> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_gem_context.h       | 245 +-------
>   drivers/gpu/drm/i915/i915_gem_context_types.h | 188 +++++++
>   drivers/gpu/drm/i915/i915_timeline.h          |  70 +--
>   drivers/gpu/drm/i915/i915_timeline_types.h    |  80 +++
>   drivers/gpu/drm/i915/intel_context.h          |  47 ++
>   drivers/gpu/drm/i915/intel_context_types.h    |  60 ++
>   drivers/gpu/drm/i915/intel_engine_types.h     | 521 ++++++++++++++++++
>   drivers/gpu/drm/i915/intel_guc.h              |   1 +
>   drivers/gpu/drm/i915/intel_ringbuffer.h       | 502 +----------------
>   drivers/gpu/drm/i915/intel_workarounds.h      |  13 +-
>   .../gpu/drm/i915/intel_workarounds_types.h    |  25 +
>   11 files changed, 928 insertions(+), 824 deletions(-)
>   create mode 100644 drivers/gpu/drm/i915/i915_gem_context_types.h
>   create mode 100644 drivers/gpu/drm/i915/i915_timeline_types.h
>   create mode 100644 drivers/gpu/drm/i915/intel_context.h
>   create mode 100644 drivers/gpu/drm/i915/intel_context_types.h
>   create mode 100644 drivers/gpu/drm/i915/intel_engine_types.h
>   create mode 100644 drivers/gpu/drm/i915/intel_workarounds_types.h
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
> index f09b5badbe73..110d5881c9de 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/i915_gem_context.h
> @@ -25,225 +25,17 @@
>   #ifndef __I915_GEM_CONTEXT_H__
>   #define __I915_GEM_CONTEXT_H__
>   
> -#include <linux/bitops.h>
> -#include <linux/list.h>
> -#include <linux/radix-tree.h>
> +#include "i915_gem_context_types.h"
>   
>   #include "i915_gem.h"
>   #include "i915_scheduler.h"
> +#include "intel_context.h"
>   #include "intel_device_info.h"
>   #include "intel_ringbuffer.h"
>   
> -struct pid;
> -
>   struct drm_device;
>   struct drm_file;
>   
> -struct drm_i915_private;
> -struct drm_i915_file_private;
> -struct i915_hw_ppgtt;
> -struct i915_request;
> -struct i915_timeline;
> -struct i915_vma;
> -struct intel_ring;
> -
> -#define DEFAULT_CONTEXT_HANDLE 0
> -
> -struct intel_context;
> -
> -struct intel_context_ops {
> -	void (*unpin)(struct intel_context *ce);
> -	void (*destroy)(struct intel_context *ce);
> -};
> -
> -/*
> - * Powergating configuration for a particular (context,engine).
> - */
> -struct intel_sseu {
> -	u8 slice_mask;
> -	u8 subslice_mask;
> -	u8 min_eus_per_subslice;
> -	u8 max_eus_per_subslice;
> -};
> -
> -/**
> - * struct i915_gem_context - client state
> - *
> - * The struct i915_gem_context represents the combined view of the driver and
> - * logical hardware state for a particular client.
> - */
> -struct i915_gem_context {
> -	/** i915: i915 device backpointer */
> -	struct drm_i915_private *i915;
> -
> -	/** file_priv: owning file descriptor */
> -	struct drm_i915_file_private *file_priv;
> -
> -	struct intel_engine_cs **engines;
> -
> -	struct i915_timeline *timeline;
> -
> -	/**
> -	 * @ppgtt: unique address space (GTT)
> -	 *
> -	 * In full-ppgtt mode, each context has its own address space ensuring
> -	 * complete separation of one client from all others.
> -	 *
> -	 * In other modes, this is a NULL pointer with the expectation that
> -	 * the caller uses the shared global GTT.
> -	 */
> -	struct i915_hw_ppgtt *ppgtt;
> -
> -	/**
> -	 * @pid: process id of creator
> -	 *
> -	 * Note that who created the context may not be the principal user,
> -	 * as the context may be shared across a local socket. However,
> -	 * that should only affect the default context; all contexts created
> -	 * explicitly by the client are expected to be isolated.
> -	 */
> -	struct pid *pid;
> -
> -	/**
> -	 * @name: arbitrary name
> -	 *
> -	 * A name is constructed for the context from the creator's process
> -	 * name, pid and user handle in order to uniquely identify the
> -	 * context in messages.
> -	 */
> -	const char *name;
> -
> -	/** link: place within &drm_i915_private.context_list */
> -	struct list_head link;
> -	struct llist_node free_link;
> -
> -	/**
> -	 * @ref: reference count
> -	 *
> -	 * A reference to a context is held by both the client who created it
> -	 * and on each request submitted to the hardware using the request
> -	 * (to ensure the hardware has access to the state until it has
> -	 * finished all pending writes). See i915_gem_context_get() and
> -	 * i915_gem_context_put() for access.
> -	 */
> -	struct kref ref;
> -
> -	/**
> -	 * @rcu: rcu_head for deferred freeing.
> -	 */
> -	struct rcu_head rcu;
> -
> -	/**
> -	 * @user_flags: small set of booleans controlled by the user
> -	 */
> -	unsigned long user_flags;
> -#define UCONTEXT_NO_ZEROMAP		0
> -#define UCONTEXT_NO_ERROR_CAPTURE	1
> -#define UCONTEXT_BANNABLE		2
> -#define UCONTEXT_RECOVERABLE		3
> -
> -	/**
> -	 * @flags: small set of booleans
> -	 */
> -	unsigned long flags;
> -#define CONTEXT_BANNED			0
> -#define CONTEXT_CLOSED			1
> -#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
> -
> -	unsigned int nengine;
> -
> -	/**
> -	 * @hw_id: - unique identifier for the context
> -	 *
> -	 * The hardware needs to uniquely identify the context for a few
> -	 * functions like fault reporting, PASID, scheduling. The
> -	 * &drm_i915_private.context_hw_ida is used to assign a unique
> -	 * id for the lifetime of the context.
> -	 *
> -	 * @hw_id_pin_count: - number of times this context has been pinned
> -	 * for use (should be, at most, once per engine).
> -	 *
> -	 * @hw_id_link: - all contexts with an assigned id are tracked
> -	 * for possible repossession.
> -	 */
> -	unsigned int hw_id;
> -	atomic_t hw_id_pin_count;
> -	struct list_head hw_id_link;
> -
> -	struct list_head active_engines;
> -	struct mutex mutex;
> -
> -	/**
> -	 * @user_handle: userspace identifier
> -	 *
> -	 * A unique per-file identifier is generated from
> -	 * &drm_i915_file_private.contexts.
> -	 */
> -	u32 user_handle;
> -
> -	struct i915_sched_attr sched;
> -
> -	/** engine: per-engine logical HW state */
> -	struct intel_context {
> -		struct i915_gem_context *gem_context;
> -		struct intel_engine_cs *engine;
> -		struct intel_engine_cs *active;
> -		struct list_head active_link;
> -		struct list_head signal_link;
> -		struct list_head signals;
> -		struct i915_vma *state;
> -		struct intel_ring *ring;
> -		u32 *lrc_reg_state;
> -		u64 lrc_desc;
> -		int pin_count;
> -
> -		/**
> -		 * active_tracker: Active tracker for the external rq activity
> -		 * on this intel_context object.
> -		 */
> -		struct i915_active_request active_tracker;
> -
> -		const struct intel_context_ops *ops;
> -
> -		/** sseu: Control eu/slice partitioning */
> -		struct intel_sseu sseu;
> -	} __engine[I915_NUM_ENGINES];
> -
> -	/** ring_size: size for allocating the per-engine ring buffer */
> -	u32 ring_size;
> -	/** desc_template: invariant fields for the HW context descriptor */
> -	u32 desc_template;
> -
> -	/** guilty_count: How many times this context has caused a GPU hang. */
> -	atomic_t guilty_count;
> -	/**
> -	 * @active_count: How many times this context was active during a GPU
> -	 * hang, but did not cause it.
> -	 */
> -	atomic_t active_count;
> -
> -	/**
> -	 * @hang_timestamp: The last time(s) this context caused a GPU hang
> -	 */
> -	unsigned long hang_timestamp[2];
> -#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
> -
> -	/** remap_slice: Bitmask of cache lines that need remapping */
> -	u8 remap_slice;
> -
> -	/** handles_vma: rbtree to look up our context specific obj/vma for
> -	 * the user handle. (user handles are per fd, but the binding is
> -	 * per vm, which may be one per context or shared with the global GTT)
> -	 */
> -	struct radix_tree_root handles_vma;
> -
> -	/** handles_list: reverse list of all the rbtree entries in use for
> -	 * this context, which allows us to free all the allocations on
> -	 * context close.
> -	 */
> -	struct list_head handles_list;
> -};
> -
>   static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
>   {
>   	return test_bit(CONTEXT_CLOSED, &ctx->flags);
> @@ -345,35 +137,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
>   	return !ctx->file_priv;
>   }
>   
> -static inline struct intel_context *
> -to_intel_context(struct i915_gem_context *ctx,
> -		 const struct intel_engine_cs *engine)
> -{
> -	return &ctx->__engine[engine->id];
> -}
> -
> -static inline struct intel_context *
> -intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
> -{
> -	return engine->context_pin(engine, ctx);
> -}
> -
> -static inline void __intel_context_pin(struct intel_context *ce)
> -{
> -	GEM_BUG_ON(!ce->pin_count);
> -	ce->pin_count++;
> -}
> -
> -static inline void intel_context_unpin(struct intel_context *ce)
> -{
> -	GEM_BUG_ON(!ce->pin_count);
> -	if (--ce->pin_count)
> -		return;
> -
> -	GEM_BUG_ON(!ce->ops);
> -	ce->ops->unpin(ce);
> -}
> -
>   /* i915_gem_context.c */
>   int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
>   void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
> @@ -422,10 +185,6 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
>   	kref_put(&ctx->ref, i915_gem_context_release);
>   }
>   
> -void intel_context_init(struct intel_context *ce,
> -			struct i915_gem_context *ctx,
> -			struct intel_engine_cs *engine);
> -
>   struct i915_lut_handle *i915_lut_handle_alloc(void);
>   void i915_lut_handle_free(struct i915_lut_handle *lut);
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
> new file mode 100644
> index 000000000000..8baa7a5e7fdb
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
> @@ -0,0 +1,188 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef __I915_GEM_CONTEXT_TYPES_H__
> +#define __I915_GEM_CONTEXT_TYPES_H__
> +
> +#include <linux/atomic.h>
> +#include <linux/list.h>
> +#include <linux/llist.h>
> +#include <linux/kref.h>
> +#include <linux/mutex.h>
> +#include <linux/radix-tree.h>
> +#include <linux/rcupdate.h>
> +#include <linux/types.h>
> +
> +#include "i915_gem.h" /* I915_NUM_ENGINES */
> +#include "i915_scheduler.h"
> +#include "intel_context_types.h"
> +
> +struct pid;
> +
> +struct drm_i915_private;
> +struct drm_i915_file_private;
> +struct i915_hw_ppgtt;
> +struct i915_timeline;
> +struct intel_ring;
> +
> +/**
> + * struct i915_gem_context - client state
> + *
> + * The struct i915_gem_context represents the combined view of the driver and
> + * logical hardware state for a particular client.
> + */
> +struct i915_gem_context {
> +	/** i915: i915 device backpointer */
> +	struct drm_i915_private *i915;
> +
> +	/** file_priv: owning file descriptor */
> +	struct drm_i915_file_private *file_priv;
> +
> +	struct intel_engine_cs **engines;
> +
> +	struct i915_timeline *timeline;
> +
> +	/**
> +	 * @ppgtt: unique address space (GTT)
> +	 *
> +	 * In full-ppgtt mode, each context has its own address space ensuring
> +	 * complete separation of one client from all others.
> +	 *
> +	 * In other modes, this is a NULL pointer with the expectation that
> +	 * the caller uses the shared global GTT.
> +	 */
> +	struct i915_hw_ppgtt *ppgtt;
> +
> +	/**
> +	 * @pid: process id of creator
> +	 *
> +	 * Note that who created the context may not be the principal user,
> +	 * as the context may be shared across a local socket. However,
> +	 * that should only affect the default context; all contexts created
> +	 * explicitly by the client are expected to be isolated.
> +	 */
> +	struct pid *pid;
> +
> +	/**
> +	 * @name: arbitrary name
> +	 *
> +	 * A name is constructed for the context from the creator's process
> +	 * name, pid and user handle in order to uniquely identify the
> +	 * context in messages.
> +	 */
> +	const char *name;
> +
> +	/** link: place within &drm_i915_private.context_list */
> +	struct list_head link;
> +	struct llist_node free_link;
> +
> +	/**
> +	 * @ref: reference count
> +	 *
> +	 * A reference to a context is held by both the client who created it
> +	 * and on each request submitted to the hardware using the request
> +	 * (to ensure the hardware has access to the state until it has
> +	 * finished all pending writes). See i915_gem_context_get() and
> +	 * i915_gem_context_put() for access.
> +	 */
> +	struct kref ref;
> +
> +	/**
> +	 * @rcu: rcu_head for deferred freeing.
> +	 */
> +	struct rcu_head rcu;
> +
> +	/**
> +	 * @user_flags: small set of booleans controlled by the user
> +	 */
> +	unsigned long user_flags;
> +#define UCONTEXT_NO_ZEROMAP		0
> +#define UCONTEXT_NO_ERROR_CAPTURE	1
> +#define UCONTEXT_BANNABLE		2
> +#define UCONTEXT_RECOVERABLE		3
> +
> +	/**
> +	 * @flags: small set of booleans
> +	 */
> +	unsigned long flags;
> +#define CONTEXT_BANNED			0
> +#define CONTEXT_CLOSED			1
> +#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
> +
> +	unsigned int nengine;
> +
> +	/**
> +	 * @hw_id: - unique identifier for the context
> +	 *
> +	 * The hardware needs to uniquely identify the context for a few
> +	 * functions like fault reporting, PASID, scheduling. The
> +	 * &drm_i915_private.context_hw_ida is used to assign a unique
> +	 * id for the lifetime of the context.
> +	 *
> +	 * @hw_id_pin_count: - number of times this context has been pinned
> +	 * for use (should be, at most, once per engine).
> +	 *
> +	 * @hw_id_link: - all contexts with an assigned id are tracked
> +	 * for possible repossession.
> +	 */
> +	unsigned int hw_id;
> +	atomic_t hw_id_pin_count;
> +	struct list_head hw_id_link;
> +
> +	struct list_head active_engines;
> +	struct mutex mutex;
> +
> +	/**
> +	 * @user_handle: userspace identifier
> +	 *
> +	 * A unique per-file identifier is generated from
> +	 * &drm_i915_file_private.contexts.
> +	 */
> +	u32 user_handle;
> +#define DEFAULT_CONTEXT_HANDLE 0
> +
> +	struct i915_sched_attr sched;
> +
> +	/** engine: per-engine logical HW state */
> +	struct intel_context __engine[I915_NUM_ENGINES];
> +
> +	/** ring_size: size for allocating the per-engine ring buffer */
> +	u32 ring_size;
> +	/** desc_template: invariant fields for the HW context descriptor */
> +	u32 desc_template;
> +
> +	/** guilty_count: How many times this context has caused a GPU hang. */
> +	atomic_t guilty_count;
> +	/**
> +	 * @active_count: How many times this context was active during a GPU
> +	 * hang, but did not cause it.
> +	 */
> +	atomic_t active_count;
> +
> +	/**
> +	 * @hang_timestamp: The last time(s) this context caused a GPU hang
> +	 */
> +	unsigned long hang_timestamp[2];
> +#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
> +
> +	/** remap_slice: Bitmask of cache lines that need remapping */
> +	u8 remap_slice;
> +
> +	/** handles_vma: rbtree to look up our context specific obj/vma for
> +	 * the user handle. (user handles are per fd, but the binding is
> +	 * per vm, which may be one per context or shared with the global GTT)
> +	 */
> +	struct radix_tree_root handles_vma;
> +
> +	/** handles_list: reverse list of all the rbtree entries in use for
> +	 * this context, which allows us to free all the allocations on
> +	 * context close.
> +	 */
> +	struct list_head handles_list;
> +};
> +
> +#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
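
A note while the context flags scroll past: both flags words hold
plain bit numbers and are operated on with the test_bit()/set_bit()
family, as the accessors that stay behind in i915_gem_context.h show.
A sketch of the shape, mirroring i915_gem_context_is_closed() above
(this particular helper is illustrative, not part of the patch):

  static inline bool
  example_context_is_bannable(const struct i915_gem_context *ctx)
  {
          return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
  }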
> diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
> index 60b1dfad93ed..9126c8206490 100644
> --- a/drivers/gpu/drm/i915/i915_timeline.h
> +++ b/drivers/gpu/drm/i915/i915_timeline.h
> @@ -25,76 +25,10 @@
>   #ifndef I915_TIMELINE_H
>   #define I915_TIMELINE_H
>   
> -#include <linux/list.h>
> -#include <linux/kref.h>
> +#include <linux/lockdep.h>
>   
> -#include "i915_active.h"
> -#include "i915_request.h"
>   #include "i915_syncmap.h"
> -#include "i915_utils.h"
> -
> -struct i915_vma;
> -struct i915_timeline_cacheline;
> -
> -struct i915_timeline {
> -	u64 fence_context;
> -	u32 seqno;
> -
> -	spinlock_t lock;
> -#define TIMELINE_CLIENT 0 /* default subclass */
> -#define TIMELINE_ENGINE 1
> -
> -	struct mutex mutex; /* protects the flow of requests */
> -
> -	unsigned int pin_count;
> -	const u32 *hwsp_seqno;
> -	struct i915_vma *hwsp_ggtt;
> -	u32 hwsp_offset;
> -
> -	struct i915_timeline_cacheline *hwsp_cacheline;
> -
> -	bool has_initial_breadcrumb;
> -
> -	/**
> -	 * List of breadcrumbs associated with GPU requests currently
> -	 * outstanding.
> -	 */
> -	struct list_head requests;
> -
> -	/* Contains an RCU guarded pointer to the last request. No reference is
> -	 * held to the request, users must carefully acquire a reference to
> -	 * the request using i915_active_request_get_request_rcu(), or hold the
> -	 * struct_mutex.
> -	 */
> -	struct i915_active_request last_request;
> -
> -	/**
> -	 * We track the most recent seqno that we wait on in every context so
> -	 * that we only have to emit a new await and dependency on a more
> -	 * recent sync point. As the contexts may be executed out-of-order, we
> -	 * have to track each individually and can not rely on an absolute
> -	 * global_seqno. When we know that all tracked fences are completed
> -	 * (i.e. when the driver is idle), we know that the syncmap is
> -	 * redundant and we can discard it without loss of generality.
> -	 */
> -	struct i915_syncmap *sync;
> -
> -	/**
> -	 * Barrier provides the ability to serialize ordering between different
> -	 * timelines.
> -	 *
> -	 * Users can call i915_timeline_set_barrier which will make all
> -	 * subsequent submissions to this timeline be executed only after the
> -	 * barrier has been completed.
> -	 */
> -	struct i915_active_request barrier;
> -
> -	struct list_head link;
> -	const char *name;
> -	struct drm_i915_private *i915;
> -
> -	struct kref kref;
> -};
> +#include "i915_timeline_types.h"
>   
>   int i915_timeline_init(struct drm_i915_private *i915,
>   		       struct i915_timeline *tl,
> diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
> new file mode 100644
> index 000000000000..8ff146dc05ba
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/i915_timeline_types.h
> @@ -0,0 +1,80 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2016 Intel Corporation
> + */
> +
> +#ifndef __I915_TIMELINE_TYPES_H__
> +#define __I915_TIMELINE_TYPES_H__
> +
> +#include <linux/list.h>
> +#include <linux/kref.h>
> +#include <linux/types.h>
> +
> +#include "i915_active.h"
> +
> +struct drm_i915_private;
> +struct i915_vma;
> +struct i915_timeline_cacheline;
> +struct i915_syncmap;
> +
> +struct i915_timeline {
> +	u64 fence_context;
> +	u32 seqno;
> +
> +	spinlock_t lock;
> +#define TIMELINE_CLIENT 0 /* default subclass */
> +#define TIMELINE_ENGINE 1
> +	struct mutex mutex; /* protects the flow of requests */
> +
> +	unsigned int pin_count;
> +	const u32 *hwsp_seqno;
> +	struct i915_vma *hwsp_ggtt;
> +	u32 hwsp_offset;
> +
> +	struct i915_timeline_cacheline *hwsp_cacheline;
> +
> +	bool has_initial_breadcrumb;
> +
> +	/**
> +	 * List of breadcrumbs associated with GPU requests currently
> +	 * outstanding.
> +	 */
> +	struct list_head requests;
> +
> +	/* Contains an RCU guarded pointer to the last request. No reference is
> +	 * held to the request, users must carefully acquire a reference to
> +	 * the request using i915_active_request_get_request_rcu(), or hold the
> +	 * struct_mutex.
> +	 */
> +	struct i915_active_request last_request;
> +
> +	/**
> +	 * We track the most recent seqno that we wait on in every context so
> +	 * that we only have to emit a new await and dependency on a more
> +	 * recent sync point. As the contexts may be executed out-of-order, we
> +	 * have to track each individually and can not rely on an absolute
> +	 * global_seqno. When we know that all tracked fences are completed
> +	 * (i.e. when the driver is idle), we know that the syncmap is
> +	 * redundant and we can discard it without loss of generality.
> +	 */
> +	struct i915_syncmap *sync;
> +
> +	/**
> +	 * Barrier provides the ability to serialize ordering between different
> +	 * timelines.
> +	 *
> +	 * Users can call i915_timeline_set_barrier which will make all
> +	 * subsequent submissions to this timeline be executed only after the
> +	 * barrier has been completed.
> +	 */
> +	struct i915_active_request barrier;
> +
> +	struct list_head link;
> +	const char *name;
> +	struct drm_i915_private *i915;
> +
> +	struct kref kref;
> +};
> +
> +#endif /* __I915_TIMELINE_TYPES_H__ */
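
TIMELINE_CLIENT/TIMELINE_ENGINE are lockdep subclasses for the
spinlock above. Their consumer is not part of this patch; as a hedged
sketch, the standard kernel idiom for using them would be:

  spin_lock_init(&tl->lock);
  lockdep_set_subclass(&tl->lock, TIMELINE_ENGINE); /* engine-owned timeline */

which lets lockdep distinguish client timeline locks from engine
timeline locks when both are held at once.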
> diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
> new file mode 100644
> index 000000000000..dd947692bb0b
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_context.h
> @@ -0,0 +1,47 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef __INTEL_CONTEXT_H__
> +#define __INTEL_CONTEXT_H__
> +
> +#include "i915_gem_context_types.h"
> +#include "intel_context_types.h"
> +#include "intel_engine_types.h"
> +
> +void intel_context_init(struct intel_context *ce,
> +			struct i915_gem_context *ctx,
> +			struct intel_engine_cs *engine);
> +
> +static inline struct intel_context *
> +to_intel_context(struct i915_gem_context *ctx,
> +		 const struct intel_engine_cs *engine)
> +{
> +	return &ctx->__engine[engine->id];
> +}
> +
> +static inline struct intel_context *
> +intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
> +{
> +	return engine->context_pin(engine, ctx);
> +}
> +
> +static inline void __intel_context_pin(struct intel_context *ce)
> +{
> +	GEM_BUG_ON(!ce->pin_count);
> +	ce->pin_count++;
> +}
> +
> +static inline void intel_context_unpin(struct intel_context *ce)
> +{
> +	GEM_BUG_ON(!ce->pin_count);
> +	if (--ce->pin_count)
> +		return;
> +
> +	GEM_BUG_ON(!ce->ops);
> +	ce->ops->unpin(ce);
> +}
> +
> +#endif /* __INTEL_CONTEXT_H__ */
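
The pin/unpin helpers keep their nesting contract across the move.
Roughly, the intended lifecycle looks like this (a sketch with error
handling and locking elided, not code from the patch):

  struct intel_context *ce;

  ce = intel_context_pin(ctx, engine); /* first pin, via engine->context_pin() */
  if (IS_ERR(ce))
          return PTR_ERR(ce);

  __intel_context_pin(ce);   /* nested pin only bumps ce->pin_count */

  intel_context_unpin(ce);   /* drops the nested pin */
  intel_context_unpin(ce);   /* last unpin calls ce->ops->unpin(ce) */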
> diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
> new file mode 100644
> index 000000000000..16e1306e9595
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_context_types.h
> @@ -0,0 +1,60 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef __INTEL_CONTEXT_TYPES__
> +#define __INTEL_CONTEXT_TYPES__
> +
> +#include <linux/list.h>
> +#include <linux/types.h>
> +
> +#include "i915_active_types.h"
> +
> +struct i915_gem_context;
> +struct i915_vma;
> +struct intel_context;
> +struct intel_ring;
> +
> +struct intel_context_ops {
> +	void (*unpin)(struct intel_context *ce);
> +	void (*destroy)(struct intel_context *ce);
> +};
> +
> +/*
> + * Powergating configuration for a particular (context,engine).
> + */
> +struct intel_sseu {
> +	u8 slice_mask;
> +	u8 subslice_mask;
> +	u8 min_eus_per_subslice;
> +	u8 max_eus_per_subslice;
> +};
> +
> +struct intel_context {
> +	struct i915_gem_context *gem_context;
> +	struct intel_engine_cs *engine;
> +	struct intel_engine_cs *active;
> +	struct list_head active_link;
> +	struct list_head signal_link;
> +	struct list_head signals;
> +	struct i915_vma *state;
> +	struct intel_ring *ring;
> +	u32 *lrc_reg_state;
> +	u64 lrc_desc;
> +	int pin_count;
> +
> +	/**
> +	 * active_tracker: Active tracker for the external rq activity
> +	 * on this intel_context object.
> +	 */
> +	struct i915_active_request active_tracker;
> +
> +	const struct intel_context_ops *ops;
> +
> +	/** sseu: Control eu/slice partitioning */
> +	struct intel_sseu sseu;
> +};
> +
> +#endif /* __INTEL_CONTEXT_TYPES__ */
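
The ops table is the hook point for the submission backends. Purely to
illustrate the shape (names hypothetical, not from this patch), a
backend supplies something like:

  static void example_context_unpin(struct intel_context *ce)
  {
          /* release whatever the backend's pin acquired */
  }

  static void example_context_destroy(struct intel_context *ce)
  {
          /* final teardown once the context is no longer referenced */
  }

  static const struct intel_context_ops example_context_ops = {
          .unpin   = example_context_unpin,
          .destroy = example_context_destroy,
  };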
> diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
> new file mode 100644
> index 000000000000..5ec6e72d0ffb
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_engine_types.h
> @@ -0,0 +1,521 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef __INTEL_ENGINE_TYPES__
> +#define __INTEL_ENGINE_TYPES__
> +
> +#include <linux/hashtable.h>
> +#include <linux/irq_work.h>
> +#include <linux/list.h>
> +#include <linux/types.h>
> +
> +#include "i915_timeline_types.h"
> +#include "intel_device_info.h"
> +#include "intel_workarounds_types.h"
> +
> +#include "i915_gem_batch_pool.h"
> +#include "i915_pmu.h"
> +
> +#define I915_MAX_SLICES	3
> +#define I915_MAX_SUBSLICES 8
> +
> +#define I915_CMD_HASH_ORDER 9
> +
> +struct drm_i915_reg_table;
> +struct i915_gem_context;
> +struct i915_request;
> +struct i915_sched_attr;
> +
> +struct intel_hw_status_page {
> +	struct i915_vma *vma;
> +	u32 *addr;
> +};
> +
> +struct intel_instdone {
> +	u32 instdone;
> +	/* The following exist only in the RCS engine */
> +	u32 slice_common;
> +	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
> +	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
> +};
> +
> +struct intel_engine_hangcheck {
> +	u64 acthd;
> +	u32 last_seqno;
> +	u32 next_seqno;
> +	unsigned long action_timestamp;
> +	struct intel_instdone instdone;
> +};
> +
> +struct intel_ring {
> +	struct i915_vma *vma;
> +	void *vaddr;
> +
> +	struct i915_timeline *timeline;
> +	struct list_head request_list;
> +	struct list_head active_link;
> +
> +	u32 head;
> +	u32 tail;
> +	u32 emit;
> +
> +	u32 space;
> +	u32 size;
> +	u32 effective_size;
> +};
> +
> +/*
> + * We use a single page to load ctx workarounds, so all of these
> + * values are referred to in terms of dwords.
> + *
> + * struct i915_wa_ctx_bb:
> + *  offset: specifies the batch starting position, also helpful in case
> + *    we want to have multiple batches at different offsets based on
> + *    some criteria. It is not a requirement at the moment but provides
> + *    an option for future use.
> + *  size: size of the batch in DWORDS
> + */
> +struct i915_ctx_workarounds {
> +	struct i915_wa_ctx_bb {
> +		u32 offset;
> +		u32 size;
> +	} indirect_ctx, per_ctx;
> +	struct i915_vma *vma;
> +};
> +
> +#define I915_MAX_VCS	4
> +#define I915_MAX_VECS	2
> +
> +/*
> + * Engine ID definitions.
> + * Keep instances of the same engine type together.
> + */
> +enum intel_engine_id {
> +	RCS = 0,
> +	BCS,
> +	VCS,
> +	VCS2,
> +	VCS3,
> +	VCS4,
> +#define _VCS(n) (VCS + (n))
> +	VECS,
> +	VECS2
> +#define _VECS(n) (VECS + (n))
> +};
> +
> +struct st_preempt_hang {
> +	struct completion completion;
> +	unsigned int count;
> +	bool inject_hang;
> +};
> +
> +/**
> + * struct intel_engine_execlists - execlist submission queue and port state
> + *
> + * The struct intel_engine_execlists represents the combined logical state of
> + * driver and the hardware state for execlist mode of submission.
> + */
> +struct intel_engine_execlists {
> +	/**
> +	 * @tasklet: softirq tasklet for bottom handler
> +	 */
> +	struct tasklet_struct tasklet;
> +
> +	/**
> +	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
> +	 */
> +	struct i915_priolist default_priolist;
> +
> +	/**
> +	 * @no_priolist: priority lists disabled
> +	 */
> +	bool no_priolist;
> +
> +	/**
> +	 * @submit_reg: gen-specific execlist submission register
> +	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
> +	 * the ExecList Submission Queue Contents register array for Gen11+
> +	 */
> +	u32 __iomem *submit_reg;
> +
> +	/**
> +	 * @ctrl_reg: the enhanced execlists control register, used to load the
> +	 * submit queue on the HW and to request preemptions to idle
> +	 */
> +	u32 __iomem *ctrl_reg;
> +
> +	/**
> +	 * @port: execlist port states
> +	 *
> +	 * For each hardware ELSP (ExecList Submission Port) we keep
> +	 * track of the last request and the number of times we submitted
> +	 * that port to hw. We then count the number of times the hw reports
> +	 * a context completion or preemption. As only one context can
> +	 * be active on hw, we limit resubmission of a context to port[0].
> +	 * This is called a Lite Restore of the context.
> +	 */
> +	struct execlist_port {
> +		/**
> +		 * @request_count: combined request and submission count
> +		 */
> +		struct i915_request *request_count;
> +#define EXECLIST_COUNT_BITS 2
> +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
> +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
> +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
> +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
> +#define port_set(p, packed) ((p)->request_count = (packed))
> +#define port_isset(p) ((p)->request_count)
> +#define port_index(p, execlists) ((p) - (execlists)->port)
> +
> +		/**
> +		 * @context_id: context ID for port
> +		 */
> +		GEM_DEBUG_DECL(u32 context_id);
> +
> +#define EXECLIST_MAX_PORTS 2
> +	} port[EXECLIST_MAX_PORTS];
> +
> +	/**
> +	 * @active: is the HW active? We consider the HW as active after
> +	 * submitting any context for execution and until we have seen the
> +	 * last context completion event. After that, we do not expect any
> +	 * more events until we submit, and so can park the HW.
> +	 *
> +	 * As we have a small number of different sources from which we feed
> +	 * the HW, we track the state of each inside a single bitfield.
> +	 */
> +	unsigned int active;
> +#define EXECLISTS_ACTIVE_USER 0
> +#define EXECLISTS_ACTIVE_PREEMPT 1
> +#define EXECLISTS_ACTIVE_HWACK 2
> +
> +	/**
> +	 * @port_mask: number of execlist ports - 1
> +	 */
> +	unsigned int port_mask;
> +
> +	/**
> +	 * @queue_priority_hint: Highest pending priority.
> +	 *
> +	 * When we add requests into the queue, or adjust the priority of
> +	 * executing requests, we compute the maximum priority of those
> +	 * pending requests. We can then use this value to determine if
> +	 * we need to preempt the executing requests to service the queue.
> +	 * However, since we may have recorded the priority of an inflight
> +	 * request we wanted to preempt that has since completed, the
> +	 * priority hint at the time of dequeuing may no longer match the
> +	 * highest available request priority.
> +	 */
> +	int queue_priority_hint;
> +
> +	/**
> +	 * @queue: queue of requests, in priority lists
> +	 */
> +	struct rb_root_cached queue;
> +
> +	/**
> +	 * @csb_write: control register for Context Switch buffer
> +	 *
> +	 * Note this register may be either mmio or HWSP shadow.
> +	 */
> +	u32 *csb_write;
> +
> +	/**
> +	 * @csb_status: status array for Context Switch buffer
> +	 *
> +	 * Note these registers may be either mmio or HWSP shadow.
> +	 */
> +	u32 *csb_status;
> +
> +	/**
> +	 * @preempt_complete_status: expected CSB upon completing preemption
> +	 */
> +	u32 preempt_complete_status;
> +
> +	/**
> +	 * @csb_head: context status buffer head
> +	 */
> +	u8 csb_head;
> +
> +	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
> +};
> +
> +#define INTEL_ENGINE_CS_MAX_NAME 8
> +
> +struct intel_engine_cs {
> +	struct drm_i915_private *i915;
> +	char name[INTEL_ENGINE_CS_MAX_NAME];
> +
> +	enum intel_engine_id id;
> +	unsigned int hw_id;
> +	unsigned int guc_id;
> +	intel_engine_mask_t mask;
> +
> +	u8 uabi_class;
> +
> +	u8 class;
> +	u8 instance;
> +	u32 context_size;
> +	u32 mmio_base;
> +
> +	struct intel_ring *buffer;
> +
> +	struct i915_timeline timeline;
> +
> +	struct drm_i915_gem_object *default_state;
> +	void *pinned_default_state;
> +
> +	/* Rather than have every client wait upon all user interrupts,
> +	 * with the herd waking after every interrupt and each doing the
> +	 * heavyweight seqno dance, we delegate the task (of being the
> +	 * bottom-half of the user interrupt) to the first client. After
> +	 * every interrupt, we wake up one client, who does the heavyweight
> +	 * coherent seqno read and either goes back to sleep (if incomplete),
> +	 * or wakes up all the completed clients in parallel, before then
> +	 * transferring the bottom-half status to the next client in the queue.
> +	 *
> +	 * Compared to walking the entire list of waiters in a single dedicated
> +	 * bottom-half, we reduce the latency of the first waiter by avoiding
> +	 * a context switch, but incur additional coherent seqno reads when
> +	 * following the chain of request breadcrumbs. Since it is most likely
> +	 * that we have a single client waiting on each seqno, reducing
> +	 * the overhead of waking that client is much preferred.
> +	 */
> +	struct intel_breadcrumbs {
> +		spinlock_t irq_lock;
> +		struct list_head signalers;
> +
> +		struct irq_work irq_work; /* for use from inside irq_lock */
> +
> +		unsigned int irq_enabled;
> +
> +		bool irq_armed;
> +	} breadcrumbs;
> +
> +	struct intel_engine_pmu {
> +		/**
> +		 * @enable: Bitmask of enabled sample events on this engine.
> +		 *
> +		 * Bits correspond to sample event types, for instance
> +		 * I915_SAMPLE_QUEUED is bit 0 etc.
> +		 */
> +		u32 enable;
> +		/**
> +		 * @enable_count: Reference count for the enabled samplers.
> +		 *
> +		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
> +		 */
> +		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
> +		/**
> +		 * @sample: Counter values for sampling events.
> +		 *
> +		 * Our internal timer stores the current counters in this field.
> +		 *
> +		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
> +		 */
> +		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
> +	} pmu;
> +
> +	/*
> +	 * A pool of objects to use as shadow copies of client batch buffers
> +	 * when the command parser is enabled. Prevents the client from
> +	 * modifying the batch contents after software parsing.
> +	 */
> +	struct i915_gem_batch_pool batch_pool;
> +
> +	struct intel_hw_status_page status_page;
> +	struct i915_ctx_workarounds wa_ctx;
> +	struct i915_wa_list ctx_wa_list;
> +	struct i915_wa_list wa_list;
> +	struct i915_wa_list whitelist;
> +
> +	u32             irq_keep_mask; /* always keep these interrupts */
> +	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
> +	void		(*irq_enable)(struct intel_engine_cs *engine);
> +	void		(*irq_disable)(struct intel_engine_cs *engine);
> +
> +	int		(*init_hw)(struct intel_engine_cs *engine);
> +
> +	struct {
> +		void (*prepare)(struct intel_engine_cs *engine);
> +		void (*reset)(struct intel_engine_cs *engine, bool stalled);
> +		void (*finish)(struct intel_engine_cs *engine);
> +	} reset;
> +
> +	void		(*park)(struct intel_engine_cs *engine);
> +	void		(*unpark)(struct intel_engine_cs *engine);
> +
> +	void		(*set_default_submission)(struct intel_engine_cs *engine);
> +
> +	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
> +					     struct i915_gem_context *ctx);
> +
> +	int		(*request_alloc)(struct i915_request *rq);
> +	int		(*init_context)(struct i915_request *rq);
> +
> +	int		(*emit_flush)(struct i915_request *request, u32 mode);
> +#define EMIT_INVALIDATE	BIT(0)
> +#define EMIT_FLUSH	BIT(1)
> +#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
> +	int		(*emit_bb_start)(struct i915_request *rq,
> +					 u64 offset, u32 length,
> +					 unsigned int dispatch_flags);
> +#define I915_DISPATCH_SECURE BIT(0)
> +#define I915_DISPATCH_PINNED BIT(1)
> +	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
> +	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
> +						 u32 *cs);
> +	unsigned int	emit_fini_breadcrumb_dw;
> +
> +	/* Pass the request to the hardware queue (e.g. directly into
> +	 * the legacy ringbuffer or to the end of an execlist).
> +	 *
> +	 * This is called from an atomic context with irqs disabled; must
> +	 * be irq safe.
> +	 */
> +	void		(*submit_request)(struct i915_request *rq);
> +
> +	/*
> +	 * Call when the priority on a request has changed and it and its
> +	 * dependencies may need rescheduling. Note the request itself may
> +	 * not be ready to run!
> +	 */
> +	void		(*schedule)(struct i915_request *request,
> +				    const struct i915_sched_attr *attr);
> +
> +	/*
> +	 * Cancel all requests on the hardware, or queued for execution.
> +	 * This should only cancel the ready requests that have been
> +	 * submitted to the engine (via the engine->submit_request callback).
> +	 * This is called when marking the device as wedged.
> +	 */
> +	void		(*cancel_requests)(struct intel_engine_cs *engine);
> +
> +	void		(*cleanup)(struct intel_engine_cs *engine);
> +
> +	struct intel_engine_execlists execlists;
> +
> +	/* Contexts are pinned whilst they are active on the GPU. The last
> +	 * context executed remains active whilst the GPU is idle - the
> +	 * switch away and write to the context object only occurs on the
> +	 * next execution.  Contexts are only unpinned on retirement of the
> +	 * following request ensuring that we can always write to the object
> +	 * on the context switch even after idling. Across suspend, we switch
> +	 * to the kernel context and trash it as the save may not happen
> +	 * before the hardware is powered down.
> +	 */
> +	struct intel_context *last_retired_context;
> +
> +	/* status_notifier: list of callbacks for context-switch changes */
> +	struct atomic_notifier_head context_status_notifier;
> +
> +	struct intel_engine_hangcheck hangcheck;
> +
> +#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
> +#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
> +#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
> +#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
> +	unsigned int flags;
> +
> +	/*
> +	 * Table of commands the command parser needs to know about
> +	 * for this engine.
> +	 */
> +	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
> +
> +	/*
> +	 * Table of registers allowed in commands that read/write registers.
> +	 */
> +	const struct drm_i915_reg_table *reg_tables;
> +	int reg_table_count;
> +
> +	/*
> +	 * Returns the bitmask for the length field of the specified command.
> +	 * Return 0 for an unrecognized/invalid command.
> +	 *
> +	 * If the command parser finds an entry for a command in the engine's
> +	 * cmd_tables, it gets the command's length based on the table entry.
> +	 * If not, it calls this function to determine the per-engine length
> +	 * field encoding for the command (i.e. different opcode ranges use
> +	 * certain bits to encode the command length in the header).
> +	 */
> +	u32 (*get_cmd_length_mask)(u32 cmd_header);
> +
> +	struct {
> +		/**
> +		 * @lock: Lock protecting the below fields.
> +		 */
> +		seqlock_t lock;
> +		/**
> +		 * @enabled: Reference count indicating number of listeners.
> +		 */
> +		unsigned int enabled;
> +		/**
> +		 * @active: Number of contexts currently scheduled in.
> +		 */
> +		unsigned int active;
> +		/**
> +		 * @enabled_at: Timestamp when busy stats were enabled.
> +		 */
> +		ktime_t enabled_at;
> +		/**
> +		 * @start: Timestamp of the last idle to active transition.
> +		 *
> +		 * Idle is defined as active == 0, active as active > 0.
> +		 */
> +		ktime_t start;
> +		/**
> +		 * @total: Total time this engine was busy.
> +		 *
> +		 * Accumulated time not counting the most recent block in cases
> +		 * where engine is currently busy (active > 0).
> +		 */
> +		ktime_t total;
> +	} stats;
> +};
> +
> +static inline bool
> +intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
> +{
> +	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
> +}
> +
> +static inline bool
> +intel_engine_supports_stats(const struct intel_engine_cs *engine)
> +{
> +	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
> +}
> +
> +static inline bool
> +intel_engine_has_preemption(const struct intel_engine_cs *engine)
> +{
> +	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
> +}
> +
> +static inline bool
> +intel_engine_has_semaphores(const struct intel_engine_cs *engine)
> +{
> +	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
> +}
> +
> +#define instdone_slice_mask(dev_priv__) \
> +	(IS_GEN(dev_priv__, 7) ? \
> +	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
> +
> +#define instdone_subslice_mask(dev_priv__) \
> +	(IS_GEN(dev_priv__, 7) ? \
> +	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
> +
> +#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
> +	for ((slice__) = 0, (subslice__) = 0; \
> +	     (slice__) < I915_MAX_SLICES; \
> +	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
> +	       (slice__) += ((subslice__) == 0)) \
> +		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
> +			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
> +
> +#endif /* __INTEL_ENGINE_TYPES__ */
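
Since the execlist port macros travelled here too: they pack a request
pointer together with a 2-bit submission count into a single word via
the ptr_*_bits() helpers from i915_utils.h (presumably why intel_guc.h
grows that include below). A hedged sketch of the resubmission idiom:

  struct execlist_port *port = execlists->port;
  struct i915_request *rq;
  unsigned int count;

  rq = port_unpack(port, &count);           /* split pointer and count */
  port_set(port, port_pack(rq, count + 1)); /* lite restore: bump the count */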
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 744220296653..77ec1bd4df5a 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -32,6 +32,7 @@
>   #include "intel_guc_log.h"
>   #include "intel_guc_reg.h"
>   #include "intel_uc_fw.h"
> +#include "i915_utils.h"
>   #include "i915_vma.h"
>   
>   struct guc_preempt_work {
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 13819de57ef9..5fe38f74b480 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -15,14 +15,11 @@
>   #include "i915_request.h"
>   #include "i915_selftest.h"
>   #include "i915_timeline.h"
> -#include "intel_device_info.h"
> +#include "intel_engine_types.h"
>   #include "intel_gpu_commands.h"
>   #include "intel_workarounds.h"
>   
>   struct drm_printer;
> -struct i915_sched_attr;
> -
> -#define I915_CMD_HASH_ORDER 9
>   
>   /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
>    * but keeps the logic simple. Indeed, the whole purpose of this macro is just
> @@ -32,11 +29,6 @@ struct i915_sched_attr;
>   #define CACHELINE_BYTES 64
>   #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
>   
> -struct intel_hw_status_page {
> -	struct i915_vma *vma;
> -	u32 *addr;
> -};
> -
>   #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
>   #define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
>   
> @@ -91,498 +83,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
>   	return "unknown";
>   }
>   
> -#define I915_MAX_SLICES	3
> -#define I915_MAX_SUBSLICES 8
> -
> -#define instdone_slice_mask(dev_priv__) \
> -	(IS_GEN(dev_priv__, 7) ? \
> -	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
> -
> -#define instdone_subslice_mask(dev_priv__) \
> -	(IS_GEN(dev_priv__, 7) ? \
> -	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
> -
> -#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
> -	for ((slice__) = 0, (subslice__) = 0; \
> -	     (slice__) < I915_MAX_SLICES; \
> -	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
> -	       (slice__) += ((subslice__) == 0)) \
> -		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
> -			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
> -
> -struct intel_instdone {
> -	u32 instdone;
> -	/* The following exist only in the RCS engine */
> -	u32 slice_common;
> -	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
> -	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
> -};
> -
> -struct intel_engine_hangcheck {
> -	u64 acthd;
> -	u32 last_seqno;
> -	u32 next_seqno;
> -	unsigned long action_timestamp;
> -	struct intel_instdone instdone;
> -};
> -
> -struct intel_ring {
> -	struct i915_vma *vma;
> -	void *vaddr;
> -
> -	struct i915_timeline *timeline;
> -	struct list_head request_list;
> -	struct list_head active_link;
> -
> -	u32 head;
> -	u32 tail;
> -	u32 emit;
> -
> -	u32 space;
> -	u32 size;
> -	u32 effective_size;
> -};
> -
> -struct i915_gem_context;
> -struct drm_i915_reg_table;
> -
> -/*
> - * We use a single page to load ctx workarounds, so all of these
> - * values are referred to in terms of dwords.
> - *
> - * struct i915_wa_ctx_bb:
> - *  offset: specifies the batch starting position, also helpful in case
> - *    we want to have multiple batches at different offsets based on
> - *    some criteria. It is not a requirement at the moment but provides
> - *    an option for future use.
> - *  size: size of the batch in DWORDS
> - */
> -struct i915_ctx_workarounds {
> -	struct i915_wa_ctx_bb {
> -		u32 offset;
> -		u32 size;
> -	} indirect_ctx, per_ctx;
> -	struct i915_vma *vma;
> -};
> -
> -struct i915_request;
> -
> -#define I915_MAX_VCS	4
> -#define I915_MAX_VECS	2
> -
> -/*
> - * Engine ID definitions.
> - * Keep instances of the same engine type together.
> - */
> -enum intel_engine_id {
> -	RCS = 0,
> -	BCS,
> -	VCS,
> -	VCS2,
> -	VCS3,
> -	VCS4,
> -#define _VCS(n) (VCS + (n))
> -	VECS,
> -	VECS2
> -#define _VECS(n) (VECS + (n))
> -};
> -
> -struct st_preempt_hang {
> -	struct completion completion;
> -	unsigned int count;
> -	bool inject_hang;
> -};
> -
> -/**
> - * struct intel_engine_execlists - execlist submission queue and port state
> - *
> - * The struct intel_engine_execlists represents the combined logical state of
> - * driver and the hardware state for execlist mode of submission.
> - */
> -struct intel_engine_execlists {
> -	/**
> -	 * @tasklet: softirq tasklet for bottom handler
> -	 */
> -	struct tasklet_struct tasklet;
> -
> -	/**
> -	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
> -	 */
> -	struct i915_priolist default_priolist;
> -
> -	/**
> -	 * @no_priolist: priority lists disabled
> -	 */
> -	bool no_priolist;
> -
> -	/**
> -	 * @submit_reg: gen-specific execlist submission register
> -	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
> -	 * the ExecList Submission Queue Contents register array for Gen11+
> -	 */
> -	u32 __iomem *submit_reg;
> -
> -	/**
> -	 * @ctrl_reg: the enhanced execlists control register, used to load the
> -	 * submit queue on the HW and to request preemptions to idle
> -	 */
> -	u32 __iomem *ctrl_reg;
> -
> -	/**
> -	 * @port: execlist port states
> -	 *
> -	 * For each hardware ELSP (ExecList Submission Port) we keep
> -	 * track of the last request and the number of times we submitted
> -	 * that port to hw. We then count the number of times the hw reports
> -	 * a context completion or preemption. As only one context can
> -	 * be active on hw, we limit resubmission of a context to port[0].
> -	 * This is called a Lite Restore of the context.
> -	 */
> -	struct execlist_port {
> -		/**
> -		 * @request_count: combined request and submission count
> -		 */
> -		struct i915_request *request_count;
> -#define EXECLIST_COUNT_BITS 2
> -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
> -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
> -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
> -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
> -#define port_set(p, packed) ((p)->request_count = (packed))
> -#define port_isset(p) ((p)->request_count)
> -#define port_index(p, execlists) ((p) - (execlists)->port)
> -
> -		/**
> -		 * @context_id: context ID for port
> -		 */
> -		GEM_DEBUG_DECL(u32 context_id);
> -
> -#define EXECLIST_MAX_PORTS 2
> -	} port[EXECLIST_MAX_PORTS];
> -
> -	/**
> -	 * @active: is the HW active? We consider the HW as active after
> -	 * submitting any context for execution and until we have seen the
> -	 * last context completion event. After that, we do not expect any
> -	 * more events until we submit, and so can park the HW.
> -	 *
> -	 * As we have a small number of different sources from which we feed
> -	 * the HW, we track the state of each inside a single bitfield.
> -	 */
> -	unsigned int active;
> -#define EXECLISTS_ACTIVE_USER 0
> -#define EXECLISTS_ACTIVE_PREEMPT 1
> -#define EXECLISTS_ACTIVE_HWACK 2
> -
> -	/**
> -	 * @port_mask: number of execlist ports - 1
> -	 */
> -	unsigned int port_mask;
> -
> -	/**
> -	 * @queue_priority_hint: Highest pending priority.
> -	 *
> -	 * When we add requests into the queue, or adjust the priority of
> -	 * executing requests, we compute the maximum priority of those
> -	 * pending requests. We can then use this value to determine if
> -	 * we need to preempt the executing requests to service the queue.
> -	 * However, since we may have recorded the priority of an inflight
> -	 * request we wanted to preempt that has since completed, the
> -	 * priority hint at the time of dequeuing may no longer match the
> -	 * highest available request priority.
> -	 */
> -	int queue_priority_hint;
> -
> -	/**
> -	 * @queue: queue of requests, in priority lists
> -	 */
> -	struct rb_root_cached queue;
> -
> -	/**
> -	 * @csb_write: control register for Context Switch buffer
> -	 *
> -	 * Note this register may be either mmio or HWSP shadow.
> -	 */
> -	u32 *csb_write;
> -
> -	/**
> -	 * @csb_status: status array for Context Switch buffer
> -	 *
> -	 * Note these registers may be either mmio or HWSP shadow.
> -	 */
> -	u32 *csb_status;
> -
> -	/**
> -	 * @preempt_complete_status: expected CSB upon completing preemption
> -	 */
> -	u32 preempt_complete_status;
> -
> -	/**
> -	 * @csb_head: context status buffer head
> -	 */
> -	u8 csb_head;
> -
> -	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
> -};
> -
> -#define INTEL_ENGINE_CS_MAX_NAME 8
> -
> -struct intel_engine_cs {
> -	struct drm_i915_private *i915;
> -	char name[INTEL_ENGINE_CS_MAX_NAME];
> -
> -	enum intel_engine_id id;
> -	unsigned int hw_id;
> -	unsigned int guc_id;
> -	intel_engine_mask_t mask;
> -
> -	u8 uabi_class;
> -
> -	u8 class;
> -	u8 instance;
> -	u32 context_size;
> -	u32 mmio_base;
> -
> -	struct intel_ring *buffer;
> -
> -	struct i915_timeline timeline;
> -
> -	struct drm_i915_gem_object *default_state;
> -	void *pinned_default_state;
> -
> -	/* Rather than have every client wait upon all user interrupts,
> -	 * with the herd waking after every interrupt and each doing the
> -	 * heavyweight seqno dance, we delegate the task (of being the
> -	 * bottom-half of the user interrupt) to the first client. After
> -	 * every interrupt, we wake up one client, who does the heavyweight
> -	 * coherent seqno read and either goes back to sleep (if incomplete),
> -	 * or wakes up all the completed clients in parallel, before then
> -	 * transferring the bottom-half status to the next client in the queue.
> -	 *
> -	 * Compared to walking the entire list of waiters in a single dedicated
> -	 * bottom-half, we reduce the latency of the first waiter by avoiding
> -	 * a context switch, but incur additional coherent seqno reads when
> -	 * following the chain of request breadcrumbs. Since it is most likely
> -	 * that we have a single client waiting on each seqno, reducing
> -	 * the overhead of waking that client is much preferred.
> -	 */
> -	struct intel_breadcrumbs {
> -		spinlock_t irq_lock;
> -		struct list_head signalers;
> -
> -		struct irq_work irq_work; /* for use from inside irq_lock */
> -
> -		unsigned int irq_enabled;
> -
> -		bool irq_armed;
> -	} breadcrumbs;
> -
> -	struct intel_engine_pmu {
> -		/**
> -		 * @enable: Bitmask of enabled sample events on this engine.
> -		 *
> -		 * Bits correspond to sample event types, for instance
> -		 * I915_SAMPLE_QUEUED is bit 0 etc.
> -		 */
> -		u32 enable;
> -		/**
> -		 * @enable_count: Reference count for the enabled samplers.
> -		 *
> -		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
> -		 */
> -		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
> -		/**
> -		 * @sample: Counter values for sampling events.
> -		 *
> -		 * Our internal timer stores the current counters in this field.
> -		 *
> -		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
> -		 */
> -		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
> -	} pmu;
> -
> -	/*
> -	 * A pool of objects to use as shadow copies of client batch buffers
> -	 * when the command parser is enabled. Prevents the client from
> -	 * modifying the batch contents after software parsing.
> -	 */
> -	struct i915_gem_batch_pool batch_pool;
> -
> -	struct intel_hw_status_page status_page;
> -	struct i915_ctx_workarounds wa_ctx;
> -	struct i915_wa_list ctx_wa_list;
> -	struct i915_wa_list wa_list;
> -	struct i915_wa_list whitelist;
> -
> -	u32             irq_keep_mask; /* always keep these interrupts */
> -	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
> -	void		(*irq_enable)(struct intel_engine_cs *engine);
> -	void		(*irq_disable)(struct intel_engine_cs *engine);
> -
> -	int		(*init_hw)(struct intel_engine_cs *engine);
> -
> -	struct {
> -		void (*prepare)(struct intel_engine_cs *engine);
> -		void (*reset)(struct intel_engine_cs *engine, bool stalled);
> -		void (*finish)(struct intel_engine_cs *engine);
> -	} reset;
> -
> -	void		(*park)(struct intel_engine_cs *engine);
> -	void		(*unpark)(struct intel_engine_cs *engine);
> -
> -	void		(*set_default_submission)(struct intel_engine_cs *engine);
> -
> -	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
> -					     struct i915_gem_context *ctx);
> -
> -	int		(*request_alloc)(struct i915_request *rq);
> -	int		(*init_context)(struct i915_request *rq);
> -
> -	int		(*emit_flush)(struct i915_request *request, u32 mode);
> -#define EMIT_INVALIDATE	BIT(0)
> -#define EMIT_FLUSH	BIT(1)
> -#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
> -	int		(*emit_bb_start)(struct i915_request *rq,
> -					 u64 offset, u32 length,
> -					 unsigned int dispatch_flags);
> -#define I915_DISPATCH_SECURE BIT(0)
> -#define I915_DISPATCH_PINNED BIT(1)
> -	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
> -	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
> -						 u32 *cs);
> -	unsigned int	emit_fini_breadcrumb_dw;
> -
> -	/* Pass the request to the hardware queue (e.g. directly into
> -	 * the legacy ringbuffer or to the end of an execlist).
> -	 *
> -	 * This is called from an atomic context with irqs disabled; must
> -	 * be irq safe.
> -	 */
> -	void		(*submit_request)(struct i915_request *rq);
> -
> -	/*
> -	 * Call when the priority on a request has changed and it and its
> -	 * dependencies may need rescheduling. Note the request itself may
> -	 * not be ready to run!
> -	 */
> -	void		(*schedule)(struct i915_request *request,
> -				    const struct i915_sched_attr *attr);
> -
> -	/*
> -	 * Cancel all requests on the hardware, or queued for execution.
> -	 * This should only cancel the ready requests that have been
> -	 * submitted to the engine (via the engine->submit_request callback).
> -	 * This is called when marking the device as wedged.
> -	 */
> -	void		(*cancel_requests)(struct intel_engine_cs *engine);
> -
> -	void		(*cleanup)(struct intel_engine_cs *engine);
> -
> -	struct intel_engine_execlists execlists;
> -
> -	/* Contexts are pinned whilst they are active on the GPU. The last
> -	 * context executed remains active whilst the GPU is idle - the
> -	 * switch away and write to the context object only occurs on the
> -	 * next execution.  Contexts are only unpinned on retirement of the
> -	 * following request ensuring that we can always write to the object
> -	 * on the context switch even after idling. Across suspend, we switch
> -	 * to the kernel context and trash it as the save may not happen
> -	 * before the hardware is powered down.
> -	 */
> -	struct intel_context *last_retired_context;
> -
> -	/* status_notifier: list of callbacks for context-switch changes */
> -	struct atomic_notifier_head context_status_notifier;
> -
> -	struct intel_engine_hangcheck hangcheck;
> -
> -#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
> -#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
> -#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
> -#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
> -	unsigned int flags;
> -
> -	/*
> -	 * Table of commands the command parser needs to know about
> -	 * for this engine.
> -	 */
> -	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
> -
> -	/*
> -	 * Table of registers allowed in commands that read/write registers.
> -	 */
> -	const struct drm_i915_reg_table *reg_tables;
> -	int reg_table_count;
> -
> -	/*
> -	 * Returns the bitmask for the length field of the specified command.
> -	 * Return 0 for an unrecognized/invalid command.
> -	 *
> -	 * If the command parser finds an entry for a command in the engine's
> -	 * cmd_tables, it gets the command's length based on the table entry.
> -	 * If not, it calls this function to determine the per-engine length
> -	 * field encoding for the command (i.e. different opcode ranges use
> -	 * certain bits to encode the command length in the header).
> -	 */
> -	u32 (*get_cmd_length_mask)(u32 cmd_header);
> -
> -	struct {
> -		/**
> -		 * @lock: Lock protecting the below fields.
> -		 */
> -		seqlock_t lock;
> -		/**
> -		 * @enabled: Reference count indicating number of listeners.
> -		 */
> -		unsigned int enabled;
> -		/**
> -		 * @active: Number of contexts currently scheduled in.
> -		 */
> -		unsigned int active;
> -		/**
> -		 * @enabled_at: Timestamp when busy stats were enabled.
> -		 */
> -		ktime_t enabled_at;
> -		/**
> -		 * @start: Timestamp of the last idle to active transition.
> -		 *
> -		 * Idle is defined as active == 0, active as active > 0.
> -		 */
> -		ktime_t start;
> -		/**
> -		 * @total: Total time this engine was busy.
> -		 *
> -		 * Accumulated time not counting the most recent block in cases
> -		 * where engine is currently busy (active > 0).
> -		 */
> -		ktime_t total;
> -	} stats;
> -};
> -
> -static inline bool
> -intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
> -{
> -	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
> -}
> -
> -static inline bool
> -intel_engine_supports_stats(const struct intel_engine_cs *engine)
> -{
> -	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
> -}
> -
> -static inline bool
> -intel_engine_has_preemption(const struct intel_engine_cs *engine)
> -{
> -	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
> -}
> -
> -static inline bool
> -intel_engine_has_semaphores(const struct intel_engine_cs *engine)
> -{
> -	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
> -}
> -
>   void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
>   
>   static inline bool __execlists_need_preempt(int prio, int last)
> diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
> index 7c734714b05e..a1bf51c611a9 100644
> --- a/drivers/gpu/drm/i915/intel_workarounds.h
> +++ b/drivers/gpu/drm/i915/intel_workarounds.h
> @@ -9,18 +9,7 @@
>   
>   #include <linux/slab.h>
>   
> -struct i915_wa {
> -	i915_reg_t	  reg;
> -	u32		  mask;
> -	u32		  val;
> -};
> -
> -struct i915_wa_list {
> -	const char	*name;
> -	struct i915_wa	*list;
> -	unsigned int	count;
> -	unsigned int	wa_count;
> -};
> +#include "intel_workarounds_types.h"
>   
>   static inline void intel_wa_list_free(struct i915_wa_list *wal)
>   {
> diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h
> new file mode 100644
> index 000000000000..032a0dc49275
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_workarounds_types.h
> @@ -0,0 +1,25 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2014-2018 Intel Corporation
> + */
> +
> +#ifndef __INTEL_WORKAROUNDS_TYPES_H__
> +#define __INTEL_WORKAROUNDS_TYPES_H__
> +
> +#include "i915_reg.h"
> +
> +struct i915_wa {
> +	i915_reg_t	  reg;
> +	u32		  mask;
> +	u32		  val;
> +};
> +
> +struct i915_wa_list {
> +	const char	*name;
> +	struct i915_wa	*list;
> +	unsigned int	count;
> +	unsigned int	wa_count;
> +};
> +
> +#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */
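
To close the loop on the workarounds types: each i915_wa describes a
masked register update. Applying one is, roughly, a read-modify-write
(a sketch only, not the driver's actual apply loop):

  u32 val = I915_READ(wa->reg);

  val &= ~wa->mask;
  val |= wa->val;
  I915_WRITE(wa->reg, val);

iterated over wal->list for wal->count entries.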
> 

