[Intel-gfx] [PATCH] drm/i915: Rename drm_i915_gem_request to i915_request

Mika Kuoppala mika.kuoppala at linux.intel.com
Wed Feb 21 12:05:24 UTC 2018


Chris Wilson <chris at chris-wilson.co.uk> writes:

> We want to de-emphasize the link between the request (dependency,
> execution and fence tracking) from GEM and so rename the struct from
> drm_i915_gem_request to i915_request. That is we may implement the GEM
> user interface on top of requests, but they are an abstraction for
> tracking execution rather than an implementation detail of GEM. (Since
> they are not tied to HW, we keep the i915 prefix as opposed to intel.)
>
> In short, the spatch:
> @@
>
> @@
> - struct drm_i915_gem_request
> + struct i915_request
>
> A corollary to contracting the type name, we also harmonise on using
> 'rq' shorthand for local variables where space is of the essence and
> repetition makes 'request' unwieldy. For globals and struct members,
> 'request' is still much preferred for its clarity.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala at linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> Cc: Michał Winiarski <michal.winiarski at intel.com>
> Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
> ---
>  drivers/gpu/drm/i915/Makefile                      |   2 +-
>  drivers/gpu/drm/i915/gvt/scheduler.c               |  16 +-
>  drivers/gpu/drm/i915/gvt/scheduler.h               |   2 +-
>  drivers/gpu/drm/i915/i915_debugfs.c                |   6 +-
>  drivers/gpu/drm/i915/i915_drv.c                    |   6 +-
>  drivers/gpu/drm/i915/i915_drv.h                    |  26 +-
>  drivers/gpu/drm/i915/i915_gem.c                    |  88 +++--
>  drivers/gpu/drm/i915/i915_gem_batch_pool.c         |   2 +-
>  drivers/gpu/drm/i915/i915_gem_context.c            |  18 +-
>  drivers/gpu/drm/i915/i915_gem_context.h            |   2 +-
>  drivers/gpu/drm/i915/i915_gem_evict.c              |   4 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c         |  60 ++--
>  drivers/gpu/drm/i915/i915_gem_gtt.c                |  38 +--
>  drivers/gpu/drm/i915/i915_gem_gtt.h                |   5 +-
>  drivers/gpu/drm/i915/i915_gem_object.h             |   2 +-
>  drivers/gpu/drm/i915/i915_gem_render_state.c       |   2 +-
>  drivers/gpu/drm/i915/i915_gem_render_state.h       |   4 +-
>  drivers/gpu/drm/i915/i915_gem_shrinker.c           |   4 +-
>  drivers/gpu/drm/i915/i915_gem_timeline.h           |   4 +-
>  drivers/gpu/drm/i915/i915_gpu_error.c              |  18 +-
>  drivers/gpu/drm/i915/i915_irq.c                    |   8 +-
>  drivers/gpu/drm/i915/i915_perf.c                   |  28 +-
>  .../i915/{i915_gem_request.c => i915_request.c}    | 378 +++++++++++----------
>  .../i915/{i915_gem_request.h => i915_request.h}    | 220 ++++++------
>  drivers/gpu/drm/i915/i915_trace.h                  | 128 ++++---
>  drivers/gpu/drm/i915/i915_vma.c                    |   3 +-
>  drivers/gpu/drm/i915/i915_vma.h                    |   2 +-
>  drivers/gpu/drm/i915/intel_breadcrumbs.c           |  31 +-
>  drivers/gpu/drm/i915/intel_display.c               |   8 +-
>  drivers/gpu/drm/i915/intel_drv.h                   |   3 +-
>  drivers/gpu/drm/i915/intel_engine_cs.c             |  26 +-
>  drivers/gpu/drm/i915/intel_guc_submission.c        |  27 +-
>  drivers/gpu/drm/i915/intel_lrc.c                   | 115 +++----
>  drivers/gpu/drm/i915/intel_mocs.c                  |  28 +-
>  drivers/gpu/drm/i915/intel_mocs.h                  |   2 +-
>  drivers/gpu/drm/i915/intel_overlay.c               |  82 ++---
>  drivers/gpu/drm/i915/intel_pm.c                    |   4 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.c            | 196 ++++++-----
>  drivers/gpu/drm/i915/intel_ringbuffer.h            |  78 ++---
>  drivers/gpu/drm/i915/selftests/huge_pages.c        |   6 +-
>  .../gpu/drm/i915/selftests/i915_gem_coherency.c    |   8 +-
>  drivers/gpu/drm/i915/selftests/i915_gem_context.c  |   8 +-
>  drivers/gpu/drm/i915/selftests/i915_gem_evict.c    |   6 +-
>  drivers/gpu/drm/i915/selftests/i915_gem_object.c   |   6 +-
>  .../gpu/drm/i915/selftests/i915_live_selftests.h   |   2 +-
>  .../gpu/drm/i915/selftests/i915_mock_selftests.h   |   2 +-
>  .../{i915_gem_request.c => i915_request.c}         | 125 ++++---
>  drivers/gpu/drm/i915/selftests/intel_hangcheck.c   | 117 ++++---
>  drivers/gpu/drm/i915/selftests/mock_engine.c       |  10 +-
>  drivers/gpu/drm/i915/selftests/mock_gem_device.c   |   2 +-
>  drivers/gpu/drm/i915/selftests/mock_request.c      |  10 +-
>  drivers/gpu/drm/i915/selftests/mock_request.h      |   8 +-
>  52 files changed, 990 insertions(+), 996 deletions(-)
>  rename drivers/gpu/drm/i915/{i915_gem_request.c => i915_request.c} (79%)
>  rename drivers/gpu/drm/i915/{i915_gem_request.h => i915_request.h} (79%)
>  rename drivers/gpu/drm/i915/selftests/{i915_gem_request.c => i915_request.c} (87%)
>
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index f55cc028b2eb..881d7124c597 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -63,13 +63,13 @@ i915-y += i915_cmd_parser.o \
>  	  i915_gem.o \
>  	  i915_gem_object.o \
>  	  i915_gem_render_state.o \
> -	  i915_gem_request.o \
>  	  i915_gem_shrinker.o \
>  	  i915_gem_stolen.o \
>  	  i915_gem_tiling.o \
>  	  i915_gem_timeline.o \
>  	  i915_gem_userptr.o \
>  	  i915_gemfs.o \
> +	  i915_request.o \
>  	  i915_trace_points.o \
>  	  i915_vma.o \
>  	  intel_breadcrumbs.o \
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index b55b3580ca1d..92df1b44fe1d 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
>  	return 0;
>  }
>  
> -static inline bool is_gvt_request(struct drm_i915_gem_request *req)
> +static inline bool is_gvt_request(struct i915_request *req)
>  {
>  	return i915_gem_context_force_single_submission(req->ctx);
>  }
> @@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
>  static int shadow_context_status_change(struct notifier_block *nb,
>  		unsigned long action, void *data)
>  {
> -	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
> +	struct i915_request *req = data;
>  	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
>  				shadow_ctx_notifier_block[req->engine->id]);
>  	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> @@ -333,13 +333,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
>  	int ring_id = workload->ring_id;
>  	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
>  	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
> -	struct drm_i915_gem_request *rq;
> +	struct i915_request *rq;
>  	struct intel_vgpu *vgpu = workload->vgpu;
>  	struct intel_vgpu_submission *s = &vgpu->submission;
>  	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
>  	int ret;
>  
> -	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
> +	rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
>  	if (IS_ERR(rq)) {
>  		gvt_vgpu_err("fail to allocate gem request\n");
>  		ret = PTR_ERR(rq);
> @@ -348,7 +348,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
>  
>  	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
>  
> -	workload->req = i915_gem_request_get(rq);
> +	workload->req = i915_request_get(rq);
>  	ret = copy_workload_to_ring_buffer(workload);
>  	if (ret)
>  		goto err_unpin;
> @@ -582,7 +582,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>  	if (!IS_ERR_OR_NULL(workload->req)) {
>  		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
>  				ring_id, workload->req);
> -		i915_add_request(workload->req);
> +		i915_request_add(workload->req);
>  		workload->dispatched = true;
>  	}
>  
> @@ -769,7 +769,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>  				workload->status = 0;
>  		}
>  
> -		i915_gem_request_put(fetch_and_zero(&workload->req));
> +		i915_request_put(fetch_and_zero(&workload->req));
>  
>  		if (!workload->status && !(vgpu->resetting_eng &
>  					   ENGINE_MASK(ring_id))) {
> @@ -886,7 +886,7 @@ static int workload_thread(void *priv)
>  
>  		gvt_dbg_sched("ring id %d wait workload %p\n",
>  				workload->ring_id, workload);
> -		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
> +		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
>  
>  complete:
>  		gvt_dbg_sched("will complete workload %p, status: %d\n",
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
> index ff175a98b19e..bab4097aa6d7 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.h
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.h
> @@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
>  struct intel_vgpu_workload {
>  	struct intel_vgpu *vgpu;
>  	int ring_id;
> -	struct drm_i915_gem_request *req;
> +	struct i915_request *req;
>  	/* if this workload has been dispatched to i915? */
>  	bool dispatched;
>  	bool shadowed;
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 05b41045b8f9..bad2ed7050ba 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -519,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>  	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
>  		struct file_stats stats;
>  		struct drm_i915_file_private *file_priv = file->driver_priv;
> -		struct drm_i915_gem_request *request;
> +		struct i915_request *request;
>  		struct task_struct *task;
>  
>  		mutex_lock(&dev->struct_mutex);
> @@ -536,7 +536,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>  		 * Therefore, we need to protect this ->comm access using RCU.
>  		 */
>  		request = list_first_entry_or_null(&file_priv->mm.request_list,
> -						   struct drm_i915_gem_request,
> +						   struct i915_request,
>  						   client_link);
>  		rcu_read_lock();
>  		task = pid_task(request && request->ctx->pid ?
> @@ -4060,7 +4060,7 @@ i915_drop_caches_set(void *data, u64 val)
>  						     I915_WAIT_LOCKED);
>  
>  		if (val & DROP_RETIRE)
> -			i915_gem_retire_requests(dev_priv);
> +			i915_retire_requests(dev_priv);
>  
>  		mutex_unlock(&dev->struct_mutex);
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index d09f8e661fbd..aaa861b51024 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -808,7 +808,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
>  	/*
>  	 * The i915 workqueue is primarily used for batched retirement of
>  	 * requests (and thus managing bo) once the task has been completed
> -	 * by the GPU. i915_gem_retire_requests() is called directly when we
> +	 * by the GPU. i915_retire_requests() is called directly when we
>  	 * need high-priority retirement, such as waiting for an explicit
>  	 * bo.
>  	 *
> @@ -1992,7 +1992,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
>  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
>  error:
>  	i915_gem_set_wedged(i915);
> -	i915_gem_retire_requests(i915);
> +	i915_retire_requests(i915);
>  	intel_gpu_reset(i915, ALL_ENGINES);
>  	goto finish;
>  }
> @@ -2019,7 +2019,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
>  int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
>  {
>  	struct i915_gpu_error *error = &engine->i915->gpu_error;
> -	struct drm_i915_gem_request *active_request;
> +	struct i915_request *active_request;
>  	int ret;
>  
>  	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index fdcb2dca9650..873473b035d0 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -71,9 +71,9 @@
>  #include "i915_gem_fence_reg.h"
>  #include "i915_gem_object.h"
>  #include "i915_gem_gtt.h"
> -#include "i915_gem_request.h"
>  #include "i915_gem_timeline.h"
>  
> +#include "i915_request.h"
>  #include "i915_vma.h"
>  
>  #include "intel_gvt.h"
> @@ -1231,7 +1231,7 @@ struct i915_gpu_error {
>  	 *
>  	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
>  	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
> -	 * i915_gem_request_alloc(), this bit is checked and the sequence
> +	 * i915_request_alloc(), this bit is checked and the sequence
>  	 * aborted (with -EIO reported to userspace) if set.
>  	 */
>  	unsigned long flags;
> @@ -3329,7 +3329,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
>  
>  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
>  void i915_vma_move_to_active(struct i915_vma *vma,
> -			     struct drm_i915_gem_request *req,
> +			     struct i915_request *rq,
>  			     unsigned int flags);
>  int i915_gem_dumb_create(struct drm_file *file_priv,
>  			 struct drm_device *dev,
> @@ -3344,11 +3344,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
>  
>  int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
>  
> -struct drm_i915_gem_request *
> +struct i915_request *
>  i915_gem_find_active_request(struct intel_engine_cs *engine);
>  
> -void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
> -
>  static inline bool i915_reset_backoff(struct i915_gpu_error *error)
>  {
>  	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
> @@ -3380,7 +3378,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
>  	return READ_ONCE(error->reset_engine_count[engine->id]);
>  }
>  
> -struct drm_i915_gem_request *
> +struct i915_request *
>  i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
>  int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
>  void i915_gem_reset(struct drm_i915_private *dev_priv);
> @@ -3389,7 +3387,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
>  void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
>  bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
>  void i915_gem_reset_engine(struct intel_engine_cs *engine,
> -			   struct drm_i915_gem_request *request);
> +			   struct i915_request *request);

s/request/rq/
This would at least make the header side use rq consistently.

Noticed a few s/dev_priv/i915/ and one s/uint32_t/u32/ crammed in there too.

There are still some places to unify, but cramming more name changes
into this one would make it unwieldy to review.

Reviewed-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>


More information about the Intel-gfx mailing list