[Intel-gfx] [PATCH v2 10/15] drm/i915/guc: Get rid of GuC log runtime

Sagar Arun Kamble sagar.a.kamble at intel.com
Fri Mar 9 10:51:20 UTC 2018



On 3/8/2018 9:17 PM, Michał Winiarski wrote:
> Runtime is not a very good name. Let's also move counting relay
> overflows inside relay struct.
>
> v2: Rename things rather than remove the struct (Chris)
>
> Signed-off-by: Michał Winiarski <michal.winiarski at intel.com>
> Cc: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> Cc: Sagar Arun Kamble <sagar.a.kamble at intel.com>
> Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
Some might want the other stats moved into the relay struct as well, since we
only print them when relay is enabled.
But this is not a big issue. With or without that change:
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
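
Just to illustrate the point above: a rough sketch (not part of this patch,
field placement is only a suggestion) of how intel_guc_log could look if the
remaining logging stats were also folded under relay:

/*
 * Illustrative sketch only: remaining stats moved under the relay
 * sub-struct, since they are only reported while relay is enabled.
 */
struct intel_guc_log {
	u32 flags;
	struct i915_vma *vma;
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *channel;
		struct mutex lock;
		/* relay/logging related stats */
		u32 full_count;
		u32 flush_interrupt_count;
		u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
		u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	} relay;
};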
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c  |  4 +--
>   drivers/gpu/drm/i915/intel_guc.c     | 12 +++----
>   drivers/gpu/drm/i915/intel_guc_log.c | 66 ++++++++++++++++++------------------
>   drivers/gpu/drm/i915/intel_guc_log.h |  7 ++--
>   4 files changed, 44 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index f99fe9910634..d7c0bf6facf6 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2348,8 +2348,8 @@ static void i915_guc_log_info(struct seq_file *m,
>   	seq_printf(m, "\tTotal flush interrupt count: %u\n",
>   		   guc->log.flush_interrupt_count);
>   
> -	seq_printf(m, "\tCapture miss count: %u\n",
> -		   guc->log.capture_miss_count);
> +	seq_printf(m, "\tRelay full count: %u\n",
> +		   guc->log.relay.full_count);
>   }
>   
>   static void i915_guc_client_info(struct seq_file *m,
> diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
> index 0d92caf6a83f..cab158e42577 100644
> --- a/drivers/gpu/drm/i915/intel_guc.c
> +++ b/drivers/gpu/drm/i915/intel_guc.c
> @@ -87,9 +87,9 @@ int intel_guc_init_wq(struct intel_guc *guc)
>   	 * or scheduled later on resume. This way the handling of work
>   	 * item can be kept same between system suspend & rpm suspend.
>   	 */
> -	guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
> +	guc->log.relay.flush_wq = alloc_ordered_workqueue("i915-guc_log",
>   						WQ_HIGHPRI | WQ_FREEZABLE);
> -	if (!guc->log.runtime.flush_wq) {
> +	if (!guc->log.relay.flush_wq) {
>   		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
>   		return -ENOMEM;
>   	}
> @@ -112,7 +112,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
>   		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
>   							  WQ_HIGHPRI);
>   		if (!guc->preempt_wq) {
> -			destroy_workqueue(guc->log.runtime.flush_wq);
> +			destroy_workqueue(guc->log.relay.flush_wq);
>   			DRM_ERROR("Couldn't allocate workqueue for GuC "
>   				  "preemption\n");
>   			return -ENOMEM;
> @@ -130,7 +130,7 @@ void intel_guc_fini_wq(struct intel_guc *guc)
>   	    USES_GUC_SUBMISSION(dev_priv))
>   		destroy_workqueue(guc->preempt_wq);
>   
> -	destroy_workqueue(guc->log.runtime.flush_wq);
> +	destroy_workqueue(guc->log.relay.flush_wq);
>   }
>   
>   static int guc_shared_data_create(struct intel_guc *guc)
> @@ -389,8 +389,8 @@ void intel_guc_to_host_event_handler(struct intel_guc *guc)
>   
>   	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
>   		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) {
> -		queue_work(guc->log.runtime.flush_wq,
> -			   &guc->log.runtime.flush_work);
> +		queue_work(guc->log.relay.flush_wq,
> +			   &guc->log.relay.flush_work);
>   
>   		guc->log.flush_interrupt_count++;
>   	}
> diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
> index 92a7bf0fd729..7c4339dae534 100644
> --- a/drivers/gpu/drm/i915/intel_guc_log.c
> +++ b/drivers/gpu/drm/i915/intel_guc_log.c
> @@ -151,10 +151,10 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
>   	smp_wmb();
>   
>   	/* All data has been written, so now move the offset of sub buffer. */
> -	relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
> +	relay_reserve(guc->log.relay.channel, guc->log.vma->obj->base.size);
>   
>   	/* Switch to the next sub buffer */
> -	relay_flush(guc->log.runtime.relay_chan);
> +	relay_flush(guc->log.relay.channel);
>   }
>   
>   static void *guc_get_write_buffer(struct intel_guc *guc)
> @@ -168,7 +168,7 @@ static void *guc_get_write_buffer(struct intel_guc *guc)
>   	 * done without using relay_reserve() along with relay_write(). So its
>   	 * better to use relay_reserve() alone.
>   	 */
> -	return relay_reserve(guc->log.runtime.relay_chan, 0);
> +	return relay_reserve(guc->log.relay.channel, 0);
>   }
>   
>   static bool guc_check_log_buf_overflow(struct intel_guc *guc,
> @@ -219,13 +219,13 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
>   	void *src_data, *dst_data;
>   	bool new_overflow;
>   
> -	mutex_lock(&guc->log.runtime.lock);
> +	mutex_lock(&guc->log.relay.lock);
>   
> -	if (WARN_ON(!guc->log.runtime.buf_addr))
> +	if (WARN_ON(!guc->log.relay.buf_addr))
>   		goto out_unlock;
>   
>   	/* Get the pointer to shared GuC log buffer */
> -	log_buf_state = src_data = guc->log.runtime.buf_addr;
> +	log_buf_state = src_data = guc->log.relay.buf_addr;
>   
>   	/* Get the pointer to local buffer to store the logs */
>   	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
> @@ -236,7 +236,7 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
>   		 * getting consumed by User at a slow rate.
>   		 */
>   		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
> -		guc->log.capture_miss_count++;
> +		guc->log.relay.full_count++;
>   
>   		goto out_unlock;
>   	}
> @@ -310,20 +310,20 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
>   	guc_move_to_next_buf(guc);
>   
>   out_unlock:
> -	mutex_unlock(&guc->log.runtime.lock);
> +	mutex_unlock(&guc->log.relay.lock);
>   }
>   
>   static void capture_logs_work(struct work_struct *work)
>   {
>   	struct intel_guc *guc =
> -		container_of(work, struct intel_guc, log.runtime.flush_work);
> +		container_of(work, struct intel_guc, log.relay.flush_work);
>   
>   	guc_log_capture_logs(guc);
>   }
>   
> -static bool guc_log_has_runtime(struct intel_guc *guc)
> +static bool guc_log_relay_enabled(struct intel_guc *guc)
>   {
> -	return guc->log.runtime.buf_addr != NULL;
> +	return guc->log.relay.buf_addr != NULL;
>   }
>   
>   static int guc_log_map(struct intel_guc *guc)
> @@ -332,7 +332,7 @@ static int guc_log_map(struct intel_guc *guc)
>   	void *vaddr;
>   	int ret;
>   
> -	lockdep_assert_held(&guc->log.runtime.lock);
> +	lockdep_assert_held(&guc->log.relay.lock);
>   
>   	if (!guc->log.vma)
>   		return -ENODEV;
> @@ -354,23 +354,23 @@ static int guc_log_map(struct intel_guc *guc)
>   		return PTR_ERR(vaddr);
>   	}
>   
> -	guc->log.runtime.buf_addr = vaddr;
> +	guc->log.relay.buf_addr = vaddr;
>   
>   	return 0;
>   }
>   
>   static void guc_log_unmap(struct intel_guc *guc)
>   {
> -	lockdep_assert_held(&guc->log.runtime.lock);
> +	lockdep_assert_held(&guc->log.relay.lock);
>   
>   	i915_gem_object_unpin_map(guc->log.vma->obj);
> -	guc->log.runtime.buf_addr = NULL;
> +	guc->log.relay.buf_addr = NULL;
>   }
>   
>   void intel_guc_log_init_early(struct intel_guc *guc)
>   {
> -	mutex_init(&guc->log.runtime.lock);
> -	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
> +	mutex_init(&guc->log.relay.lock);
> +	INIT_WORK(&guc->log.relay.flush_work, capture_logs_work);
>   }
>   
>   static int guc_log_relay_create(struct intel_guc *guc)
> @@ -380,7 +380,7 @@ static int guc_log_relay_create(struct intel_guc *guc)
>   	size_t n_subbufs, subbuf_size;
>   	int ret;
>   
> -	lockdep_assert_held(&guc->log.runtime.lock);
> +	lockdep_assert_held(&guc->log.relay.lock);
>   
>   	 /* Keep the size of sub buffers same as shared log buffer */
>   	subbuf_size = GUC_LOG_SIZE;
> @@ -410,17 +410,17 @@ static int guc_log_relay_create(struct intel_guc *guc)
>   	}
>   
>   	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
> -	guc->log.runtime.relay_chan = guc_log_relay_chan;
> +	guc->log.relay.channel = guc_log_relay_chan;
>   
>   	return 0;
>   }
>   
>   static void guc_log_relay_destroy(struct intel_guc *guc)
>   {
> -	lockdep_assert_held(&guc->log.runtime.lock);
> +	lockdep_assert_held(&guc->log.relay.lock);
>   
> -	relay_close(guc->log.runtime.relay_chan);
> -	guc->log.runtime.relay_chan = NULL;
> +	relay_close(guc->log.relay.channel);
> +	guc->log.relay.channel = NULL;
>   }
>   
>   static void guc_log_capture_logs(struct intel_guc *guc)
> @@ -553,9 +553,9 @@ int intel_guc_log_relay_open(struct intel_guc *guc)
>   {
>   	int ret;
>   
> -	mutex_lock(&guc->log.runtime.lock);
> +	mutex_lock(&guc->log.relay.lock);
>   
> -	if (guc_log_has_runtime(guc)) {
> +	if (guc_log_relay_enabled(guc)) {
>   		ret = -EEXIST;
>   		goto out_unlock;
>   	}
> @@ -578,7 +578,7 @@ int intel_guc_log_relay_open(struct intel_guc *guc)
>   	if (ret)
>   		goto out_relay;
>   
> -	mutex_unlock(&guc->log.runtime.lock);
> +	mutex_unlock(&guc->log.relay.lock);
>   
>   	guc_log_flush_irq_enable(guc);
>   
> @@ -587,14 +587,14 @@ int intel_guc_log_relay_open(struct intel_guc *guc)
>   	 * the flush notification. This means that we need to unconditionally
>   	 * flush on relay enabling, since GuC only notifies us once.
>   	 */
> -	queue_work(guc->log.runtime.flush_wq, &guc->log.runtime.flush_work);
> +	queue_work(guc->log.relay.flush_wq, &guc->log.relay.flush_work);
>   
>   	return 0;
>   
>   out_relay:
>   	guc_log_relay_destroy(guc);
>   out_unlock:
> -	mutex_unlock(&guc->log.runtime.lock);
> +	mutex_unlock(&guc->log.relay.lock);
>   
>   	return ret;
>   }
> @@ -607,7 +607,7 @@ void intel_guc_log_relay_flush(struct intel_guc *guc)
>   	 * Before initiating the forceful flush, wait for any pending/ongoing
>   	 * flush to complete otherwise forceful flush may not actually happen.
>   	 */
> -	flush_work(&guc->log.runtime.flush_work);
> +	flush_work(&guc->log.relay.flush_work);
>   
>   	intel_runtime_pm_get(i915);
>   	guc_log_flush(guc);
> @@ -620,11 +620,11 @@ void intel_guc_log_relay_flush(struct intel_guc *guc)
>   void intel_guc_log_relay_close(struct intel_guc *guc)
>   {
>   	guc_log_flush_irq_disable(guc);
> -	flush_work(&guc->log.runtime.flush_work);
> +	flush_work(&guc->log.relay.flush_work);
>   
> -	mutex_lock(&guc->log.runtime.lock);
> -	GEM_BUG_ON(!guc_log_has_runtime(guc));
> - 	guc_log_unmap(guc);
> +	mutex_lock(&guc->log.relay.lock);
> +	GEM_BUG_ON(!guc_log_relay_enabled(guc));
> +	guc_log_unmap(guc);
>   	guc_log_relay_destroy(guc);
> -	mutex_unlock(&guc->log.runtime.lock);
> +	mutex_unlock(&guc->log.relay.lock);
>   }
> diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
> index df91f12a36ed..9b1257ea2673 100644
> --- a/drivers/gpu/drm/i915/intel_guc_log.h
> +++ b/drivers/gpu/drm/i915/intel_guc_log.h
> @@ -42,16 +42,15 @@ struct intel_guc;
>   struct intel_guc_log {
>   	u32 flags;
>   	struct i915_vma *vma;
> -	/* The runtime stuff gets created only when GuC logging gets enabled */
>   	struct {
>   		void *buf_addr;
>   		struct workqueue_struct *flush_wq;
>   		struct work_struct flush_work;
> -		struct rchan *relay_chan;
> +		struct rchan *channel;
>   		struct mutex lock;
> -	} runtime;
> +		u32 full_count;
> +	} relay;
>   	/* logging related stats */
> -	u32 capture_miss_count;
>   	u32 flush_interrupt_count;
>   	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
>   	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];

-- 
Thanks,
Sagar


