[Intel-gfx] [PATCH v2 2/2] drm/i915/guc: Simplify code by keeping vmap of guc_client object

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Fri Feb 19 11:10:00 UTC 2016



On 18/02/16 18:31, yu.dai at intel.com wrote:
> From: Alex Dai <yu.dai at intel.com>
>
> The GuC client object is always pinned during its life cycle. We cache
> the vmap of the client object, which includes the guc_process_desc, the
> doorbell and the work queue. By doing so, we can simplify the code
> wherever the driver communicates with the GuC.
>
> As a result, this patch removes the kmap_atomic in wq_check_space,
> where usleep_range could be called while the kmap_atomic mapping was
> still held. Since kmap_atomic disables preemption, sleeping there
> triggers the "scheduling while atomic" splat below.
>
> v2: Pass the actual number of pages to i915_gem_object_vmap(). Also,
>      check its return value for error handling. (Tvrtko Ursulin)
> v1: vmap is done by i915_gem_object_vmap().
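
For readers who don't have patch 1/2 of the series handy, here is a rough
sketch of what the new helper could look like, inferred purely from the
call site below. Only the name and the (obj, first page, page count)
calling convention come from this patch; the body, the parameter types and
the kmalloc_array/vmap details are my assumptions, not the actual
implementation:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "i915_drv.h"

/*
 * Map 'npages' backing pages of 'obj', starting at page 'first', into one
 * contiguous kernel virtual range. The caller must keep the object's
 * backing pages pinned for as long as the mapping is in use.
 */
static void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
				  unsigned int first, unsigned int npages)
{
	struct page **pages;
	void *addr;
	unsigned int i;

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (pages == NULL)
		return NULL;

	/* Collect the struct page pointers for the requested range */
	for (i = 0; i < npages; i++)
		pages[i] = i915_gem_object_get_page(obj, first + i);

	/* Build a single virtually contiguous mapping over them */
	addr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

	/* The temporary page array is no longer needed once mapped */
	kfree(pages);

	return addr;
}

Whatever the real implementation looks like, the mapping it returns is what
guc_client_alloc() caches in client->client_base below and what
guc_client_free() releases again with vunmap().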


Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Regards,

Tvrtko

> [   34.098798] BUG: scheduling while atomic: gem_close_race/1941/0x00000002
> [   34.098822] Modules linked in: hid_generic usbhid i915 asix usbnet libphy mii i2c_algo_bit drm_kms_helper cfbfillrect syscopyarea cfbimgblt sysfillrect sysimgblt fb_sys_fops cfbcopyarea drm coretemp i2c_hid hid video pinctrl_sunrisepoint pinctrl_intel acpi_pad nls_iso8859_1 e1000e ptp psmouse pps_core ahci libahci
> [   34.098824] CPU: 0 PID: 1941 Comm: gem_close_race Tainted: G     U          4.4.0-160121+ #123
> [   34.098824] Hardware name: Intel Corporation Skylake Client platform/Skylake AIO DDR3L RVP10, BIOS SKLSE2R1.R00.X100.B01.1509220551 09/22/2015
> [   34.098825]  0000000000013e40 ffff880166c27a78 ffffffff81280d02 ffff880172c13e40
> [   34.098826]  ffff880166c27a88 ffffffff810c203a ffff880166c27ac8 ffffffff814ec808
> [   34.098827]  ffff88016b7c6000 ffff880166c28000 00000000000f4240 0000000000000001
> [   34.098827] Call Trace:
> [   34.098831]  [<ffffffff81280d02>] dump_stack+0x4b/0x79
> [   34.098833]  [<ffffffff810c203a>] __schedule_bug+0x41/0x4f
> [   34.098834]  [<ffffffff814ec808>] __schedule+0x5a8/0x690
> [   34.098835]  [<ffffffff814ec927>] schedule+0x37/0x80
> [   34.098836]  [<ffffffff814ef3fd>] schedule_hrtimeout_range_clock+0xad/0x130
> [   34.098837]  [<ffffffff81090be0>] ? hrtimer_init+0x10/0x10
> [   34.098838]  [<ffffffff814ef3f1>] ? schedule_hrtimeout_range_clock+0xa1/0x130
> [   34.098839]  [<ffffffff814ef48e>] schedule_hrtimeout_range+0xe/0x10
> [   34.098840]  [<ffffffff814eef9b>] usleep_range+0x3b/0x40
> [   34.098853]  [<ffffffffa01ec109>] i915_guc_wq_check_space+0x119/0x210 [i915]
> [   34.098861]  [<ffffffffa01da47c>] intel_logical_ring_alloc_request_extras+0x5c/0x70 [i915]
> [   34.098869]  [<ffffffffa01cdbf1>] i915_gem_request_alloc+0x91/0x170 [i915]
> [   34.098875]  [<ffffffffa01c1c07>] i915_gem_do_execbuffer.isra.25+0xbc7/0x12a0 [i915]
> [   34.098882]  [<ffffffffa01cb785>] ? i915_gem_object_get_pages_gtt+0x225/0x3c0 [i915]
> [   34.098889]  [<ffffffffa01d1fb6>] ? i915_gem_pwrite_ioctl+0xd6/0x9f0 [i915]
> [   34.098895]  [<ffffffffa01c2e68>] i915_gem_execbuffer2+0xa8/0x250 [i915]
> [   34.098900]  [<ffffffffa00f65d8>] drm_ioctl+0x258/0x4f0 [drm]
> [   34.098906]  [<ffffffffa01c2dc0>] ? i915_gem_execbuffer+0x340/0x340 [i915]
> [   34.098908]  [<ffffffff8111590d>] do_vfs_ioctl+0x2cd/0x4a0
> [   34.098909]  [<ffffffff8111eac2>] ? __fget+0x72/0xb0
> [   34.098910]  [<ffffffff81115b1c>] SyS_ioctl+0x3c/0x70
> [   34.098911]  [<ffffffff814effd7>] entry_SYSCALL_64_fastpath+0x12/0x6a
> [   34.100208] ------------[ cut here ]------------
>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93847
> Cc: Dave Gordon <david.s.gordon at intel.com>
> Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> Signed-off-by: Alex Dai <yu.dai at intel.com>
> ---
>   drivers/gpu/drm/i915/i915_guc_submission.c | 56 ++++++++++--------------------
>   drivers/gpu/drm/i915/intel_guc.h           |  3 +-
>   2 files changed, 21 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index d7543ef..3e2ea42 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -195,11 +195,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   	struct guc_process_desc *desc;
>   	union guc_doorbell_qw db_cmp, db_exc, db_ret;
>   	union guc_doorbell_qw *db;
> -	void *base;
>   	int attempt = 2, ret = -EAGAIN;
>
> -	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
> -	desc = base + gc->proc_desc_offset;
> +	desc = gc->client_base + gc->proc_desc_offset;
>
>   	/* Update the tail so it is visible to GuC */
>   	desc->tail = gc->wq_tail;
> @@ -215,7 +213,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   		db_exc.cookie = 1;
>
>   	/* pointer of current doorbell cacheline */
> -	db = base + gc->doorbell_offset;
> +	db = gc->client_base + gc->doorbell_offset;
>
>   	while (attempt--) {
>   		/* lets ring the doorbell */
> @@ -244,10 +242,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   			db_exc.cookie = 1;
>   	}
>
> -	/* Finally, update the cached copy of the GuC's WQ head */
> -	gc->wq_head = desc->head;
> -
> -	kunmap_atomic(base);
>   	return ret;
>   }
>
> @@ -341,10 +335,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
>   			       struct i915_guc_client *client)
>   {
>   	struct guc_process_desc *desc;
> -	void *base;
>
> -	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
> -	desc = base + client->proc_desc_offset;
> +	desc = client->client_base + client->proc_desc_offset;
>
>   	memset(desc, 0, sizeof(*desc));
>
> @@ -361,8 +353,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
>   	desc->wq_size_bytes = client->wq_size;
>   	desc->wq_status = WQ_STATUS_ACTIVE;
>   	desc->priority = client->priority;
> -
> -	kunmap_atomic(base);
>   }
>
>   /*
> @@ -474,25 +464,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
>   int i915_guc_wq_check_space(struct i915_guc_client *gc)
>   {
>   	struct guc_process_desc *desc;
> -	void *base;
>   	u32 size = sizeof(struct guc_wq_item);
>   	int ret = -ETIMEDOUT, timeout_counter = 200;
>
>   	if (!gc)
>   		return 0;
>
> -	/* Quickly return if wq space is available since last time we cache the
> -	 * head position. */
> -	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
> -		return 0;
> -
> -	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
> -	desc = base + gc->proc_desc_offset;
> +	desc = gc->client_base + gc->proc_desc_offset;
>
>   	while (timeout_counter-- > 0) {
> -		gc->wq_head = desc->head;
> -
> -		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
> +		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
>   			ret = 0;
>   			break;
>   		}
> @@ -501,24 +482,23 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
>   			usleep_range(1000, 2000);
>   	};
>
> -	kunmap_atomic(base);
> -
>   	return ret;
>   }
>
>   static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   				  struct drm_i915_gem_request *rq)
>   {
> +	struct guc_process_desc *desc;
>   	struct guc_wq_item *wqi;
> -	void *base;
> -	u32 tail, wq_len, wq_off, space;
> +	u32 tail, wq_len, wqi_off, space;
>
> -	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
> +	desc = gc->client_base + gc->proc_desc_offset;
> +	space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
>   	if (WARN_ON(space < sizeof(struct guc_wq_item)))
>   		return -ENOSPC; /* shouldn't happen */
>
>   	/* postincrement WQ tail for next time */
> -	wq_off = gc->wq_tail;
> +	wqi_off = gc->wq_tail;
>   	gc->wq_tail += sizeof(struct guc_wq_item);
>   	gc->wq_tail &= gc->wq_size - 1;
>
> @@ -530,13 +510,10 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   	 * workqueue buffer dw by dw.
>   	 */
>   	WARN_ON(sizeof(struct guc_wq_item) != 16);
> -	WARN_ON(wq_off & 3);
> +	WARN_ON(wqi_off & 3);
>
>   	/* wq starts from the page after doorbell / process_desc */
> -	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
> -			(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
> -	wq_off &= PAGE_SIZE - 1;
> -	wqi = (struct guc_wq_item *)((char *)base + wq_off);
> +	wqi = gc->client_base + gc->wq_offset + wqi_off;
>
>   	/* len does not include the header */
>   	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
> @@ -553,8 +530,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
>   	wqi->fence_id = 0; /*XXX: what fence to be here */
>
> -	kunmap_atomic(base);
> -
>   	return 0;
>   }
>
> @@ -675,6 +650,8 @@ static void guc_client_free(struct drm_device *dev,
>   	 * Be sure to drop any locks
>   	 */
>
> +	vunmap(client->client_base);
> +
>   	gem_release_guc_obj(client->client_obj);
>
>   	if (client->ctx_index != GUC_INVALID_CTX_ID) {
> @@ -727,6 +704,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
>   	if (!obj)
>   		goto err;
>
> +	client->client_base = i915_gem_object_vmap(obj, 0,
> +			obj->base.size >> PAGE_SHIFT);
> +	if (client->client_base == NULL)
> +		goto err;
> +
>   	client->client_obj = obj;
>   	client->wq_offset = GUC_DB_SIZE;
>   	client->wq_size = GUC_WQ_SIZE;
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 73002e9..9f08bd7 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -34,6 +34,8 @@ struct i915_guc_client {
>   	uint32_t priority;
>   	uint32_t ctx_index;
>
> +	void *client_base;
> +
>   	uint32_t proc_desc_offset;
>   	uint32_t doorbell_offset;
>   	uint32_t cookie;
> @@ -43,7 +45,6 @@ struct i915_guc_client {
>   	uint32_t wq_offset;
>   	uint32_t wq_size;
>   	uint32_t wq_tail;
> -	uint32_t wq_head;
>
>   	/* GuC submission statistics & status */
>   	uint64_t submissions[GUC_MAX_ENGINES_NUM];
>

