[Intel-gfx] [PATCH] drm/i915: s/seqno/request/ tracking inside objects

Jesse Barnes jbarnes at virtuousgeek.org
Mon Jul 28 22:44:12 CEST 2014


On Fri, 25 Jul 2014 13:27:00 +0100
Chris Wilson <chris at chris-wilson.co.uk> wrote:

> @@ -614,12 +615,12 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
>  				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
>  					   pipe, plane);
>  			}
> -			if (work->ring)
> +			if (work->flip_queued_request) {
> +				struct i915_gem_request *rq = work->flip_queued_request;
>  				seq_printf(m, "Flip queued on %s at seqno %u, now %u\n",
> -						work->ring->name,
> -						work->flip_queued_seqno,
> -						work->ring->get_seqno(work->ring, true));
> -			else
> +						rq->ring->name, rq->seqno,
> +						rq->ring->get_seqno(rq->ring, true));
> +			} else
>  				seq_printf(m, "Flip not associated with any ring\n");

I wonder if the overlay, flip queue and unlocked wait changes could be
split out somehow; I think they're the trickiest part of the change...

It does look like you're doing get/puts in the right places, though I
didn't check the error paths (and I'm not familiar at all with the
overlay bits, I guess Daniel will have to look at that).

>  			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
>  				   work->flip_queued_vblank,
> @@ -656,7 +657,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
>  	struct drm_device *dev = node->minor->dev;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct intel_engine_cs *ring;
> -	struct drm_i915_gem_request *gem_request;
> +	struct i915_gem_request *rq;
>  	int ret, count, i;
>  
>  	ret = mutex_lock_interruptible(&dev->struct_mutex);
> @@ -669,12 +670,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
>  			continue;
>  
>  		seq_printf(m, "%s requests:\n", ring->name);
> -		list_for_each_entry(gem_request,
> -				    &ring->request_list,
> -				    list) {
> +		list_for_each_entry(rq, &ring->request_list, list) {
>  			seq_printf(m, "    %d @ %d\n",
> -				   gem_request->seqno,
> -				   (int) (jiffies - gem_request->emitted_jiffies));
> +				   rq->seqno,
> +				   (int)(jiffies - rq->emitted_jiffies));
>  		}
>  		count++;
>  	}

Semi gratuitous rename (though I know you renamed the struct to catch
all the users).

> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 9837b0f..5794d096 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -187,6 +187,7 @@ enum hpd_pin {
>  struct drm_i915_private;
>  struct i915_mm_struct;
>  struct i915_mmu_object;
> +struct i915_gem_request;
>  
>  enum intel_dpll_id {
>  	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
> @@ -1720,16 +1721,15 @@ struct drm_i915_gem_object {
>  	struct drm_mm_node *stolen;
>  	struct list_head global_list;
>  
> -	struct list_head ring_list;
>  	/** Used in execbuf to temporarily hold a ref */
>  	struct list_head obj_exec_link;
>  
>  	/**
>  	 * This is set if the object is on the active lists (has pending
> -	 * rendering and so a non-zero seqno), and is not set if it is on
> -	 * inactive (ready to be unbound) list.
> +	 * rendering and so a submitted request), and is not set if it is on
> +	 * inactive (ready to be unbound) list. We track activity per engine.
>  	 */
> -	unsigned int active:1;
> +	unsigned int active:3;

I wonder if it would be better to be explicit and have a num_rings-sized
array here then.  We need 4 bits anyway for the second video ring, right?
We'd need a wrapper to check for active then though...
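
Something along these lines is what I'm thinking of (untested, and
i915_gem_object_is_active() is just a name I made up), reusing your
per-ring last_read[] slots:

	static inline bool
	i915_gem_object_is_active(struct drm_i915_gem_object *obj)
	{
		int i;

		/* active on any engine means active overall */
		for (i = 0; i < I915_NUM_RINGS; i++)
			if (obj->last_read[i].request)
				return true;

		return false;
	}

Then the active bitfield could probably go away entirely.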

> @@ -1850,7 +1850,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
>   * sequence-number comparisons on buffer last_rendering_seqnos, and associate
>   * an emission time with seqnos for tracking how far ahead of the GPU we are.
>   */
> -struct drm_i915_gem_request {
> +struct i915_gem_request {
> +	struct kref kref;
> +
>  	/** On Which ring this request was generated */
>  	struct intel_engine_cs *ring;
>  
> @@ -1878,8 +1880,60 @@ struct drm_i915_gem_request {
>  	struct drm_i915_file_private *file_priv;
>  	/** file_priv list entry for this request */
>  	struct list_head client_list;
> +
> +	bool completed:1;
>  };

If Daniel pulls in Greg's tree, the kref could turn into a fence and
the completed could be removed.
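
Hand-waving, but assuming the fence code lands in its current form, I'd
expect something like:

	struct i915_gem_request {
		struct fence fence;	/* would replace kref + completed */
		struct intel_engine_cs *ring;
		u32 seqno;
		/* ... */
	};

and i915_request_complete() collapses into fence_is_signaled(&rq->fence),
with i915_request_get/put() becoming fence_get()/fence_put().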

>  
> +static inline struct intel_engine_cs *i915_request_ring(struct i915_gem_request *rq)
> +{
> +	return rq ? rq->ring : NULL;
> +}
> +
> +static inline int i915_request_ring_id(struct i915_gem_request *rq)
> +{
> +	return rq ? rq->ring->id : -1;
> +}
> +
> +static inline u32 i915_request_seqno(struct i915_gem_request *rq)
> +{
> +	return rq ? rq->seqno : 0;
> +}
> +
> +/**
> + * Returns true if seq1 is later than seq2.
> + */
> +static inline bool
> +__i915_seqno_passed(uint32_t seq1, uint32_t seq2)
> +{
> +	return (int32_t)(seq1 - seq2) >= 0;
> +}
> +
> +static inline bool
> +i915_request_complete(struct i915_gem_request *rq, bool lazy)
> +{
> +	if (!rq->completed)
> +		rq->completed = __i915_seqno_passed(rq->ring->get_seqno(rq->ring, lazy),
> +						    rq->seqno);
> +	return rq->completed;
> +}
> +
> +static inline struct i915_gem_request *
> +i915_request_get(struct i915_gem_request *rq)
> +{
> +	if (rq)
> +		kref_get(&rq->kref);
> +	return rq;
> +}
> +
> +void __i915_request_free(struct kref *kref);
> +
> +static inline void
> +i915_request_put(struct i915_gem_request *rq)
> +{
> +	if (rq)
> +		kref_put(&rq->kref, __i915_request_free);
> +}

Several of these helpers could be pushed separately to reduce the patch
size.

> +static void
> +i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
> +{
> +	intel_fb_obj_flush(obj, true);
> +	obj->last_write.request = NULL;
> +	list_del_init(&obj->last_write.ring_list);
> +}
> +
> +static void
> +i915_gem_object_retire__fence(struct drm_i915_gem_object *obj)
> +{
> +	obj->last_fence.request = NULL;
> +	list_del_init(&obj->last_fence.ring_list);
> +}
> +
> +static void
> +i915_gem_object_retire__read(struct drm_i915_gem_object *obj,
> +			     struct intel_engine_cs *ring)
> +{
> +	struct i915_vma *vma;
> +
> +	BUG_ON(obj->active == 0);
> +	BUG_ON(obj->base.write_domain);
> +
> +	obj->last_read[ring->id].request = NULL;
> +	list_del_init(&obj->last_read[ring->id].ring_list);
> +
> +	if (--obj->active)
> +		return;
> +
> +	BUG_ON(obj->last_write.request);
> +	BUG_ON(obj->last_fence.request);
> +
> +	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +		if (!list_empty(&vma->mm_list))
> +			list_move_tail(&vma->mm_list,
> +				       &vma->vm->inactive_list);
> +	}
> +
> +	drm_gem_object_unreference(&obj->base);
> +
> +	WARN_ON(i915_verify_lists(dev));
> +}

I notice this __ notation elsewhere too; I guess you're using the infix
__ to denote internal flavours of a function, rather than the usual __
prefix?  Maybe we could use gcc nested functions... :)

> +
> +static void
> +i915_gem_object_retire(struct drm_i915_gem_object *obj)
> +{
> +	struct i915_gem_request *rq;
> +	int i;
> +
> +	if (!obj->active)
> +		return;
> +
> +	rq = obj->last_write.request;
> +	if (rq && i915_request_complete(rq, true))
> +		i915_gem_object_retire__write(obj);
> +
> +	rq = obj->last_fence.request;
> +	if (rq && i915_request_complete(rq, true))
> +		i915_gem_object_retire__fence(obj);
> +
> +	for (i = 0; i < I915_NUM_RINGS; i++) {
> +		rq = obj->last_read[i].request;
> +		if (rq && i915_request_complete(rq, true))
> +			i915_gem_object_retire__read(obj, rq->ring);
> +	}
> +}

Unrelated comment on GEM in general: it seems like the callers of this
all use it after they've waited on the object; maybe it should just
be called from wait_rendering() to avoid any potential trouble?  The
name is a bit ambiguous; it could be taken to mean that it does the
idling.
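
I.e. hypothetically just tacking it onto the tail of
i915_gem_object_wait_rendering():

	/* everything we just waited for is now idle, so reap it */
	i915_gem_object_retire(obj);

	return 0;

which would also let the manual __write retire below disappear.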

>
> @@ -1544,19 +1594,37 @@ static __must_check int
>  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
>  			       bool readonly)
>  {
> -	struct intel_engine_cs *ring = obj->ring;
> -	u32 seqno;
> -	int ret;
> +	int i, ret;
>  
> -	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
> -	if (seqno == 0)
> -		return 0;
> +	if (readonly) {
> +		if (obj->last_write.request == NULL)
> +			return 0;
>  
> -	ret = i915_wait_seqno(ring, seqno);
> -	if (ret)
> -		return ret;
> +		ret = i915_wait_request(obj->last_write.request);
> +		if (ret)
> +			return ret;
> +	} else {
> +		for (i = 0; i < I915_NUM_RINGS; i++) {
> +			if (obj->last_read[i].request == NULL)
> +				continue;
> +
> +			ret = i915_wait_request(obj->last_read[i].request);
> +			if (ret)
> +				return ret;
> +		}
> +	}
>  
> -	return i915_gem_object_wait_rendering__tail(obj, ring);
> +	/* Manually manage the write flush as we may have not yet
> +	 * retired the buffer.
> +	 *
> +	 * Note that the last_write_seqno is always the earlier of
> +	 * the two (read/write) seqno, so if we have successfully waited,
> +	 * we know we have passed the last write.
> +	 */

Does this comment belong above where we do the read checking?

> +	if (obj->last_write.request)
> +		i915_gem_object_retire__write(obj);
> +
> +	return 0;
>  }

> @@ -1569,34 +1637,48 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
>  {
>  	struct drm_device *dev = obj->base.dev;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
> -	struct intel_engine_cs *ring = obj->ring;
> +	struct i915_gem_request *rq[I915_NUM_RINGS] = {};
>  	unsigned reset_counter;
> -	u32 seqno;
> -	int ret;
> +	int i, n, ret;
>  
>  	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
>  	BUG_ON(!dev_priv->mm.interruptible);
>  
> -	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
> -	if (seqno == 0)
> +	n = 0;
> +	if (readonly) {
> +		if (obj->last_write.request)
> +			rq[n++] = i915_request_get(obj->last_write.request);
> +	} else {
> +		for (i = 0; i < I915_NUM_RINGS; i++)
> +			if (obj->last_read[i].request)
> +				rq[n++] = i915_request_get(obj->last_read[i].request);
> +	}

There's a similar pattern elsewhere (though w/o the readonly checking),
I wonder if we can add a small function for this?
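
E.g. something like this (sketch only, the name is invented):

	static int
	i915_gem_object_get_requests(struct drm_i915_gem_object *obj,
				     bool readonly,
				     struct i915_gem_request **rq)
	{
		int i, n = 0;

		if (readonly) {
			if (obj->last_write.request)
				rq[n++] = i915_request_get(obj->last_write.request);
		} else {
			for (i = 0; i < I915_NUM_RINGS; i++)
				if (obj->last_read[i].request)
					rq[n++] = i915_request_get(obj->last_read[i].request);
		}

		return n;	/* caller puts each request when done */
	}

The wait ioctl below could then share it, modulo the readonly flag.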

> +	if (n == 0)
>  		return 0;
>  
>  	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
>  	if (ret)
> -		return ret;
> +		goto out;
>  
> -	ret = i915_gem_check_olr(ring, seqno);
> -	if (ret)
> -		return ret;
> +	for (i = 0; i < n; i++) {
> +		ret = i915_gem_check_olr(rq[i]);
> +		if (ret)
> +			goto out;
> +	}
>  
>  	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
>  	mutex_unlock(&dev->struct_mutex);
> -	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
> +
> +	for (i = 0; ret == 0 && i < n; i++)
> +		ret = __wait_request(rq[i], reset_counter, true, NULL, file_priv);
> +
>  	mutex_lock(&dev->struct_mutex);
> -	if (ret)
> -		return ret;
>  
> -	return i915_gem_object_wait_rendering__tail(obj, ring);
> +out:
> +	for (i = 0; i < n; i++)
> +		i915_request_put(rq[i]);
> +
> +	return ret;
>  }

Looks like you got the error path/put handling correct here.

>  void i915_vma_move_to_active(struct i915_vma *vma,
> -			     struct intel_engine_cs *ring)
> +			     struct intel_engine_cs *ring,
> +			     unsigned fenced)
>  {
> -	list_move_tail(&vma->mm_list, &vma->vm->active_list);
> -	return i915_gem_object_move_to_active(vma->obj, ring);
> -}
> +	struct drm_i915_gem_object *obj = vma->obj;
> +	struct i915_gem_request *rq = intel_ring_get_request(ring);
> +	u32 old_read = obj->base.read_domains;
> +	u32 old_write = obj->base.write_domain;

I'm already getting confused about which fence we're talking about.
But I guess that's for me to worry about when I add a fence obj.

> @@ -2533,11 +2594,10 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
>  
>  int __i915_add_request(struct intel_engine_cs *ring,
>  		       struct drm_file *file,
> -		       struct drm_i915_gem_object *obj,
> -		       u32 *out_seqno)
> +		       struct drm_i915_gem_object *obj)
>  {
>  	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> -	struct drm_i915_gem_request *request;
> +	struct i915_gem_request *rq;
>  	u32 request_ring_position, request_start;
>  	int ret;
>  
> @@ -2553,8 +2613,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
>  	if (ret)
>  		return ret;
>  
> -	request = ring->preallocated_lazy_request;
> -	if (WARN_ON(request == NULL))
> +	rq = ring->preallocated_request;
> +	if (WARN_ON(rq == NULL))
>  		return -ENOMEM;
>  
>  	/* Record the position of the start of the request so that
> @@ -2568,10 +2628,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
>  	if (ret)
>  		return ret;
>  
> -	request->seqno = intel_ring_get_seqno(ring);
> -	request->ring = ring;
> -	request->head = request_start;
> -	request->tail = request_ring_position;
> +	rq->head = request_start;
> +	rq->tail = request_ring_position;
>  
>  	/* Whilst this request exists, batch_obj will be on the
>  	 * active_list, and so will hold the active reference. Only when this
> @@ -2579,32 +2637,31 @@ int __i915_add_request(struct intel_engine_cs *ring,
>  	 * inactive_list and lose its active reference. Hence we do not need
>  	 * to explicitly hold another reference here.
>  	 */
> -	request->batch_obj = obj;
> +	rq->batch_obj = obj;
>  
>  	/* Hold a reference to the current context so that we can inspect
>  	 * it later in case a hangcheck error event fires.
>  	 */
> -	request->ctx = ring->last_context;
> -	if (request->ctx)
> -		i915_gem_context_reference(request->ctx);
> +	rq->ctx = ring->last_context;
> +	if (rq->ctx)
> +		i915_gem_context_reference(rq->ctx);
>  
> -	request->emitted_jiffies = jiffies;
> -	list_add_tail(&request->list, &ring->request_list);
> -	request->file_priv = NULL;
> +	rq->emitted_jiffies = jiffies;
> +	list_add_tail(&rq->list, &ring->request_list);
> +	rq->file_priv = NULL;
>  
>  	if (file) {
>  		struct drm_i915_file_private *file_priv = file->driver_priv;
>  
>  		spin_lock(&file_priv->mm.lock);
> -		request->file_priv = file_priv;
> -		list_add_tail(&request->client_list,
> +		rq->file_priv = file_priv;
> +		list_add_tail(&rq->client_list,
>  			      &file_priv->mm.request_list);
>  		spin_unlock(&file_priv->mm.lock);
>  	}
>  
> -	trace_i915_gem_request_add(ring, request->seqno);
> -	ring->outstanding_lazy_seqno = 0;
> -	ring->preallocated_lazy_request = NULL;
> +	trace_i915_gem_request_add(ring, rq->seqno);
> +	ring->preallocated_request = NULL;
>  
>  	if (!dev_priv->ums.mm_suspended) {
>  		i915_queue_hangcheck(ring->dev);
> @@ -2616,22 +2673,20 @@ int __i915_add_request(struct intel_engine_cs *ring,
>  		intel_mark_busy(dev_priv->dev);
>  	}
>  
> -	if (out_seqno)
> -		*out_seqno = request->seqno;
>  	return 0;
>  }

Nearly gratuitous rename.  But only nearly.

>  
>  static inline void
> -i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
> +i915_gem_request_remove_from_client(struct i915_gem_request *rq)
>  {
> -	struct drm_i915_file_private *file_priv = request->file_priv;
> +	struct drm_i915_file_private *file_priv = rq->file_priv;
>  
>  	if (!file_priv)
>  		return;
>  
>  	spin_lock(&file_priv->mm.lock);
> -	list_del(&request->client_list);
> -	request->file_priv = NULL;
> +	list_del(&rq->client_list);
> +	rq->file_priv = NULL;
>  	spin_unlock(&file_priv->mm.lock);
>  }

Gratuitous!  But still ok.

>  
> @@ -2679,30 +2734,37 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
>  	}
>  }
>  
> -static void i915_gem_free_request(struct drm_i915_gem_request *request)
> +void __i915_request_free(struct kref *kref)
> +{
> +	struct i915_gem_request *rq = container_of(kref, struct i915_gem_request, kref);
> +	kfree(rq);
> +}
> +
> +static void i915_request_retire(struct i915_gem_request *rq)
>  {
> -	list_del(&request->list);
> -	i915_gem_request_remove_from_client(request);
> +	rq->completed = true;
> +
> +	list_del(&rq->list);
> +	i915_gem_request_remove_from_client(rq);
>  
> -	if (request->ctx)
> -		i915_gem_context_unreference(request->ctx);
> +	if (rq->ctx) {
> +		i915_gem_context_unreference(rq->ctx);
> +		rq->ctx = NULL;
> +	}
>  
> -	kfree(request);
> +	i915_request_put(rq);
>  }

I wonder if we can somehow always use this function instead of having
both obj and request retire functions.  Maybe if we moved obj retire
into the rendering sync functions we'd have a little less
pseudo-duplication.
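
Totally untested hand-waving, but I mean something like letting request
retirement drive the per-object bookkeeping:

	static void i915_request_retire(struct i915_gem_request *rq)
	{
		struct drm_i915_gem_object *obj, *next;

		rq->completed = true;

		/* reap every object whose last read on this ring was us;
		 * requests retire in order, so older objects are gone already
		 */
		list_for_each_entry_safe(obj, next, &rq->ring->active_list,
					 last_read[rq->ring->id].ring_list) {
			if (obj->last_read[rq->ring->id].request != rq)
				break;

			i915_gem_object_retire__read(obj, rq->ring);
		}

		list_del(&rq->list);
		i915_gem_request_remove_from_client(rq);
		i915_request_put(rq);
	}

assuming the per-ring ring_list entries all live on ring->active_list.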


> @@ -2967,11 +3085,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct drm_i915_gem_wait *args = data;
>  	struct drm_i915_gem_object *obj;
> -	struct intel_engine_cs *ring = NULL;
>  	struct timespec timeout_stack, *timeout = NULL;
> +	struct i915_gem_request *rq[I915_NUM_RINGS] = {};
>  	unsigned reset_counter;
> -	u32 seqno = 0;
> -	int ret = 0;
> +	int i, n, ret = 0;
>  
>  	if (args->timeout_ns >= 0) {
>  		timeout_stack = ns_to_timespec(args->timeout_ns);
> @@ -2993,13 +3110,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  	if (ret)
>  		goto out;
>  
> -	if (obj->active) {
> -		seqno = obj->last_read_seqno;
> -		ring = obj->ring;
> -	}
> -
> -	if (seqno == 0)
> -		 goto out;
> +	if (!obj->active)
> +		goto out;
>  
>  	/* Do this after OLR check to make sure we make forward progress polling
>  	 * on this IOCTL with a 0 timeout (like busy ioctl)
> @@ -3009,11 +3121,25 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		goto out;
>  	}
>  
> +	for (i = n = 0; i < I915_NUM_RINGS; i++) {
> +		if (obj->last_read[i].request == NULL)
> +			continue;
> +
> +		rq[n++] = i915_request_get(obj->last_read[i].request);
> +	}
> +
>  	drm_gem_object_unreference(&obj->base);
> +
>  	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
>  	mutex_unlock(&dev->struct_mutex);
>  
> -	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
> +	for (i = 0; i < n; i++) {
> +		if (ret == 0)
> +			ret = __wait_request(rq[i], reset_counter, true, timeout, file->driver_priv);
> +
> +		i915_request_put(rq[i]);
> +	}
> +
>  	if (timeout)
>  		args->timeout_ns = timespec_to_ns(timeout);
>  	return ret;
> @@ -3024,6 +3150,45 @@ out:
>  	return ret;
>  }

request_retire does a put, where is the corresponding get so that we
don't lose the object when the lock is dropped here or in the
nonblocking wait?  Or should request_retire not do a put?


>  
> +static int
> +i915_request_sync(struct i915_gem_request *rq,
> +		  struct intel_engine_cs *to,
> +		  struct drm_i915_gem_object *obj)
> +{
> +	int ret, idx;
> +
> +	if (to == NULL)
> +		return i915_wait_request(rq);
> +
> +	/* XXX this is broken by VEBOX+ */

What does this comment mean?  How does VEBOX break this?  Does it not
have semaphore support or something?

> @@ -3038,44 +3203,35 @@ out:
>   */
>  int
>  i915_gem_object_sync(struct drm_i915_gem_object *obj,
> -		     struct intel_engine_cs *to)
> +		     struct intel_engine_cs *to,
> +		     bool readonly)
>  {

It might be nice to have sync_read/sync_write functions instead, since
looking up bool args or unnamed enums is a pain.
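
Trivial wrappers would do it (names made up):

	static inline int
	i915_gem_object_sync_for_read(struct drm_i915_gem_object *obj,
				      struct intel_engine_cs *to)
	{
		/* a reader only needs to wait for the last writer */
		return i915_gem_object_sync(obj, to, true);
	}

	static inline int
	i915_gem_object_sync_for_write(struct drm_i915_gem_object *obj,
				       struct intel_engine_cs *to)
	{
		/* a writer must wait for all outstanding readers too */
		return i915_gem_object_sync(obj, to, false);
	}

and then the callers document themselves.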

>  static bool
>  ring_idle(struct intel_engine_cs *ring, u32 seqno)
>  {
>  	return (list_empty(&ring->request_list) ||
> -		i915_seqno_passed(seqno, ring_last_seqno(ring)));
> +		__i915_seqno_passed(seqno, ring_last_seqno(ring)));
>  }

Unrelated question: why do we have two checks here?  Shouldn't the
seqno check always be sufficient?  Or is the list_empty check for the
uninitialized case?

> @@ -9856,7 +9859,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  	} else if (IS_IVYBRIDGE(dev)) {
>  		ring = &dev_priv->ring[BCS];
>  	} else if (INTEL_INFO(dev)->gen >= 7) {
> -		ring = obj->ring;
> +		ring = i915_request_ring(obj->last_write.request);
>  		if (ring == NULL || ring->id != RCS)
>  			ring = &dev_priv->ring[BCS];
>  	} else {

I like the flip changes, they make things easier to follow in general.

>  static int
> -intel_ring_alloc_seqno(struct intel_engine_cs *ring)
> +intel_ring_alloc_request(struct intel_engine_cs *ring)
>  {
> -	if (ring->outstanding_lazy_seqno)
> -		return 0;
> +	struct i915_gem_request *rq;
> +	int ret;
>  
> -	if (ring->preallocated_lazy_request == NULL) {
> -		struct drm_i915_gem_request *request;
> +	if (ring->preallocated_request)
> +		return 0;
>  
> -		request = kmalloc(sizeof(*request), GFP_KERNEL);
> -		if (request == NULL)
> -			return -ENOMEM;
> +	rq = kmalloc(sizeof(*rq), GFP_KERNEL);
> +	if (rq == NULL)
> +		return -ENOMEM;
>  
> -		ring->preallocated_lazy_request = request;
> +	ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
> +	if (ret) {
> +		kfree(rq);
> +		return ret;
>  	}
>  
> -	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
> +	kref_init(&rq->kref);
> +	rq->ring = ring;
> +	rq->completed = false;
> +
> +	ring->preallocated_request = rq;
> +	return 0;
>  }

Makes me wonder if we should have our own slab for these request
objects.  Might save a bit of memory and/or allocation overhead?  But
then again it could reduce our cache hit rate, dunno.
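
For reference, the sort of thing I mean (the requests cache pointer on
dev_priv is hypothetical):

	/* at driver load */
	dev_priv->requests = kmem_cache_create("i915_gem_request",
					       sizeof(struct i915_gem_request),
					       0, SLAB_HWCACHE_ALIGN, NULL);

	/* in intel_ring_alloc_request() */
	rq = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);

	/* in __i915_request_free() */
	kmem_cache_free(dev_priv->requests, rq);

Probably only worth it if the allocation actually shows up in profiles.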

Overall this gets my:
Fatigued-reviewed-by: Jesse Barnes <jbarnes at virtuousgeek.org>
given its size and with the changes/comments above addressed.  Also,
someone else will have to check over the overlay and perf bits; I
assume those are mostly mechanical, but I'm not that familiar with
those bits.

Thanks,
Jesse


