[Intel-gfx] [PATCH v2 10/28] drm/i915: Convert i915_wait_seqno to i915_wait_request
John.C.Harrison at Intel.com
Fri Nov 14 13:19:01 CET 2014
From: John Harrison <John.C.Harrison at Intel.com>
Updated i915_wait_seqno() to take a request structure instead of a seqno value
and renamed it accordingly. Internally, it just pulls the seqno out of the
request and calls through to __i915_wait_seqno() as before. However, all the
code further up the stack is now simplified: it can pass the request object
straight through without having to peek inside it.
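
To illustrate the caller-side simplification, here is a before/after sketch
taken from the i915_gem_object_wait_fence() hunk below (illustration only,
not additional code):

    /* Before: callers had to dig the seqno out of the request themselves */
    ret = i915_wait_seqno(obj->ring,
                          i915_gem_request_get_seqno(obj->last_fenced_req));

    /* After: the request object is passed straight through */
    ret = i915_wait_request(obj->last_fenced_req);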
For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 19 +-----------------
drivers/gpu/drm/i915/i915_gem.c | 33 +++++++++++++++----------------
drivers/gpu/drm/i915/intel_lrc.c | 6 ++----
drivers/gpu/drm/i915/intel_overlay.c | 11 +++--------
drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++++++-------
5 files changed, 28 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 08aad6b..180b674 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2641,8 +2641,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
-int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
- uint32_t seqno);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -3129,20 +3128,4 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-/* XXX: Temporary solution to be removed later in patch series. */
-static inline int __must_check i915_gem_check_ols(
- struct intel_engine_cs *ring, u32 seqno)
-{
- int ret;
-
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
- ret = 0;
- if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
- ret = i915_add_request(ring, NULL);
-
- return ret;
-}
-/* XXX: Temporary solution to be removed later in patch series. */
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9a6f9c..60e5eec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1333,32 +1333,37 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}
/**
- * Waits for a sequence number to be signaled, and cleans up the
+ * Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_request(struct drm_i915_gem_request *req)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool interruptible = dev_priv->mm.interruptible;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ bool interruptible;
unsigned reset_counter;
int ret;
+ BUG_ON(req == NULL);
+
+ dev = req->ring->dev;
+ dev_priv = dev->dev_private;
+ interruptible = dev_priv->mm.interruptible;
+
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(seqno == 0);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
- ret = i915_gem_check_ols(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
- NULL, NULL);
+ return __i915_wait_seqno(req->ring, i915_gem_request_get_seqno(req),
+ reset_counter, interruptible, NULL, NULL);
}
static int
@@ -1388,18 +1393,13 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct drm_i915_gem_request *req;
- struct intel_engine_cs *ring = obj->ring;
- u32 seqno;
int ret;
req = readonly ? obj->last_write_req : obj->last_read_req;
if (!req)
return 0;
- seqno = i915_gem_request_get_seqno(req);
- WARN_ON(seqno == 0);
-
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(req);
if (ret)
return ret;
@@ -3316,8 +3316,7 @@ static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_req) {
- int ret = i915_wait_seqno(obj->ring,
- i915_gem_request_get_seqno(obj->last_fenced_req));
+ int ret = i915_wait_request(obj->last_fenced_req);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b55dcb0..eba0acd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -828,7 +828,6 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
if (ringbuf->last_retired_head != -1) {
@@ -843,15 +842,14 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 5defc37..6c530e2 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -224,8 +224,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
return ret;
overlay->flip_tail = tail;
- ret = i915_wait_seqno(ring,
- i915_gem_request_get_seqno(overlay->last_flip_req));
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -367,19 +366,15 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* We have to be careful not to repeat work forever an make forward progess. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == NULL)
return 0;
- ret = i915_wait_seqno(ring,
- i915_gem_request_get_seqno(overlay->last_flip_req));
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f3725da..74c48ed 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1888,7 +1888,6 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
if (ringbuf->last_retired_head != -1) {
@@ -1903,15 +1902,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
@@ -2007,7 +2005,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
int intel_ring_idle(struct intel_engine_cs *ring)
{
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
/* We need to add any requests required to flush the objects and ring */
@@ -2021,11 +2019,11 @@ int intel_ring_idle(struct intel_engine_cs *ring)
if (list_empty(&ring->request_list))
return 0;
- seqno = list_entry(ring->request_list.prev,
+ req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
- list)->seqno;
+ list);
- return i915_wait_seqno(ring, seqno);
+ return i915_wait_request(req);
}
static int
--
1.7.9.5