[Intel-gfx] [CI 21/28] drm/i915: s/__i915_wait_request/i915_wait_request/
Chris Wilson
chris@chris-wilson.co.uk
Thu Aug 4 06:52:40 UTC 2016
There is only one wait-on-request function now, so drop the "expert"
indication of the leading __.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 18 +++++++++---------
drivers/gpu/drm/i915/i915_gem_request.c | 16 ++++++++--------
drivers/gpu/drm/i915/i915_gem_request.h | 12 ++++++------
drivers/gpu/drm/i915/i915_gem_userptr.c | 2 +-
drivers/gpu/drm/i915/intel_display.c | 14 +++++++-------
drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++++----
6 files changed, 35 insertions(+), 35 deletions(-)
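For reference, the rename does not change the signature; it only drops the
leading underscores. A minimal caller sketch (hypothetical wrapper names,
drawn from the prototype and call sites in the patch below):

/* Illustrative only: the prototype matches the declaration in
 * i915_gem_request.h; the wrapper names here are hypothetical.
 */
static int wait_forever(struct drm_i915_gem_request *req)
{
	/* interruptible wait, no timeout, no RPS client to boost */
	return i915_wait_request(req, true, NULL, NO_WAITBOOST);
}

static int wait_timed(struct drm_i915_gem_request *req, s64 *timeout_ns)
{
	/* *timeout_ns is in/out: how long to wait in, time remaining out */
	return i915_wait_request(req, true, timeout_ns, NO_WAITBOOST);
}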
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7ba02c48bfb7..46e41c42f8b9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1420,7 +1420,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
ret = 0;
for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], true, NULL, rps);
+ ret = i915_wait_request(requests[i], true, NULL, rps);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++)
@@ -2726,9 +2726,9 @@ out:
for (i = 0; i < n; i++) {
if (ret == 0)
- ret = __i915_wait_request(requests[i], true,
- args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- to_rps_client(file));
+ ret = i915_wait_request(requests[i], true,
+ args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+ to_rps_client(file));
i915_gem_request_put(requests[i]);
}
return ret;
@@ -2744,10 +2744,10 @@ __i915_gem_object_sync(struct drm_i915_gem_request *to,
return 0;
if (!i915.semaphores) {
- ret = __i915_wait_request(from,
- from->i915->mm.interruptible,
- NULL,
- NO_WAITBOOST);
+ ret = i915_wait_request(from,
+ from->i915->mm.interruptible,
+ NULL,
+ NO_WAITBOOST);
if (ret)
return ret;
} else {
@@ -3712,7 +3712,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, true, NULL, NULL);
+ ret = i915_wait_request(target, true, NULL, NULL);
i915_gem_request_put(target);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 85ec5ca5c36b..8549375aa75e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -70,9 +70,9 @@ static signed long i915_fence_wait(struct fence *fence,
timeout = NULL;
}
- ret = __i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
+ ret = i915_wait_request(to_request(fence),
+ interruptible, timeout,
+ NO_WAITBOOST);
if (ret == -ETIME)
return 0;
@@ -579,7 +579,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
}
/**
- * __i915_wait_request - wait until execution of request has finished
+ * i915_wait_request - wait until execution of request has finished
* @req: duh!
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
@@ -595,10 +595,10 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
* Returns 0 if the request was found within the alloted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+ bool interruptible,
+ s64 *timeout,
+ struct intel_rps_client *rps)
{
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index bc19980b6b1f..26ca697f5af7 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -214,10 +214,10 @@ struct intel_rps_client;
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+ bool interruptible,
+ s64 *timeout,
+ struct intel_rps_client *rps)
__attribute__((nonnull(1)));
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -418,7 +418,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
if (!request)
return 0;
- return __i915_wait_request(request, true, NULL, NULL);
+ return i915_wait_request(request, true, NULL, NULL);
}
/**
@@ -441,7 +441,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
if (!request)
return 0;
- ret = __i915_wait_request(request, true, NULL, NULL);
+ ret = i915_wait_request(request, true, NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e57521dbddc6..651a84ba840c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -83,7 +83,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < n; i++)
- __i915_wait_request(requests[i], false, NULL, NULL);
+ i915_wait_request(requests[i], false, NULL, NULL);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d54a3ea56536..da9dcacd49d5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11463,9 +11463,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct reservation_object *resv;
if (work->flip_queued_req)
- WARN_ON(__i915_wait_request(work->flip_queued_req,
- false, NULL,
- NO_WAITBOOST));
+ WARN_ON(i915_wait_request(work->flip_queued_req,
+ false, NULL,
+ NO_WAITBOOST));
/* For framebuffer backed by dmabuf, wait for fence */
resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -13508,8 +13508,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
if (!intel_plane_state->wait_req)
continue;
- ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ ret = i915_wait_request(intel_plane_state->wait_req,
+ true, NULL, NULL);
if (ret) {
/* Any hang should be swallowed by the wait */
WARN_ON(ret == -EIO);
@@ -13621,8 +13621,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_plane_state->wait_req)
continue;
- ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ ret = i915_wait_request(intel_plane_state->wait_req,
+ true, NULL, NULL);
/* EIO should be eaten, and we can't get interrupted in the
* worker, and blocking commits have waited already. */
WARN_ON(ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4ab6d2365e30..ac2e610ab37f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2240,9 +2240,9 @@ int intel_engine_idle(struct intel_engine_cs *engine)
link);
/* Make sure we do not trigger any retires */
- return __i915_wait_request(req,
- req->i915->mm.interruptible,
- NULL, NULL);
+ return i915_wait_request(req,
+ req->i915->mm.interruptible,
+ NULL, NULL);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -2299,7 +2299,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- ret = __i915_wait_request(target, true, NULL, NO_WAITBOOST);
+ ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
if (ret)
return ret;
--
2.8.1