[Intel-gfx] [PATCH 48/55] drm/i915: s/__i915_wait_request/i915_wait_request/
Chris Wilson
chris at chris-wilson.co.uk
Mon Jul 25 17:32:27 UTC 2016
There is only one wait-on-request function now, so drop the "expert"
indication of the leading __.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem.c | 18 +++++++++---------
drivers/gpu/drm/i915/i915_gem_request.c | 16 ++++++++--------
drivers/gpu/drm/i915/i915_gem_request.h | 12 ++++++------
drivers/gpu/drm/i915/i915_gem_userptr.c | 2 +-
drivers/gpu/drm/i915/intel_display.c | 14 +++++++-------
drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++++----
6 files changed, 35 insertions(+), 35 deletions(-)
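For orientation (illustrative only, not part of the patch): callers keep the
same arguments and simply drop the leading underscores. A minimal sketch of a
call site after this change, based on the signature shown in
i915_gem_request.h below:

	/* Illustrative caller: wait uninterruptibly, no timeout, no RPS boost */
	int ret = i915_wait_request(req, false, NULL, NO_WAITBOOST);
	if (ret)
		return ret;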
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9874a99ae04..cc84f00cf883 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1420,7 +1420,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
ret = 0;
for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], true, NULL, rps);
+ ret = i915_wait_request(requests[i], true, NULL, rps);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++)
@@ -2733,9 +2733,9 @@ out:
for (i = 0; i < n; i++) {
if (ret == 0)
- ret = __i915_wait_request(requests[i], true,
- args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- to_rps_client(file));
+ ret = i915_wait_request(requests[i], true,
+ args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+ to_rps_client(file));
i915_gem_request_put(requests[i]);
}
return ret;
@@ -2751,10 +2751,10 @@ __i915_gem_object_sync(struct drm_i915_gem_request *to,
return 0;
if (!i915.semaphores) {
- ret = __i915_wait_request(from,
- from->i915->mm.interruptible,
- NULL,
- NO_WAITBOOST);
+ ret = i915_wait_request(from,
+ from->i915->mm.interruptible,
+ NULL,
+ NO_WAITBOOST);
if (ret)
return ret;
} else {
@@ -3723,7 +3723,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, true, NULL, NULL);
+ ret = i915_wait_request(target, true, NULL, NULL);
i915_gem_request_put(target);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 2c0c6a37f46a..1935591a98c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -70,9 +70,9 @@ static signed long i915_fence_wait(struct fence *fence,
timeout = NULL;
}
- ret = __i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
+ ret = i915_wait_request(to_request(fence),
+ interruptible, timeout,
+ NO_WAITBOOST);
if (ret == -ETIME)
return 0;
@@ -564,7 +564,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
}
/**
- * __i915_wait_request - wait until execution of request has finished
+ * i915_wait_request - wait until execution of request has finished
* @req: duh!
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
@@ -580,10 +580,10 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
* Returns 0 if the request was found within the alloted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+ bool interruptible,
+ s64 *timeout,
+ struct intel_rps_client *rps)
{
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index f4aab8e60c9e..828e304b2f11 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -214,10 +214,10 @@ struct intel_rps_client;
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+ bool interruptible,
+ s64 *timeout,
+ struct intel_rps_client *rps)
__attribute__((nonnull(1)));
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -411,7 +411,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
if (!request)
return 0;
- return __i915_wait_request(request, true, NULL, NULL);
+ return i915_wait_request(request, true, NULL, NULL);
}
/**
@@ -434,7 +434,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
if (!request)
return 0;
- ret = __i915_wait_request(request, true, NULL, NULL);
+ ret = i915_wait_request(request, true, NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e57521dbddc6..651a84ba840c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -83,7 +83,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < n; i++)
- __i915_wait_request(requests[i], false, NULL, NULL);
+ i915_wait_request(requests[i], false, NULL, NULL);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8ea9d5a301d4..ed2069c56036 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11471,9 +11471,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct reservation_object *resv;
if (work->flip_queued_req)
- WARN_ON(__i915_wait_request(work->flip_queued_req,
- false, NULL,
- NO_WAITBOOST));
+ WARN_ON(i915_wait_request(work->flip_queued_req,
+ false, NULL,
+ NO_WAITBOOST));
/* For framebuffer backed by dmabuf, wait for fence */
resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -13516,8 +13516,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
if (!intel_plane_state->wait_req)
continue;
- ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ ret = i915_wait_request(intel_plane_state->wait_req,
+ true, NULL, NULL);
if (ret) {
/* Any hang should be swallowed by the wait */
WARN_ON(ret == -EIO);
@@ -13629,8 +13629,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_plane_state->wait_req)
continue;
- ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ ret = i915_wait_request(intel_plane_state->wait_req,
+ true, NULL, NULL);
/* EIO should be eaten, and we can't get interrupted in the
* worker, and blocking commits have waited already. */
WARN_ON(ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 90e46d1b04e2..924aba1db22d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2255,9 +2255,9 @@ int intel_engine_idle(struct intel_engine_cs *engine)
link);
/* Make sure we do not trigger any retires */
- return __i915_wait_request(req,
- req->i915->mm.interruptible,
- NULL, NULL);
+ return i915_wait_request(req,
+ req->i915->mm.interruptible,
+ NULL, NULL);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -2314,7 +2314,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- ret = __i915_wait_request(target, true, NULL, NO_WAITBOOST);
+ ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
if (ret)
return ret;
--
2.8.1
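Aside (illustrative sketch, not part of the patch): per the kerneldoc updated
in i915_gem_request.c above, @timeout is in/out - how long to wait on entry
(NULL means forever), time remaining on return - and an expired wait surfaces
as -ETIME (see i915_fence_wait). A hedged example of a bounded wait; the
100ms budget is an arbitrary value chosen for illustration:

	s64 timeout = 100 * NSEC_PER_MSEC; /* arbitrary example budget */
	int ret = i915_wait_request(req, true, &timeout, NO_WAITBOOST);
	if (ret == -ETIME) {
		/* wait expired before the request completed */
	} else if (ret == 0) {
		/* request done; 'timeout' now holds the unused remainder */
	}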