[Intel-gfx] [PATCH 047/190] drm/i915: Rename request reference/unreference to get/put

Chris Wilson <chris@chris-wilson.co.uk>
Mon Jan 11 01:16:58 PST 2016


Now that requests are derived from struct fence, switch over to its
get/put nomenclature for taking and dropping references. It is shorter
and more idiomatic across the kernel.

s/i915_gem_request_reference/i915_gem_request_get/
s/i915_gem_request_unreference/i915_gem_request_put/
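
The helpers keep the same reference-counting semantics; only the names
change to follow the fence_get()/fence_put() idiom. As a minimal sketch
of the new spelling (illustrative only, mirroring the
i915_gem_ring_throttle() hunk below; req and ret are assumed to be in
scope):

	struct drm_i915_gem_request *rq;

	/* hold a reference across the blocking wait, then drop it */
	rq = i915_gem_request_get(req);
	ret = __i915_wait_request(rq, true, NULL, NULL);
	i915_gem_request_put(rq);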

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c          | 14 +++++++-------
 drivers/gpu/drm/i915/i915_gem_request.c  |  2 +-
 drivers/gpu/drm/i915/i915_gem_request.h  |  8 ++++----
 drivers/gpu/drm/i915/intel_breadcrumbs.c |  4 ++--
 drivers/gpu/drm/i915/intel_display.c     |  4 ++--
 drivers/gpu/drm/i915/intel_lrc.c         |  4 ++--
 drivers/gpu/drm/i915/intel_pm.c          |  5 ++---
 7 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6d8d65304abf..fd61e722b595 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1185,7 +1185,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		if (req == NULL)
 			return 0;
 
-		requests[n++] = i915_gem_request_reference(req);
+		requests[n++] = i915_gem_request_get(req);
 	} else {
 		for (i = 0; i < I915_NUM_RINGS; i++) {
 			struct drm_i915_gem_request *req;
@@ -1194,7 +1194,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 			if (req == NULL)
 				continue;
 
-			requests[n++] = i915_gem_request_reference(req);
+			requests[n++] = i915_gem_request_get(req);
 		}
 	}
 
@@ -1207,7 +1207,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
 			i915_gem_object_retire_request(obj, requests[i]);
-		i915_gem_request_unreference(requests[i]);
+		i915_gem_request_put(requests[i]);
 	}
 
 	return ret;
@@ -2492,7 +2492,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (obj->last_read_req[i] == NULL)
 			continue;
 
-		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+		req[n++] = i915_gem_request_get(obj->last_read_req[i]);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2502,7 +2502,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			ret = __i915_wait_request(req[i], true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
-		i915_gem_request_unreference(req[i]);
+		i915_gem_request_put(req[i]);
 	}
 	return ret;
 
@@ -3498,14 +3498,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		target = request;
 	}
 	if (target)
-		i915_gem_request_reference(target);
+		i915_gem_request_get(target);
 	spin_unlock(&file_priv->mm.lock);
 
 	if (target == NULL)
 		return 0;
 
 	ret = __i915_wait_request(target, true, NULL, NULL);
-	i915_gem_request_unreference(target);
+	i915_gem_request_put(target);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e366ca0dcd99..a796dbd1b0e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -326,7 +326,7 @@ static void __i915_gem_request_release(struct drm_i915_gem_request *request)
 	i915_gem_request_remove_from_client(request);
 
 	i915_gem_context_unreference(request->ctx);
-	i915_gem_request_unreference(request);
+	i915_gem_request_put(request);
 }
 
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index b55d0b7c7f2a..0ab14fd0fce0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -147,13 +147,13 @@ to_request(struct fence *fence)
 }
 
 static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
+i915_gem_request_get(struct drm_i915_gem_request *req)
 {
 	return to_request(fence_get(&req->fence));
 }
 
 static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
+i915_gem_request_put(struct drm_i915_gem_request *req)
 {
 	fence_put(&req->fence);
 }
@@ -162,10 +162,10 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 					   struct drm_i915_gem_request *src)
 {
 	if (src)
-		i915_gem_request_reference(src);
+		i915_gem_request_get(src);
 
 	if (*pdst)
-		i915_gem_request_unreference(*pdst);
+		i915_gem_request_put(*pdst);
 
 	*pdst = src;
 }
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 61e18cb90850..aca1b72edcd8 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -391,7 +391,7 @@ static int intel_breadcrumbs_signaler(void *arg)
 			intel_engine_remove_wait(engine, &signal->wait);
 
 			fence_signal(&signal->request->fence);
-			i915_gem_request_unreference(signal->request);
+			i915_gem_request_put(signal->request);
 
 			/* Find the next oldest signal. Note that as we have
 			 * not been holding the lock, another client may
@@ -459,7 +459,7 @@ int intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	signal->wait.task = task;
 	signal->wait.seqno = request->fence.seqno;
 
-	signal->request = i915_gem_request_reference(request);
+	signal->request = i915_gem_request_get(request);
 
 	/* Insert ourselves into the retirement ordered list of signals
 	 * on this engine. We track the oldest seqno as that will be the
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 32885b8d5c02..ae247927e931 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11431,7 +11431,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 		WARN_ON(__i915_wait_request(mmio_flip->req,
 					    false, NULL,
 					    &mmio_flip->i915->rps.mmioflips));
-		i915_gem_request_unreference(mmio_flip->req);
+		i915_gem_request_put(mmio_flip->req);
 	}
 
 	/* For framebuffer backed by dmabuf, wait for fence */
@@ -11455,7 +11455,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 		return -ENOMEM;
 
 	mmio_flip->i915 = to_i915(dev);
-	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
+	mmio_flip->req = i915_gem_request_get(obj->last_write_req);
 	mmio_flip->crtc = to_intel_crtc(crtc);
 	mmio_flip->rotation = crtc->primary->state->rotation;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f43a94ae5c76..433e9f60e926 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -587,7 +587,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	i915_gem_request_reference(request);
+	i915_gem_request_get(request);
 
 	spin_lock_irq(&ring->execlist_lock);
 
@@ -983,7 +983,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 		if (ctx_obj && (ctx != ring->default_context))
 			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
-		i915_gem_request_unreference(req);
+		i915_gem_request_put(req);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0e13135aefaa..39b7ca9c3e66 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7289,7 +7289,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 		gen6_rps_boost(to_i915(req->ring->dev), NULL,
 			       req->emitted_jiffies);
 
-	i915_gem_request_unreference(req);
+	i915_gem_request_put(req);
 	kfree(boost);
 }
 
@@ -7308,8 +7308,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
 	if (boost == NULL)
 		return;
 
-	i915_gem_request_reference(req);
-	boost->req = req;
+	boost->req = i915_gem_request_get(req);
 
 	INIT_WORK(&boost->work, __intel_rps_boost_work);
 	queue_work(to_i915(dev)->wq, &boost->work);
-- 
2.7.0.rc3
