[Intel-gfx] [PATCH 25/26] drm/i915: Replace 4096 with PAGE_SIZE or I915_GTT_PAGE_SIZE

Chris Wilson chris at chris-wilson.co.uk
Sat Dec 31 12:07:01 UTC 2016


Start converting over from the raw byte count to its semantic macro:
either we want to allocate the size of a physical page in main memory,
or we want the size of a virtual page in the GTT. 4096 could mean
either, but PAGE_SIZE and I915_GTT_PAGE_SIZE are explicit and should
help improve code comprehension and future changes.
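As an illustration (drawn from the hunks below; the comments here are
editorial notes, not part of the patch), the split between the two
macros reads like:

	/* i915_gem_gtt.h: size of a virtual page in the GTT address space */
	#define I915_GTT_PAGE_SIZE 4096
	#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

	/* backing storage lives in main memory, so use the CPU's PAGE_SIZE */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);

	/* GTT address-space layout is measured in GTT pages */
	ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;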

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c              |  4 ++--
 drivers/gpu/drm/i915/i915_gem_context.c      |  5 +++--
 drivers/gpu/drm/i915/i915_gem_evict.c        |  4 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c    |  6 +++---
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  6 +++---
 drivers/gpu/drm/i915/i915_gem_gtt.h          |  5 ++++-
 drivers/gpu/drm/i915/i915_gem_render_state.c |  4 ++--
 drivers/gpu/drm/i915/i915_gem_stolen.c       |  7 ++++---
 drivers/gpu/drm/i915/i915_vma.c              |  4 ++--
 drivers/gpu/drm/i915/intel_lrc.c             |  5 +++--
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 10 +++++-----
 12 files changed, 34 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0424cee5c767..bad57279e817 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2092,7 +2092,7 @@ u32 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u32 size,
 	 * if a fence register is needed for the object.
 	 */
 	if (INTEL_GEN(dev_priv) >= 4 || tiling_mode == I915_TILING_NONE)
-		return 4096;
+		return I915_GTT_MIN_ALIGNMENT;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
@@ -3560,7 +3560,7 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 		return;
 
 	if (--vma->obj->pin_display == 0)
-		vma->display_alignment = 4096;
+		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
 	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
 	if (!i915_vma_is_active(vma))
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a8f6b8843bdf..d4780cdfb8a0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -341,7 +341,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
 		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
 	else
-		ctx->ggtt_offset_bias = 4096;
+		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
 
 	return ctx;
 
@@ -456,7 +456,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
 		dev_priv->hw_context_size = 0;
 	} else if (HAS_HW_CONTEXTS(dev_priv)) {
 		dev_priv->hw_context_size =
-			round_up(get_context_size(dev_priv), 4096);
+			round_up(get_context_size(dev_priv),
+				 I915_GTT_PAGE_SIZE);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
 					 dev_priv->hw_context_size);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 50129ec1caab..70ba92f91148 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -263,9 +263,9 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
 	if (check_color) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
 		if (start > target->vm->start)
-			start -= 4096;
+			start -= I915_GTT_PAGE_SIZE;
 		if (end < target->vm->start + target->vm->total)
-			end += 4096;
+			end += I915_GTT_PAGE_SIZE;
 	}
 
 	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a5fe299da1d3..c6922a5f0514 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -438,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			memset(&cache->node, 0, sizeof(cache->node));
 			ret = drm_mm_insert_node_in_range_generic
 				(&ggtt->base.mm, &cache->node,
-				 4096, 0, I915_COLOR_UNEVICTABLE,
+				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
 				 DRM_MM_SEARCH_DEFAULT,
 				 DRM_MM_CREATE_DEFAULT);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 6de527df6cdc..67835d7b39c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -81,11 +81,11 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
 		unsigned int stride = i915_gem_object_get_stride(vma->obj);
 
 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-		GEM_BUG_ON(vma->node.start & 4095);
-		GEM_BUG_ON(vma->fence_size & 4095);
+		GEM_BUG_ON(vma->node.start & (I915_GTT_PAGE_SIZE - 1));
+		GEM_BUG_ON(vma->fence_size & (I915_GTT_PAGE_SIZE - 1));
 		GEM_BUG_ON(stride & 127);
 
-		val = (vma->node.start + vma->fence_size - 4096) << 32;
+		val = (vma->node.start + vma->fence_size - I915_GTT_PAGE_SIZE) << 32;
 		val |= vma->node.start;
 		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
 		if (tiling == I915_TILING_Y)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c579946447c..116e43bffdb1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2716,11 +2716,11 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
 				  u64 *end)
 {
 	if (node->color != color)
-		*start += 4096;
+		*start += I915_GTT_PAGE_SIZE;
 
 	node = list_next_entry(node, node_list);
 	if (node->allocated && node->color != color)
-		*end -= 4096;
+		*end -= I915_GTT_PAGE_SIZE;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2747,7 +2747,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	/* Reserve a mappable slot for our lockless error capture */
 	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
 						  &ggtt->error_capture,
-						  4096, 0,
+						  PAGE_SIZE, 0,
 						  I915_COLOR_UNEVICTABLE,
 						  0, ggtt->mappable_end,
 						  0, 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a4070c27d69b..e217b14e14fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -40,6 +40,9 @@
 #include "i915_gem_timeline.h"
 #include "i915_gem_request.h"
 
+#define I915_GTT_PAGE_SIZE 4096
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 32
 /* 32 fences + sign bit for FENCE_REG_NONE */
@@ -563,6 +566,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 #define PIN_HIGH		BIT(9)
 #define PIN_OFFSET_BIAS		BIT(10)
 #define PIN_OFFSET_FIXED	BIT(11)
-#define PIN_OFFSET_MASK		(~4095)
+#define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 5af19b0bf713..63ae7e813335 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -187,14 +187,14 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
 	if (!rodata)
 		return 0;
 
-	if (rodata->batch_items * 4 > 4096)
+	if (rodata->batch_items * 4 > PAGE_SIZE)
 		return -EINVAL;
 
 	so = kmalloc(sizeof(*so), GFP_KERNEL);
 	if (!so)
 		return -ENOMEM;
 
-	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
 		goto err_free;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 5fd851336a99..be611f1367b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -482,7 +482,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 	stolen_usable_start = 0;
 	/* WaSkipStolenMemoryFirstPage:bdw+ */
 	if (INTEL_GEN(dev_priv) >= 8)
-		stolen_usable_start = 4096;
+		stolen_usable_start = PAGE_SIZE;
 
 	ggtt->stolen_usable_size =
 		ggtt->stolen_size - reserved_total - stolen_usable_start;
@@ -644,8 +644,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 			stolen_offset, gtt_offset, size);
 
 	/* KISS and expect everything to be page-aligned */
-	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
-	    WARN_ON(stolen_offset & 4095))
+	if (WARN_ON(size == 0) ||
+	    WARN_ON(size & (I915_GTT_PAGE_SIZE - 1)) ||
+	    WARN_ON(stolen_offset & (I915_GTT_MIN_ALIGNMENT - 1)))
 		return NULL;
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ae00d9f4e520..6596d0963da1 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -91,7 +91,7 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->size = obj->base.size;
-	vma->display_alignment = 4096;
+	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
 	if (view) {
 		vma->ggtt_view = *view;
@@ -435,7 +435,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		 * with zero alignment, so where possible use the optimal
 		 * path.
 		 */
-		if (alignment <= 4096)
+		if (alignment <= I915_GTT_MIN_ALIGNMENT)
 			alignment = 0;
 
 search_free:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 600518ee89db..c809eda65f61 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1937,7 +1937,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
 	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
 
-	ret = intel_engine_create_scratch(engine, 4096);
+	ret = intel_engine_create_scratch(engine, PAGE_SIZE);
 	if (ret)
 		return ret;
 
@@ -2219,7 +2219,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	WARN_ON(ce->state);
 
-	context_size = round_up(intel_lr_context_size(engine), 4096);
+	context_size = round_up(intel_lr_context_size(engine),
+				I915_GTT_PAGE_SIZE);
 
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0971ac396b60..ab83fc22d207 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1736,7 +1736,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 	void *vaddr;
 	int ret;
 
-	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate status page\n");
 		return PTR_ERR(obj);
@@ -1777,7 +1777,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 
 	engine->status_page.vma = vma;
 	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
-	engine->status_page.page_addr = memset(vaddr, 0, 4096);
+	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			 engine->name, i915_ggtt_offset(vma));
@@ -2049,7 +2049,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	}
 
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-	ret = intel_ring_pin(ring, 4096);
+	ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
 	if (ret) {
 		intel_ring_free(ring);
 		goto error;
@@ -2466,7 +2466,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create(dev_priv, 4096);
+		obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
 		if (IS_ERR(obj))
 			goto err;
 
@@ -2683,7 +2683,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 		return ret;
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		ret = intel_engine_create_scratch(engine, 4096);
+		ret = intel_engine_create_scratch(engine, PAGE_SIZE);
 		if (ret)
 			return ret;
 	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
-- 
2.11.0


