[Intel-gfx] [PATCH 11/26] drm/i915: Pack the partial view size and offset into a single u64
Chris Wilson
chris at chris-wilson.co.uk
Sat Dec 31 12:06:47 UTC 2016
Since the partial offset must be page aligned, we can reuse those low 12
bits to encode the size of the partial view (which is then limited to
8MiB, i.e. 2048 pages).
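
For reference, a minimal standalone sketch of the encoding, assuming the
usual 4KiB pages (PAGE_SHIFT == 12); the helper names below are
illustrative only and merely mirror the intel_partial_get_*() accessors
added by this patch:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define SIZE_MASK  ((UINT64_C(1) << PAGE_SHIFT) - 1)

	/* page_offset is in pages; the size is stored as pages - 1 in the
	 * low PAGE_SHIFT bits, the page-aligned offset in the high bits.
	 */
	static uint64_t pack_partial(uint64_t page_offset, uint32_t pages)
	{
		return (page_offset << PAGE_SHIFT) | (uint64_t)(pages - 1);
	}

	static uint64_t unpack_byte_offset(uint64_t v)
	{
		return v & ~SIZE_MASK;	/* page-aligned byte offset */
	}

	static uint64_t unpack_page_offset(uint64_t v)
	{
		return v >> PAGE_SHIFT;	/* offset in pages */
	}

	static uint32_t unpack_pages(uint64_t v)
	{
		return (uint32_t)(v & SIZE_MASK) + 1;	/* 1..4096 pages */
	}

	int main(void)
	{
		/* a view of 64 pages starting 32 pages into the object */
		uint64_t v = pack_partial(32, 64);

		printf("byte offset %#llx, page offset %llu, %u pages\n",
		       (unsigned long long)unpack_byte_offset(v),
		       (unsigned long long)unpack_page_offset(v),
		       unpack_pages(v));
		return 0;
	}
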
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem.c | 22 +++++++++++++---------
drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++--
drivers/gpu/drm/i915/i915_gem_gtt.h | 25 +++++++++++++++++++++++--
drivers/gpu/drm/i915/i915_vma.c | 9 ++++-----
4 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4b461f01a910..87e41d714890 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1698,6 +1698,9 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
u64 size;
+ BUILD_BUG_ON(ilog2(GEN7_FENCE_MAX_PITCH_VAL*128*32 >> PAGE_SHIFT) >
+ INTEL_PARTIAL_SIZE_BITS);
+
size = i915_gem_object_get_stride(obj);
size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
@@ -1831,24 +1834,25 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
if (IS_ERR(vma)) {
struct i915_ggtt_view view;
- unsigned int chunk_size;
+ unsigned int chunk;
/* Use a partial view if it is bigger than available space */
- chunk_size = MIN_CHUNK_PAGES;
+ chunk = MIN_CHUNK_PAGES;
if (i915_gem_object_is_tiled(obj))
- chunk_size = roundup(chunk_size, tile_row_pages(obj));
+ chunk = roundup(chunk, tile_row_pages(obj));
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
- view.params.partial.offset = rounddown(page_offset, chunk_size);
- view.params.partial.size =
- min_t(unsigned int, chunk_size,
- vma_pages(area) - view.params.partial.offset);
+ view.params.partial.offset_size = rounddown(page_offset, chunk);
+ view.params.partial.offset_size =
+ (view.params.partial.offset_size << INTEL_PARTIAL_SIZE_BITS) |
+ (min_t(unsigned int, chunk,
+ vma_pages(area) - view.params.partial.offset_size) - 1);
/* If the partial covers the entire object, just create a
* normal VMA.
*/
- if (chunk_size >= obj->base.size >> PAGE_SHIFT)
+ if (chunk >= obj->base.size >> PAGE_SHIFT)
view.type = I915_GGTT_VIEW_NORMAL;
/* Userspace is now writing through an untracked VMA, abandon
@@ -1878,7 +1882,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+ area->vm_start + intel_partial_get_offset(&vma->ggtt_view.params.partial),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f698006fe883..4e77baf7d652 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3492,7 +3492,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
{
struct sg_table *st;
struct scatterlist *sg, *iter;
- unsigned int count = view->params.partial.size;
+ unsigned int count = intel_partial_get_pages(&view->params.partial);
unsigned int offset;
int ret = -ENOMEM;
@@ -3505,7 +3505,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
goto err_sg_alloc;
iter = i915_gem_object_get_sg(obj,
- view->params.partial.offset,
+ intel_partial_get_page_offset(&view->params.partial),
&offset);
GEM_BUG_ON(!iter);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 19ea4c942df4..023bf6ac3dc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -156,10 +156,31 @@ struct intel_rotation_info {
};
struct intel_partial_info {
- u64 offset;
- unsigned int size;
+ /* offset is page-aligned, leaving just enough bits for the size */
+#define INTEL_PARTIAL_SIZE_BITS PAGE_SHIFT
+ u64 offset_size;
};
+static inline u32 intel_partial_get_pages(const struct intel_partial_info *pi)
+{
+ return 1 + (pi->offset_size & GENMASK(INTEL_PARTIAL_SIZE_BITS-1, 0));
+}
+
+static inline u32 intel_partial_get_size(const struct intel_partial_info *pi)
+{
+ return intel_partial_get_pages(pi) << PAGE_SHIFT;
+}
+
+static inline u64 intel_partial_get_offset(const struct intel_partial_info *pi)
+{
+ return pi->offset_size & GENMASK(63, INTEL_PARTIAL_SIZE_BITS);
+}
+
+static inline u64 intel_partial_get_page_offset(const struct intel_partial_info *pi)
+{
+ return pi->offset_size >> INTEL_PARTIAL_SIZE_BITS;
+}
+
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 58f2483362ad..65770b7109c0 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -96,11 +96,10 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
- view->params.partial.offset,
- view->params.partial.size,
- obj->base.size >> PAGE_SHIFT));
- vma->size = view->params.partial.size;
- vma->size <<= PAGE_SHIFT;
+ intel_partial_get_offset(&view->params.partial),
+ intel_partial_get_size(&view->params.partial),
+ obj->base.size));
+ vma->size = intel_partial_get_size(&view->params.partial);
GEM_BUG_ON(vma->size >= obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size =
--
2.11.0