[Intel-gfx] [PATCH 17/22] drm/i915: Fix partial GGTT faulting
Chris Wilson
chris at chris-wilson.co.uk
Tue Aug 16 10:42:43 UTC 2016
We want to always use the partial VMA as a fallback for a failure to
bind the object into the GGTT. This extends the support for partial
objects in the GGTT to cover everything, not just objects that are too large.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 70 +++++++++++++++++++++--------------------
1 file changed, 36 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 48e4756e89cc..74ea0926ad70 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1696,7 +1696,6 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_ggtt_view view = i915_ggtt_view_normal;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
struct i915_vma *vma;
pgoff_t page_offset;
@@ -1730,27 +1729,31 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
goto err_unlock;
}
- /* Use a partial view if the object is bigger than the aperture. */
- if (obj->base.size >= ggtt->mappable_end &&
- !i915_gem_object_is_tiled(obj)) {
+ /* Now pin it into the GTT as needed */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
+ if (IS_ERR(vma)) {
+ struct i915_ggtt_view partial;
unsigned int chunk_size;
+ /* Use a partial view if it is bigger than available space */
chunk_size = MIN_CHUNK_PAGES;
if (i915_gem_object_is_tiled(obj))
chunk_size = max(chunk_size, tile_row_pages(obj));
- memset(&view, 0, sizeof(view));
- view.type = I915_GGTT_VIEW_PARTIAL;
- view.params.partial.offset = rounddown(page_offset, chunk_size);
- view.params.partial.size =
+ memset(&partial, 0, sizeof(partial));
+ partial.type = I915_GGTT_VIEW_PARTIAL;
+ partial.params.partial.offset =
+ rounddown(page_offset, chunk_size);
+ partial.params.partial.size =
min_t(unsigned int,
chunk_size,
(area->vm_end - area->vm_start) / PAGE_SIZE -
- view.params.partial.offset);
- }
+ partial.params.partial.offset);
- /* Now pin it into the GTT if needed */
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(obj, &partial, 0, 0,
+ PIN_MAPPABLE);
+ }
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unlock;
@@ -1768,26 +1771,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
pfn = ggtt->mappable_base + i915_ggtt_offset(vma);
pfn >>= PAGE_SHIFT;
- if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
- /* Overriding existing pages in partial view does not cause
- * us any trouble as TLBs are still valid because the fault
- * is due to userspace losing part of the mapping or never
- * having accessed it before (at this partials' range).
- */
- unsigned long base = area->vm_start +
- (view.params.partial.offset << PAGE_SHIFT);
- unsigned int i;
-
- for (i = 0; i < view.params.partial.size; i++) {
- ret = vm_insert_pfn(area,
- base + i * PAGE_SIZE,
- pfn + i);
- if (ret)
- break;
- }
-
- obj->fault_mappable = true;
- } else {
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
if (!obj->fault_mappable) {
unsigned long size =
min_t(unsigned long,
@@ -1803,13 +1787,31 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
if (ret)
break;
}
-
- obj->fault_mappable = true;
} else
ret = vm_insert_pfn(area,
(unsigned long)vmf->virtual_address,
pfn + page_offset);
+ } else {
+ /* Overriding existing pages in partial view does not cause
+ * us any trouble as TLBs are still valid because the fault
+ * is due to userspace losing part of the mapping or never
+ * having accessed it before (at this partials' range).
+ */
+ const struct i915_ggtt_view *view = &vma->ggtt_view;
+ unsigned long base = area->vm_start +
+ (view->params.partial.offset << PAGE_SHIFT);
+ unsigned int i;
+
+ for (i = 0; i < view->params.partial.size; i++) {
+ ret = vm_insert_pfn(area,
+ base + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
}
+
+ obj->fault_mappable = true;
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
--
2.8.1
More information about the Intel-gfx
mailing list