[PATCH 14/25] drm/i915: Use TTM mmap handling for TTM BOs.
Matthew Auld
matthew.auld at intel.com
Tue May 18 13:43:26 UTC 2021
From: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Use the TTM handlers for servicing page faults and vm_access, by letting
TTM-backed objects install their own vm_operations_struct through the new
obj->ops->mmap_ops hook.
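As a rough illustration (not part of this patch), the path these handlers
end up servicing looks like the following from userspace. This is a minimal
sketch that assumes an open DRM fd and a valid GEM handle, with error
handling elided:

/*
 * Map a TTM-backed BO so that the first CPU access faults through the
 * vm_fault_ttm() handler added below. Illustration only.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_bo(int drm_fd, __u32 handle, __u64 size)
{
	struct drm_i915_gem_mmap_offset arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = I915_MMAP_OFFSET_WB;

	/* Ask the kernel for the fake mmap offset of this object. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	/* Faults on this mapping are now serviced by the TTM vm helpers. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}

With obj->ops->mmap_ops set, i915_gem_mmap() installs vm_ops_ttm for such
objects, so the fault above lands in ttm_bo_vm_reserve() and
ttm_bo_vm_fault_reserved().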
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 17 ++-
drivers/gpu/drm/i915/gem/i915_gem_mman.h | 2 +
.../gpu/drm/i915/gem/i915_gem_object_types.h | 1 +
drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 105 +++++++++++++++++-
4 files changed, 118 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fd1c9714f8d8..3d78f4d59cb2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -19,6 +19,7 @@
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
+#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
@@ -788,7 +789,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
return __assign_mmap_offset(file, args->handle, type, &args->offset);
}
-static void vm_open(struct vm_area_struct *vma)
+void i915_gem_mmap_vm_open(struct vm_area_struct *vma)
{
struct i915_mmap_offset *mmo = vma->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
@@ -797,7 +798,7 @@ static void vm_open(struct vm_area_struct *vma)
i915_gem_object_get(obj);
}
-static void vm_close(struct vm_area_struct *vma)
+void i915_gem_mmap_vm_close(struct vm_area_struct *vma)
{
struct i915_mmap_offset *mmo = vma->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
@@ -809,15 +810,15 @@ static void vm_close(struct vm_area_struct *vma)
static const struct vm_operations_struct vm_ops_gtt = {
.fault = vm_fault_gtt,
.access = vm_access,
- .open = vm_open,
- .close = vm_close,
+ .open = i915_gem_mmap_vm_open,
+ .close = i915_gem_mmap_vm_close,
};
static const struct vm_operations_struct vm_ops_cpu = {
.fault = vm_fault_cpu,
.access = vm_access,
- .open = vm_open,
- .close = vm_close,
+ .open = i915_gem_mmap_vm_open,
+ .close = i915_gem_mmap_vm_close,
};
static int singleton_release(struct inode *inode, struct file *file)
@@ -952,6 +953,10 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ /* override ops per-object if desired */
+ if (obj->ops->mmap_ops)
+ vma->vm_ops = obj->ops->mmap_ops;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index efee9e0d2508..e5bd02a6db12 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -28,5 +28,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+void i915_gem_mmap_vm_open(struct vm_area_struct *vma);
+void i915_gem_mmap_vm_close(struct vm_area_struct *vma);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index b350765e1935..31d828e91cf4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -79,6 +79,7 @@ struct drm_i915_gem_object_ops {
void (*delayed_free)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);
+ const struct vm_operations_struct *mmap_ops;
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 790f5ec45c4d..fe9ac50b2470 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -14,6 +14,7 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_bo_util.h"
+#include "gem/i915_gem_mman.h"
#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
@@ -345,6 +346,44 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return 0;
}
+static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
+{
+ if (mem->mem_type < I915_PL_LMEM0)
+ return 0;
+
+ /* We may need to revisit this later, but this allows all caching to be used in mmap */
+ mem->bus.caching = ttm_cached;
+ mem->bus.is_iomem = true;
+
+ return 0;
+}
+
+static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct sg_table *sgt = obj->ttm.cached_io_st;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ GEM_WARN_ON(bo->ttm);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ unsigned long sg_max = sg->length >> PAGE_SHIFT;
+
+ if (page_offset < sg_max) {
+ unsigned long base =
+ obj->mm.region->iomap.base - obj->mm.region->region.start;
+
+ return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + page_offset;
+ }
+
+ page_offset -= sg_max;
+ }
+ GEM_BUG_ON(1);
+ return 0;
+}
+
struct ttm_device_funcs i915_ttm_bo_driver = {
.ttm_tt_create = i915_ttm_tt_create,
.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
@@ -355,6 +394,8 @@ struct ttm_device_funcs i915_ttm_bo_driver = {
.verify_access = NULL,
.swap_notify = i915_ttm_swap_notify,
.delete_mem_notify = i915_ttm_delete_mem_notify,
+ .io_mem_reserve = i915_ttm_io_mem_reserve,
+ .io_mem_pfn = i915_ttm_io_mem_pfn,
};
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
@@ -454,7 +495,68 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
ttm_bo_put(i915_gem_to_ttm(obj));
}
-static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
+static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+{
+ struct vm_area_struct *area = vmf->vma;
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ vm_fault_t ret;
+
+ /* Sanity check that we allow writing into this object */
+ if (unlikely(i915_gem_object_is_readonly(obj) &&
+ area->vm_flags & VM_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ ret = ttm_bo_vm_reserve(i915_gem_to_ttm(obj), vmf);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_vm_fault_reserved(i915_gem_to_ttm(obj), vmf,
+ drm_vma_node_start(&mmo->vma_node),
+ vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ dma_resv_unlock(obj->base.resv);
+
+ return ret;
+}
+
+static int
+vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ int err = 0;
+
+ if (i915_gem_object_is_readonly(obj) && write)
+ return -EACCES;
+
+ addr -= area->vm_start;
+ if (addr >= obj->base.size)
+ return -EINVAL;
+
+ err = i915_gem_object_lock_interruptible(obj, NULL);
+ if (err)
+ return err;
+
+ len = ttm_bo_vm_access_reserved(i915_gem_to_ttm(obj), area,
+ addr, buf, len, write);
+ i915_gem_object_unlock(obj);
+
+ return len;
+}
+
+static const struct vm_operations_struct vm_ops_ttm = {
+ .fault = vm_fault_ttm,
+ .access = vm_access_ttm,
+ .open = i915_gem_mmap_vm_open,
+ .close = i915_gem_mmap_vm_close,
+};
+
+const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
@@ -463,6 +565,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.truncate = i915_ttm_purge,
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
+ .mmap_ops = &vm_ops_ttm,
};
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
--
2.26.3