[PATCH] drm/i915: Optionally manage system memory with TTM and poolalloc
Adrian Larumbe
adrian.larumbe at collabora.com
Tue Jul 26 00:47:13 UTC 2022
Add a module parameter that selects the memory region manager used for
system memory: either the legacy shmem-based one, or TTM through its pool
allocator. This change should not affect how DGFX platforms with LMEM
work.
Signed-off-by: Adrian Larumbe <adrian.larumbe at collabora.com>
---
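Testing notes: the TTM/pool-allocator path for system memory is opt-in and
can be exercised at load time via the parameter added in the i915_params.c
hunk (a usage sketch, assuming i915 is built as a module):

  # modprobe i915 use_pool_alloc=1

When i915 is built in, the equivalent is i915.use_pool_alloc=1 on the kernel
command line. In this trybot patch the default is flipped to 'true' so CI
exercises the new path without extra configuration.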
drivers/gpu/drm/i915/gem/i915_gem_domain.c | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 56 ++++++++++----
drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 90 +++++++++++++++++++++-
drivers/gpu/drm/i915/gem/i915_gem_ttm.h | 9 +++
drivers/gpu/drm/i915/gt/shmem_utils.c | 32 +++++++-
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 6 +-
drivers/gpu/drm/i915/i915_params.c | 6 ++
drivers/gpu/drm/i915/i915_params.h | 4 +-
drivers/gpu/drm/i915/intel_memory_region.c | 2 +-
drivers/gpu/drm/ttm/ttm_resource.c | 4 +-
drivers/gpu/drm/ttm/ttm_tt.c | 1 -
11 files changed, 186 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 1674b0c5802b..0efc8096e31f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -72,7 +72,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
break;
case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
case I915_GEM_DOMAIN_RENDER:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0c5c43852e24..b8ae6a381108 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -83,6 +83,22 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+ addr = -EINVAL;
+ goto err;
+ }
+
+ if (i915_gem_object_is_ttm(obj)) {
+ GEM_WARN_ON(!i915->params.use_pool_alloc);
+
+ addr = i915_gem_ttm_mmap(obj, args);
+ if (IS_ERR_VALUE(addr))
+ goto err;
+
+ args->addr_ptr = (u64)addr;
+ return 0;
+ }
+
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
@@ -91,11 +107,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
goto err;
}
- if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
- addr = -EINVAL;
- goto err;
- }
-
addr = vm_mmap(obj->base.filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
@@ -552,9 +563,11 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct i915_mmap_offset *mmo, *mn;
- if (obj->ops->unmap_virtual)
+ if (obj->ops->unmap_virtual &&
+ bo->type == ttm_bo_type_device)
obj->ops->unmap_virtual(obj);
spin_lock(&obj->mmo.lock);
@@ -641,11 +654,13 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
struct drm_file *file)
{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_mmap_offset *mmo;
int err;
- GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
+ GEM_BUG_ON((obj->ops->mmap_offset || obj->ops->mmap_ops) &&
+ bo->type == ttm_bo_type_device);
mmo = lookup_mmo(obj, mmap_type);
if (mmo)
@@ -694,12 +709,14 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
u64 *offset, struct drm_file *file)
{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct i915_mmap_offset *mmo;
if (i915_gem_object_never_mmap(obj))
return -ENODEV;
- if (obj->ops->mmap_offset) {
+ if (obj->ops->mmap_offset &&
+ bo->type == ttm_bo_type_device) {
if (mmap_type != I915_MMAP_TYPE_FIXED)
return -ENODEV;
@@ -731,7 +748,6 @@ __assign_mmap_offset_handle(struct drm_file *file,
{
struct drm_i915_gem_object *obj;
int err;
-
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
@@ -739,6 +755,7 @@ __assign_mmap_offset_handle(struct drm_file *file,
err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out_put;
+
err = __assign_mmap_offset(obj, mmap_type, offset, file);
i915_gem_object_unlock(obj);
out_put:
@@ -922,7 +939,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_object *obj = NULL;
+ struct ttm_buffer_object *bo = NULL;
struct i915_mmap_offset *mmo = NULL;
struct file *anon;
@@ -944,7 +963,8 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
- GEM_BUG_ON(obj && obj->ops->mmap_ops);
+ if (!i915->params.use_pool_alloc)
+ GEM_BUG_ON(obj && obj->ops->mmap_ops);
} else {
obj = i915_gem_object_get_rcu
(container_of(node, struct drm_i915_gem_object,
@@ -958,6 +978,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
if (!obj)
return node ? -EACCES : -EINVAL;
+ if (i915_gem_object_is_ttm(obj))
+ bo = i915_gem_to_ttm(obj);
+
if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
i915_gem_object_put(obj);
@@ -987,10 +1010,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
fput(anon);
if (obj->ops->mmap_ops) {
- vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
- vma->vm_ops = obj->ops->mmap_ops;
- vma->vm_private_data = node->driver_private;
- return 0;
+ /* there could be an obj backend with mmap_ops that isn't TTM */
+ if (!i915_gem_object_is_ttm(obj) ||
+ (i915_gem_object_is_ttm(obj) &&
+ bo->type == ttm_bo_type_device)) {
+ vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = obj->ops->mmap_ops;
+ vma->vm_private_data = node->driver_private;
+ return 0;
+ }
}
vma->vm_private_data = mmo;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7e1f8b83077f..b39f036042b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -3,7 +3,9 @@
* Copyright © 2021 Intel Corporation
*/
+#include "drm/ttm/ttm_bo_api.h"
#include <linux/shmem_fs.h>
+#include <linux/mman.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -20,6 +22,7 @@
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
+#include "gem/i915_gem_clflush.h"
#include "gt/intel_gpu_commands.h"
#define I915_TTM_PRIO_PURGE 0
@@ -294,7 +297,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
page_flags |= TTM_TT_FLAG_EXTERNAL |
TTM_TT_FLAG_EXTERNAL_MAPPABLE;
- i915_tt->is_shmem = true;
+
+ i915_tt->is_shmem = !i915->params.use_pool_alloc;
}
if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
@@ -513,9 +517,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
return 0;
- GEM_BUG_ON(!i915_tt->is_shmem);
-
- if (!i915_tt->filp)
+ if (!ttm_tt_is_populated(bo->ttm))
return 0;
ret = ttm_bo_wait_ctx(bo, &ctx);
@@ -785,6 +787,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const size_t size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
+ struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
+ /*
+ * If there's no chance of allocating enough pages for the whole
+ * object, bail early.
+ */
+ if (size > resource_size(&mr->region))
+ return -ENOMEM;
+
ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
if (ret)
return ret;
@@ -1215,6 +1227,11 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
ttm_bo_type_kernel;
+ if (!HAS_LMEM(i915) && i915->params.use_pool_alloc) {
+ GEM_WARN_ON(mem->type != INTEL_MEMORY_SYSTEM);
+ bo_type = ttm_bo_type_kernel;
+ }
+
obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
/* Forcing the page size is kernel internal only */
@@ -1274,3 +1291,68 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm");
return mr;
}
+
+bool i915_gem_object_is_ttm(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_ttm_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_ttm_from_data(struct drm_i915_private *dev_priv,
+ const void *data, resource_size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ void *vaddr;
+
+ obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
+ return obj;
+
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return vaddr;
+ }
+
+ memcpy(vaddr, data, size);
+
+ i915_gem_object_unpin_map(obj);
+
+ i915_gem_object_lock(obj, NULL);
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ i915_gem_object_unlock(obj);
+
+ return obj;
+}
+
+unsigned long i915_gem_ttm_mmap(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_mmap *args)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr;
+
+ addr = vm_mmap(NULL, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+ vma = find_vma(mm, addr);
+ if (!vma) {
+ mmap_write_unlock(mm);
+ return -EFAULT;
+ }
+
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = obj->ops->mmap_ops;
+ vma->vm_private_data = bo;
+
+ mmap_write_unlock(mm);
+
+ return addr;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index e4842b4296fc..2b1772a08c9f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -95,4 +95,13 @@ static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
bool i915_ttm_resource_mappable(struct ttm_resource *res);
+bool i915_gem_object_is_ttm(const struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_ttm_from_data(struct drm_i915_private *dev_priv,
+ const void *data, resource_size_t size);
+
+unsigned long i915_gem_ttm_mmap(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_mmap *args);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 402f085f3a02..07d3c180112b 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -10,7 +10,9 @@
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_ttm.h"
#include "shmem_utils.h"
+#include "i915_drv.h"
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
@@ -30,10 +32,32 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
return file;
}
+static int shmem_flush_object(struct file *file, unsigned long num_pages)
+{
+ struct page *page;
+ unsigned long pfn;
+
+ for (pfn = 0; pfn < num_pages; pfn++) {
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ kunmap(page);
+ put_page(page);
+ }
+
+ return 0;
+}
+
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct file *file;
void *ptr;
+ int err;
if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
@@ -41,7 +65,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
+ ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_ttm(obj) ?
I915_MAP_WC : I915_MAP_WB);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
@@ -49,6 +73,12 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
file = shmem_create_from_data("", ptr, obj->base.size);
i915_gem_object_unpin_map(obj);
+ err = shmem_flush_object(file, obj->base.size >> PAGE_SHIFT);
+ if (err) {
+ drm_dbg(&i915->drm, "shmem_flush_object failed\n");
+ return ERR_PTR(err);
+ }
+
return file;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 27363091e1af..c40e58ae7f5c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_ttm.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -440,7 +441,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (!IS_ERR(obj))
obj->flags |= I915_BO_ALLOC_PM_EARLY;
} else {
- obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (unlikely(i915->params.use_pool_alloc))
+ obj = i915_gem_object_create_ttm_from_data(i915, fw->data, fw->size);
+ else
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 701fbc98afa0..7200b1290954 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -205,6 +205,12 @@ i915_param_named_unsafe(request_timeout_ms, uint, 0600,
i915_param_named_unsafe(lmem_size, uint, 0400,
"Set the lmem size(in MiB) for each region. (default: 0, all memory)");
+i915_param_named_unsafe(use_pool_alloc, bool, 0600,
+ "Force the driver to use TTM's pool allocator API for smem objects. "
+ "This will cause TTM to take over BO allocation even in integrated platforms. "
+ "(default: false)");
+
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index b5e7ea45d191..eba54f021ec6 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -83,7 +83,9 @@ struct drm_printer;
param(bool, verbose_state_checks, true, 0) \
param(bool, nuclear_pageflip, false, 0400) \
param(bool, enable_dp_mst, true, 0600) \
- param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
+ param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0) \
+ param(bool, use_pool_alloc, true, 0600)
+ /* set to 'true' for trybot testing */
#define MEMBER(T, member, ...) T member;
struct i915_params {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 9a4a7fb55582..442687285ce6 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -321,7 +321,7 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
instance = intel_region_map[i].instance;
switch (type) {
case INTEL_MEMORY_SYSTEM:
- if (IS_DGFX(i915))
+ if (IS_DGFX(i915) || i915->params.use_pool_alloc)
mem = i915_gem_ttm_system_setup(i915, type,
instance);
else
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 20f9adcc3235..10603961a391 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -171,8 +171,8 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
* Initialize a new resource object. Counterpart of ttm_resource_fini().
*/
void ttm_resource_init(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource *res)
+ const struct ttm_place *place,
+ struct ttm_resource *res)
{
struct ttm_resource_manager *man;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d505603930a7..e110db86c870 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -136,7 +136,6 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
unsigned long extra_pages)
{
ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
- ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
ttm->swap_storage = NULL;
--
2.37.0