[Intel-gfx] [RFC 07/13] drm/i915/svm: Device memory support

Niranjana Vishwanathapura niranjana.vishwanathapura at intel.com
Fri Nov 22 20:57:28 UTC 2019


Plug in device memory through HMM as DEVICE_PRIVATE.
Add support functions to allocate pages and free pages from device memory.
Implement ioctl to migrate pages from host to device memory.
For now, only support migrating pages from host memory to device memory.

Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield at intel.com>
Cc: Daniel Vetter <daniel.vetter at intel.com>
Cc: Sudeep Dutt <sudeep.dutt at intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
---
 drivers/gpu/drm/i915/Kconfig               |   9 +
 drivers/gpu/drm/i915/Makefile              |   3 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c |  13 -
 drivers/gpu/drm/i915/i915_buddy.h          |  12 +
 drivers/gpu/drm/i915/i915_drv.c            |   1 +
 drivers/gpu/drm/i915/i915_svm.c            |   2 +
 drivers/gpu/drm/i915/i915_svm.h            |  15 +
 drivers/gpu/drm/i915/i915_svm_devmem.c     | 391 +++++++++++++++++++++
 drivers/gpu/drm/i915/intel_memory_region.h |  14 +
 drivers/gpu/drm/i915/intel_region_lmem.c   |  10 +
 10 files changed, 456 insertions(+), 14 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_svm_devmem.c

diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 689e57fe3973..66337f2ca2bf 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -141,9 +141,18 @@ config DRM_I915_SVM
 	bool "Enable Shared Virtual Memory support in i915"
 	depends on STAGING
 	depends on DRM_I915
+	depends on ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on ARCH_ENABLE_MEMORY_HOTREMOVE
+	depends on MEMORY_HOTPLUG
+	depends on MEMORY_HOTREMOVE
+	depends on ARCH_HAS_PTE_DEVMAP
+	depends on SPARSEMEM_VMEMMAP
+	depends on ZONE_DEVICE
+	depends on DEVICE_PRIVATE
 	depends on MMU
 	select HMM_MIRROR
 	select MMU_NOTIFIER
+	select MIGRATE_VMA_HELPER
 	default n
 	help
 	  Choose this option if you want Shared Virtual Memory (SVM)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7d4cd9eefd12..b574ec31ea2e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -155,7 +155,8 @@ i915-y += \
 
 # SVM code
 i915-$(CONFIG_DRM_I915_SVM) += gem/i915_gem_svm.o \
-			       i915_svm.o
+			       i915_svm.o \
+			       i915_svm_devmem.o
 
 # general-purpose microcontroller (GuC) support
 obj-y += gt/uc/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index a7dee1b749cb..dd88fa87b7fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -480,19 +480,6 @@ int __init i915_global_objects_init(void)
 	return 0;
 }
 
-static enum intel_region_id
-__region_id(u32 region)
-{
-	enum intel_region_id id;
-
-	for (id = 0; id < INTEL_REGION_UNKNOWN; ++id) {
-		if (intel_region_map[id] == region)
-			return id;
-	}
-
-	return INTEL_REGION_UNKNOWN;
-}
-
 bool
 i915_gem_object_svm_mapped(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index ed41f3507cdc..afc493e6c130 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -9,6 +9,9 @@
 #include <linux/bitops.h>
 #include <linux/list.h>
 
+/* 512 bits (one per page) supports 2MB blocks */
+#define I915_BUDDY_MAX_PAGES   512
+
 struct i915_buddy_block {
 #define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
 #define I915_BUDDY_HEADER_STATE  GENMASK_ULL(11, 10)
@@ -32,6 +35,15 @@ struct i915_buddy_block {
 	 */
 	struct list_head link;
 	struct list_head tmp_link;
+
+	unsigned long pfn_first;
+	/*
+	 * FIXME: There are other alternatives to bitmap. Like splitting the
+	 * block into contiguous 4K sized blocks. But it is part of bigger
+	 * issues involving partially invalidating large mapping, freeing the
+	 * blocks etc., revisit.
+	 */
+	unsigned long bitmap[BITS_TO_LONGS(I915_BUDDY_MAX_PAGES)];
 };
 
 #define I915_BUDDY_MAX_ORDER  I915_BUDDY_HEADER_ORDER
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c190df614c48..740b4b9d39a8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2771,6 +2771,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_BIND, i915_bind_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_SVM_MIGRATE, i915_svm_migrate_ioctl, DRM_RENDER_ALLOW)
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_svm.c b/drivers/gpu/drm/i915/i915_svm.c
index 4e414f257121..fe7d53634606 100644
--- a/drivers/gpu/drm/i915/i915_svm.c
+++ b/drivers/gpu/drm/i915/i915_svm.c
@@ -206,6 +206,8 @@ int i915_svm_bind(struct i915_address_space *vm, struct drm_i915_bind *args)
 	if (unlikely(ret))
 		goto vma_done;
 
+	/* XXX: Assuming the range is exclusively LMEM or SMEM, fix it */
+	i915_dmem_convert_pfn(vm->i915, &range);
 	sg_page_sizes = i915_svm_build_sg(vm, &range, &st);
 
 	mutex_lock(&svm->mutex);
diff --git a/drivers/gpu/drm/i915/i915_svm.h b/drivers/gpu/drm/i915/i915_svm.h
index f176f1dc493f..a1b62997e925 100644
--- a/drivers/gpu/drm/i915/i915_svm.h
+++ b/drivers/gpu/drm/i915/i915_svm.h
@@ -33,6 +33,14 @@ static inline bool i915_vm_is_svm_enabled(struct i915_address_space *vm)
 	return vm->svm;
 }
 
+void i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
+			   struct hmm_range *range);
+int i915_svm_migrate_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915,
+					u64 size);
+void i915_svm_devmem_remove(struct i915_devmem *devmem);
+
 #else
 
 struct i915_svm { };
@@ -45,6 +53,13 @@ static inline int i915_svm_bind_mm(struct i915_address_space *vm)
 static inline bool i915_vm_is_svm_enabled(struct i915_address_space *vm)
 { return false; }
 
+static inline int i915_svm_migrate_ioctl(struct drm_device *dev, void *data,
+					 struct drm_file *file_priv)
+{ return -ENOTSUPP; }
+static inline
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915, u64 size)
+{ return NULL; }
+static inline void i915_svm_devmem_remove(struct i915_devmem *devmem) { }
 #endif
 
 #endif /* __I915_SVM_H */
diff --git a/drivers/gpu/drm/i915/i915_svm_devmem.c b/drivers/gpu/drm/i915/i915_svm_devmem.c
new file mode 100644
index 000000000000..40c2f79ff614
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_svm_devmem.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/mm_types.h>
+#include <linux/sched/mm.h>
+
+#include "i915_svm.h"
+#include "intel_memory_region.h"
+
+/* State for one host<->device migration request, processed in chunks. */
+struct i915_devmem_migrate {
+	struct drm_i915_private *i915;
+	/* migrate_vma arguments for the chunk currently being processed */
+	struct migrate_vma *args;
+
+	/* Source and destination memory regions of the migration */
+	enum intel_region_id src_id;
+	enum intel_region_id dst_id;
+	/* Number of pages covered by the current chunk */
+	u64 npages;
+};
+
+/* DEVICE_PRIVATE pagemap registered with HMM for one device memory region */
+struct i915_devmem {
+	struct drm_i915_private *i915;
+	struct dev_pagemap pagemap;
+	/* First and last pfn of the remapped device memory range */
+	unsigned long pfn_first;
+	unsigned long pfn_last;
+};
+
+/* Return true if @page is one of our DEVICE_PRIVATE device-memory pages. */
+static inline bool
+i915_dmem_page(struct drm_i915_private *dev_priv, struct page *page)
+{
+	return is_device_private_page(page);
+}
+
+/*
+ * Rewrite HMM pfn entries that refer to our DEVICE_PRIVATE pages so they
+ * carry the device (LMEM) address of the page instead of the host pfn,
+ * while preserving the low flag bits of the entry.  Entries for pages we
+ * do not own are zeroed with a warning.
+ */
+void i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
+			   struct hmm_range *range)
+{
+	unsigned long i, npages;
+
+	npages = (range->end - range->start) >> PAGE_SHIFT;
+	for (i = 0; i < npages; ++i) {
+		struct i915_buddy_block *block;
+		struct intel_memory_region *mem;
+		struct page *page;
+		u64 addr;
+
+		/* Empty / non-present entries are left untouched */
+		page = hmm_device_entry_to_page(range, range->pfns[i]);
+		if (!page)
+			continue;
+
+		/* Ordinary system memory pages need no conversion */
+		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE]))
+			continue;
+
+		if (!i915_dmem_page(dev_priv, page)) {
+			WARN(1, "Some unknown device memory !\n");
+			range->pfns[i] = 0;
+			continue;
+		}
+
+		/*
+		 * Device address = region start + block offset + page's
+		 * offset within the buddy block that backs it.
+		 */
+		block = page->zone_device_data;
+		mem = block->private;
+		addr = mem->region.start +
+		       i915_buddy_block_offset(block);
+		addr += (page_to_pfn(page) - block->pfn_first) << PAGE_SHIFT;
+
+		/* Drop DEVICE_PRIVATE, keep flag bits, install device pfn */
+		range->pfns[i] &= ~range->flags[HMM_PFN_DEVICE_PRIVATE];
+		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
+		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+	}
+}
+
+/*
+ * Allocate enough buddy blocks from @mem to back @npages device pages and
+ * initialize each block's pfn_first/bitmap for per-page hand-out.
+ * On success the blocks are chained on @blocks; returns 0, or a negative
+ * errno from the buddy allocator.
+ */
+static int
+i915_devmem_page_alloc_locked(struct intel_memory_region *mem,
+			      unsigned long npages,
+			      struct list_head *blocks)
+{
+	unsigned long size = ALIGN((npages * PAGE_SIZE), mem->mm.chunk_size);
+	struct i915_buddy_block *block;
+	int ret;
+
+	INIT_LIST_HEAD(blocks);
+	ret = __intel_memory_region_get_pages_buddy(mem, size, 0, blocks);
+	if (unlikely(ret))
+		goto alloc_failed;
+
+	list_for_each_entry(block, blocks, link) {
+		/* Map the block's offset into the pagemap's pfn space */
+		block->pfn_first = mem->devmem->pfn_first;
+		block->pfn_first += i915_buddy_block_offset(block) /
+				    PAGE_SIZE;
+		/* All pages in a freshly allocated block are free */
+		bitmap_zero(block->bitmap, I915_BUDDY_MAX_PAGES);
+		DRM_DEBUG_DRIVER("%s pfn_first 0x%lx off 0x%llx size 0x%llx\n",
+				 "Allocated block", block->pfn_first,
+				 i915_buddy_block_offset(block),
+				 i915_buddy_block_size(&mem->mm, block));
+	}
+
+alloc_failed:
+	return ret;
+}
+
+/*
+ * Hand out one free page from the blocks on @blocks.  The page is
+ * returned referenced and locked (the state migrate_vma expects for
+ * destination pages), with zone_device_data pointing back at its buddy
+ * block.  A block is unlinked from the list once every page in it is in
+ * use.  Returns NULL if the list is empty or a block bitmap is
+ * unexpectedly full.
+ */
+static struct page *
+i915_devmem_page_get_locked(struct intel_memory_region *mem,
+			    struct list_head *blocks)
+{
+	struct i915_buddy_block *block, *on;
+
+	list_for_each_entry_safe(block, on, blocks, link) {
+		unsigned long weight, max;
+		unsigned long i, pfn;
+		struct page *page;
+
+		max = i915_buddy_block_size(&mem->mm, block) / PAGE_SIZE;
+		i = find_first_zero_bit(block->bitmap, max);
+		if (unlikely(i == max)) {
+			/* Fully-used blocks are unlinked below, so a full
+			 * block on the list indicates a bookkeeping bug. */
+			WARN(1, "Getting a page should have never failed\n");
+			break;
+		}
+
+		set_bit(i, block->bitmap);
+		pfn = block->pfn_first + i;
+		page = pfn_to_page(pfn);
+		get_page(page);
+		lock_page(page);
+		page->zone_device_data = block;
+		weight = bitmap_weight(block->bitmap, max);
+		/* Block exhausted: remove it from the free list */
+		if (weight == max)
+			list_del_init(&block->link);
+		DRM_DEBUG_DRIVER("%s pfn 0x%lx block weight 0x%lx\n",
+				 "Allocated page", pfn, weight);
+		return page;
+	}
+	return NULL;
+}
+
+/* Undo the lock_page()/get_page() taken in i915_devmem_page_get_locked() */
+static void
+i915_devmem_page_free_locked(struct drm_i915_private *dev_priv,
+			     struct page *page)
+{
+	unlock_page(page);
+	put_page(page);
+}
+
+/*
+ * Allocate destination device pages for the source pages collected by
+ * migrate_vma_setup() and populate args->dst for migrate_vma_pages().
+ *
+ * NOTE(review): despite the "Copy the pages" marker below, no copy of
+ * page contents is performed here — confirm whether the blit is
+ * deliberately deferred to a later patch in the series.
+ *
+ * Returns 0 on success, -ENOMEM if no page could be migrated or the
+ * buddy allocator's error code.
+ */
+static int
+i915_devmem_migrate_alloc_and_copy(struct i915_devmem_migrate *migrate)
+{
+	struct drm_i915_private *i915 = migrate->i915;
+	struct migrate_vma *args = migrate->args;
+	struct intel_memory_region *mem;
+	struct list_head blocks = {0};
+	unsigned long i, npages, cnt;
+	struct page *page;
+	int ret;
+
+	npages = (args->end - args->start) >> PAGE_SHIFT;
+	DRM_DEBUG_DRIVER("start 0x%lx npages %ld\n", args->start, npages);
+
+	/* Check source pages */
+	for (i = 0, cnt = 0; i < npages; i++) {
+		args->dst[i] = 0;
+		page = migrate_pfn_to_page(args->src[i]);
+		if (unlikely(!page || !(args->src[i] & MIGRATE_PFN_MIGRATE)))
+			continue;
+
+		/* Mark slot as wanting a destination page */
+		args->dst[i] = MIGRATE_PFN_VALID;
+		cnt++;
+	}
+
+	/* Nothing migratable in this chunk */
+	if (!cnt) {
+		ret = -ENOMEM;
+		goto migrate_out;
+	}
+
+	mem = i915->mm.regions[migrate->dst_id];
+	ret = i915_devmem_page_alloc_locked(mem, cnt, &blocks);
+	if (unlikely(ret))
+		goto migrate_out;
+
+	/* Allocate device memory */
+	for (i = 0, cnt = 0; i < npages; i++) {
+		if (!args->dst[i])
+			continue;
+
+		page = i915_devmem_page_get_locked(mem, &blocks);
+		if (unlikely(!page)) {
+			WARN(1, "Failed to get dst page\n");
+			args->dst[i] = 0;
+			continue;
+		}
+
+		cnt++;
+		args->dst[i] = migrate_pfn(page_to_pfn(page)) |
+			       MIGRATE_PFN_LOCKED;
+	}
+
+	if (!cnt) {
+		ret = -ENOMEM;
+		goto migrate_out;
+	}
+
+	/* Copy the pages */
+	migrate->npages = npages;
+migrate_out:
+	/*
+	 * On error, release any destination pages already handed out.
+	 * NOTE(review): buddy blocks on @blocks from which no page was
+	 * taken appear to be left allocated here — verify they are not
+	 * leaked on this path.
+	 */
+	if (unlikely(ret)) {
+		for (i = 0; i < npages; i++) {
+			if (args->dst[i] & MIGRATE_PFN_LOCKED) {
+				page = migrate_pfn_to_page(args->dst[i]);
+				i915_devmem_page_free_locked(i915, page);
+			}
+			args->dst[i] = 0;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Post-migration hook run after migrate_vma_pages(); currently only logs
+ * the chunk size (GPU mapping of the migrated range is done elsewhere).
+ */
+void i915_devmem_migrate_finalize_and_map(struct i915_devmem_migrate *migrate)
+{
+	/* npages is u64: use the unsigned format specifier */
+	DRM_DEBUG_DRIVER("npages %llu\n", migrate->npages);
+}
+
+/*
+ * Process one chunk set up by migrate_vma_setup(): allocate destination
+ * device pages, commit the migration and run the post-migration hook.
+ * migrate_vma_finalize() is called unconditionally so source pages are
+ * restored on failure.
+ */
+static void i915_devmem_migrate_chunk(struct i915_devmem_migrate *migrate)
+{
+	int ret;
+
+	ret = i915_devmem_migrate_alloc_and_copy(migrate);
+	if (!ret) {
+		migrate_vma_pages(migrate->args);
+		i915_devmem_migrate_finalize_and_map(migrate);
+	}
+	migrate_vma_finalize(migrate->args);
+}
+
+/*
+ * Migrate the pages of @vma in [start, end) into device memory region
+ * @mem, in chunks of at most I915_BUDDY_MAX_PAGES pages (the size of the
+ * src/dst pfn arrays).  Returns 0 or a migrate_vma_setup() error.
+ */
+int i915_devmem_migrate_vma(struct intel_memory_region *mem,
+			    struct vm_area_struct *vma,
+			    unsigned long start,
+			    unsigned long end)
+{
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	unsigned long max = min_t(unsigned long, I915_BUDDY_MAX_PAGES, npages);
+	struct i915_devmem_migrate migrate = {0};
+	struct migrate_vma args = {
+		.vma		= vma,
+		.start		= start,
+	};
+	unsigned long c, i;
+	int ret = 0;
+
+	/* XXX: Opportunistically migrate additional pages? */
+	DRM_DEBUG_DRIVER("start 0x%lx end 0x%lx\n", start, end);
+	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
+	if (unlikely(!args.src))
+		return -ENOMEM;
+
+	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
+	if (unlikely(!args.dst)) {
+		kfree(args.src);
+		return -ENOMEM;
+	}
+
+	/* XXX: Support migrating from LMEM to SMEM */
+	migrate.args = &args;
+	migrate.i915 = mem->i915;
+	migrate.src_id = INTEL_REGION_SMEM;
+	migrate.dst_id = MEMORY_TYPE_FROM_REGION(mem->id);
+	for (i = 0; i < npages; i += c) {
+		/* Last chunk only covers the pages that remain */
+		c = min_t(unsigned long, I915_BUDDY_MAX_PAGES, npages - i);
+		/*
+		 * Advance the window from the previous chunk's end, not
+		 * from the overall start, so each chunk covers new pages.
+		 */
+		args.end = args.start + (c << PAGE_SHIFT);
+		ret = migrate_vma_setup(&args);
+		if (unlikely(ret))
+			goto migrate_done;
+		if (args.cpages)
+			i915_devmem_migrate_chunk(&migrate);
+		args.start = args.end;
+	}
+migrate_done:
+	kfree(args.dst);
+	kfree(args.src);
+	return ret;
+}
+
+/*
+ * CPU fault on one of our DEVICE_PRIVATE pages.  Migration back to
+ * system memory is not implemented yet (this patch only supports
+ * host -> device migration), so the faulting task gets SIGBUS.
+ */
+static vm_fault_t i915_devmem_migrate_to_ram(struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+/*
+ * ZONE_DEVICE page_free callback: clear the page's slot in its buddy
+ * block's bitmap and release the whole block back to the region once no
+ * page in it remains in use.
+ */
+static void i915_devmem_page_free(struct page *page)
+{
+	struct i915_buddy_block *block = page->zone_device_data;
+	struct intel_memory_region *mem = block->private;
+	unsigned long i, max, weight;
+
+	max = i915_buddy_block_size(&mem->mm, block) / PAGE_SIZE;
+	i = page_to_pfn(page) - block->pfn_first;
+	clear_bit(i, block->bitmap);
+	weight = bitmap_weight(block->bitmap, max);
+	DRM_DEBUG_DRIVER("%s pfn 0x%lx block weight 0x%lx\n",
+			 "Freeing page", page_to_pfn(page), weight);
+	/* Last page of the block freed: return the block to the buddy */
+	if (!weight) {
+		DRM_DEBUG_DRIVER("%s pfn_first 0x%lx off 0x%llx size 0x%llx\n",
+				 "Freeing block", block->pfn_first,
+				 i915_buddy_block_offset(block),
+				 i915_buddy_block_size(&mem->mm, block));
+		__intel_memory_region_put_block_buddy(block);
+	}
+}
+
+/* ZONE_DEVICE callbacks for the LMEM DEVICE_PRIVATE pagemap */
+static const struct dev_pagemap_ops i915_devmem_pagemap_ops = {
+	.page_free = i915_devmem_page_free,
+	.migrate_to_ram = i915_devmem_migrate_to_ram,
+};
+
+/*
+ * Create a DEVICE_PRIVATE pagemap of @size bytes so device memory pages
+ * get struct pages and can participate in HMM migration.  Returns the
+ * new i915_devmem, or NULL on failure.
+ *
+ * NOTE(review): this returns NULL on error, but the caller in
+ * intel_region_lmem.c checks the result with IS_ERR(), which is false
+ * for NULL — one of the two sides should be adjusted.
+ */
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915, u64 size)
+{
+	struct device *dev = &i915->drm.pdev->dev;
+	struct i915_devmem *devmem;
+	struct resource *res;
+
+	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+	if (!devmem)
+		return NULL;
+
+	devmem->i915 = i915;
+	/* Reserve a physical address range to host the device pages */
+	res = devm_request_free_mem_region(dev, &iomem_resource, size);
+	if (IS_ERR(res))
+		goto out_free;
+
+	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	devmem->pagemap.res = *res;
+	devmem->pagemap.ops = &i915_devmem_pagemap_ops;
+	if (IS_ERR(devm_memremap_pages(dev, &devmem->pagemap)))
+		goto out_free;
+
+	devmem->pfn_first = res->start >> PAGE_SHIFT;
+	devmem->pfn_last = res->end >> PAGE_SHIFT;
+	return devmem;
+out_free:
+	kfree(devmem);
+	return NULL;
+}
+
+/*
+ * Tear down a pagemap created by i915_svm_devmem_add().  Tolerates NULL
+ * since region_lmem_release() calls this unconditionally and the devmem
+ * may never have been created.
+ */
+void i915_svm_devmem_remove(struct i915_devmem *devmem)
+{
+	if (!devmem)
+		return;
+
+	/* XXX: Is it the right way to release? */
+	release_resource(&devmem->pagemap.res);
+	kfree(devmem);
+}
+
+/*
+ * I915_SVM_MIGRATE ioctl: best-effort migration of the caller's virtual
+ * address range [start, start + length) into the requested device memory
+ * region.  Per-vma migration errors are ignored; only argument errors
+ * are reported.
+ */
+int i915_svm_migrate_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_svm_migrate *args = data;
+	unsigned long addr, end, size = args->length;
+	struct intel_memory_region *mem;
+	enum intel_region_id id;
+	struct mm_struct *mm;
+
+	DRM_DEBUG_DRIVER("start 0x%llx length 0x%llx region 0x%x\n",
+			 args->start, args->length, args->region);
+	/* Only migration into device (local) memory is supported for now */
+	id = __region_id(args->region);
+	if ((MEMORY_TYPE_FROM_REGION(args->region) != INTEL_MEMORY_LOCAL) ||
+	    id == INTEL_REGION_UNKNOWN)
+		return -EINVAL;
+
+	mem = i915->mm.regions[id];
+
+	/* get_task_mm() can return NULL (e.g. exiting task) */
+	mm = get_task_mm(current);
+	if (!mm)
+		return -EINVAL;
+
+	down_read(&mm->mmap_sem);
+
+	for (addr = args->start, end = args->start + size; addr < end;) {
+		struct vm_area_struct *vma;
+		unsigned long next;
+
+		vma = find_vma_intersection(mm, addr, end);
+		if (!vma)
+			break;
+
+		addr &= PAGE_MASK;
+		/* Skip any unmapped hole before the vma */
+		if (addr < vma->vm_start)
+			addr = vma->vm_start;
+		next = min(vma->vm_end, end);
+		next = round_up(next, PAGE_SIZE);
+		/* This is a best effort so we ignore errors */
+		i915_devmem_migrate_vma(mem, vma, addr, next);
+		addr = next;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 4622c086c06d..95e1eff0c0b0 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -71,6 +71,7 @@ struct intel_memory_region_ops {
 struct intel_memory_region {
 	struct drm_i915_private *i915;
 
+	struct i915_devmem *devmem;
 	const struct intel_memory_region_ops *ops;
 
 	struct io_mapping iomap;
@@ -100,6 +101,19 @@ struct intel_memory_region {
 	} objects;
 };
 
+/* Translate a user-visible region value to its intel_region_id index. */
+static inline enum intel_region_id
+__region_id(u32 region)
+{
+	enum intel_region_id i;
+
+	for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+		if (intel_region_map[i] == region)
+			return i;
+	}
+
+	return INTEL_REGION_UNKNOWN;
+}
+
 int intel_memory_region_init_buddy(struct intel_memory_region *mem);
 void intel_memory_region_release_buddy(struct intel_memory_region *mem);
 
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index eddb392917aa..2ba4a4720eb6 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -4,6 +4,7 @@
  */
 
 #include "i915_drv.h"
+#include "i915_svm.h"
 #include "intel_memory_region.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
@@ -66,6 +67,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
 static void
 region_lmem_release(struct intel_memory_region *mem)
 {
+	i915_svm_devmem_remove(mem->devmem);
 	release_fake_lmem_bar(mem);
 	io_mapping_fini(&mem->iomap);
 	intel_memory_region_release_buddy(mem);
@@ -122,6 +124,14 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
 					 PAGE_SIZE,
 					 io_start,
 					 &intel_region_lmem_ops);
+	if (!IS_ERR(mem)) {
+		mem->devmem = i915_svm_devmem_add(i915, mappable_end);
+		if (IS_ERR(mem->devmem)) {
+			intel_memory_region_put(mem);
+			mem = ERR_CAST(mem->devmem);
+		}
+	}
+
 	if (!IS_ERR(mem)) {
 		DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
 		DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
-- 
2.21.0.rc0.32.g243a4c7e27



More information about the Intel-gfx mailing list