[PATCH 1/3] stolen
Matthew Auld
matthew.auld at intel.com
Tue Nov 26 16:16:06 UTC 2019
From: CQ Tang <cq.tang at intel.com>
- use the region mm.lock, ditch the stolen_lock.
- make it more region centric.
Signed-off-by: CQ Tang <cq.tang at intel.com>
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
drivers/gpu/drm/i915/display/intel_display.c | 4 +-
drivers/gpu/drm/i915/display/intel_fbc.c | 24 ++-
drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 180 +++++++++----------
drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 7 +-
drivers/gpu/drm/i915/i915_drv.h | 6 -
drivers/gpu/drm/i915/intel_memory_region.h | 3 +
6 files changed, 114 insertions(+), 110 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 53dc310a5f6d..814b84115c2c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3139,7 +3139,9 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
}
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- base_aligned,
+/* XXX: WTF does this do??? */
+ base_aligned -
+ dev_priv->dsm.start,
base_aligned,
size_aligned);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 92c7eb243559..ecebe3fa3c64 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -443,6 +443,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
+ struct intel_memory_region *mem = dev_priv->mm.regions[INTEL_REGION_STOLEN];
int compression_threshold = 1;
int ret;
u64 end;
@@ -464,7 +465,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
*/
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+ ret = i915_gem_stolen_insert_node_in_range(mem, node, size <<= 1,
4096, 0, end);
if (ret == 0)
return compression_threshold;
@@ -475,7 +476,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
(fb_cpp == 2 && compression_threshold == 2))
return 0;
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+ ret = i915_gem_stolen_insert_node_in_range(mem, node, size >>= 1,
4096, 0, end);
if (ret && INTEL_GEN(dev_priv) <= 4) {
return 0;
@@ -490,6 +491,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_memory_region *mem = dev_priv->mm.regions[INTEL_REGION_STOLEN];
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
int size, fb_cpp, ret;
@@ -519,7 +521,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
if (!compressed_llb)
goto err_fb;
- ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+ ret = i915_gem_stolen_insert_node(mem, compressed_llb,
4096, 4096);
if (ret)
goto err_fb;
@@ -545,22 +547,23 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
err_fb:
kfree(compressed_llb);
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ i915_gem_stolen_remove_node(mem, &fbc->compressed_fb);
err_llb:
- if (drm_mm_initialized(&dev_priv->mm.stolen))
+ if (drm_mm_initialized(&mem->stolen))
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
+ struct intel_memory_region *mem = dev_priv->mm.regions[INTEL_REGION_STOLEN];
struct intel_fbc *fbc = &dev_priv->fbc;
if (drm_mm_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ i915_gem_stolen_remove_node(mem, &fbc->compressed_fb);
if (fbc->compressed_llb) {
- i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
+ i915_gem_stolen_remove_node(mem, fbc->compressed_llb);
kfree(fbc->compressed_llb);
}
}
@@ -1313,6 +1316,7 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
+ struct intel_memory_region *mem = dev_priv->mm.regions[INTEL_REGION_STOLEN];
struct intel_fbc *fbc = &dev_priv->fbc;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -1320,7 +1324,11 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->enabled = false;
fbc->active = false;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ /* XXX: can mem really be NULL ???? */
+
+ GEM_WARN_ON(!mem);
+
+ if (!mem || !drm_mm_initialized(&mem->stolen))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
if (need_fbc_vtd_wa(dev_priv))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index afb08a1704a2..5d71e4a3408a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -26,42 +26,42 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
+int i915_gem_stolen_insert_node_in_range(struct intel_memory_region *mem,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start, u64 end)
{
int ret;
- if (!drm_mm_initialized(&i915->mm.stolen))
+ if (!drm_mm_initialized(&mem->stolen))
return -ENODEV;
/* WaSkipStolenMemoryFirstPage:bdw+ */
- if (INTEL_GEN(i915) >= 8 && start < 4096)
+ if (INTEL_GEN(mem->i915) >= 8 && start < 4096)
start = 4096;
- mutex_lock(&i915->mm.stolen_lock);
- ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
+ mutex_lock(&mem->mm_lock);
+ ret = drm_mm_insert_node_in_range(&mem->stolen, node,
size, alignment, 0,
start, end, DRM_MM_INSERT_BEST);
- mutex_unlock(&i915->mm.stolen_lock);
+ mutex_unlock(&mem->mm_lock);
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
+int i915_gem_stolen_insert_node(struct intel_memory_region *mem,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- return i915_gem_stolen_insert_node_in_range(i915, node, size,
+ return i915_gem_stolen_insert_node_in_range(mem, node, size,
alignment, 0, U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
+void i915_gem_stolen_remove_node(struct intel_memory_region *mem,
struct drm_mm_node *node)
{
- mutex_lock(&i915->mm.stolen_lock);
+ mutex_lock(&mem->mm_lock);
drm_mm_remove_node(node);
- mutex_unlock(&i915->mm.stolen_lock);
+ mutex_unlock(&mem->mm_lock);
}
static int i915_adjust_stolen(struct drm_i915_private *i915,
@@ -152,12 +152,12 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
return 0;
}
-static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
+static void i915_gem_cleanup_stolen(struct intel_memory_region *mem)
{
- if (!drm_mm_initialized(&i915->mm.stolen))
+ if (!drm_mm_initialized(&mem->stolen))
return;
- drm_mm_takedown(&i915->mm.stolen);
+ drm_mm_takedown(&mem->stolen);
}
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
@@ -365,14 +365,13 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-static int i915_gem_init_stolen(struct drm_i915_private *i915)
+static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
+ struct drm_i915_private *i915 = mem->i915;
struct intel_uncore *uncore = &i915->uncore;
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
- mutex_init(&i915->mm.stolen_lock);
-
if (intel_vgpu_active(i915)) {
dev_notice(i915->drm.dev,
"%s, disabling use of stolen memory\n",
@@ -387,10 +386,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
return 0;
}
- if (resource_size(&intel_graphics_stolen_res) == 0)
+ if (resource_size(&mem->region) == 0)
return 0;
- i915->dsm = intel_graphics_stolen_res;
+ i915->dsm = mem->region;
if (i915_adjust_stolen(i915, &i915->dsm))
return 0;
@@ -480,20 +479,20 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
resource_size(&i915->dsm) - reserved_total;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
+ drm_mm_init(&mem->stolen, 0, i915->stolen_usable_size);
return 0;
}
static struct sg_table *
-i915_pages_create_for_stolen(struct drm_device *dev,
+i915_pages_create_for_stolen(struct drm_i915_gem_object *obj,
resource_size_t offset, resource_size_t size)
{
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_memory_region *mem = obj->mm.region;
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&mem->region)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -513,7 +512,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)mem->region.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -522,7 +521,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
struct sg_table *pages =
- i915_pages_create_for_stolen(obj->base.dev,
+ i915_pages_create_for_stolen(obj,
obj->stolen->start,
obj->stolen->size);
if (IS_ERR(pages))
@@ -536,111 +535,103 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- /* Should only be called from i915_gem_object_release_stolen() */
- sg_free_table(pages);
- kfree(pages);
-}
-
-static void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
+ GEM_BUG_ON(!mem);
GEM_BUG_ON(!stolen);
- i915_gem_object_release_memory_region(obj);
-
- i915_gem_stolen_remove_node(i915, stolen);
+ i915_gem_stolen_remove_node(mem, stolen);
kfree(stolen);
+
+ sg_free_table(pages);
+ kfree(pages);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
- .release = i915_gem_object_release_stolen,
+ .release = i915_gem_object_release_memory_region,
};
static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
- struct drm_mm_node *stolen)
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
- unsigned int cache_level;
- int err = -ENOMEM;
+
+ if (!drm_mm_initialized(&mem->stolen))
+ return ERR_PTR(-ENODEV);
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
obj = i915_gem_object_alloc();
if (!obj)
- goto err;
+ return ERR_PTR(-ENOMEM);
- drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
- obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
- i915_gem_object_set_cache_coherency(obj, cache_level);
-
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto cleanup;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
- i915_gem_object_init_memory_region(obj, mem, 0);
+ i915_gem_object_init_memory_region(obj, mem, flags);
return obj;
-
-cleanup:
- i915_gem_object_free(obj);
-err:
- return ERR_PTR(err);
}
-static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
+ resource_size_t size)
{
- struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj, *err;
struct drm_mm_node *stolen;
int ret;
- if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
-
- if (size == 0)
- return ERR_PTR(-EINVAL);
+ mem = i915->mm.regions[INTEL_REGION_STOLEN];
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return ERR_PTR(-ENOMEM);
- ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
+ ret = i915_gem_stolen_insert_node(mem, stolen, size,
+ mem->min_page_size);
if (ret) {
obj = ERR_PTR(ret);
goto err_free;
}
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ obj = i915_gem_object_create_region(mem, size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ err = obj;
goto err_remove;
+ }
+
+ /* must set before pin pages */
+ obj->stolen = stolen;
+
+ /* if pinning fails, caller needs to free stolen */
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret) {
+ err = ERR_PTR(ret);
+ goto free_obj;
+ }
return obj;
+free_obj:
+ i915_gem_object_put(obj);
err_remove:
- i915_gem_stolen_remove_node(i915, stolen);
+ i915_gem_stolen_remove_node(mem, stolen);
err_free:
kfree(stolen);
- return obj;
-}
-
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *i915,
- resource_size_t size)
-{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
- size, I915_BO_ALLOC_CONTIGUOUS);
+ return err;
}
static int init_stolen(struct intel_memory_region *mem)
@@ -649,18 +640,18 @@ static int init_stolen(struct intel_memory_region *mem)
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- return i915_gem_init_stolen(mem->i915);
+ return i915_gem_init_stolen(mem);
}
static void release_stolen(struct intel_memory_region *mem)
{
- i915_gem_cleanup_stolen(mem->i915);
+ i915_gem_cleanup_stolen(mem);
}
static const struct intel_memory_region_ops i915_region_stolen_ops = {
.init = init_stolen,
.release = release_stolen,
- .create_object = _i915_gem_object_create_stolen,
+ .create_object = __i915_gem_object_create_stolen,
};
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
@@ -685,9 +676,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
struct i915_vma *vma;
int ret;
- if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
-
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
	&stolen_offset, &gtt_offset, &size);
@@ -703,30 +691,38 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
stolen->start = stolen_offset;
stolen->size = size;
- mutex_lock(&i915->mm.stolen_lock);
- ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
- mutex_unlock(&i915->mm.stolen_lock);
+ mutex_lock(&mem->mm_lock);
+ ret = drm_mm_reserve_node(&mem->stolen, stolen);
+ mutex_unlock(&mem->mm_lock);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
return ERR_PTR(ret);
}
- obj = __i915_gem_object_create_stolen(mem, stolen);
+ obj = i915_gem_object_create_region(mem, size,
+ I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(i915, stolen);
+ i915_gem_stolen_remove_node(mem, stolen);
kfree(stolen);
return obj;
}
+ /* must set before pin pages */
+ obj->stolen = stolen;
+
/* Some objects just need physical mem from stolen space */
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to pin stolen object\n");
+ i915_gem_stolen_remove_node(mem, stolen);
+ kfree(stolen);
goto err;
+ }
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index c1040627fbf3..511e8ffcf377 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -11,15 +11,16 @@
struct drm_i915_private;
struct drm_mm_node;
struct drm_i915_gem_object;
+struct intel_memory_region;
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct intel_memory_region *mem,
struct drm_mm_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct intel_memory_region *mem,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct intel_memory_region *mem,
struct drm_mm_node *node);
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fdae5a919bc8..95e22667dd2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -553,12 +553,6 @@ struct intel_l3_parity {
};
struct i915_gem_mm {
- /** Memory allocator for GTT stolen memory */
- struct drm_mm stolen;
- /** Protects the usage of the GTT stolen memory allocator. This is
- * always the inner lock when overlapping with struct_mutex. */
- struct mutex stolen_lock;
-
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
spinlock_t obj_lock;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 238722009677..3d8be30ee437 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_MEMORY_REGION_H__
#define __INTEL_MEMORY_REGION_H__
+#include <drm/drm_mm.h>
#include <linux/kref.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
@@ -75,6 +76,8 @@ struct intel_memory_region {
/* For fake LMEM */
struct drm_mm_node fake_mappable;
+ struct drm_mm stolen;
+
struct i915_buddy_mm mm;
struct mutex mm_lock;
--
2.20.1
More information about the Intel-gfx-trybot
mailing list