[Intel-gfx] [PATCH 07/15] drm/i915: pass mm.gtt_page_sizes to ppgtt insert_entries
Matthew Auld
matthew.auld at intel.com
Wed May 31 18:52:02 UTC 2017
In preparation for supporting huge pages in the ppgtt, we need to know
the details of mm.page_sizes at insertion time, so that we can easily
determine which page sizes we are allowed to use. This is especially
true for 64K, which we can't use arbitrarily, since it requires
aligning/padding the vm space to 2M, and that can't always be enforced.
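To illustrate the intent, here is a minimal sketch (not part of this
patch) of how an insert_entries() implementation could consult the new
argument. The insert_64K_ptes()/insert_4K_ptes() helpers are
hypothetical, and page_sizes->sg plus the I915_GTT_PAGE_SIZE_* flags
are assumed from the mm.page_sizes tracking introduced earlier in the
series:

/*
 * Illustrative sketch only. 64K PTEs are only legal if the backing
 * store actually provides 64K chunks and the vm space for the vma was
 * aligned/padded to 2M; GGTT callers pass NULL for page_sizes and so
 * always take the 4K path.
 */
static void sketch_insert_entries(struct i915_address_space *vm,
                                  struct sg_table *pages,
                                  u64 start,
                                  struct i915_page_sizes *page_sizes,
                                  enum i915_cache_level cache_level,
                                  u32 flags)
{
        if (page_sizes &&
            page_sizes->sg & I915_GTT_PAGE_SIZE_64K &&
            IS_ALIGNED(start, I915_GTT_PAGE_SIZE_2M))
                insert_64K_ptes(vm, pages, start, cache_level, flags);
        else
                insert_4K_ptes(vm, pages, start, cache_level, flags);
}

Accordingly, the ppgtt bind path below forwards
&vma->obj->mm.page_sizes, while the GGTT paths, which only ever use 4K
pages, pass NULL.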
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 24 ++++++++++++++++--------
drivers/gpu/drm/i915/i915_gem_gtt.h | 3 +++
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 3 ++-
drivers/gpu/drm/i915/selftests/mock_gtt.c | 1 +
4 files changed, 22 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0c1008a2bbda..935656802f09 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -208,7 +208,8 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
- cache_level, pte_flags);
+ &vma->obj->mm.page_sizes, cache_level,
+ pte_flags);
return 0;
}
@@ -909,6 +910,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
struct sg_table *pages,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level cache_level,
u32 unused)
{
@@ -927,6 +929,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct sg_table *pages,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level cache_level,
u32 unused)
{
@@ -1623,6 +1626,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -2092,6 +2096,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level level,
u32 unused)
{
@@ -2139,6 +2144,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level level,
u32 flags)
{
@@ -2238,7 +2244,7 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, NULL, arg->level, 0);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -2247,6 +2253,7 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct sg_table *st,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level level,
u32 unused)
{
@@ -2318,6 +2325,7 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level cache_level,
u32 unused)
{
@@ -2353,7 +2361,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
+ vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, NULL,
cache_level, pte_flags);
intel_runtime_pm_put(i915);
@@ -2407,16 +2415,16 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
goto err_pages;
}
- appgtt->base.insert_entries(&appgtt->base,
- vma->pages, vma->node.start,
+ appgtt->base.insert_entries(&appgtt->base, vma->pages,
+ vma->node.start,
+ &vma->obj->mm.page_sizes,
cache_level, pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm,
- vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
+ NULL, cache_level, pte_flags);
intel_runtime_pm_put(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f8db231c28aa..d45729b9da0c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -206,6 +206,8 @@ enum i915_cache_level;
struct i915_vma;
+struct i915_page_sizes;
+
struct i915_page_dma {
struct page *page;
union {
@@ -320,6 +322,7 @@ struct i915_address_space {
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 50710e3f1caa..81c0d6b87e68 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -256,7 +256,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
break;
vm->insert_entries(vm, obj->mm.pages, addr,
- I915_CACHE_NONE, 0);
+ &obj->mm.page_sizes, I915_CACHE_NONE,
+ 0);
}
count = n;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..4d594eaf0b53 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -35,6 +35,7 @@ static void mock_insert_page(struct i915_address_space *vm,
static void mock_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
u64 start,
+ struct i915_page_sizes *page_sizes,
enum i915_cache_level level, u32 flags)
{
}
--
2.9.4