[Intel-gfx] [PATCH 1/2] drm/i915/bxt: prevent allocating context object from HIGHMEM
Imre Deak
imre.deak at intel.com
Thu Sep 17 09:17:43 PDT 2015
At least on the BXT A stepping we need to map part of the context object as
uncached. For this we first set the corresponding page to uncached and
then use kmap/kunmap to get a kernel mapping whenever we want to update
the context. Since kmap for a HIGHMEM page always returns a write-back
mapping, we need to prevent allocating the context object from HIGHMEM.

This is needed by the next patch, which implements the actual workaround
for BXT A.
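
For illustration, a minimal sketch (not part of this series) of the update
path that the lowmem allocation enables. The helper name and its page
index / register offset parameters are hypothetical, and the page is
assumed to have been set uncached once at context creation time, e.g. with
set_pages_array_uc():

static void bxt_emit_context_reg(struct drm_i915_gem_object *ctx_obj,
				 int page_idx, int dw_offset, u32 value)
{
	/* Page lookup helper from i915_drv.h. */
	struct page *page = i915_gem_object_get_page(ctx_obj, page_idx);
	u32 *reg_state;

	/*
	 * Because the object was allocated without __GFP_HIGHMEM, kmap()
	 * (<linux/highmem.h>) returns the page's permanent lowmem mapping,
	 * which honours the uncached attribute set on it earlier; kmap()
	 * of a HIGHMEM page would instead hand back an ordinary write-back
	 * mapping.
	 */
	reg_state = kmap(page);
	reg_state[dw_offset] = value;
	kunmap(page);
}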
Signed-off-by: Imre Deak <imre.deak at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h  |  2 ++
 drivers/gpu/drm/i915/i915_gem.c  | 21 ++++++++++++++++++---
 drivers/gpu/drm/i915/intel_lrc.c |  5 ++++-
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4ea3e7b..6e7f91e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2806,6 +2806,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
+struct drm_i915_gem_object *
+i915_gem_alloc_object_no_highmem(struct drm_device *dev, size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
		struct drm_device *dev, const void *data, size_t size);
 void i915_init_vm(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cb0df7e..11c9191 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4243,8 +4243,8 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
-						  size_t size)
+static struct drm_i915_gem_object *
+__i915_gem_alloc_object(struct drm_device *dev, size_t size, bool highmem)
 {
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
@@ -4262,10 +4262,13 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
 		/* 965gm cannot relocate objects above 4GiB. */
-		mask &= ~__GFP_HIGHMEM;
+		highmem = false;
 		mask |= __GFP_DMA32;
 	}
 
+	if (!highmem)
+		mask &= ~__GFP_HIGHMEM;
+
 	mapping = file_inode(obj->base.filp)->i_mapping;
 	mapping_set_gfp_mask(mapping, mask);
 
@@ -4296,6 +4299,18 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	return obj;
 }
 
+struct drm_i915_gem_object *
+i915_gem_alloc_object(struct drm_device *dev, size_t size)
+{
+	return __i915_gem_alloc_object(dev, size, true);
+}
+
+struct drm_i915_gem_object *
+i915_gem_alloc_object_no_highmem(struct drm_device *dev, size_t size)
+{
+	return __i915_gem_alloc_object(dev, size, false);
+}
+
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 {
 	/* If we are the last user of the backing storage (be it shmemfs
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fe06accb0..942069f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2459,7 +2459,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-	ctx_obj = i915_gem_alloc_object(dev, context_size);
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+		ctx_obj = i915_gem_alloc_object_no_highmem(dev, context_size);
+	else
+		ctx_obj = i915_gem_alloc_object(dev, context_size);
 	if (!ctx_obj) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return -ENOMEM;
--
2.1.4