[Intel-gfx] [RFC 1/2] drm/i915: opt-out CPU and WC mmaps from FBC
Paulo Zanoni
paulo.r.zanoni at intel.com
Thu Feb 25 16:48:27 UTC 2016
FBC and the frontbuffer tracking infrastructure were designed assuming
that user space applications would follow a specific set of rules
regarding frontbuffer management and mmapping. I recently discovered
that current user space is not exactly following these rules: my
investigation led me to the conclusion that the generic backend from
SNA - used by SKL and the other new platforms without a specific
backend - is not issuing sw_finish/dirty_fb/set_domain IOCTLs when
using the CPU and WC mmaps. I discovered this when running lightdm: I
would type the password and nothing would appear on the screen unless
I moved the mouse over the place where the letters were supposed to
appear.
Since we can't detect whether the current DRM master is a well-behaved
application or not, let's take the sledgehammer approach: assume that
every user space client on every gen is bad, and disable FBC whenever
the frontbuffer is CPU/WC mmapped. This will allow us to enable FBC
on SKL, moving its FBC-related power savings from "always 0%" to "> 0%
in some cases". This also has the potential to hurt FBC on the older
platforms a little bit, but my quick check suggests we're fine, so I
didn't make this commit SKL-only.
Notice that we may need an equivalent commit for PSR too. We also need
to investigate whether PSR needs to be disabled on GTT mmaps: if so,
we'll have a problem, since our current user space seems to always use
GTT mmaps, and we could end up keeping PSR disabled forever.
And if you're reading this commit message because either
igt/kms_frontbuffer_tracking or igt/kms_fbc_crc broke and you decided
to bisect, please update your IGT repository. You'll need a series of
commits ending with:
kms_frontbuffer_tracking: be aware of the new FBC Kernel workaround
Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 9 +++++++++
drivers/gpu/drm/i915/i915_gem.c | 18 ++++++++++++------
drivers/gpu/drm/i915/intel_drv.h | 4 ++++
drivers/gpu/drm/i915/intel_fbc.c | 32 ++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/intel_frontbuffer.c | 25 +++++++++++++++++++++++++
5 files changed, 82 insertions(+), 6 deletions(-)
We've been discussing other possible alternatives, but here's one.
Clever alternative ideas are always welcome.
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9e76bfc..fede522 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -904,6 +904,13 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};
+enum mmap_bits {
+ MMAP_BITS_CPU = 1 << 0,
+ MMAP_BITS_GTT = 1 << 1,
+ MMAP_BITS_WC = 1 << 2,
+ MMAP_BITS_COUNT = 3,
+};
+
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
@@ -941,6 +948,7 @@ struct intel_fbc {
unsigned int stride;
int fence_reg;
unsigned int tiling_mode;
+ unsigned int mmap_bits;
} fb;
} state_cache;
@@ -2163,6 +2171,7 @@ struct drm_i915_gem_object {
unsigned int cache_dirty:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ unsigned int mmap_bits:MMAP_BITS_COUNT;
unsigned int pin_display;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f68f346..9c0e7b9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1726,7 +1726,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
unsigned long addr;
if (args->flags & ~(I915_MMAP_WC))
@@ -1735,19 +1735,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_MMAP_WC && !cpu_has_pat)
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
- if (!obj->filp) {
- drm_gem_object_unreference_unlocked(obj);
+ if (!obj->base.filp) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
- addr = vm_mmap(obj->filp, 0, args->size,
+ addr = vm_mmap(obj->base.filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
if (args->flags & I915_MMAP_WC) {
@@ -1763,10 +1763,14 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = -ENOMEM;
up_write(&mm->mmap_sem);
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
if (IS_ERR((void *)addr))
return addr;
+ obj->mmap_bits |= (args->flags & I915_MMAP_WC) ?
+ MMAP_BITS_WC : MMAP_BITS_CPU;
+ intel_frontbuffer_obj_mmap(obj);
+
args->addr_ptr = (uint64_t) addr;
return 0;
@@ -2101,6 +2105,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto out;
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+ obj->mmap_bits |= MMAP_BITS_GTT;
+ intel_frontbuffer_obj_mmap(obj);
out:
drm_gem_object_unreference(&obj->base);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4852049..d02bd7b1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1042,6 +1042,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits);
+void intel_frontbuffer_obj_mmap(struct drm_i915_gem_object *obj);
unsigned int intel_fb_align_height(struct drm_device *dev,
unsigned int height,
uint32_t pixel_format,
@@ -1343,6 +1344,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
+void intel_fbc_mmap_notify(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ unsigned int mmap_bits);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 0f0492f..a3fd699 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -745,6 +745,12 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
cache->fb.stride = fb->pitches[0];
cache->fb.fence_reg = obj->fence_reg;
cache->fb.tiling_mode = obj->tiling_mode;
+ cache->fb.mmap_bits = obj->mmap_bits;
+}
+
+static bool need_mmap_disable_workaround(struct intel_fbc *fbc)
+{
+ return fbc->state_cache.fb.mmap_bits & (MMAP_BITS_CPU | MMAP_BITS_WC);
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -816,6 +822,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ if (need_mmap_disable_workaround(fbc)) {
+ fbc->no_fbc_reason = "FB is CPU or WC mmapped";
+ return false;
+ }
+
return true;
}
@@ -1014,6 +1025,27 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
mutex_unlock(&fbc->lock);
}
+void intel_fbc_mmap_notify(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ unsigned int mmap_bits)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->enabled &&
+ (intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits)) {
+ fbc->state_cache.fb.mmap_bits = mmap_bits;
+ if (need_mmap_disable_workaround(fbc))
+ intel_fbc_deactivate(dev_priv);
+ }
+
+ mutex_unlock(&fbc->lock);
+}
+
/**
* intel_fbc_choose_crtc - select a CRTC to enable FBC on
* @dev_priv: i915 device instance
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index ac85357..a4471c3 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -241,3 +241,28 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
+
+/**
+ * intel_frontbuffer_obj_mmap - notifies about frontbuffer objects being mmapped
+ * @obj: GEM object being mmapped
+ *
+ * This function gets called whenever an object gets mmapped. As of February
+ * 2016, not every user space application follows the protocol assumed by the
+ * frontbuffer tracking subsystem when it was created, so this mmap notify
+ * callback can be used to completely disable frontbuffer features such as FBC
+ * and PSR. Once every user space is properly behaving - and once we can verify
+ * this somehow -, we'll be able to remove this function.
+ *
+ * Also notice that there's no munmap API because user space calls munmap()
+ * directly. Even if we had, it probably wouldn't help since munmap() calls are
+ * not common.
+ */
+void intel_frontbuffer_obj_mmap(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ intel_fbc_mmap_notify(dev_priv, obj->frontbuffer_bits, obj->mmap_bits);
+}
--
2.7.0
More information about the Intel-gfx
mailing list