[PATCH] drm: Inject a cond_resched() into long drm_clflush_sg()

Chris Wilson chris at chris-wilson.co.uk
Wed Jan 15 14:04:44 UTC 2020


Since we may try to flush the cachelines associated with large buffers
(an 8K framebuffer is about 128MiB, even before we try HDR), this leads
to unacceptably long latencies under voluntary preemption
(CONFIG_PREEMPT_VOLUNTARY). If we call cond_resched() between each sg
chunk, that is about every 128 pages, we have a natural break point at
which to check whether the process needs to be rescheduled. Naturally,
this means that drm_clflush_sg() can only be called from process
context -- which is true at the moment. The other clflush routines
remain usable from atomic context.
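For scale (illustrative arithmetic, assuming 4 bytes per pixel and
4KiB pages):

	7680 x 4320 pixels x 4 B ~= 126.6 MiB ~= 32,400 pages
	32,400 pages / ~128 pages per sg chunk ~= 253 reschedule points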

The only real question is whether we should prevent preemption over the
clflush range -- i.e. avoid switching CPUs entirely in the middle of
flushing the cachelines.
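
If we did want to avoid migration, the flush loop could pin itself per
chunk, along these lines (an illustrative sketch only, not what this
patch implements; 'it' is the sgt_iter introduced below):

	/* Hypothetical alternative: flush one sg chunk without
	 * migrating between CPUs, then offer a reschedule point
	 * before moving on to the next chunk.
	 */
	preempt_disable();
	for (; it.curr < it.max; it.curr += PAGE_SIZE)
		drm_clflush_page(pfn_to_page(it.pfn +
					     (it.curr >> PAGE_SHIFT)));
	preempt_enable();
	cond_resched();	/* natural break point between chunks */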

Reported-by: David Laight <David.Laight at ACULAB.COM>
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: David Laight <David.Laight at ACULAB.COM>
---
 drivers/gpu/drm/drm_cache.c | 49 ++++++++++++++++++++++++++++++++++---
 1 file changed, 45 insertions(+), 4 deletions(-)
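
As an aid to review: the new iterator (modelled on i915's
for_each_sgt_page()) behaves roughly like the open-coded loop below.
This is an illustrative expansion, not literal preprocessor output:

	struct sgt_iter it = __sgt_iter(st->sgl);
	struct page *page;

	while ((page = it.pfn == 0 ? NULL :
		       pfn_to_page(it.pfn + (it.curr >> PAGE_SHIFT)))) {
		drm_clflush_page(page);		/* loop body */
		if ((it.curr += PAGE_SIZE) >= it.max)
			/* next chunk; cond_resched() on chain hops */
			it = __sgt_iter(__sg_next_resched(it.sgp));
	}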

diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 03e01b000f7a..fc8abd3f72f3 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -112,23 +112,64 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 }
 EXPORT_SYMBOL(drm_clflush_pages);
 
+static __always_inline struct sgt_iter {
+	struct scatterlist *sgp;
+	unsigned long pfn;
+	unsigned int curr;
+	unsigned int max;
+} __sgt_iter(struct scatterlist *sgl) {
+	struct sgt_iter s = { .sgp = sgl };
+
+	if (s.sgp) {
+		s.max = s.curr = s.sgp->offset;
+		s.max += s.sgp->length;
+		s.pfn = page_to_pfn(sg_page(s.sgp));
+	}
+
+	return s;
+}
+
+static inline struct scatterlist *__sg_next_resched(struct scatterlist *sg)
+{
+	if (sg_is_last(sg))
+		return NULL;
+
+	++sg;
+	if (unlikely(sg_is_chain(sg))) {
+		sg = sg_chain_ptr(sg);
+		cond_resched();
+	}
+	return sg;
+}
+
+#define for_each_sgt_page(__pp, __iter, __sgt)				\
+	for ((__iter) = __sgt_iter((__sgt)->sgl);			\
+	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
+	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
+	     (__iter) = __sgt_iter(__sg_next_resched((__iter).sgp)), 0 : 0)
+
 /**
  * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
  * @st: struct sg_table.
  *
  * Flush every data cache line entry that points to an address in the
- * sg.
+ * sg. This may schedule between scatterlist chunks, in order to keep
+ * the system preemption-latency down for large buffers.
  */
 void
 drm_clflush_sg(struct sg_table *st)
 {
+	might_sleep();
+
 #if defined(CONFIG_X86)
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
-		struct sg_page_iter sg_iter;
+		struct sgt_iter it;
+		struct page *page;
 
 		mb(); /*CLFLUSH is ordered only by using memory barriers*/
-		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-			drm_clflush_page(sg_page_iter_page(&sg_iter));
+		for_each_sgt_page(page, it, st)
+			drm_clflush_page(page);
 		mb(); /*Make sure that all cache line entries are flushed*/
 
 		return;
-- 
2.25.0


