[PATCH v8 5/9] drm/i915: Add intel_bo_panic_setup and intel_bo_panic_finish

Jocelyn Falempe jfalempe at redhat.com
Fri Jun 6 11:48:09 UTC 2025


Implement both functions for i915 and xe; they prepare the ground for
drm_panic support.
They both use kmap_try_from_panic() and map one page at a time to
write the panic screen to the framebuffer.

Signed-off-by: Jocelyn Falempe <jfalempe at redhat.com>
---


v5:
 * Use iosys_map for intel_bo_panic_map().

v7:
 * Return int for i915_gem_object_panic_map() (Ville Syrjälä)

v8:
 * Complete rewrite, to use kmap_try_from_panic() which is safe
   to call from a panic handler

 drivers/gpu/drm/i915/display/intel_bo.c    | 11 +++
 drivers/gpu/drm/i915/display/intel_bo.h    |  3 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h |  4 +
 drivers/gpu/drm/i915/gem/i915_gem_pages.c  | 92 ++++++++++++++++++++++
 drivers/gpu/drm/xe/display/intel_bo.c      | 55 +++++++++++++
 5 files changed, 165 insertions(+)

diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index fbd16d7b58d9..83dbd8ae16fe 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: MIT
 /* Copyright © 2024 Intel Corporation */
 
+#include <drm/drm_panic.h>
 #include "gem/i915_gem_mman.h"
 #include "gem/i915_gem_object.h"
 #include "gem/i915_gem_object_frontbuffer.h"
@@ -57,3 +58,13 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
 {
 	i915_debugfs_describe_obj(m, to_intel_bo(obj));
 }
+
+/* Prepare @obj for drm_panic drawing into @sb; forwards to the gem backend. */
+int intel_bo_panic_setup(struct drm_gem_object *obj, struct drm_scanout_buffer *sb)
+{
+	return i915_gem_object_panic_setup(to_intel_bo(obj), sb);
+}
+
+/* Tear down the panic mapping created by intel_bo_panic_setup(). */
+void intel_bo_panic_finish(struct drm_gem_object *obj)
+{
+	/* Plain call: returning a void expression is a gcc extension and
+	 * triggers checkpatch/sparse warnings.
+	 */
+	i915_gem_object_panic_finish(to_intel_bo(obj));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index ea7a2253aaa5..9ac087ea275d 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -4,6 +4,7 @@
 #ifndef __INTEL_BO__
 #define __INTEL_BO__
 
+#include <drm/drm_panic.h>
 #include <linux/types.h>
 
 struct drm_gem_object;
@@ -23,5 +24,7 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
 						   struct intel_frontbuffer *front);
 
 void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+int intel_bo_panic_setup(struct drm_gem_object *obj, struct drm_scanout_buffer *sb);
+void intel_bo_panic_finish(struct drm_gem_object *obj);
 
 #endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c34f41605b46..9a0c1019dcad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -9,6 +9,7 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_file.h>
 #include <drm/drm_device.h>
+#include <drm/drm_panic.h>
 
 #include "intel_memory_region.h"
 #include "i915_gem_object_types.h"
@@ -691,6 +692,9 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+int i915_gem_object_panic_setup(struct drm_i915_gem_object *obj, struct drm_scanout_buffer *sb);
+void i915_gem_object_panic_finish(struct drm_i915_gem_object *obj);
+
 /**
  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
  * @obj: the object to map into kernel address space
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7f83f8bdc8fb..9bdbac3d9433 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -3,6 +3,7 @@
  * Copyright © 2014-2016 Intel Corporation
  */
 
+#include <drm/drm_panic.h>
 #include <drm/drm_cache.h>
 #include <linux/vmalloc.h>
 
@@ -354,6 +355,97 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
 	return vaddr ?: ERR_PTR(-ENOMEM);
 }
 
+/* Panic-drawing state shared by panic_setup()/set_pixel()/panic_finish().
+ * NOTE(review): globals without locking — presumably safe because drm_panic
+ * invokes these from a single CPU in the panic path; confirm against caller.
+ */
+static struct page **i915_panic_pages;
+static int i915_panic_page = -1;
+static void *i915_panic_vaddr;
+
+/* Drop the current temporary page mapping, if any, flushing CPU caches first. */
+static void i915_panic_kunmap(void)
+{
+	if (!i915_panic_vaddr)
+		return;
+
+	drm_clflush_virt_range(i915_panic_vaddr, PAGE_SIZE);
+	kunmap_local(i915_panic_vaddr);
+	i915_panic_vaddr = NULL;
+}
+
+/* Build a flat array of the object's backing pages, or NULL on OOM. */
+static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned long count = obj->base.size >> PAGE_SHIFT;
+	unsigned long idx = 0;
+	struct page **page_array;
+	struct sgt_iter iter;
+	struct page *page;
+
+	/* GFP_ATOMIC: called on the panic path, sleeping is not an option. */
+	page_array = kvmalloc_array(count, sizeof(*page_array), GFP_ATOMIC);
+	if (!page_array)
+		return NULL;
+
+	for_each_sgt_page(page, iter, obj->mm.pages)
+		page_array[idx++] = page;
+
+	return page_array;
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+						 unsigned int y, u32 color)
+{
+	unsigned int page_nr, byte_off;
+
+	byte_off = y * sb->pitch[0] + x * sb->format->cpp[0];
+	page_nr = byte_off >> PAGE_SHIFT;
+	byte_off &= PAGE_SIZE - 1;
+
+	/* Remap only when the pixel lands on a different page than last time. */
+	if (page_nr != i915_panic_page) {
+		i915_panic_kunmap();
+		i915_panic_page = page_nr;
+		i915_panic_vaddr =
+			kmap_local_page_try_from_panic(i915_panic_pages[page_nr]);
+	}
+
+	/* Mapping can fail during panic; silently skip the pixel then. */
+	if (i915_panic_vaddr)
+		*(u32 *)(i915_panic_vaddr + byte_off) = color;
+}
+
+/*
+ * Setup the gem framebuffer for drm_panic access.
+ * Use current vaddr if it exists, or setup a list of pages.
+ * pfn is not supported yet.
+ */
+int i915_gem_object_panic_setup(struct drm_i915_gem_object *obj, struct drm_scanout_buffer *sb)
+{
+	enum i915_map_type has_type;
+	void *ptr;
+
+	/* Reuse an existing kernel mapping of the object when there is one. */
+	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+	if (ptr) {
+		if (i915_gem_object_has_iomem(obj))
+			iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr);
+		else
+			iosys_map_set_vaddr(&sb->map[0], ptr);
+
+		return 0;
+	}
+	if (i915_gem_object_has_struct_page(obj)) {
+		i915_panic_pages = i915_gem_object_panic_pages(obj);
+		/* Check the allocation: set_pixel() would otherwise deref NULL. */
+		if (!i915_panic_pages)
+			return -ENOMEM;
+		i915_panic_page = -1;
+		sb->set_pixel = i915_gem_object_panic_page_set_pixel;
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+void i915_gem_object_panic_finish(struct drm_i915_gem_object *obj)
+{
+	i915_panic_kunmap();
+	i915_panic_page = -1;
+	/* Free the page array so repeated panic-screen tests don't leak it. */
+	kvfree(i915_panic_pages);
+	i915_panic_pages = NULL;
+}
+
 /* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 27437c22bd70..eb9a3400c110 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: MIT
 /* Copyright © 2024 Intel Corporation */
 
+#include <drm/drm_cache.h>
 #include <drm/drm_gem.h>
 
 #include "xe_bo.h"
@@ -59,3 +60,57 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
 {
 	/* FIXME */
 }
+
+/* Panic-drawing state shared by panic_setup()/set_pixel()/panic_finish().
+ * NOTE(review): globals without locking — presumably safe because drm_panic
+ * invokes these from a single CPU in the panic path; confirm against caller.
+ */
+static int xe_panic_page = -1;
+static void *xe_panic_vaddr;
+static struct xe_bo *xe_panic_bo;
+
+/* Drop the current temporary page mapping, if any, flushing CPU caches first. */
+static void xe_panic_kunmap(void)
+{
+	if (!xe_panic_vaddr)
+		return;
+
+	drm_clflush_virt_range(xe_panic_vaddr, PAGE_SIZE);
+	kunmap_local(xe_panic_vaddr);
+	xe_panic_vaddr = NULL;
+}
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+				    unsigned int y, u32 color)
+{
+	unsigned int page_nr, byte_off;
+
+	byte_off = y * sb->pitch[0] + x * sb->format->cpp[0];
+	page_nr = byte_off >> PAGE_SHIFT;
+	byte_off &= PAGE_SIZE - 1;
+
+	/* Remap only when the pixel lands on a different page than last time. */
+	if (page_nr != xe_panic_page) {
+		xe_panic_kunmap();
+		xe_panic_page = page_nr;
+		xe_panic_vaddr = ttm_bo_kmap_try_from_panic(&xe_panic_bo->ttm,
+							    xe_panic_page);
+	}
+
+	/* Mapping can fail during panic; silently skip the pixel then. */
+	if (xe_panic_vaddr)
+		*(u32 *)(xe_panic_vaddr + byte_off) = color;
+}
+
+/* Prepare @obj for drm_panic drawing: set_pixel() has no private data
+ * argument, so stash the bo in a file-scope pointer.
+ */
+int intel_bo_panic_setup(struct drm_gem_object *obj, struct drm_scanout_buffer *sb)
+{
+	xe_panic_bo = gem_to_xe_bo(obj);
+	sb->set_pixel = xe_panic_page_set_pixel;
+	return 0;
+}
+
+/* Tear down the panic drawing state set up by intel_bo_panic_setup(). */
+void intel_bo_panic_finish(struct drm_gem_object *obj)
+{
+	xe_panic_kunmap();
+	xe_panic_page = -1;
+	/* Don't keep a dangling bo reference past the panic session. */
+	xe_panic_bo = NULL;
+}
-- 
2.49.0



More information about the Intel-xe mailing list