[Intel-gfx] [CI 2/2] drm/i915: Use a cached mapping for the physical HWS

Chris Wilson <chris@chris-wilson.co.uk>
Wed Oct 11 11:19:15 UTC 2017


Older gens use a physical address for the hardware status page, to which
the writes are cache-coherent. As those writes land in the cpu cache, we
can use a normal WB mapped page to read the HWS, which we use for our
seqno tracking.
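
For reference, the read side is then an ordinary load through the WB
mapping; a minimal sketch, modelled on the in-tree
intel_read_status_page() helper (simplified here for illustration):

    static inline u32
    intel_read_status_page(struct intel_engine_cs *engine, int reg)
    {
            /* Forbid the compiler from caching or tearing the load. */
            return READ_ONCE(engine->status_page.page_addr[reg]);
    }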

Anecdotally, I observed lost breadcrumb writes into the HWS on i965gm,
which have so far not recurred with this patch. How reliable that
evidence is remains to be seen.

v2: Explicitly pass the expected physical address to the hw
v3: Also remember the wild writes we once saw when the HWS was placed
above 4G.
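
The HWS_PGA register itself can take a 36bit address on gen4+: bits
35:32 of the physical address are packed into bits 7:4 of the register,
which page alignment leaves free. A small sketch of that packing, using
an illustrative, hypothetical address (values are examples only):

    /* Illustrative sketch of the gen4+ HWS_PGA address packing. */
    phys_addr_t phys = 0x123456000ULL;  /* hypothetical page-aligned HWS */
    u32 addr = lower_32_bits(phys);     /* 0x23456000 */
    addr |= (phys >> 28) & 0xf0;        /* bits 35:32 -> bits 7:4: | 0x10 */
    /* addr == 0x23456010, the value written to HWS_PGA */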

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  1 -
 drivers/gpu/drm/i915/intel_engine_cs.c  | 22 ++++++++++------------
 drivers/gpu/drm/i915/intel_ringbuffer.c |  9 +++++----
 3 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6bbc4b83aa0a..34ea81c25bac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2274,7 +2274,6 @@ struct drm_i915_private {
 	struct i915_gem_context *preempt_context;
 	struct i915_vma *semaphore;
 
-	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
 
 	/* protects the irq masks */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a59b2a30ff5a..ad09bbd75e8c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -478,12 +478,10 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	if (!dev_priv->status_page_dmah)
+	if (!engine->status_page.page_addr)
 		return;
 
-	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
+	__free_page(virt_to_page(engine->status_page.page_addr));
 	engine->status_page.page_addr = NULL;
 }
 
@@ -571,17 +569,17 @@ static int init_status_page(struct intel_engine_cs *engine)
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct page *page;
 
-	GEM_BUG_ON(engine->id != RCS);
-
-	dev_priv->status_page_dmah =
-		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
-	if (!dev_priv->status_page_dmah)
+	/* Though the HWS register does support 36bit addresses, historically
+	 * we have had hangs and corruption reported due to wild writes if
+	 * the HWS is placed above 4G.
+	 */
+	page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
+	if (!page)
 		return -ENOMEM;
 
-	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+	engine->status_page.page_addr = page_address(page);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4285f09ff8b8..293bbdb9356d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -384,11 +384,14 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
+	struct page *page = virt_to_page(engine->status_page.page_addr);
+	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
 	u32 addr;
 
-	addr = dev_priv->status_page_dmah->busaddr;
+	addr = lower_32_bits(phys);
 	if (INTEL_GEN(dev_priv) >= 4)
-		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+		addr |= (phys >> 28) & 0xf0;
+
 	I915_WRITE(HWS_PGA, addr);
 }
 
@@ -1205,8 +1208,6 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-
-
 int intel_ring_pin(struct intel_ring *ring,
 		   struct drm_i915_private *i915,
 		   unsigned int offset_bias)
-- 
2.15.0.rc0


