[Nouveau] [PATCH 8/9] bus/hwsq: Implement VBLANK waiting heuristic

Roy Spliet rspliet at eclipso.eu
Tue Sep 29 16:23:51 PDT 2015


Avoids waiting for VBLANKs that never arrive on headless or otherwise
unconventional setups. Strategy taken from MEMX.

Signed-off-by: Roy Spliet <rspliet at eclipso.eu>
Tested-by: Pierre Moreau <pierre.morrow at free.fr>
---
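Note for reviewers: the register offsets and bit layout used below come straight
from the hunk in hwsq.c (0x610050 bit 1/9 treated as head 0/1 active, and
0x610b40 + 0x540 * i holding the head's mode as y << 16 | x). As a rough,
illustrative sketch of just the selection heuristic, the helper pick_head() and
its res[] parameter are made up for this note and do not exist in the patch:

#include <linux/types.h>	/* u32 */

/*
 * Sketch of the head-selection heuristic (illustration only).
 * heads  - value read from 0x610050
 * res[i] - value read from 0x610b40 + 0x540 * i
 * Returns the head to sync against, or -1 when no head scans out.
 */
static int pick_head(u32 heads, const u32 res[2])
{
	u32 x, y, px = 0;
	int i, head_sync = -1;

	for (i = 0; i < 2; i++) {
		/* bit 1 -> head 0 active, bit 9 -> head 1 active */
		if (!(heads & (2 << (i << 3))))
			continue;
		x = res[i] & 0x0000ffff;
		y = res[i] >> 16;
		/* prefer the head driving the largest mode */
		if (x * y > px) {
			px = x * y;
			head_sync = i;
		}
	}
	return head_sync;
}

The actual code then waits for a full !vblank -> vblank transition on hwsq flag
0x1 (head 0) or 0x3 (head 1), and skips the wait entirely when no head is
active, which is what avoids the headless hang.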
 drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h |  1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c    | 32 +++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h    |  6 +++++
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c  |  3 +--
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h   |  1 +
 5 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index 6a04d9c..33a057c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -14,6 +14,7 @@ int  nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
 void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
 void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
 void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
+void nvkm_hwsq_wait_vblank(struct nvkm_hwsq *);
 void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
 
 int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 79f1cf5..2a56689 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -132,6 +132,38 @@ nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
 }
 
 void
+nvkm_hwsq_wait_vblank(struct nvkm_hwsq *hwsq)
+{
+	struct nvkm_subdev *subdev = hwsq->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 heads, x, y, px = 0;
+	int i, head_sync;
+
+	heads = nvkm_rd32(device, 0x610050);
+	for (i = 0; i < 2; i++) {
+		/* Heuristic: sync to head with biggest resolution */
+		if (heads & (2 << (i << 3))) {
+			x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
+			y = (x & 0xffff0000) >> 16;
+			x &= 0x0000ffff;
+			if ((x * y) > px) {
+				px = (x * y);
+				head_sync = i;
+			}
+		}
+	}
+
+	if (px == 0) {
+		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
+		return;
+	}
+
+	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
+	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x0);
+	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x1);
+}
+
+void
 nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
 {
 	u8 shift = 0, usec = nsec / 1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 8117ec5..54ec3b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -134,6 +134,12 @@ hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
 }
 
 static inline void
+hwsq_wait_vblank(struct hwsq *ram)
+{
+	nvkm_hwsq_wait_vblank(ram->hwsq);
+}
+
+static inline void
 hwsq_nsec(struct hwsq *ram, u32 nsec)
 {
 	nvkm_hwsq_nsec(ram->hwsq, nsec);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 651b74a..94b340f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -308,8 +308,7 @@ nv50_ram_calc(struct nvkm_ram *base, u32 freq)
 	/* Always disable this bit during reclock */
 	ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
 
-	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
-	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
+	ram_wait_vblank(hwsq);
 	ram_wr32(hwsq, 0x611200, 0x00003300);
 	ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
 	ram_nsec(hwsq, 8000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
index 0f1f97c..8df7306 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
@@ -11,5 +11,6 @@
 #define ram_mask(s,r,m,d)   hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
 #define ram_setf(s,f,d)     hwsq_setf(&(s)->base, (f), (d))
 #define ram_wait(s,f,d)     hwsq_wait(&(s)->base, (f), (d))
+#define ram_wait_vblank(s)  hwsq_wait_vblank(&(s)->base)
 #define ram_nsec(s,n)       hwsq_nsec(&(s)->base, (n))
 #endif
-- 
2.4.3




