[PATCH 2/3] drm/radeon: split PT setup into more functions

Christian König deathsimple at vodafone.de
Wed Jul 30 12:05:17 PDT 2014


From: Christian König <christian.koenig at amd.com>

Split the set_page callback into copy_pages, write_pages, set_pages and
pad_ib callbacks, and move the decision about which method to use into
the common VM code.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 drivers/gpu/drm/radeon/cik_sdma.c    | 189 +++++++++++++++++++++------------
 drivers/gpu/drm/radeon/ni_dma.c      | 197 ++++++++++++++++++++++-------------
 drivers/gpu/drm/radeon/radeon.h      |  25 +++--
 drivers/gpu/drm/radeon/radeon_asic.c |  25 ++++-
 drivers/gpu/drm/radeon/radeon_asic.h |  64 +++++++++---
 drivers/gpu/drm/radeon/radeon_vm.c   |  67 +++++++++---
 drivers/gpu/drm/radeon/si_dma.c      | 172 ++++++++++++++++++------------
 7 files changed, 492 insertions(+), 247 deletions(-)
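
Note for reviewers: the heart of the patch is the new common helper in
radeon_vm.c. A condensed sketch of the dispatch logic follows (the
comments here are explanatory additions, not part of the hunk below):

static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* GART entries can be copied straight out of the GART table */
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;

		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
		/* system pages, or so few entries that a plain write is
		 * no larger than the fixed-size PTE_PDE packet
		 */
		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
					   count, incr, flags);

	} else {
		/* physically contiguous VRAM: let the DMA engine generate
		 * the entries itself
		 */
		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
					 count, incr, flags);
	}
}

Padding the IB to the required alignment is now done once by the
callers through the new pad_ib callback, instead of at the end of every
set_page implementation.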

diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index db8ce30..bcf4805 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -749,7 +749,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count)
+{
+	while (count) {
+		unsigned bytes = count * 8;
+		if (bytes > 0x1FFFF8)
+			bytes = 0x1FFFF8;
+
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = bytes;
+		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(src);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+		pe += bytes;
+		src += bytes;
+		count -= bytes / 8;
+	}
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -759,84 +795,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @incr: increase next addr by incr bytes
  * @flags: access flags
  *
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by writing them manually using sDMA (CIK).
  */
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-			  struct radeon_ib *ib,
-			  uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags)
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
 {
 	uint64_t value;
 	unsigned ndw;
 
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-		while (count) {
-			unsigned bytes = count * 8;
-			if (bytes > 0x1FFFF8)
-				bytes = 0x1FFFF8;
-
-			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-			ib->ptr[ib->length_dw++] = bytes;
-			ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-			ib->ptr[ib->length_dw++] = lower_32_bits(src);
-			ib->ptr[ib->length_dw++] = upper_32_bits(src);
-			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-			pe += bytes;
-			src += bytes;
-			count -= bytes / 8;
-		}
-	} else if (flags & R600_PTE_SYSTEM) {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			/* for non-physically contiguous pages (system) */
-			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = ndw;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = ndw;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
 				value &= 0xFFFFFFFFFFFFF000ULL;
-				addr += incr;
-				value |= flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		while (count) {
-			ndw = count;
-			if (ndw > 0x7FFFF)
-				ndw = 0x7FFFF;
-
-			if (flags & R600_PTE_VALID)
+			} else if (flags & R600_PTE_VALID) {
 				value = addr;
-			else
+			} else {
 				value = 0;
-			/* for physically contiguous pages (vram) */
-			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-			ib->ptr[ib->length_dw++] = pe; /* dst addr */
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = flags; /* mask */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = value; /* value */
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			ib->ptr[ib->length_dw++] = incr; /* increment size */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-			pe += ndw * 8;
-			addr += ndw * incr;
-			count -= ndw;
 		}
 	}
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count;
+		if (ndw > 0x7FFFF)
+			ndw = 0x7FFFF;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+		pe += ndw * 8;
+		addr += ndw * incr;
+		count -= ndw;
+	}
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ * Pad the IB with NOPs to a multiple of 8 dw (CIK).
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
 	while (ib->length_dw & 0x7)
 		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 66325ef..8a3e622 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count)
+{
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+		pe += ndw * 4;
+		src += ndw * 4;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -315,90 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: hw access flags 
+ * @flags: hw access flags
  *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
  */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-			    struct radeon_ib *ib,
-			    uint64_t pe,
-			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags)
 {
 	uint64_t value;
 	unsigned ndw;
 
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-							      0, 0, ndw);
-			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-			ib->ptr[ib->length_dw++] = lower_32_bits(src);
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-			pe += ndw * 4;
-			src += ndw * 4;
-			count -= ndw / 2;
-		}
-
-	} else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			/* for non-physically contiguous pages (system) */
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & R600_PTE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & R600_PTE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			if (flags & R600_PTE_VALID)
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & R600_PTE_VALID) {
 				value = addr;
-			else
+			} else {
 				value = 0;
-			/* for physically contiguous pages (vram) */
-			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-			ib->ptr[ib->length_dw++] = pe; /* dst addr */
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = flags; /* mask */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = value; /* value */
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			ib->ptr[ib->length_dw++] = incr; /* increment size */
-			ib->ptr[ib->length_dw++] = 0;
-			pe += ndw * 4;
-			addr += (ndw / 2) * incr;
-			count -= ndw / 2;
 		}
 	}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ * Pad the IB with NOPs to a multiple of 8 dw (cayman/TN).
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
 	while (ib->length_dw & 0x7)
 		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index cd0d960..ba46d4d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1798,11 +1798,21 @@ struct radeon_asic {
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-		void (*set_page)(struct radeon_device *rdev,
-				 struct radeon_ib *ib,
-				 uint64_t pe,
-				 uint64_t addr, unsigned count,
-				 uint32_t incr, uint32_t flags);
+		void (*copy_pages)(struct radeon_device *rdev,
+				   struct radeon_ib *ib,
+				   uint64_t pe, uint64_t src,
+				   unsigned count);
+		void (*write_pages)(struct radeon_device *rdev,
+				    struct radeon_ib *ib,
+				    uint64_t pe,
+				    uint64_t addr, unsigned count,
+				    uint32_t incr, uint32_t flags);
+		void (*set_pages)(struct radeon_device *rdev,
+				  struct radeon_ib *ib,
+				  uint64_t pe,
+				  uint64_t addr, unsigned count,
+				  uint32_t incr, uint32_t flags);
+		void (*pad_ib)(struct radeon_ib *ib);
 	} vm;
 	/* ring specific callbacks */
 	struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2762,7 +2772,10 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d321b04..fdbe875 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1611,7 +1611,10 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.set_page = &cayman_dma_vm_set_page,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1711,7 +1714,10 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.set_page = &cayman_dma_vm_set_page,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1841,7 +1847,10 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.set_page = &si_dma_vm_set_page,
+		.copy_pages = &si_dma_vm_copy_pages,
+		.write_pages = &si_dma_vm_write_pages,
+		.set_pages = &si_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1999,7 +2008,10 @@ static struct radeon_asic ci_asic = {
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.set_page = &cik_sdma_vm_set_page,
+		.copy_pages = &cik_sdma_vm_copy_pages,
+		.write_pages = &cik_sdma_vm_write_pages,
+		.set_pages = &cik_sdma_vm_set_pages,
+		.pad_ib = &cik_sdma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2103,7 +2115,10 @@ static struct radeon_asic kv_asic = {
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.set_page = &cik_sdma_vm_set_page,
+		.copy_pages = &cik_sdma_vm_copy_pages,
+		.write_pages = &cik_sdma_vm_write_pages,
+		.set_pages = &cik_sdma_vm_set_pages,
+		.pad_ib = &cik_sdma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f632e31..ed38d19 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -606,11 +606,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-			    struct radeon_ib *ib,
-			    uint64_t pe,
-			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
@@ -693,11 +704,22 @@ int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+			 struct radeon_ib *ib,
+			 uint64_t pe,
+			 uint64_t addr, unsigned count,
+			 uint32_t incr, uint32_t flags);
+
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -771,11 +793,23 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-			  struct radeon_ib *ib,
-			  uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index d520ab7..e975881 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -341,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 }
 
 /**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+				struct radeon_ib *ib,
+				uint64_t pe,
+				uint64_t addr, unsigned count,
+				uint32_t incr, uint32_t flags)
+{
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+					   count, incr, flags);
+
+	} else {
+		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+					 count, incr, flags);
+	}
+}
+
+/**
  * radeon_vm_clear_bo - initially clear the page dir/table
  *
  * @rdev: radeon_device pointer
@@ -381,7 +417,8 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
 	ib.length_dw = 0;
 
-	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_asic_vm_pad_ib(rdev, &ib);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r)
@@ -634,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				radeon_asic_vm_set_page(rdev, &ib, last_pde,
-							last_pt, count, incr,
-							R600_PTE_VALID);
+				radeon_vm_set_pages(rdev, &ib, last_pde,
+						    last_pt, count, incr,
+						    R600_PTE_VALID);
 			}
 
 			count = 1;
@@ -648,10 +685,11 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	}
 
 	if (count)
-		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
-					incr, R600_PTE_VALID);
+		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+				    incr, R600_PTE_VALID);
 
 	if (ib.length_dw != 0) {
+		radeon_asic_vm_pad_ib(rdev, &ib);
 		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 		r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -719,30 +757,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	    (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
-					RADEON_GPU_PAGE_SIZE, flags);
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
 		return;
 	}
 
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
-					RADEON_GPU_PAGE_SIZE, flags);
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
 		addr += RADEON_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
-				RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += RADEON_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
-					RADEON_GPU_PAGE_SIZE, flags);
+		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
 	}
 }
 
@@ -900,6 +938,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 			      bo_va->it.last + 1, addr,
 			      radeon_vm_page_flags(bo_va->flags));
 
+	radeon_asic_vm_pad_ib(rdev, &ib);
 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index a26e842..7165051 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count)
+{
+	while (count) {
+		unsigned bytes = count * 8;
+		if (bytes > 0xFFFF8)
+			bytes = 0xFFFF8;
+
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+						      1, 0, 0, bytes);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+		pe += bytes;
+		src += bytes;
+		count -= bytes / 8;
+	}
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @incr: increase next addr by incr bytes
  * @flags: access flags
  *
- * Update the page tables using the DMA (SI).
+ * Update PTEs by writing them manually using the DMA (SI).
  */
-void si_dma_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags)
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
 {
 	uint64_t value;
 	unsigned ndw;
 
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-		while (count) {
-			unsigned bytes = count * 8;
-			if (bytes > 0xFFFF8)
-				bytes = 0xFFFF8;
-
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-							      1, 0, 0, bytes);
-			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-			ib->ptr[ib->length_dw++] = lower_32_bits(src);
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-			pe += bytes;
-			src += bytes;
-			count -= bytes / 8;
-		}
-	} else if (flags & R600_PTE_SYSTEM) {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			/* for non-physically contiguous pages (system) */
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
 				value &= 0xFFFFFFFFFFFFF000ULL;
-				addr += incr;
-				value |= flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			if (flags & R600_PTE_VALID)
+			} else if (flags & R600_PTE_VALID) {
 				value = addr;
-			else
+			} else {
 				value = 0;
-			/* for physically contiguous pages (vram) */
-			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-			ib->ptr[ib->length_dw++] = pe; /* dst addr */
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = flags; /* mask */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = value; /* value */
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			ib->ptr[ib->length_dw++] = incr; /* increment size */
-			ib->ptr[ib->length_dw++] = 0;
-			pe += ndw * 4;
-			addr += (ndw / 2) * incr;
-			count -= ndw / 2;
 		}
 	}
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+			 struct radeon_ib *ib,
+			 uint64_t pe,
+			 uint64_t addr, unsigned count,
+			 uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
+	}
 }
 
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-- 
1.9.1


