[Intel-gfx] [PATCH 01/17] intel-gtt: initialize our own scratch page

Daniel Vetter daniel.vetter at ffwll.ch
Tue Sep 14 00:34:58 CEST 2010


The intel-gtt fake agp driver is the only agp driver that uses dma
address remapping, so it makes sense to fold this code back into its
only user (and thus reduce the reliance on the agp code).

This patch takes the first step by initializing (and remapping) the
scratch page in a new function, intel_gtt_setup_scratch_page.
USE_PCI_DMA_API is now always defined (to 1 or 0) so that it can be
tested in ordinary C conditionals as well as in the preprocessor.
Unfortunately intel_gtt_cleanup had to move to avoid a forward
declaration. The new scratch page is not yet used, though; a rough
sketch of the intended use follows.
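
Purely as an illustration (not part of this patch), a follow-up could
point cleared GTT entries at the scratch page so the GPU never reads
through stale addresses. The write_entry() helper below is a
hypothetical placeholder for a gen-specific PTE writer, not an
existing function in intel-gtt.c:

	/*
	 * Sketch only: redirect a range of GTT entries to the scratch
	 * page. write_entry() stands in for a gen-specific PTE writer.
	 */
	static void intel_gtt_clear_range_sketch(unsigned int first_entry,
						 unsigned int num_entries)
	{
		unsigned int i;

		for (i = first_entry; i < first_entry + num_entries; i++)
			write_entry(intel_private.scratch_page_dma, i);

		readl(intel_private.gtt + first_entry); /* posting read */
	}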

v2: Refactor out the scratch page teardown, as suggested by Chris
Wilson on IRC. This makes it clearer what's going on and results in a
nice symmetry between setup and teardown.

Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/char/agp/intel-gtt.c |   81 +++++++++++++++++++++++++++++++++---------
 1 files changed, 64 insertions(+), 17 deletions(-)

diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9cb7c98..af920b5 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -35,6 +35,8 @@
  */
 #ifdef CONFIG_DMAR
 #define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
 #endif
 
 /* Max amount of stolen space, anything above will be returned to Linux */
@@ -107,6 +109,8 @@ static struct _intel_private {
 	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
+	struct page *scratch_page;
+	dma_addr_t scratch_page_dma;
 } intel_private;
 
 #define INTEL_GTT_GEN	intel_private.driver->gen
@@ -114,7 +118,7 @@ static struct _intel_private {
 #define IS_PINEVIEW	intel_private.driver->is_pineview
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 
-#ifdef USE_PCI_DMA_API
+#if USE_PCI_DMA_API
 static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
 {
 	*ret = pci_map_page(intel_private.pcidev, page, 0,
@@ -539,6 +543,32 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
 	return addr | bridge->driver->masks[type].mask;
 }
 
+static int intel_gtt_setup_scratch_page(void)
+{
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+			return -EINVAL;
+
+		intel_private.scratch_page_dma = dma_addr;
+	} else
+		intel_private.scratch_page_dma = page_to_phys(page);
+
+	intel_private.scratch_page = page;
+
+	return 0;
+}
+
 static struct aper_size_info_fixed intel_fake_agp_sizes[] =
 {
 	{128, 32768, 5},
@@ -795,6 +825,29 @@ static unsigned int intel_gtt_mappable_entries(void)
 	return aperture_size >> PAGE_SHIFT;
 }
 
+static void intel_gtt_teardown_scratch_page(void)
+{
+	set_pages_wb(intel_private.scratch_page, 1);
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(intel_private.scratch_page);
+	__free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+	if (intel_private.i9xx_flush_page)
+		iounmap(intel_private.i9xx_flush_page);
+	if (intel_private.resource_valid)
+		release_resource(&intel_private.ifp_resource);
+	intel_private.ifp_resource.start = 0;
+	intel_private.resource_valid = 0;
+	iounmap(intel_private.gtt);
+	iounmap(intel_private.registers);
+	
+	intel_gtt_teardown_scratch_page();
+}
+
 static int intel_gtt_init(void)
 {
 	u32 gtt_map_size;
@@ -828,6 +881,12 @@ static int intel_gtt_init(void)
 		return -ENOMEM;
 	}
 
+	ret = intel_gtt_setup_scratch_page();
+	if (ret != 0) {
+		intel_gtt_cleanup();
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -1175,18 +1234,6 @@ static int intel_i9xx_configure(void)
 	return 0;
 }
 
-static void intel_gtt_cleanup(void)
-{
-	if (intel_private.i9xx_flush_page)
-		iounmap(intel_private.i9xx_flush_page);
-	if (intel_private.resource_valid)
-		release_resource(&intel_private.ifp_resource);
-	intel_private.ifp_resource.start = 0;
-	intel_private.resource_valid = 0;
-	iounmap(intel_private.gtt);
-	iounmap(intel_private.registers);
-}
-
 static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
 {
 	if (intel_private.i9xx_flush_page)
@@ -1413,7 +1460,7 @@ static const struct agp_bridge_driver intel_915_driver = {
 	.agp_destroy_pages      = agp_generic_destroy_pages,
 	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
 	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
+#if USE_PCI_DMA_API
 	.agp_map_page		= intel_agp_map_page,
 	.agp_unmap_page		= intel_agp_unmap_page,
 	.agp_map_memory		= intel_agp_map_memory,
@@ -1446,7 +1493,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
 	.agp_destroy_pages      = agp_generic_destroy_pages,
 	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
 	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
+#if USE_PCI_DMA_API
 	.agp_map_page		= intel_agp_map_page,
 	.agp_unmap_page		= intel_agp_unmap_page,
 	.agp_map_memory		= intel_agp_map_memory,
@@ -1479,7 +1526,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
 	.agp_destroy_pages      = agp_generic_destroy_pages,
 	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
 	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
+#if USE_PCI_DMA_API
 	.agp_map_page		= intel_agp_map_page,
 	.agp_unmap_page		= intel_agp_unmap_page,
 	.agp_map_memory		= intel_agp_map_memory,
@@ -1512,7 +1559,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
 	.agp_destroy_pages      = agp_generic_destroy_pages,
 	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
 	.chipset_flush		= intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
+#if USE_PCI_DMA_API
 	.agp_map_page		= intel_agp_map_page,
 	.agp_unmap_page		= intel_agp_unmap_page,
 	.agp_map_memory		= intel_agp_map_memory,
-- 
1.7.1



