[Intel-gfx] [PATCH] drm/i915: Prevent use of pages >4GiB on 965G[M]

Chris Wilson chris at chris-wilson.co.uk
Sun Apr 17 18:37:32 CEST 2011


The 965G (Broadwater) and 965GM (Crestline) chipsets have many errata
in handling pages allocated above 4GiB. So we should be careful never
to allocate such pages and attempt to pass them through to the GPU,
and so limit ourselves to GFP_DMA32 on those chipsets.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c |   61 ++++++++++++++++++++-------------------
 1 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1949048..d9c6ef2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -59,6 +59,29 @@ static int i915_gem_inactive_shrink(struct shrinker *shrinker,
 				    int nr_to_scan,
 				    gfp_t gfp_mask);
 
+/* Compute the gfp mask for shmem page allocation on this device.
+ * Returns gfp_t (not int): gfp_t is a sparse __bitwise type, so the
+ * return type must match the flags being built and returned.
+ */
+static gfp_t i915_gem_get_gfp(struct drm_device *dev)
+{
+	gfp_t gfp = GFP_HIGHUSER | __GFP_RECLAIMABLE | __GFP_COLD;
+
+	/* The Broadwater/Crestline chipset (965G[M]) has many bugs in
+	 * handling pages located above 4GiB, such as returning undefined
+	 * results when accessed by the Command Streamer. So prevent us
+	 * from ever utilizing high pages on those chipsets.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		gfp |= GFP_DMA32;
+
+	return gfp;
+}
+
+/* Look up (or read in) page @page_index of the object's shmem backing
+ * store, applying the device-appropriate gfp mask plus caller flags.
+ */
+static struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+					     int page_index,
+					     gfp_t gfp)
+{
+	struct inode *inode = obj->base.filp->f_path.dentry->d_inode;
+	return read_cache_page_gfp(inode->i_mapping, page_index,
+				   gfp | i915_gem_get_gfp(obj->base.dev));
+}
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -385,7 +408,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 			  struct drm_i915_gem_pread *args,
 			  struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
@@ -411,8 +433,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -447,7 +468,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 			  struct drm_i915_gem_pread *args,
 			  struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	struct page **user_pages;
 	ssize_t remain;
 	loff_t offset;
@@ -505,8 +525,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -799,7 +818,6 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 			   struct drm_i915_gem_pwrite *args,
 			   struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
@@ -826,8 +844,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -869,7 +886,6 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 			   struct drm_i915_gem_pwrite *args,
 			   struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	struct page **user_pages;
 	ssize_t remain;
 	loff_t first_data_page, last_data_page, offset;
@@ -925,8 +941,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
@@ -1250,9 +1265,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		obj->fault_mappable = true;
 		mutex_unlock(&dev->struct_mutex);
 
-		page = read_cache_page_gfp(obj->base.filp->f_path.dentry->d_inode->i_mapping,
-					   page_offset,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = i915_gem_object_get_page(obj, page_offset, 0);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
@@ -1589,8 +1602,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 			      gfp_t gfpmask)
 {
 	int page_count, i;
-	struct address_space *mapping;
-	struct inode *inode;
 	struct page *page;
 
 	/* Get the list of pages out of our struct file.  They'll be pinned
@@ -1602,14 +1613,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 	if (obj->pages == NULL)
 		return -ENOMEM;
 
-	inode = obj->base.filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
+		page = i915_gem_object_get_page(obj, i, gfpmask);
 		if (IS_ERR(page))
 			goto err_pages;
 
@@ -4072,7 +4077,6 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_i915_gem_object *obj)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	char *vaddr;
 	int i;
 	int page_count;
@@ -4083,8 +4087,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
 	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		struct page *page = read_cache_page_gfp(mapping, i,
-							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		struct page *page = i915_gem_object_get_page(obj, i, 0);
 		if (!IS_ERR(page)) {
 			char *dst = kmap_atomic(page);
 			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4109,7 +4112,6 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 			    int id,
 			    int align)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret = 0;
 	int page_count;
@@ -4145,8 +4147,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		struct page *page;
 		char *dst, *src;
 
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = i915_gem_object_get_page(obj, i, 0);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
-- 
1.7.4.4




More information about the Intel-gfx mailing list