[Intel-gfx] [PATCH 10/37] drm/i915: Specify gfpmask to use with drm_malloc_ab()

Chris Wilson chris at chris-wilson.co.uk
Wed Mar 10 23:44:57 CET 2010


There are a couple of places where it is useful to disable the
OOM-killer and instead preferentially try to reclaim pages from our own
inactive list first. In order to do so, we need to pass the
__GFP_NOWARN | __GFP_NORETRY flags down to the allocator. In the
process of adding a gfp parameter for this, we can also eliminate the
drm_calloc_large() specialization, which is just drm_malloc_ab() with
__GFP_ZERO.
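For example, a caller that previously relied on drm_calloc_large() for
zeroing now passes __GFP_ZERO explicitly, and a caller that must not
trigger the OOM-killer passes __GFP_NOWARN | __GFP_NORETRY:

	/* zeroed large allocation, as drm_calloc_large() used to do */
	ptr = drm_malloc_ab(nmemb, size, __GFP_ZERO);

	/* fail fast instead of retrying hard or waking the OOM-killer */
	ptr = drm_malloc_ab(nmemb, size, __GFP_NOWARN | __GFP_NORETRY);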

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   26 ++++++++++++++------------
 include/drm/drmP.h              |   19 ++++---------------
 2 files changed, 18 insertions(+), 27 deletions(-)
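
[Note, not part of the patch: a sketch of the caller-side fallback this
enables. On allocation failure the caller can reclaim from its own
inactive list and retry; i915_gem_evict_something() is used below purely
as a stand-in for that reclaim step, which this patch only prepares for.]

	/* Illustrative only: ask for the pages array without waking the
	 * OOM-killer, evict from our own inactive list on failure, then
	 * try once more before giving up.
	 */
	pages = drm_malloc_ab(page_count, sizeof(struct page *),
			      __GFP_NOWARN | __GFP_NORETRY);
	if (pages == NULL) {
		ret = i915_gem_evict_something(dev, page_count * PAGE_SIZE);
		if (ret)
			return ret;

		pages = drm_malloc_ab(page_count, sizeof(struct page *),
				      __GFP_NOWARN | __GFP_NORETRY);
		if (pages == NULL)
			return -ENOMEM;
	}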

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0215bb1..b93f7e1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -376,7 +376,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *), __GFP_ZERO);
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -676,7 +676,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *), __GFP_ZERO);
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -851,7 +851,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *), __GFP_ZERO);
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -2251,7 +2251,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
 	struct address_space *mapping;
 	struct inode *inode;
 	struct page *page;
-	int ret;
 
 	if (obj_priv->pages_refcount++ != 0)
 		return 0;
@@ -2261,7 +2260,8 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
 	 */
 	page_count = obj->size / PAGE_SIZE;
 	BUG_ON(obj_priv->pages != NULL);
-	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
+	obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *),
+					__GFP_NOWARN | __GFP_NORETRY);
 	if (obj_priv->pages == NULL) {
 		obj_priv->pages_refcount--;
 		return -ENOMEM;
@@ -2275,9 +2275,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
 					   __GFP_COLD |
 					   gfpmask);
 		if (IS_ERR(page)) {
-			ret = PTR_ERR(page);
+			obj_priv->pages[i] = NULL;
 			i915_gem_object_put_pages(obj);
-			return ret;
+			return PTR_ERR(page);
 		}
 		obj_priv->pages[i] = page;
 	}
@@ -3592,7 +3592,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
 		reloc_count += exec_list[i].relocation_count;
 	}
 
-	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
+	*relocs = drm_malloc_ab(reloc_count, sizeof(**relocs), __GFP_ZERO);
 	if (*relocs == NULL) {
 		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
 		return -ENOMEM;
@@ -3734,7 +3734,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
-	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
+
+	/* Allocate an array to hold the objects referenced by the exec list */
+	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count, 0);
 	if (object_list == NULL) {
 		DRM_ERROR("Failed to allocate object list for %d buffers\n",
 			  args->buffer_count);
@@ -4051,8 +4053,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count, 0);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count, 0);
 	if (exec_list == NULL || exec2_list == NULL) {
 		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
@@ -4135,7 +4137,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count, 0);
 	if (exec2_list == NULL) {
 		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 4a3c4e4..e803ee7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1546,29 +1546,18 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
 }
 
 
-static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
-{
-	if (size != 0 && nmemb > ULONG_MAX / size)
-		return NULL;
-
-	if (size * nmemb <= PAGE_SIZE)
-	    return kcalloc(nmemb, size, GFP_KERNEL);
-
-	return __vmalloc(size * nmemb,
-			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-}
-
 /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
-static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size, gfp_t gfp)
 {
 	if (size != 0 && nmemb > ULONG_MAX / size)
 		return NULL;
 
 	if (size * nmemb <= PAGE_SIZE)
-	    return kmalloc(nmemb * size, GFP_KERNEL);
+	    return kmalloc(nmemb * size, GFP_KERNEL | gfp);
 
 	return __vmalloc(size * nmemb,
-			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+			 GFP_KERNEL | __GFP_HIGHMEM | gfp,
+			 PAGE_KERNEL);
 }
 
 static __inline void drm_free_large(void *ptr)
-- 
1.7.0