[Intel-gfx] [PATCH 06/10] drm/i915: Propagating correct error codes to userspace

ankitprasad.r.sharma at intel.com
Fri Mar 18 06:22:06 UTC 2016


From: Ankitprasad Sharma <ankitprasad.r.sharma at intel.com>

Propagate correct error codes to userspace by using the ERR_PTR and
PTR_ERR macros for stolen-memory-backed object allocation. We currently
return -ENOMEM to the user whenever object allocation fails, regardless
of the actual cause. This patch lets userspace identify the real reason
for the failure instead of seeing -ENOMEM every time.
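
For reference, the error-pointer idiom the series switches to is sketched
below. This is a simplified, hypothetical allocate_buffer()/use_buffer()
pair (the names are made up for illustration, not taken from the driver):
the allocator encodes the errno in the returned pointer with ERR_PTR(),
and callers test it with IS_ERR() and recover the code with PTR_ERR()
(all from <linux/err.h>).

	/* Hypothetical allocator: encode the errno in the returned pointer
	 * instead of collapsing every failure to NULL.
	 */
	static struct drm_i915_gem_object *allocate_buffer(struct drm_device *dev,
							   size_t size)
	{
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_object_alloc(dev);
		if (obj == NULL)
			return ERR_PTR(-ENOMEM);

		ret = drm_gem_object_init(dev, &obj->base, size);
		if (ret) {
			i915_gem_object_free(obj);
			return ERR_PTR(ret);	/* propagate the real errno */
		}

		return obj;
	}

	/* Hypothetical caller: IS_ERR() replaces the NULL check and
	 * PTR_ERR() recovers the original errno for userspace.
	 */
	static int use_buffer(struct drm_device *dev, size_t size)
	{
		struct drm_i915_gem_object *obj;

		obj = allocate_buffer(dev, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		/* ... use obj ... */
		return 0;
	}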

v2: Moved the patch up in the series, added error propagation for
i915_gem_alloc_object too (Chris)

v3: Removed storing of error pointers inside structs, corrected error
propagation in caller functions (Chris)

v4: Remove assignments inside the predicate (Chris)

v5: Removed unnecessary initializations, updated kerneldoc for
i915_guc_client, corrected missed error pointer handling (Tvrtko)

v6: Use ERR_CAST/temporary variable to avoid storing invalid pointer
in a common field (Chris)

v7: Resolved rebasing conflicts (Ankit)

v8: Removed redundant code (Chris)

v9: Rebase

v10: Rebase, resolve merge conflicts

Signed-off-by: Ankitprasad Sharma <ankitprasad.r.sharma at intel.com>
Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c              | 23 ++++++------
 drivers/gpu/drm/i915/i915_gem_batch_pool.c   |  4 +--
 drivers/gpu/drm/i915/i915_gem_context.c      |  4 +--
 drivers/gpu/drm/i915/i915_gem_render_state.c |  7 ++--
 drivers/gpu/drm/i915/i915_gem_stolen.c       | 53 +++++++++++++++-------------
 drivers/gpu/drm/i915/i915_guc_submission.c   | 52 +++++++++++++++++----------
 drivers/gpu/drm/i915/intel_display.c         |  2 +-
 drivers/gpu/drm/i915/intel_fbdev.c           |  6 ++--
 drivers/gpu/drm/i915/intel_lrc.c             | 10 +++---
 drivers/gpu/drm/i915/intel_overlay.c         |  4 +--
 drivers/gpu/drm/i915/intel_pm.c              |  7 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 21 +++++------
 12 files changed, 110 insertions(+), 83 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 43aacb1..7dfa026 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -397,19 +397,18 @@ i915_gem_alloc_object_stolen(struct drm_device *dev, size_t size)
 
 	mutex_lock(&dev->struct_mutex);
 	obj = i915_gem_object_create_stolen(dev, size);
-	if (!obj) {
-		mutex_unlock(&dev->struct_mutex);
-		return NULL;
-	}
+	if (IS_ERR(obj))
+		goto out;
 
 	/* Always clear fresh buffers before handing to userspace */
 	ret = i915_gem_object_clear(obj);
 	if (ret) {
 		drm_gem_object_unreference(&obj->base);
-		mutex_unlock(&dev->struct_mutex);
-		return NULL;
+		obj = ERR_PTR(ret);
+		goto out;
 	}
 
+out:
 	mutex_unlock(&dev->struct_mutex);
 	return obj;
 }
@@ -444,8 +443,8 @@ i915_gem_create(struct drm_file *file,
 		return -EINVAL;
 	}
 
-	if (obj == NULL)
-		return -ENOMEM;
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
@@ -4560,14 +4559,16 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	gfp_t mask;
+	int ret;
 
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+	ret = drm_gem_object_init(dev, &obj->base, size);
+	if (ret) {
 		i915_gem_object_free(obj);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f..d79caa2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -135,8 +135,8 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		int ret;
 
 		obj = i915_gem_alloc_object(pool->dev, size);
-		if (obj == NULL)
-			return ERR_PTR(-ENOMEM);
+		if (IS_ERR(obj))
+			return obj;
 
 		ret = i915_gem_object_get_pages(obj);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 1993449..af76e36 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -179,8 +179,8 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	int ret;
 
 	obj = i915_gem_alloc_object(dev, size);
-	if (obj == NULL)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(obj))
+		return obj;
 
 	/*
 	 * Try to make the context utilize L3 as well as LLC.
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf..071c11e 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -58,8 +58,11 @@ static int render_state_init(struct render_state *so, struct drm_device *dev)
 		return -EINVAL;
 
 	so->obj = i915_gem_alloc_object(dev, 4096);
-	if (so->obj == NULL)
-		return -ENOMEM;
+	if (IS_ERR(so->obj)) {
+		ret = PTR_ERR(so->obj);
+		so->obj = NULL;
+		return ret;
+	}
 
 	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 21e72533..f4de5bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -495,6 +495,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	int ret;
 
 	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
 	BUG_ON(offset > dev_priv->gtt.stolen_size - size);
@@ -506,11 +507,12 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+	ret = sg_alloc_table(st, 1, GFP_KERNEL);
+	if (ret) {
 		kfree(st);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	sg = st->sgl;
@@ -559,18 +561,23 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 			       struct drm_mm_node *stolen)
 {
 	struct drm_i915_gem_object *obj;
+	struct sg_table *pages;
 
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	drm_gem_private_object_init(dev, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
-	obj->pages = i915_pages_create_for_stolen(dev,
-						  stolen->start, stolen->size);
-	if (obj->pages == NULL)
-		goto cleanup;
+	pages = i915_pages_create_for_stolen(dev,
+					     stolen->start, stolen->size);
+	if (IS_ERR(pages)) {
+		i915_gem_object_free(obj);
+		return ERR_CAST(pages);
+	}
+
+	obj->pages = pages;
 
 	obj->get_page.sg = obj->pages->sgl;
 	obj->get_page.last = 0;
@@ -582,10 +589,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 
 	return obj;
-
-cleanup:
-	i915_gem_object_free(obj);
-	return NULL;
 }
 
 struct drm_i915_gem_object *
@@ -597,29 +600,29 @@ i915_gem_object_create_stolen(struct drm_device *dev, u64 size)
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	DRM_DEBUG_KMS("creating stolen object: size=%llx\n", size);
 	if (size == 0)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
 	if (!stolen)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
 	if (ret) {
 		kfree(stolen);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	obj = _i915_gem_object_create_stolen(dev, stolen);
-	if (obj)
+	if (!IS_ERR(obj))
 		return obj;
 
 	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
-	return NULL;
+	return obj;
 }
 
 struct drm_i915_gem_object *
@@ -636,7 +639,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	lockdep_assert_held(&dev->struct_mutex);
 
@@ -646,11 +649,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	/* KISS and expect everything to be page-aligned */
 	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
 	    WARN_ON(stolen_offset & 4095))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
 	if (!stolen)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	stolen->start = stolen_offset;
 	stolen->size = size;
@@ -660,15 +663,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
 		kfree(stolen);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	obj = _i915_gem_object_create_stolen(dev, stolen);
-	if (obj == NULL) {
+	if (IS_ERR(obj)) {
 		DRM_DEBUG_KMS("failed to allocate stolen object\n");
 		i915_gem_stolen_remove_node(dev_priv, stolen);
 		kfree(stolen);
-		return NULL;
+		return obj;
 	}
 
 	/* Some objects just need physical mem from stolen space */
@@ -707,5 +710,5 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 err:
 	drm_gem_object_unreference(&obj->base);
-	return NULL;
+	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ae1f58d..a876ac1 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -608,27 +608,30 @@ int i915_guc_submit(struct i915_guc_client *client,
  * object needs to be pinned lifetime. Also we must pin it to gtt space other
  * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC.
  *
- * Return:	A drm_i915_gem_object if successful, otherwise NULL.
+ * Return:	A drm_i915_gem_object if successful, otherwise error pointer.
  */
 static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
 							u32 size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	int ret;
 
 	obj = i915_gem_alloc_object(dev, size);
-	if (!obj)
-		return NULL;
+	if (IS_ERR(obj))
+		return obj;
 
-	if (i915_gem_object_get_pages(obj)) {
+	ret = i915_gem_object_get_pages(obj);
+	if (ret) {
 		drm_gem_object_unreference(&obj->base);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
-	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (ret) {
 		drm_gem_object_unreference(&obj->base);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
@@ -696,7 +699,7 @@ static void guc_client_free(struct drm_device *dev,
  * @ctx:	the context that owns the client (we use the default render
  * 		context)
  *
- * Return:	An i915_guc_client object if success.
+ * Return:	An i915_guc_client object if success, error pointer on failure.
  */
 static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 						uint32_t priority,
@@ -706,10 +709,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
 	struct drm_i915_gem_object *obj;
+	int ret;
 
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
 	client->priority = priority;
@@ -720,13 +724,16 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
 	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
 		client->ctx_index = GUC_INVALID_CTX_ID;
+		ret = -EINVAL;
 		goto err;
 	}
 
 	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
 	obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
-	if (!obj)
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
 		goto err;
+	}
 
 	client->client_obj = obj;
 	client->wq_offset = GUC_DB_SIZE;
@@ -745,9 +752,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 		client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
 	client->doorbell_id = assign_doorbell(guc, client->priority);
-	if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
+	if (client->doorbell_id == GUC_INVALID_DOORBELL_ID) {
 		/* XXX: evict a doorbell instead */
+		ret = -EINVAL;
 		goto err;
+	}
 
 	guc_init_proc_desc(guc, client);
 	guc_init_ctx_desc(guc, client);
@@ -755,7 +764,8 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 
 	/* XXX: Any cache flushes needed? General domain mgmt calls? */
 
-	if (host2guc_allocate_doorbell(guc, client))
+	ret = host2guc_allocate_doorbell(guc, client);
+	if (ret)
 		goto err;
 
 	DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
@@ -767,7 +777,7 @@ err:
 	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
 
 	guc_client_free(dev, client);
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 static void guc_create_log(struct intel_guc *guc)
@@ -792,7 +802,7 @@ static void guc_create_log(struct intel_guc *guc)
 	obj = guc->log_obj;
 	if (!obj) {
 		obj = gem_allocate_guc_obj(dev_priv->dev, size);
-		if (!obj) {
+		if (IS_ERR(obj)) {
 			/* logging will be off */
 			i915.guc_log_level = -1;
 			return;
@@ -912,6 +922,7 @@ int i915_guc_submission_init(struct drm_device *dev)
 	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
 	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
 	struct intel_guc *guc = &dev_priv->guc;
+	int ret;
 
 	if (!i915.enable_guc_submission)
 		return 0; /* not enabled  */
@@ -920,8 +931,11 @@ int i915_guc_submission_init(struct drm_device *dev)
 		return 0; /* already allocated */
 
 	guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
-	if (!guc->ctx_pool_obj)
-		return -ENOMEM;
+	if (IS_ERR(guc->ctx_pool_obj)) {
+		ret = PTR_ERR(guc->ctx_pool_obj);
+		guc->ctx_pool_obj = NULL;
+		return ret;
+	}
 
 	ida_init(&guc->ctx_ids);
 
@@ -941,9 +955,9 @@ int i915_guc_submission_enable(struct drm_device *dev)
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
-	if (!client) {
+	if (IS_ERR(client)) {
 		DRM_ERROR("Failed to create execbuf guc_client\n");
-		return -ENOMEM;
+		return PTR_ERR(client);
 	}
 
 	guc->execbuf_client = client;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5f8c5ad8..c5a80bb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2512,7 +2512,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 							     base_aligned,
 							     base_aligned,
 							     size_aligned);
-	if (!obj) {
+	if (IS_ERR(obj)) {
 		mutex_unlock(&dev->struct_mutex);
 		return false;
 	}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ae9cf6f..d4509d7 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -148,11 +148,11 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	 * features. */
 	if (size * 2 < dev_priv->gtt.stolen_usable_size)
 		obj = i915_gem_object_create_stolen(dev, size);
-	if (obj == NULL)
+	if (IS_ERR_OR_NULL(obj))
 		obj = i915_gem_alloc_object(dev, size);
-	if (!obj) {
+	if (IS_ERR(obj)) {
 		DRM_ERROR("failed to allocate framebuffer\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(obj);
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f727822..eb5d314 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1465,9 +1465,11 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 
 	engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
 						   PAGE_ALIGN(size));
-	if (!engine->wa_ctx.obj) {
+	if (IS_ERR(engine->wa_ctx.obj)) {
 		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
-		return -ENOMEM;
+		ret = PTR_ERR(engine->wa_ctx.obj);
+		engine->wa_ctx.obj = NULL;
+		return ret;
 	}
 
 	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -2617,9 +2619,9 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
 	ctx_obj = i915_gem_alloc_object(dev, context_size);
-	if (!ctx_obj) {
+	if (IS_ERR(ctx_obj)) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-		return -ENOMEM;
+		return PTR_ERR(ctx_obj);
 	}
 
 	ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 13e22f5..60750ef 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1394,9 +1394,9 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = NULL;
 	if (!OVERLAY_NEEDS_PHYSICAL(dev))
 		reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
-	if (reg_bo == NULL)
+	if (IS_ERR_OR_NULL(reg_bo))
 		reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
-	if (reg_bo == NULL)
+	if (IS_ERR(reg_bo))
 		goto out_free;
 	overlay->reg_bo = reg_bo;
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 20c8243..01078d7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5328,10 +5328,13 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 								      pcbr_offset,
 								      I915_GTT_OFFSET_NONE,
 								      pctx_size);
+		if (IS_ERR(pctx))
+			pctx = NULL;
+
 		goto out;
 	}
 
-	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR or prealloc failed, fixing up\n");
 
 	/*
 	 * From the Gunit register HAS:
@@ -5342,7 +5345,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 	 * memory, or any other relevant ranges.
 	 */
 	pctx = i915_gem_object_create_stolen(dev, pctx_size);
-	if (!pctx) {
+	if (IS_ERR(pctx)) {
 		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
 		goto out;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 015dc7d..f54314e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -667,9 +667,10 @@ intel_init_pipe_control(struct intel_engine_cs *engine)
 	WARN_ON(engine->scratch.obj);
 
 	engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
-	if (engine->scratch.obj == NULL) {
+	if (IS_ERR(engine->scratch.obj)) {
 		DRM_ERROR("Failed to allocate seqno page\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(engine->scratch.obj);
+		engine->scratch.obj = NULL;
 		goto err;
 	}
 
@@ -2006,9 +2007,9 @@ static int init_status_page(struct intel_engine_cs *engine)
 		int ret;
 
 		obj = i915_gem_alloc_object(engine->dev, 4096);
-		if (obj == NULL) {
+		if (IS_ERR(obj)) {
 			DRM_ERROR("Failed to allocate status page\n");
-			return -ENOMEM;
+			return PTR_ERR(obj);
 		}
 
 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2161,10 +2162,10 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	obj = NULL;
 	if (!HAS_LLC(dev))
 		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
-	if (obj == NULL)
+	if (IS_ERR_OR_NULL(obj))
 		obj = i915_gem_alloc_object(dev, ringbuf->size);
-	if (obj == NULL)
-		return -ENOMEM;
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
@@ -2768,7 +2769,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (i915_semaphore_is_enabled(dev)) {
 			obj = i915_gem_alloc_object(dev, 4096);
-			if (obj == NULL) {
+			if (IS_ERR(obj)) {
 				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
 				i915.semaphores = 0;
 			} else {
@@ -2875,9 +2876,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
 		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
-		if (obj == NULL) {
+		if (IS_ERR(obj)) {
 			DRM_ERROR("Failed to allocate batch bo\n");
-			return -ENOMEM;
+			return PTR_ERR(obj);
 		}
 
 		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
-- 
1.9.1


