[Intel-gfx] [RFC-v19 05/13] drm/i915/pxp: Func to send hardware session termination

Huang, Sean Z sean.z.huang at intel.com
Tue Jan 12 18:53:07 UTC 2021


Hi Rodrigo,

I made the modifications below according to Chris's and your suggestions; they will be reflected in rev20. My inline comments (// sean: ...) won't be checked in.
This change addresses part of the comments Chris provided, but please let me go through the remaining rev19 comments first.
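For reference, the error-path rework follows the usual kernel goto-unwind shape: acquire resources in order, and on failure jump to the label that releases only what was already acquired, in reverse order, with the success path falling through the same labels. A minimal standalone sketch of that shape (plain userspace C with made-up resources, not the i915 code):

#include <stdio.h>
#include <stdlib.h>

/* Acquire in order, release in reverse order via goto labels; the success
 * path falls through the same cleanup labels. */
static int do_work(const char *path)
{
	FILE *f;
	char *buf;
	int err = 0;

	f = fopen(path, "r");
	if (!f)
		return -1;

	buf = malloc(4096);
	if (!buf) {
		err = -1;
		goto out_close;
	}

	if (fread(buf, 1, 4096, f) == 0 && ferror(f)) {
		err = -1;
		goto out_free;
	}

out_free:
	free(buf);
out_close:
	fclose(f);
	return err;
}

int main(void)
{
	return do_work("/etc/hostname") ? EXIT_FAILURE : EXIT_SUCCESS;
}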


Best regards,
Sean

diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
index d9298cf5e1a7..6898b8826302 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -34,11 +34,7 @@ struct i915_vma *intel_pxp_cmd_get_batch(struct intel_pxp *pxp,
 
 	memcpy(cmd, cmd_buf, cmd_size_in_dw * 4);
 
-	if (drm_debug_enabled(DRM_UT_DRIVER)) {                                                        // sean: my original intention was just to print out the hex command for debugging, but let me remove this.
-		print_hex_dump(KERN_DEBUG, "cmd binaries:",
-			       DUMP_PREFIX_OFFSET, 4, 4, cmd, cmd_size_in_dw * 4, true);
-	}
-
+	i915_gem_object_flush_map(pool->obj);                                                                // sean: we should flush the map before unpinning, thanks to Chris for the suggestion.
 	i915_gem_object_unpin_map(pool->obj);
 
 	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
@@ -56,103 +52,73 @@ int intel_pxp_cmd_submit(struct intel_pxp *pxp, u32 *cmd, int cmd_size_in_dw)
 	struct i915_vma *batch;
 	struct i915_request *rq;
 	struct intel_context *ce = NULL;
-	bool is_engine_pm_get = false;                                                               // sean: replace the bool flags with goto labels.
-	bool is_batch_vma_pin = false;
-	bool is_skip_req_on_err = false;
-	bool is_engine_get_pool = false;
 	struct intel_gt_buffer_pool_node *pool = NULL;
 	struct intel_gt *gt = container_of(pxp, struct intel_gt, pxp);
+	int size = cmd_size_in_dw * 4;
 
 	ce = pxp->vcs_engine->kernel_context;
-	if (!ce) {
-		drm_err(&gt->i915->drm, "VCS engine does not have context\n");
-		err = -EINVAL;
-		goto end;
-	}
+	if (!ce)
+		return -EINVAL;
 
-	if (!cmd || (cmd_size_in_dw * 4) > PAGE_SIZE) { 
-		drm_err(&gt->i915->drm, "Failed to %s bad params\n", __func__);
+	if (!cmd || cmd_size_in_dw == 0)
 		return -EINVAL;
-	}
 
 	intel_engine_pm_get(ce->engine);
-	is_engine_pm_get = true;
 
-	pool = intel_gt_get_buffer_pool(gt, PAGE_SIZE);
+	size = round_up(size, PAGE_SIZE);                                                                                   // sean: I think this is a better way to handle the allocation size.
+	pool = intel_gt_get_buffer_pool(gt, size);
 	if (IS_ERR(pool)) {
-		drm_err(&gt->i915->drm, "Failed to intel_engine_get_pool()\n");
 		err = PTR_ERR(pool);
-		goto end;
+		goto out_engine_pm_put;
 	}
-	is_engine_get_pool = true;
 
 	batch = intel_pxp_cmd_get_batch(pxp, ce, pool, cmd, cmd_size_in_dw);
 	if (IS_ERR(batch)) {
-		drm_err(&gt->i915->drm, "Failed to intel_pxp_cmd_get_batch()\n");
 		err = PTR_ERR(batch);
-		goto end;
+		goto out_engine_pool_put;
 	}
 
 	err = i915_vma_pin(batch, 0, 0, PIN_USER);
-	if (err) {
-		drm_err(&gt->i915->drm, "Failed to i915_vma_pin()\n");
-		goto end;
-	}
-	is_batch_vma_pin = true;
+	if (err)
+		goto out_engine_pool_put;
 
 	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
-		drm_err(&gt->i915->drm, "Failed to intel_context_create_request()\n");
 		err = PTR_ERR(rq);
-		goto end;
+		goto out_vma_unpin;
 	}
-	is_skip_req_on_err = true;
 
 	err = intel_gt_buffer_pool_mark_active(pool, rq);
-	if (err) {
-		drm_err(&gt->i915->drm, "Failed to intel_engine_pool_mark_active()\n");
-		goto end;
-	}
+	if (err)
+		goto out_vma_unpin;
 
 	i915_vma_lock(batch);
 	err = i915_request_await_object(rq, batch->obj, false);
 	if (!err)
 		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
-	if (err) {
-		drm_err(&gt->i915->drm, "Failed to i915_request_await_object()\n");
-		goto end;
-	}
+	if (err)
+		goto out_vma_unpin;
 
 	if (ce->engine->emit_init_breadcrumb) {
 		err = ce->engine->emit_init_breadcrumb(rq);
-		if (err) {
-			drm_err(&gt->i915->drm, "Failed to emit_init_breadcrumb()\n");
-			goto end;
-		}
+		if (err)
+			goto out_vma_unpin;
 	}
 
 	err = ce->engine->emit_bb_start(rq, batch->node.start,
-		batch->node.size, 0);
-	if (err) {
-		drm_err(&gt->i915->drm, "Failed to emit_bb_start()\n");
-		goto end;
-	}
+					batch->node.size, 0);
+	if (err)
+		goto out_vma_unpin;
 
 	i915_request_add(rq);
 
-end:
-	if (unlikely(err) && is_skip_req_on_err)                                    // sean: I think we should not skip this if there are further errors; thanks to Chris for pointing it out.
-		i915_request_set_error_once(rq, err);
-
-	if (is_batch_vma_pin)
-		i915_vma_unpin(batch);
-
-	if (is_engine_get_pool)
-		intel_gt_buffer_pool_put(pool);
-
-	if (is_engine_pm_get)
-		intel_engine_pm_put(ce->engine);
+out_vma_unpin:
+	i915_vma_unpin(batch);
+out_engine_pool_put:
+	intel_gt_buffer_pool_put(pool);
+out_engine_pm_put:
+	intel_engine_pm_put(ce->engine);
 
 	return err;
 }
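
On the buffer size: rounding the command size up to whole pages before calling intel_gt_get_buffer_pool() just makes the request match page-granular backing. For what it's worth, the kernel's round_up() on a power-of-two alignment like PAGE_SIZE is the usual mask arithmetic; a standalone illustration with example values only, not the driver code:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as round_up() for a power-of-two alignment. */
static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	const uint64_t page_size = 4096;     /* typical PAGE_SIZE */
	const uint64_t cmd_size_in_dw = 20;  /* example command length in dwords */
	uint64_t size = cmd_size_in_dw * 4;  /* bytes, as computed in the patch */

	printf("%llu bytes -> %llu bytes (%llu page(s))\n",
	       (unsigned long long)size,
	       (unsigned long long)round_up_pow2(size, page_size),
	       (unsigned long long)round_up_pow2(size, page_size) / page_size);
	return 0;
}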



