[PATCH] drm/i915/gvt: scan non-privileged batch buffer for debug purpose
Zhenyu Wang
zhenyuw at linux.intel.com
Fri Mar 30 07:35:42 UTC 2018
On 2018.03.30 14:34:33 +0800, Zhao Yan wrote:
> For performance reasons, scanning of non-privileged batch buffers is turned
> off by default, but it can be turned on via debugfs for debugging purposes.
> After scanning, we still submit the original non-privileged batch buffer to
> hardware, so the scan is only a peek into guest-submitted commands and does
> not affect the execution results.
>
> v2:
> - rebase
> - update comments for start_gma_offset (henry)
>
> Signed-off-by: Zhao Yan <yan.y.zhao at intel.com>
> ---
My only concern is that this debug option really affects the VM, so we need
to warn the user and explicitly report the state change in the kernel log.
It would be better to create the debugfs entry with ops that can handle
that. With that change,

Reviewed-by: Zhenyu Wang <zhenyuw at linux.intel.com>
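For reference, a minimal sketch (not part of the posted patch; handler names
and the exact log wording are only illustrative) of how the debugfs entry in
drivers/gpu/drm/i915/gvt/debugfs.c could be backed by get/set ops so that the
toggle shows up in dmesg, e.g. on "echo 1 > /sys/kernel/debug/gvt/vgpu1/scan_nonprivbb"
(path assumed from the existing per-vgpu debugfs directory):

/*
 * Illustrative only: replace debugfs_create_bool() with a file backed by
 * get/set handlers so that toggling the option is reported in the kernel log.
 */
static int vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;

	*val = vgpu->scan_nonprivbb;
	return 0;
}

static int vgpu_scan_nonprivbb_set(void *data, u64 val)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
	bool enable = !!val;

	/* warn loudly: scanning non-privileged batch buffers costs performance */
	if (enable != vgpu->scan_nonprivbb)
		pr_warn("gvt: vgpu%d: %s non-privileged batch buffer scanning\n",
			vgpu->id, enable ? "enable" : "disable");

	vgpu->scan_nonprivbb = enable;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
			vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
			"0x%llx\n");

/* in intel_gvt_debugfs_add_vgpu(), instead of debugfs_create_bool(): */
ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
			  vgpu, &vgpu_scan_nonprivbb_fops);
if (!ent)
	return -ENOMEM;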
> drivers/gpu/drm/i915/gvt/cmd_parser.c | 54 ++++++++++++++++++++++-------
> drivers/gpu/drm/i915/gvt/debugfs.c | 5 +++
> drivers/gpu/drm/i915/gvt/gvt.h | 1 +
> drivers/gpu/drm/i915/gvt/scheduler.c | 64 +++++++++++++++++++++++------------
> drivers/gpu/drm/i915/gvt/scheduler.h | 1 +
> drivers/gpu/drm/i915/gvt/trace.h | 24 ++++++++++---
> 6 files changed, 110 insertions(+), 39 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> index d85939b..416df5b 100644
> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> @@ -1604,7 +1604,7 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
> if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
> || IS_KABYLAKE(gvt->dev_priv)) {
> /* BDW decides privilege based on address space */
> - if (cmd_val(s, 0) & (1 << 8))
> + if (cmd_val(s, 0) & (1 << 8) && !s->vgpu->scan_nonprivbb)
> return 0;
> }
> return 1;
> @@ -1618,6 +1618,8 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
> bool bb_end = false;
> struct intel_vgpu *vgpu = s->vgpu;
> u32 cmd;
> + struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
> + s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
>
> *bb_size = 0;
>
> @@ -1629,18 +1631,22 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
> cmd = cmd_val(s, 0);
> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
> if (info == NULL) {
> - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
> - cmd, get_opcode(cmd, s->ring_id));
> + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
> + cmd, get_opcode(cmd, s->ring_id),
> + (s->buf_addr_type == PPGTT_BUFFER) ?
> + "ppgtt" : "ggtt", s->ring_id, s->workload);
> return -EBADRQC;
> }
> do {
> - if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
> + if (copy_gma_to_hva(s->vgpu, mm,
> gma, gma + 4, &cmd) < 0)
> return -EFAULT;
> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
> if (info == NULL) {
> - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
> - cmd, get_opcode(cmd, s->ring_id));
> + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
> + cmd, get_opcode(cmd, s->ring_id),
> + (s->buf_addr_type == PPGTT_BUFFER) ?
> + "ppgtt" : "ggtt", s->ring_id, s->workload);
> return -EBADRQC;
> }
>
> @@ -1666,6 +1672,9 @@ static int perform_bb_shadow(struct parser_exec_state *s)
> unsigned long gma = 0;
> unsigned long bb_size;
> int ret = 0;
> + struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
> + s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
> + unsigned long gma_start_offset = 0;
>
> /* get the start gm address of the batch buffer */
> gma = get_gma_bb_from_cmd(s, 1);
> @@ -1680,8 +1689,24 @@ static int perform_bb_shadow(struct parser_exec_state *s)
> if (!bb)
> return -ENOMEM;
>
> + bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
> +
> +	/* gma_start_offset stores the batch buffer's start gma's offset
> +	 * relative to the page boundary. So for a non-privileged batch
> +	 * buffer, the shadowed gem object holds exactly the same page
> +	 * layout as the original gem object. This is for the convenience
> +	 * of replacing the whole non-privileged batch buffer page with this
> +	 * shadowed one in the PPGTT at the same gma address. (This replacing
> +	 * action is not implemented yet, but may be necessary in the
> +	 * future.)
> +	 * For a privileged batch buffer, we just change the start gma
> +	 * address to that of the shadowed page.
> +	 */
> + if (bb->ppgtt)
> + gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
> +
> bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
> - roundup(bb_size, PAGE_SIZE));
> + roundup(bb_size + gma_start_offset, PAGE_SIZE));
> if (IS_ERR(bb->obj)) {
> ret = PTR_ERR(bb->obj);
> goto err_free_bb;
> @@ -1702,9 +1727,9 @@ static int perform_bb_shadow(struct parser_exec_state *s)
> bb->clflush &= ~CLFLUSH_BEFORE;
> }
>
> - ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
> + ret = copy_gma_to_hva(s->vgpu, mm,
> gma, gma + bb_size,
> - bb->va);
> + bb->va + gma_start_offset);
> if (ret < 0) {
> gvt_vgpu_err("fail to copy guest ring buffer\n");
> ret = -EFAULT;
> @@ -1730,7 +1755,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
> * buffer's gma in pair. After all, we don't want to pin the shadow
> * buffer here (too early).
> */
> - s->ip_va = bb->va;
> + s->ip_va = bb->va + gma_start_offset;
> s->ip_gma = gma;
> return 0;
> err_unmap:
> @@ -2469,15 +2494,18 @@ static int cmd_parser_exec(struct parser_exec_state *s)
>
> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
> if (info == NULL) {
> - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
> - cmd, get_opcode(cmd, s->ring_id));
> + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
> + cmd, get_opcode(cmd, s->ring_id),
> + (s->buf_addr_type == PPGTT_BUFFER) ?
> + "ppgtt" : "ggtt", s->ring_id, s->workload);
> return -EBADRQC;
> }
>
> s->info = info;
>
> trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
> - cmd_length(s), s->buf_type);
> + cmd_length(s), s->buf_type, s->buf_addr_type,
> + s->workload, info->name);
>
> if (info->handler) {
> ret = info->handler(s);
> diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
> index f7d0078..ef05378 100644
> --- a/drivers/gpu/drm/i915/gvt/debugfs.c
> +++ b/drivers/gpu/drm/i915/gvt/debugfs.c
> @@ -151,6 +151,11 @@ int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
> if (!ent)
> return -ENOMEM;
>
> + ent = debugfs_create_bool("scan_nonprivbb", 0644, vgpu->debugfs,
> + &vgpu->scan_nonprivbb);
> + if (!ent)
> + return -ENOMEM;
> +
> return 0;
> }
>
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index efacd8a..b52b133 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -226,6 +226,7 @@ struct intel_vgpu {
>
> struct completion vblank_done;
>
> + bool scan_nonprivbb;
> };
>
> /* validating GM healthy status*/
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index d71e3de..5366519 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -452,12 +452,6 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
> int ret;
>
> list_for_each_entry(bb, &workload->shadow_bb, list) {
> - bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
> - if (IS_ERR(bb->vma)) {
> - ret = PTR_ERR(bb->vma);
> - goto err;
> - }
> -
> /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
> * is only updated into ring_scan_buffer, not real ring address
> * allocated in later copy_workload_to_ring_buffer. pls be noted
> @@ -469,25 +463,53 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
> bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
> + bb->bb_offset;
>
> - /* relocate shadow batch buffer */
> - bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
> - if (gmadr_bytes == 8)
> - bb->bb_start_cmd_va[2] = 0;
> + if (bb->ppgtt) {
> +			/* for a non-priv bb, scan & shadow is only for
> +			 * debugging purposes, so the content of the shadow bb
> +			 * is the same as that of the original bb. Therefore,
> +			 * rather than switching to the shadow bb's gma
> +			 * address, we directly use the original batch
> +			 * buffer's gma address and send the original bb to
> +			 * hardware.
> +			 */
> + if (bb->clflush & CLFLUSH_AFTER) {
> + drm_clflush_virt_range(bb->va,
> + bb->obj->base.size);
> + bb->clflush &= ~CLFLUSH_AFTER;
> + }
> + i915_gem_obj_finish_shmem_access(bb->obj);
> + bb->accessing = false;
> +
> + } else {
> + bb->vma = i915_gem_object_ggtt_pin(bb->obj,
> + NULL, 0, 0, 0);
> + if (IS_ERR(bb->vma)) {
> + ret = PTR_ERR(bb->vma);
> + goto err;
> + }
>
> - /* No one is going to touch shadow bb from now on. */
> - if (bb->clflush & CLFLUSH_AFTER) {
> - drm_clflush_virt_range(bb->va, bb->obj->base.size);
> - bb->clflush &= ~CLFLUSH_AFTER;
> - }
> + /* relocate shadow batch buffer */
> + bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
> + if (gmadr_bytes == 8)
> + bb->bb_start_cmd_va[2] = 0;
>
> - ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
> - if (ret)
> - goto err;
> + /* No one is going to touch shadow bb from now on. */
> + if (bb->clflush & CLFLUSH_AFTER) {
> + drm_clflush_virt_range(bb->va,
> + bb->obj->base.size);
> + bb->clflush &= ~CLFLUSH_AFTER;
> + }
>
> - i915_gem_obj_finish_shmem_access(bb->obj);
> - bb->accessing = false;
> + ret = i915_gem_object_set_to_gtt_domain(bb->obj,
> + false);
> + if (ret)
> + goto err;
>
> - i915_vma_move_to_active(bb->vma, workload->req, 0);
> + i915_gem_obj_finish_shmem_access(bb->obj);
> + bb->accessing = false;
> +
> + i915_vma_move_to_active(bb->vma, workload->req, 0);
> + }
> }
> return 0;
> err:
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
> index 486ed57..6c64478 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.h
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.h
> @@ -125,6 +125,7 @@ struct intel_vgpu_shadow_bb {
> unsigned int clflush;
> bool accessing;
> unsigned long bb_offset;
> + bool ppgtt;
> };
>
> #define workload_q_head(vgpu, ring_id) \
> diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
> index 82093f1..1fd6420 100644
> --- a/drivers/gpu/drm/i915/gvt/trace.h
> +++ b/drivers/gpu/drm/i915/gvt/trace.h
> @@ -224,19 +224,25 @@
> TP_printk("%s", __entry->buf)
> );
>
> +#define GVT_CMD_STR_LEN 40
> TRACE_EVENT(gvt_command,
> - TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
> - u32 buf_type),
> + TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
> + u32 cmd_len, u32 buf_type, u32 buf_addr_type,
> + void *workload, char *cmd_name),
>
> - TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
> + TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
> + buf_addr_type, workload, cmd_name),
>
> TP_STRUCT__entry(
> __field(u8, vgpu_id)
> __field(u8, ring_id)
> __field(u32, ip_gma)
> __field(u32, buf_type)
> + __field(u32, buf_addr_type)
> __field(u32, cmd_len)
> + __field(void*, workload)
> __dynamic_array(u32, raw_cmd, cmd_len)
> + __array(char, cmd_name, GVT_CMD_STR_LEN)
> ),
>
> TP_fast_assign(
> @@ -244,17 +250,25 @@
> __entry->ring_id = ring_id;
> __entry->ip_gma = ip_gma;
> __entry->buf_type = buf_type;
> + __entry->buf_addr_type = buf_addr_type;
> __entry->cmd_len = cmd_len;
> + __entry->workload = workload;
> + snprintf(__entry->cmd_name, GVT_CMD_STR_LEN, "%s", cmd_name);
> memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
> ),
>
>
> - TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
> + TP_printk("vgpu%d ring %d: address_type %u, buf_type %u, ip_gma %08x,cmd (name=%s,len=%u,raw cmd=%s), workload=%p\n",
> __entry->vgpu_id,
> __entry->ring_id,
> + __entry->buf_addr_type,
> __entry->buf_type,
> __entry->ip_gma,
> - __print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
> + __entry->cmd_name,
> + __entry->cmd_len,
> + __print_array(__get_dynamic_array(raw_cmd),
> + __entry->cmd_len, 4),
> + __entry->workload)
> );
>
> #define GVT_TEMP_STR_LEN 10
> --
> 1.9.1
>
--
Open Source Technology Center, Intel ltd.
$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827