[PATCH v8] drm/i915/gvt: replace the gvt_err with gvt_vgpu_err
Zhang, Tina
tina.zhang at intel.com
Fri Mar 10 07:48:28 UTC 2017
This patch is rebased onto the gvt_fixes branch.
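
For reviewers, a minimal sketch of the usage pattern this patch introduces
(simplified from the debug.h hunk quoted below; the call site shown is
hypothetical and not part of the patch): gvt_vgpu_err() expects a local "vgpu"
pointer in the calling scope, which is why several functions below gain a
"struct intel_vgpu *vgpu" local variable.

	/*
	 * Simplified form of the macro added to gvt/debug.h by this patch.
	 * Relies on IS_ERR_OR_NULL() from <linux/err.h> and DRM_DEBUG_DRIVER()
	 * from the DRM headers.
	 */
	#define gvt_vgpu_err(fmt, args...)					\
	do {									\
		if (IS_ERR_OR_NULL(vgpu))					\
			DRM_DEBUG_DRIVER("gvt: "fmt, ##args);			\
		else								\
			DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
	} while (0)

	/* Hypothetical call site: a "vgpu" variable must be in scope. */
	static int example_mmio_check(struct intel_vgpu *vgpu, unsigned int offset)
	{
		if (offset & 3) {
			gvt_vgpu_err("unaligned MMIO offset %x\n", offset);
			return -EINVAL;
		}
		return 0;
	}
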
> -----Original Message-----
> From: Zhang, Tina
> Sent: Friday, March 10, 2017 3:42 PM
> To: intel-gvt-dev at lists.freedesktop.org
> Cc: Zhang, Tina <tina.zhang at intel.com>
> Subject: [PATCH v8] drm/i915/gvt: replace the gvt_err with gvt_vgpu_err
>
> gvt_err should be used only for the very few critical error messages during
> host i915 driver initialization. This patch:
> 1. removes the redundant gvt_err;
> 2. creates a new gvt_vgpu_err to show errors caused by a vgpu;
> 3. replaces most of the gvt_err calls with gvt_vgpu_err;
> 4. leaves very few gvt_err calls for dumping gvt errors during host gvt
> initialization.
>
> v2. change name to gvt_vgpu_err and add vgpu id to the message. (Kevin)
>     add gpu id to gvt_vgpu_err. (Zhi)
> v3. remove gpu id from gvt_vgpu_err caller. (Zhi)
> v4. add vgpu check to the gvt_vgpu_err macro. (Zhiyuan)
> v5. add comments for v3 and v4.
> v6. split the big patch into two, with this patch only for checking
>     gvt_vgpu_err. (Zhenyu)
> v7. rebase to staging branch
> v8. rebase to fix branch
>
> Signed-off-by: Tina Zhang <tina.zhang at intel.com>
>
> diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c
> b/drivers/gpu/drm/i915/gvt/aperture_gm.c
> index 3b6caac..325618d 100644
> --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
> +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
> @@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
> const char *item;
>
> 	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
> - gvt_err("Invalid vGPU creation params\n");
> + gvt_vgpu_err("Invalid vGPU creation params\n");
> return -EINVAL;
> }
>
> @@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
> return 0;
>
> no_enough_resource:
> - gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
> -	gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
> -		vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
> +	gvt_vgpu_err("fail to allocate resource %s\n", item);
> +	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
> +		BYTES_TO_MB(request), BYTES_TO_MB(avail),
> BYTES_TO_MB(max), BYTES_TO_MB(taken));
> return -ENOSPC;
> }
> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c
> b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> index b9c8e24..ff9a595 100644
> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
> @@ -824,20 +824,19 @@ static int cmd_reg_handler(struct parser_exec_state
> *s,
> struct intel_gvt *gvt = vgpu->gvt;
>
> if (offset + 4 > gvt->device_info.mmio_size) {
> - gvt_err("%s access to (%x) outside of MMIO range\n",
> + gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
> cmd, offset);
> return -EINVAL;
> }
>
> if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
> - gvt_err("vgpu%d: %s access to non-render register (%x)\n",
> - s->vgpu->id, cmd, offset);
> + gvt_vgpu_err("%s access to non-render register (%x)\n",
> + cmd, offset);
> return 0;
> }
>
> if (is_shadowed_mmio(offset)) {
> - gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
> - s->vgpu->id, offset);
> + gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
> return 0;
> }
>
> @@ -1129,6 +1128,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
> 		struct mi_display_flip_command_info *info)
> {
> struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
> + struct intel_vgpu *vgpu = s->vgpu;
> u32 dword0 = cmd_val(s, 0);
> u32 dword1 = cmd_val(s, 1);
> u32 dword2 = cmd_val(s, 2);
> @@ -1167,7 +1167,7 @@ static int skl_decode_mi_display_flip(struct
> parser_exec_state *s,
> break;
>
> default:
> - gvt_err("unknown plane code %d\n", plane);
> + gvt_vgpu_err("unknown plane code %d\n", plane);
> return -EINVAL;
> }
>
> @@ -1274,25 +1274,26 @@ static int update_plane_mmio_from_mi_display_flip(
> static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
> {
> struct mi_display_flip_command_info info;
> + struct intel_vgpu *vgpu = s->vgpu;
> int ret;
> int i;
> int len = cmd_length(s);
>
> ret = decode_mi_display_flip(s, &info);
> if (ret) {
> - gvt_err("fail to decode MI display flip command\n");
> + gvt_vgpu_err("fail to decode MI display flip command\n");
> return ret;
> }
>
> ret = check_mi_display_flip(s, &info);
> if (ret) {
> - gvt_err("invalid MI display flip command\n");
> + gvt_vgpu_err("invalid MI display flip command\n");
> return ret;
> }
>
> ret = update_plane_mmio_from_mi_display_flip(s, &info);
> if (ret) {
> - gvt_err("fail to update plane mmio\n");
> + gvt_vgpu_err("fail to update plane mmio\n");
> return ret;
> }
>
> @@ -1350,7 +1351,8 @@ static inline int cmd_address_audit(struct
> parser_exec_state *s,
> int ret;
>
> if (op_size > max_surface_size) {
> -		gvt_err("command address audit fail name %s\n", s->info->name);
> + gvt_vgpu_err("command address audit fail name %s\n",
> + s->info->name);
> return -EINVAL;
> }
>
> @@ -1367,7 +1369,7 @@ static inline int cmd_address_audit(struct
> parser_exec_state *s,
> }
> return 0;
> err:
> - gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
> +	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
> s->info->name, guest_gma, op_size);
>
> pr_err("cmd dump: ");
> @@ -1412,8 +1414,10 @@ static int cmd_handler_mi_store_data_imm(struct
> parser_exec_state *s)
>
> static inline int unexpected_cmd(struct parser_exec_state *s)
> {
> - gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
> - s->vgpu->id, s->info->name);
> + struct intel_vgpu *vgpu = s->vgpu;
> +
> + gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
> +
> return -EINVAL;
> }
>
> @@ -1516,7 +1520,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu,
> struct intel_vgpu_mm *mm,
> while (gma != end_gma) {
> gpa = intel_vgpu_gma_to_gpa(mm, gma);
> if (gpa == INTEL_GVT_INVALID_ADDR) {
> - gvt_err("invalid gma address: %lx\n", gma);
> + gvt_vgpu_err("invalid gma address: %lx\n", gma);
> return -EFAULT;
> }
>
> @@ -1557,6 +1561,7 @@ static uint32_t find_bb_size(struct
> parser_exec_state *s)
> uint32_t bb_size = 0;
> uint32_t cmd_len = 0;
> bool met_bb_end = false;
> + struct intel_vgpu *vgpu = s->vgpu;
> u32 cmd;
>
> 	/* get the start gm address of the batch buffer */
> @@ -1565,7 +1570,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
>
> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
> if (info == NULL) {
> - gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
> + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
> cmd, get_opcode(cmd, s->ring_id));
> return -EINVAL;
> }
> @@ -1574,7 +1579,7 @@ static uint32_t find_bb_size(struct
> parser_exec_state *s)
> gma, gma + 4, &cmd);
> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
> if (info == NULL) {
> - gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
> + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
> cmd, get_opcode(cmd, s->ring_id));
> return -EINVAL;
> }
> @@ -1599,6 +1604,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
> static int perform_bb_shadow(struct parser_exec_state *s)
> {
> struct intel_shadow_bb_entry *entry_obj;
> + struct intel_vgpu *vgpu = s->vgpu;
> unsigned long gma = 0;
> uint32_t bb_size;
> void *dst = NULL;
> @@ -1633,7 +1639,7 @@ static int perform_bb_shadow(struct
> parser_exec_state *s)
>
> ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
> if (ret) {
> - gvt_err("failed to set shadow batch to CPU\n");
> + gvt_vgpu_err("failed to set shadow batch to CPU\n");
> goto unmap_src;
> }
>
> @@ -1645,7 +1651,7 @@ static int perform_bb_shadow(struct
> parser_exec_state *s)
> gma, gma + bb_size,
> dst);
> if (ret) {
> - gvt_err("fail to copy guest ring buffer\n");
> + gvt_vgpu_err("fail to copy guest ring buffer\n");
> goto unmap_src;
> }
>
> @@ -1676,15 +1682,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
> {
> bool second_level;
> int ret = 0;
> + struct intel_vgpu *vgpu = s->vgpu;
>
> if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
> - gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
> +		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
> return -EINVAL;
> }
>
> second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
> if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
> - gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
> +		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
> return -EINVAL;
> }
>
> @@ -1702,7 +1709,7 @@ static int
> cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
> if (batch_buffer_needs_scan(s)) {
> ret = perform_bb_shadow(s);
> if (ret < 0)
> - gvt_err("invalid shadow batch buffer\n");
> + gvt_vgpu_err("invalid shadow batch buffer\n");
> } else {
> /* emulate a batch buffer end to do return right */
> ret = cmd_handler_mi_batch_buffer_end(s);
> @@ -2429,6 +2436,7 @@ static int cmd_parser_exec(struct parser_exec_state
> *s)
> int ret = 0;
> cycles_t t0, t1, t2;
> struct parser_exec_state s_before_advance_custom;
> + struct intel_vgpu *vgpu = s->vgpu;
>
> t0 = get_cycles();
>
> @@ -2436,7 +2444,7 @@ static int cmd_parser_exec(struct parser_exec_state
> *s)
>
> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
> if (info == NULL) {
> - gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
> + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
> cmd, get_opcode(cmd, s->ring_id));
> return -EINVAL;
> }
> @@ -2452,7 +2460,7 @@ static int cmd_parser_exec(struct parser_exec_state
> *s)
> if (info->handler) {
> ret = info->handler(s);
> if (ret < 0) {
> - gvt_err("%s handler error\n", info->name);
> + gvt_vgpu_err("%s handler error\n", info->name);
> return ret;
> }
> }
> @@ -2463,7 +2471,7 @@ static int cmd_parser_exec(struct parser_exec_state
> *s)
> if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
> ret = cmd_advance_default(s);
> if (ret) {
> - gvt_err("%s IP advance error\n", info->name);
> + gvt_vgpu_err("%s IP advance error\n", info->name);
> return ret;
> }
> }
> @@ -2486,6 +2494,7 @@ static int command_scan(struct parser_exec_state
> *s,
>
> unsigned long gma_head, gma_tail, gma_bottom;
> int ret = 0;
> + struct intel_vgpu *vgpu = s->vgpu;
>
> gma_head = rb_start + rb_head;
> gma_tail = rb_start + rb_tail;
> @@ -2497,7 +2506,7 @@ static int command_scan(struct parser_exec_state
> *s,
> if (s->buf_type == RING_BUFFER_INSTRUCTION) {
> if (!(s->ip_gma >= rb_start) ||
> !(s->ip_gma < gma_bottom)) {
> - gvt_err("ip_gma %lx out of ring scope."
> + gvt_vgpu_err("ip_gma %lx out of ring scope."
> "(base:0x%lx, bottom: 0x%lx)\n",
> s->ip_gma, rb_start,
> gma_bottom);
> @@ -2505,7 +2514,7 @@ static int command_scan(struct parser_exec_state
> *s,
> return -EINVAL;
> }
> if (gma_out_of_range(s->ip_gma, gma_head,
> gma_tail)) {
> - gvt_err("ip_gma %lx out of range."
> + gvt_vgpu_err("ip_gma %lx out of range."
> "base 0x%lx head 0x%lx tail 0x%lx\n",
> s->ip_gma, rb_start,
> rb_head, rb_tail);
> @@ -2515,7 +2524,7 @@ static int command_scan(struct parser_exec_state
> *s,
> }
> ret = cmd_parser_exec(s);
> if (ret) {
> - gvt_err("cmd parser error\n");
> + gvt_vgpu_err("cmd parser error\n");
> parser_exec_state_dump(s);
> break;
> }
> @@ -2639,7 +2648,7 @@ static int shadow_workload_ring_buffer(struct
> intel_vgpu_workload *workload)
> gma_head, gma_top,
> workload->shadow_ring_buffer_va);
> if (ret) {
> - gvt_err("fail to copy guest ring buffer\n");
> + gvt_vgpu_err("fail to copy guest ring buffer\n");
> return ret;
> }
> copy_len = gma_top - gma_head;
> @@ -2651,7 +2660,7 @@ static int shadow_workload_ring_buffer(struct
> intel_vgpu_workload *workload)
> gma_head, gma_tail,
> workload->shadow_ring_buffer_va + copy_len);
> if (ret) {
> - gvt_err("fail to copy guest ring buffer\n");
> + gvt_vgpu_err("fail to copy guest ring buffer\n");
> return ret;
> }
> ring->tail += workload->rb_len;
> @@ -2662,16 +2671,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
> int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
> {
> int ret;
> + struct intel_vgpu *vgpu = workload->vgpu;
>
> ret = shadow_workload_ring_buffer(workload);
> if (ret) {
> - gvt_err("fail to shadow workload ring_buffer\n");
> + gvt_vgpu_err("fail to shadow workload ring_buffer\n");
> return ret;
> }
>
> ret = scan_workload(workload);
> if (ret) {
> - gvt_err("scan workload error\n");
> + gvt_vgpu_err("scan workload error\n");
> return ret;
> }
> return 0;
> @@ -2681,6 +2691,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
> {
> int ctx_size = wa_ctx->indirect_ctx.size;
> unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
> + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
> struct drm_i915_gem_object *obj;
> int ret = 0;
> void *map;
> @@ -2694,14 +2705,14 @@ static int shadow_indirect_ctx(struct
> intel_shadow_wa_ctx *wa_ctx)
> /* get the va of the shadow batch buffer */
> map = i915_gem_object_pin_map(obj, I915_MAP_WB);
> if (IS_ERR(map)) {
> - gvt_err("failed to vmap shadow indirect ctx\n");
> + gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
> ret = PTR_ERR(map);
> goto put_obj;
> }
>
> ret = i915_gem_object_set_to_cpu_domain(obj, false);
> if (ret) {
> - gvt_err("failed to set shadow indirect ctx to CPU\n");
> + gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
> goto unmap_src;
> }
>
> @@ -2710,7 +2721,7 @@ static int shadow_indirect_ctx(struct
> intel_shadow_wa_ctx *wa_ctx)
> guest_gma, guest_gma + ctx_size,
> map);
> if (ret) {
> - gvt_err("fail to copy guest indirect ctx\n");
> + gvt_vgpu_err("fail to copy guest indirect ctx\n");
> goto unmap_src;
> }
>
> @@ -2744,13 +2755,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
> int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
> {
> int ret;
> + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
>
> if (wa_ctx->indirect_ctx.size == 0)
> return 0;
>
> ret = shadow_indirect_ctx(wa_ctx);
> if (ret) {
> - gvt_err("fail to shadow indirect ctx\n");
> + gvt_vgpu_err("fail to shadow indirect ctx\n");
> return ret;
> }
>
> @@ -2758,7 +2770,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct
> intel_shadow_wa_ctx *wa_ctx)
>
> ret = scan_wa_ctx(wa_ctx);
> if (ret) {
> - gvt_err("scan wa ctx error\n");
> + gvt_vgpu_err("scan wa ctx error\n");
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/i915/gvt/debug.h
> b/drivers/gpu/drm/i915/gvt/debug.h
> index 68cba7b..b0cff4d 100644
> --- a/drivers/gpu/drm/i915/gvt/debug.h
> +++ b/drivers/gpu/drm/i915/gvt/debug.h
> @@ -27,6 +27,14 @@
> #define gvt_err(fmt, args...) \
> DRM_ERROR("gvt: "fmt, ##args)
>
> +#define gvt_vgpu_err(fmt, args...) \
> +do { \
> +	if (IS_ERR_OR_NULL(vgpu)) \
> +		DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
> +	else \
> +		DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
> +} while (0)
> +
> #define gvt_dbg_core(fmt, args...) \
> DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
>
> diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
> index bda85df..f1648fe 100644
> --- a/drivers/gpu/drm/i915/gvt/edid.c
> +++ b/drivers/gpu/drm/i915/gvt/edid.c
> @@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu
> *vgpu)
> unsigned char chr = 0;
>
> if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
> -		gvt_err("Driver tries to read EDID without proper sequence!\n");
> +		gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
> return 0;
> }
> if (edid->current_edid_read >= EDID_SIZE) {
> - gvt_err("edid_get_byte() exceeds the size of EDID!\n");
> + gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
> return 0;
> }
>
> if (!edid->edid_available) {
> - gvt_err("Reading EDID but EDID is not available!\n");
> + gvt_vgpu_err("Reading EDID but EDID is not available!\n");
> return 0;
> }
>
> @@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu
> *vgpu)
> chr = edid_data->edid_block[edid->current_edid_read];
> edid->current_edid_read++;
> } else {
> - gvt_err("No EDID available during the reading?\n");
> + gvt_vgpu_err("No EDID available during the reading?\n");
> }
> return chr;
> }
> @@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu,
> unsigned int offset,
> vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
> break;
> default:
> -			gvt_err("Unknown/reserved GMBUS cycle detected!\n");
> +			gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
> break;
> }
> /*
> @@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu,
> unsigned int offset,
> */
> } else {
> memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
> -		gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
> -			vgpu->id);
> +		gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
> }
> return 0;
> }
> diff --git a/drivers/gpu/drm/i915/gvt/execlist.c
> b/drivers/gpu/drm/i915/gvt/execlist.c
> index 46eb9fd..f1f426a 100644
> --- a/drivers/gpu/drm/i915/gvt/execlist.c
> +++ b/drivers/gpu/drm/i915/gvt/execlist.c
> @@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
> struct intel_vgpu_execlist *execlist,
> 		struct execlist_ctx_descriptor_format *ctx)
> {
> + struct intel_vgpu *vgpu = execlist->vgpu;
> struct intel_vgpu_execlist_slot *running = execlist->running_slot;
> struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
> 	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
> @@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
> gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
>
> if (WARN_ON(!same_context(ctx, execlist->running_context))) {
> - gvt_err("schedule out context is not running context,"
> + gvt_vgpu_err("schedule out context is not running context,"
> "ctx id %x running ctx id %x\n",
> ctx->context_id,
> execlist->running_context->context_id);
> @@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot
> *get_next_execlist_slot(
> status.udw = vgpu_vreg(vgpu, status_reg + 4);
>
> if (status.execlist_queue_full) {
> - gvt_err("virtual execlist slots are full\n");
> + gvt_vgpu_err("virtual execlist slots are full\n");
> return NULL;
> }
>
> @@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct
> intel_vgpu_execlist *execlist,
>
> struct execlist_ctx_descriptor_format *ctx0, *ctx1;
> struct execlist_context_status_format status;
> + struct intel_vgpu *vgpu = execlist->vgpu;
>
> gvt_dbg_el("emulate schedule-in\n");
>
> if (!slot) {
> - gvt_err("no available execlist slot\n");
> + gvt_vgpu_err("no available execlist slot\n");
> return -EINVAL;
> }
>
> @@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct
> intel_vgpu_workload *workload)
>
> vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4,
> 0);
> if (IS_ERR(vma)) {
> - gvt_err("Cannot pin\n");
> return;
> }
>
> @@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct
> intel_shadow_wa_ctx *wa_ctx)
> vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
> 0, CACHELINE_BYTES, 0);
> if (IS_ERR(vma)) {
> - gvt_err("Cannot pin indirect ctx obj\n");
> return;
> }
>
> @@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
> {
> struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
> struct intel_vgpu_mm *mm;
> + struct intel_vgpu *vgpu = workload->vgpu;
> int page_table_level;
> u32 pdp[8];
>
> @@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload
> *workload)
> } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
> page_table_level = 4;
> } else {
> - gvt_err("Advanced Context mode(SVM) is not supported!\n");
> +		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
> return -EINVAL;
> }
>
> @@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload
> *workload)
> mm = intel_vgpu_create_mm(workload->vgpu,
> INTEL_GVT_MM_PPGTT,
> pdp, page_table_level, 0);
> if (IS_ERR(mm)) {
> - gvt_err("fail to create mm object.\n");
> + gvt_vgpu_err("fail to create mm object.\n");
> return PTR_ERR(mm);
> }
> }
> @@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int
> ring_id,
> ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
> (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
> if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
> - gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
> + gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
> return -EINVAL;
> }
>
> @@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
> int ring_id)
> continue;
>
> if (!desc[i]->privilege_access) {
> -			gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
> -				vgpu->id);
> + gvt_vgpu_err("unexpected GGTT elsp submission\n");
> return -EINVAL;
> }
>
> @@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu
> *vgpu, int ring_id)
> }
>
> if (!valid_desc_bitmap) {
> - gvt_err("vgpu%d: no valid desc in a elsp submission\n",
> - vgpu->id);
> + gvt_vgpu_err("no valid desc in a elsp submission\n");
> return -EINVAL;
> }
>
> if (!test_bit(0, (void *)&valid_desc_bitmap) &&
> test_bit(1, (void *)&valid_desc_bitmap)) {
> - gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
> - vgpu->id);
> + gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
> return -EINVAL;
> }
>
> @@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
> int ring_id)
> ret = submit_context(vgpu, ring_id, &valid_desc[i],
> emulate_schedule_in);
> if (ret) {
> - gvt_err("vgpu%d: fail to schedule workload\n",
> - vgpu->id);
> + gvt_vgpu_err("fail to schedule workload\n");
> return ret;
> }
> emulate_schedule_in = false;
> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> index 28c9234..3a2a80d 100644
> --- a/drivers/gpu/drm/i915/gvt/gtt.c
> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> @@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
> {
> if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
> && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
> - gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
> - vgpu->id, addr, size);
> + gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
> + addr, size);
> return false;
> }
> return true;
> @@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu,
> struct intel_gvt_gtt_entry *p,
>
> mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
> if (mfn == INTEL_GVT_INVALID_ADDR) {
> - gvt_err("fail to translate gfn: 0x%lx\n", gfn);
> + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
> return -ENXIO;
> }
>
> @@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu
> *vgpu,
>
> daddr = dma_map_page(kdev, p->page, 0, 4096,
> PCI_DMA_BIDIRECTIONAL);
> if (dma_mapping_error(kdev, daddr)) {
> - gvt_err("fail to map dma addr\n");
> + gvt_vgpu_err("fail to map dma addr\n");
> return -EINVAL;
> }
>
> @@ -735,7 +735,7 @@ static struct intel_vgpu_ppgtt_spt
> *ppgtt_alloc_shadow_page(
> if (reclaim_one_mm(vgpu->gvt))
> goto retry;
>
> - gvt_err("fail to allocate ppgtt shadow page\n");
> + gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
> return ERR_PTR(-ENOMEM);
> }
>
> @@ -750,14 +750,14 @@ static struct intel_vgpu_ppgtt_spt
> *ppgtt_alloc_shadow_page(
> */
> ret = init_shadow_page(vgpu, &spt->shadow_page, type);
> if (ret) {
> - gvt_err("fail to initialize shadow page for spt\n");
> + gvt_vgpu_err("fail to initialize shadow page for spt\n");
> goto err;
> }
>
> ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
> gfn, ppgtt_write_protection_handler, NULL);
> if (ret) {
> - gvt_err("fail to initialize guest page for spt\n");
> + gvt_vgpu_err("fail to initialize guest page for spt\n");
> goto err;
> }
>
> @@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt
> *ppgtt_find_shadow_page(
> if (p)
> return shadow_page_to_ppgtt_spt(p);
>
> - gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
> - vgpu->id, mfn);
> + gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
> return NULL;
> }
>
> @@ -827,8 +826,8 @@ static int
> ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
> }
> s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
> if (!s) {
> - gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
> - vgpu->id, ops->get_pfn(e));
> + gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
> + ops->get_pfn(e));
> return -ENXIO;
> }
> 	return ppgtt_invalidate_shadow_page(s);
> @@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
> 
> static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
> {
> + struct intel_vgpu *vgpu = spt->vgpu;
> struct intel_gvt_gtt_entry e;
> unsigned long index;
> int ret;
> @@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct
> intel_vgpu_ppgtt_spt *spt)
>
> for_each_present_shadow_entry(spt, &e, index) {
> if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
> - gvt_err("GVT doesn't support pse bit for now\n");
> +			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
> return -EINVAL;
> }
> ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
> @@ -868,8 +868,8 @@ static int ppgtt_invalidate_shadow_page(struct
> intel_vgpu_ppgtt_spt *spt)
> ppgtt_free_shadow_page(spt);
> return 0;
> fail:
> -	gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
> -		spt->vgpu->id, spt, e.val64, e.type);
> + gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
> + spt, e.val64, e.type);
> return ret;
> }
>
> @@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt
> *ppgtt_populate_shadow_page_by_guest_entry(
> }
> return s;
> fail:
> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
> - vgpu->id, s, we->val64, we->type);
> + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
> + s, we->val64, we->type);
> return ERR_PTR(ret);
> }
>
> @@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct
> intel_vgpu_ppgtt_spt *spt)
>
> for_each_present_guest_entry(spt, &ge, i) {
> if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
> - gvt_err("GVT doesn't support pse bit now\n");
> + gvt_vgpu_err("GVT doesn't support pse bit now\n");
> ret = -EINVAL;
> goto fail;
> }
> @@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct
> intel_vgpu_ppgtt_spt *spt)
> }
> return 0;
> fail:
> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
> - vgpu->id, spt, ge.val64, ge.type);
> + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
> + spt, ge.val64, ge.type);
> return ret;
> }
>
> @@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct
> intel_vgpu_guest_page *gpt,
> struct intel_vgpu_ppgtt_spt *s =
> ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
> if (!s) {
> - gvt_err("fail to find guest page\n");
> + gvt_vgpu_err("fail to find guest page\n");
> ret = -ENXIO;
> goto fail;
> }
> @@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct
> intel_vgpu_guest_page *gpt,
> ppgtt_set_shadow_entry(spt, &e, index);
> return 0;
> fail:
> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
> - vgpu->id, spt, e.val64, e.type);
> + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
> + spt, e.val64, e.type);
> return ret;
> }
>
> @@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct
> intel_vgpu_guest_page *gpt,
> }
> return 0;
> fail:
> - gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
> - spt, we->val64, we->type);
> + gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
> + spt, we->val64, we->type);
> return ret;
> }
>
> @@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
> }
> return 0;
> fail:
> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
> - vgpu->id, spt, we->val64, we->type);
> + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
> + spt, we->val64, we->type);
> return ret;
> }
>
> @@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
>
> spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu,
> &ge);
> if (IS_ERR(spt)) {
> - gvt_err("fail to populate guest root pointer\n");
> + gvt_vgpu_err("fail to populate guest root pointer\n");
> ret = PTR_ERR(spt);
> goto fail;
> }
> @@ -1566,7 +1566,7 @@ struct intel_vgpu_mm
> *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
>
> ret = gtt->mm_alloc_page_table(mm);
> if (ret) {
> - gvt_err("fail to allocate page table for mm\n");
> + gvt_vgpu_err("fail to allocate page table for mm\n");
> goto fail;
> }
>
> @@ -1584,7 +1584,7 @@ struct intel_vgpu_mm
> *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
> }
> return mm;
> fail:
> - gvt_err("fail to create mm\n");
> + gvt_vgpu_err("fail to create mm\n");
> if (mm)
> intel_gvt_mm_unreference(mm);
> return ERR_PTR(ret);
> @@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct
> intel_vgpu_mm *mm, unsigned long gma)
> mm->page_table_level, gma, gpa);
> return gpa;
> err:
> - gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
> + gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
> return INTEL_GVT_INVALID_ADDR;
> }
>
> @@ -1839,8 +1839,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu
> *vgpu, unsigned int off,
> if (ops->test_present(&e)) {
> ret = gtt_entry_p2m(vgpu, &e, &m);
> if (ret) {
> - gvt_err("vgpu%d: fail to translate guest gtt entry\n",
> - vgpu->id);
> + gvt_vgpu_err("fail to translate guest gtt entry\n");
> return ret;
> }
> } else {
> @@ -1896,14 +1895,14 @@ static int alloc_scratch_pages(struct intel_vgpu
> *vgpu,
>
> scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
> if (!scratch_pt) {
> - gvt_err("fail to allocate scratch page\n");
> + gvt_vgpu_err("fail to allocate scratch page\n");
> return -ENOMEM;
> }
>
> daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
> 4096, PCI_DMA_BIDIRECTIONAL);
> if (dma_mapping_error(dev, daddr)) {
> - gvt_err("fail to dmamap scratch_pt\n");
> + gvt_vgpu_err("fail to dmamap scratch_pt\n");
> __free_page(virt_to_page(scratch_pt));
> return -ENOMEM;
> }
> @@ -2006,7 +2005,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
> ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
> NULL, 1, 0);
> if (IS_ERR(ggtt_mm)) {
> - gvt_err("fail to create mm for ggtt.\n");
> + gvt_vgpu_err("fail to create mm for ggtt.\n");
> return PTR_ERR(ggtt_mm);
> }
>
> @@ -2071,7 +2070,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
> for (i = 0; i < preallocated_oos_pages; i++) {
> oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
> if (!oos_page) {
> - gvt_err("fail to pre-allocate oos page\n");
> ret = -ENOMEM;
> goto fail;
> }
> @@ -2161,7 +2159,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct
> intel_vgpu *vgpu,
> mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
> pdp, page_table_level, 0);
> if (IS_ERR(mm)) {
> - gvt_err("fail to create mm\n");
> + gvt_vgpu_err("fail to create mm\n");
> return PTR_ERR(mm);
> }
> }
> @@ -2191,7 +2189,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct
> intel_vgpu *vgpu,
>
> mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
> if (!mm) {
> - gvt_err("fail to find ppgtt instance.\n");
> + gvt_vgpu_err("fail to find ppgtt instance.\n");
> return -EINVAL;
> }
> intel_gvt_mm_unreference(mm);
> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c
> b/drivers/gpu/drm/i915/gvt/handlers.c
> index 1d45062..0a3c481 100644
> --- a/drivers/gpu/drm/i915/gvt/handlers.c
> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> @@ -154,10 +154,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
> 		unsigned int fence_num, void *p_data, unsigned int bytes)
> {
> if (fence_num >= vgpu_fence_sz(vgpu)) {
> - gvt_err("vgpu%d: found oob fence register access\n",
> - vgpu->id);
> - gvt_err("vgpu%d: total fence num %d access fence num %d\n",
> - vgpu->id, vgpu_fence_sz(vgpu), fence_num);
> + gvt_vgpu_err("found oob fence register access\n");
> + gvt_vgpu_err("total fence num %d access fence num %d\n",
> + vgpu_fence_sz(vgpu), fence_num);
> memset(p_data, 0, bytes);
> }
> return 0;
> @@ -219,7 +218,7 @@ static int mul_force_wake_write(struct intel_vgpu
> *vgpu,
> break;
> default:
> /*should not hit here*/
> - gvt_err("invalid forcewake offset 0x%x\n", offset);
> + gvt_vgpu_err("invalid forcewake offset 0x%x\n",
> offset);
> return -EINVAL;
> }
> } else {
> @@ -432,7 +431,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu
> *vgpu,
> fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
> fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
> } else {
> - gvt_err("Invalid train pattern %d\n", train_pattern);
> + gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
> return -EINVAL;
> }
>
> @@ -490,7 +489,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu
> *vgpu,
> else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
> index = FDI_RX_IMR_TO_PIPE(offset);
> else {
> - gvt_err("Unsupport registers %x\n", offset);
> + gvt_vgpu_err("Unsupport registers %x\n", offset);
> return -EINVAL;
> }
>
> @@ -720,7 +719,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu
> *vgpu,
> u32 data;
>
> if (!dpy_is_valid_port(port_index)) {
> - gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
> + gvt_vgpu_err("Unsupported DP port access!\n");
> return 0;
> }
>
> @@ -918,8 +917,7 @@ static void write_virtual_sbi_register(struct intel_vgpu
> *vgpu,
>
> if (i == num) {
> if (num == SBI_REG_MAX) {
> -			gvt_err("vgpu%d: SBI caching meets maximum limits\n",
> -				vgpu->id);
> + gvt_vgpu_err("SBI caching meets maximum limits\n");
> return;
> }
> display->sbi.number++;
> @@ -999,7 +997,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu,
> unsigned int offset,
> break;
> }
> if (invalid_read)
> - gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
> + gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
> offset, bytes, *(u32 *)p_data);
> return 0;
> }
> @@ -1026,7 +1024,7 @@ static int handle_g2v_notification(struct intel_vgpu
> *vgpu, int notification)
> case 1: /* Remove this in guest driver. */
> break;
> default:
> - gvt_err("Invalid PV notification %d\n", notification);
> + gvt_vgpu_err("Invalid PV notification %d\n", notification);
> }
> return ret;
> }
> @@ -1079,7 +1077,7 @@ static int pvinfo_mmio_write(struct intel_vgpu
> *vgpu, unsigned int offset,
> case _vgtif_reg(execlist_context_descriptor_hi):
> break;
> default:
> - gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
> +		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
> offset, bytes, data);
> break;
> }
> @@ -1302,7 +1300,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu,
> unsigned int offset,
> if (execlist->elsp_dwords.index == 3) {
> ret = intel_vgpu_submit_execlist(vgpu, ring_id);
> if(ret)
> - gvt_err("fail submit workload on ring %d\n", ring_id);
> + gvt_vgpu_err("fail submit workload on ring %d\n",
> + ring_id);
> }
>
> ++execlist->elsp_dwords.index;
> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c
> b/drivers/gpu/drm/i915/gvt/kvmgt.c
> index 0f7f5d9..4f08b3b 100644
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct
> kvmgt_guest_info *info,
>
> static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
> {
> - struct intel_vgpu *vgpu;
> + struct intel_vgpu *vgpu = NULL;
> struct intel_vgpu_type *type;
> struct device *pdev;
> void *gvt;
> @@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct
> mdev_device *mdev)
>
> type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
> if (!type) {
> - gvt_err("failed to find type %s to create\n",
> + gvt_vgpu_err("failed to find type %s to create\n",
> kobject_name(kobj));
> ret = -EINVAL;
> goto out;
> @@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct
> mdev_device *mdev)
> vgpu = intel_gvt_ops->vgpu_create(gvt, type);
> if (IS_ERR_OR_NULL(vgpu)) {
> ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
> - gvt_err("failed to create intel vgpu: %d\n", ret);
> + gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
> goto out;
> }
>
> @@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
> ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
> &events,
> &vgpu->vdev.iommu_notifier);
> if (ret != 0) {
> - gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
> + gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
> + ret);
> goto out;
> }
>
> @@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
> ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
> &events,
> &vgpu->vdev.group_notifier);
> if (ret != 0) {
> - gvt_err("vfio_register_notifier for group failed: %d\n", ret);
> + gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
> + ret);
> goto undo_iommu;
> }
>
> @@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device
> *mdev, char *buf,
>
>
> if (index >= VFIO_PCI_NUM_REGIONS) {
> - gvt_err("invalid index: %u\n", index);
> + gvt_vgpu_err("invalid index: %u\n", index);
> return -EINVAL;
> }
>
> @@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device
> *mdev, char *buf,
> case VFIO_PCI_VGA_REGION_INDEX:
> case VFIO_PCI_ROM_REGION_INDEX:
> default:
> - gvt_err("unsupported region: %u\n", index);
> + gvt_vgpu_err("unsupported region: %u\n", index);
> }
>
> return ret == 0 ? count : ret;
> @@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu
> *vgpu,
>
> trigger = eventfd_ctx_fdget(fd);
> if (IS_ERR(trigger)) {
> - gvt_err("eventfd_ctx_fdget failed\n");
> + gvt_vgpu_err("eventfd_ctx_fdget failed\n");
> return PTR_ERR(trigger);
> }
> vgpu->vdev.msi_trigger = trigger;
> @@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device
> *mdev, unsigned int cmd,
> ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
> VFIO_PCI_NUM_IRQS,
> &data_size);
> if (ret) {
> -			gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
> +			gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
> return -EINVAL;
> }
> if (data_size) {
> @@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device
> *mdev)
>
> kvm = vgpu->vdev.kvm;
> if (!kvm || kvm->mm != current->mm) {
> - gvt_err("KVM is required to use Intel vGPU\n");
> + gvt_vgpu_err("KVM is required to use Intel vGPU\n");
> return -ESRCH;
> }
>
> @@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device
> *mdev)
>
> static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
> {
> + struct intel_vgpu *vgpu = info->vgpu;
> +
> if (!info) {
> - gvt_err("kvmgt_guest_info invalid\n");
> + gvt_vgpu_err("kvmgt_guest_info invalid\n");
> return false;
> }
>
> @@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned
> long handle, unsigned long gfn)
> unsigned long iova, pfn;
> struct kvmgt_guest_info *info;
> struct device *dev;
> + struct intel_vgpu *vgpu;
> int rc;
>
> if (!handle_valid(handle))
> return INTEL_GVT_INVALID_ADDR;
>
> info = (struct kvmgt_guest_info *)handle;
> + vgpu = info->vgpu;
> iova = gvt_cache_find(info->vgpu, gfn);
> if (iova != INTEL_GVT_INVALID_ADDR)
> return iova;
> @@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned
> long handle, unsigned long gfn)
> dev = mdev_dev(info->vgpu->vdev.mdev);
> rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE,
> &pfn);
> if (rc != 1) {
> - gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
> + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
> + gfn, rc);
> return INTEL_GVT_INVALID_ADDR;
> }
> /* transfer to host iova for GFX to use DMA */
> rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
> if (rc) {
> - gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
> +		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
> vfio_unpin_pages(dev, &gfn, 1);
> return INTEL_GVT_INVALID_ADDR;
> }
> diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
> index 4df078b..8589ccc 100644
> --- a/drivers/gpu/drm/i915/gvt/mmio.c
> +++ b/drivers/gpu/drm/i915/gvt/mmio.c
> @@ -85,10 +85,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu
> *vgpu, uint64_t pa,
> ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
> p_data, bytes);
> if (ret) {
> - gvt_err("vgpu%d: guest page read error %d, "
> + gvt_vgpu_err("guest page read error %d, "
> 				"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
> - vgpu->id, ret,
> - gp->gfn, pa, *(u32 *)p_data, bytes);
> + ret, gp->gfn, pa, *(u32 *)p_data,
> + bytes);
> }
> mutex_unlock(&gvt->lock);
> return ret;
> @@ -143,14 +143,13 @@ int intel_vgpu_emulate_mmio_read(struct
> intel_vgpu *vgpu, uint64_t pa,
> ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data,
> bytes);
>
> if (!vgpu->mmio.disable_warn_untrack) {
> -			gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
> -				vgpu->id, offset, bytes, *(u32 *)p_data);
> +			gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
> +				offset, bytes, *(u32 *)p_data);
> 
> 			if (offset == 0x206c) {
> -				gvt_err("------------------------------------------\n");
> -				gvt_err("vgpu%d: likely triggers a gfx reset\n",
> -					vgpu->id);
> -				gvt_err("------------------------------------------\n");
> +				gvt_vgpu_err("------------------------------------------\n");
> +				gvt_vgpu_err("likely triggers a gfx reset\n");
> +				gvt_vgpu_err("------------------------------------------\n");
> vgpu->mmio.disable_warn_untrack = true;
> }
> }
> @@ -163,8 +162,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu
> *vgpu, uint64_t pa,
> mutex_unlock(&gvt->lock);
> return 0;
> err:
> - gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
> - vgpu->id, offset, bytes);
> + gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
> + offset, bytes);
> mutex_unlock(&gvt->lock);
> return ret;
> }
> @@ -197,10 +196,11 @@ int intel_vgpu_emulate_mmio_write(struct
> intel_vgpu *vgpu, uint64_t pa,
> if (gp) {
> ret = gp->handler(gp, pa, p_data, bytes);
> if (ret) {
> - gvt_err("vgpu%d: guest page write error %d, "
> -				"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
> - vgpu->id, ret,
> - gp->gfn, pa, *(u32 *)p_data, bytes);
> + gvt_err("guest page write error %d, "
> + "gfn 0x%lx, pa 0x%llx, "
> + "var 0x%x, len %d\n",
> + ret, gp->gfn, pa,
> + *(u32 *)p_data, bytes);
> }
> mutex_unlock(&gvt->lock);
> return ret;
> @@ -267,8 +267,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu
> *vgpu, uint64_t pa,
>
> /* all register bits are RO. */
> if (ro_mask == ~(u64)0) {
> - gvt_err("vgpu%d: try to write RO reg %x\n",
> - vgpu->id, offset);
> + gvt_vgpu_err("try to write RO reg %x\n",
> + offset);
> ret = 0;
> goto out;
> }
> @@ -298,8 +298,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu
> *vgpu, uint64_t pa,
> mutex_unlock(&gvt->lock);
> return 0;
> err:
> - gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
> - vgpu->id, offset, bytes);
> + gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
> + bytes);
> mutex_unlock(&gvt->lock);
> return ret;
> }
> diff --git a/drivers/gpu/drm/i915/gvt/opregion.c
> b/drivers/gpu/drm/i915/gvt/opregion.c
> index d9fb41a..198b498 100644
> --- a/drivers/gpu/drm/i915/gvt/opregion.c
> +++ b/drivers/gpu/drm/i915/gvt/opregion.c
> @@ -68,14 +68,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu,
> bool map)
> 		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
> 			+ i * PAGE_SIZE);
> if (mfn == INTEL_GVT_INVALID_ADDR) {
> - gvt_err("fail to get MFN from VA\n");
> + gvt_vgpu_err("fail to get MFN from VA\n");
> return -EINVAL;
> }
> ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
> vgpu_opregion(vgpu)->gfn[i],
> mfn, 1, map);
> if (ret) {
> - gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
> + gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
> + ret);
> return ret;
> }
> }
> @@ -288,7 +289,7 @@ int intel_vgpu_emulate_opregion_request(struct
> intel_vgpu *vgpu, u32 swsci)
> parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
>
> if (!(swsci & SWSCI_SCI_SELECT)) {
> - gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
> + gvt_vgpu_err("requesting SMI service\n");
> return 0;
> }
> /* ignore non 0->1 trasitions */
> @@ -301,9 +302,8 @@ int intel_vgpu_emulate_opregion_request(struct
> intel_vgpu *vgpu, u32 swsci)
> func = GVT_OPREGION_FUNC(*scic);
> subfunc = GVT_OPREGION_SUBFUNC(*scic);
> if (!querying_capabilities(*scic)) {
> - gvt_err("vgpu%d: requesting runtime service: func \"%s\","
> + gvt_vgpu_err("requesting runtime service: func \"%s\","
> " subfunc \"%s\"\n",
> - vgpu->id,
> opregion_func_name(func),
> opregion_subfunc_name(subfunc));
> /*
> diff --git a/drivers/gpu/drm/i915/gvt/render.c
> b/drivers/gpu/drm/i915/gvt/render.c
> index 2b3a642..3de7a9b 100644
> --- a/drivers/gpu/drm/i915/gvt/render.c
> +++ b/drivers/gpu/drm/i915/gvt/render.c
> @@ -151,7 +151,7 @@ static void handle_tlb_pending_event(struct
> intel_vgpu *vgpu, int ring_id)
> I915_WRITE_FW(reg, 0x1);
>
> if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
> - gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
> + gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
> else
> vgpu_vreg(vgpu, regs[ring_id]) = 0;
>
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c
> b/drivers/gpu/drm/i915/gvt/scheduler.c
> index d6b6d0e..8f203cf 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -84,7 +84,7 @@ static int populate_shadow_context(struct
> intel_vgpu_workload *workload)
> (u32)((workload->ctx_desc.lrca + i) <<
> GTT_PAGE_SHIFT));
> if (context_gpa == INTEL_GVT_INVALID_ADDR) {
> - gvt_err("Invalid guest context descriptor\n");
> + gvt_vgpu_err("Invalid guest context descriptor\n");
> return -EINVAL;
> }
>
> @@ -164,6 +164,7 @@ static int dispatch_workload(struct
> intel_vgpu_workload *workload)
> 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
> struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
> struct drm_i915_gem_request *rq;
> + struct intel_vgpu *vgpu = workload->vgpu;
> int ret;
>
> 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
> @@ -177,7 +178,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>
> rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
> if (IS_ERR(rq)) {
> - gvt_err("fail to allocate gem request\n");
> + gvt_vgpu_err("fail to allocate gem request\n");
> ret = PTR_ERR(rq);
> goto out;
> }
> @@ -310,7 +311,7 @@ static void update_guest_context(struct
> intel_vgpu_workload *workload)
> (u32)((workload->ctx_desc.lrca + i) <<
> GTT_PAGE_SHIFT));
> if (context_gpa == INTEL_GVT_INVALID_ADDR) {
> - gvt_err("invalid guest context descriptor\n");
> + gvt_vgpu_err("invalid guest context descriptor\n");
> return;
> }
>
> @@ -398,6 +399,7 @@ static int workload_thread(void *priv)
> struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> struct intel_vgpu_workload *workload = NULL;
> long lret;
> + struct intel_vgpu *vgpu = NULL;
> int ret;
> bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
> 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
> @@ -440,7 +442,8 @@ static int workload_thread(void *priv)
> mutex_unlock(&gvt->lock);
>
> if (ret) {
> - gvt_err("fail to dispatch workload, skip\n");
> + vgpu = workload->vgpu;
> + gvt_vgpu_err("fail to dispatch workload, skip\n");
> goto complete;
> }
>
> --
> 2.7.4