[PATCH] drm/i915/gvt: replace the gvt_err with gvt_vm_err
Zhi Wang
zhi.a.wang at intel.com
Fri Mar 3 06:06:02 UTC 2017
Hi Kevin:
Can we assume that there will be a struct intel_vgpu * in the current
context when gvt_vgpu_err() is called? It would be nice if we could tell
which vgpu an error message comes from. How about
#define gvt_vgpu_err(fmt, args...) \
xxx_dbg("[vgpu %d] "fmt, ##args)
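For illustration only, a minimal sketch of what such a macro could look
like, assuming a local struct intel_vgpu *vgpu is always in scope at the
call site; the DRM_DEBUG_DRIVER backend and the "gvt:" prefix just follow
the gvt_vm_err definition in the patch below and are not a final proposal:

	/*
	 * Minimal sketch, not the final form: it assumes every caller has a
	 * local "struct intel_vgpu *vgpu", so the vgpu id can be prefixed
	 * automatically instead of being passed in each format string.
	 */
	#define gvt_vgpu_err(fmt, args...) \
		DRM_DEBUG_DRIVER("gvt: vgpu %d: " fmt, vgpu->id, ##args)

A call site like gvt_err("fail to copy guest ring buffer\n") would then
become gvt_vgpu_err("fail to copy guest ring buffer\n"), and the explicit
vgpu->id arguments could be dropped from the individual messages.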
On 03/03/17 13:50, Zhang, Tina wrote:
>
>
>> -----Original Message-----
>> From: Tian, Kevin
>> Sent: Friday, March 3, 2017 1:37 PM
>> To: Zhang, Tina <tina.zhang at intel.com>; intel-gvt-dev at lists.freedesktop.org
>> Cc: Zhang, Tina <tina.zhang at intel.com>
>> Subject: RE: [PATCH] drm/i915/gvt: replace the gvt_err with gvt_vm_err
>>
>>> From: Tina Zhang
>>> Sent: Friday, March 03, 2017 9:45 AM
>>>
>>> gvt_err should be used only for the very few critical error messages
>>> during host i915 driver initialization. This patch 1. removes the
>>> redundant gvt_err calls; 2. creates a new gvt_vm_err to show errors
>>> caused by a vgpu; 3. replaces most of the gvt_err calls with
>>> gvt_vm_err; 4. leaves very few gvt_err calls for dumping gvt errors
>>> during host gvt initialization.
>>
>> gvt_vm_err -> gvt_vgpu_err
> Thanks. I'm going to submit the next version.
>
>>
>>>
>>> Signed-off-by: Tina Zhang <tina.zhang at intel.com>
>>>
>>> diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c
>>> b/drivers/gpu/drm/i915/gvt/aperture_gm.c
>>> index f7bce86..4fd88b6 100644
>>> --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
>>> +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
>>> @@ -75,7 +75,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool
>> high_gm)
>>> if (ret == 0 && ++retried < 3)
>>> goto search_again;
>>>
>>> - gvt_err("fail to alloc %s gm space from host, retried %d\n",
>>> + gvt_vm_err("fail to alloc %s gm space from host, retried %d\n",
>>> high_gm ? "high" : "low", retried);
>>> }
>>> mutex_unlock(&dev_priv->drm.struct_mutex);
>>> @@ -251,7 +251,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
>>> const char *item;
>>>
>>> if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz)
>> {
>>> - gvt_err("Invalid vGPU creation params\n");
>>> + gvt_vm_err("Invalid vGPU creation params\n");
>>> return -EINVAL;
>>> }
>>>
>>> @@ -294,8 +294,8 @@ static int alloc_resource(struct intel_vgpu *vgpu,
>>> return 0;
>>>
>>> no_enough_resource:
>>> - gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
>>> - gvt_err("vgpu%d: request %luMB avail %luMB max %luMB
>> taken %luMB\n",
>>> + gvt_vm_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
>>> + gvt_vm_err("vgpu%d: request %luMB avail %luMB max %luMB taken
>>> +%luMB\n",
>>> vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
>>> BYTES_TO_MB(max), BYTES_TO_MB(taken));
>>> return -ENOSPC;
>>> diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> b/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> index 7a5d7f5..f7ef349 100644
>>> --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
>>> @@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
>>> if (d_info == NULL)
>>> return;
>>>
>>> - gvt_err("opcode=0x%x %s sub_ops:",
>>> + gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
>>> cmd >> (32 - d_info->op_len), d_info->name);
>>>
>>> for (i = 0; i < d_info->nr_sub_op; i++) @@ -693,23 +693,23 @@ static
>>> void parser_exec_state_dump(struct parser_exec_state
>>> *s)
>>> int cnt = 0;
>>> int i;
>>>
>>> - gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
>>> + gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
>>> " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
>>> s->ring_id, s->ring_start, s->ring_start + s->ring_size,
>>> s->ring_head, s->ring_tail);
>>>
>>> - gvt_err(" %s %s ip_gma(%08lx) ",
>>> + gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
>>> s->buf_type == RING_BUFFER_INSTRUCTION ?
>>> "RING_BUFFER" : "BATCH_BUFFER",
>>> s->buf_addr_type == GTT_BUFFER ?
>>> "GTT" : "PPGTT", s->ip_gma);
>>>
>>> if (s->ip_va == NULL) {
>>> - gvt_err(" ip_va(NULL)");
>>> + gvt_vm_err(" ip_va(NULL)");
>>> return;
>>> }
>>>
>>> - gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
>>> + gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
>>> s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
>>> cmd_val(s, 2), cmd_val(s, 3));
>>>
>>> @@ -824,19 +824,19 @@ static int cmd_reg_handler(struct
>> parser_exec_state *s,
>>> struct intel_gvt *gvt = vgpu->gvt;
>>>
>>> if (offset + 4 > gvt->device_info.mmio_size) {
>>> - gvt_err("%s access to (%x) outside of MMIO range\n",
>>> + gvt_vm_err("%s access to (%x) outside of MMIO range\n",
>>> cmd, offset);
>>> return -EINVAL;
>>> }
>>>
>>> if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
>>> - gvt_err("vgpu%d: %s access to non-render register (%x)\n",
>>> + gvt_vm_err("vgpu%d: %s access to non-render register (%x)\n",
>>> s->vgpu->id, cmd, offset);
>>> return 0;
>>> }
>>>
>>> if (is_shadowed_mmio(offset)) {
>>> - gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
>>> + gvt_vm_err("vgpu%d: found access of shadowed MMIO %x\n",
>>> s->vgpu->id, offset);
>>> return 0;
>>> }
>>> @@ -1167,7 +1167,7 @@ static int skl_decode_mi_display_flip(struct
>>> parser_exec_state *s,
>>> break;
>>>
>>> default:
>>> - gvt_err("unknown plane code %d\n", plane);
>>> + gvt_vm_err("unknown plane code %d\n", plane);
>>> return -EINVAL;
>>> }
>>>
>>> @@ -1280,19 +1280,19 @@ static int cmd_handler_mi_display_flip(struct
>>> parser_exec_state *s)
>>>
>>> ret = decode_mi_display_flip(s, &info);
>>> if (ret) {
>>> - gvt_err("fail to decode MI display flip command\n");
>>> + gvt_vm_err("fail to decode MI display flip command\n");
>>> return ret;
>>> }
>>>
>>> ret = check_mi_display_flip(s, &info);
>>> if (ret) {
>>> - gvt_err("invalid MI display flip command\n");
>>> + gvt_vm_err("invalid MI display flip command\n");
>>> return ret;
>>> }
>>>
>>> ret = update_plane_mmio_from_mi_display_flip(s, &info);
>>> if (ret) {
>>> - gvt_err("fail to update plane mmio\n");
>>> + gvt_vm_err("fail to update plane mmio\n");
>>> return ret;
>>> }
>>>
>>> @@ -1350,7 +1350,8 @@ static inline int cmd_address_audit(struct
>>> parser_exec_state *s,
>>> int ret;
>>>
>>> if (op_size > max_surface_size) {
>>> - gvt_err("command address audit fail name %s\n", s->info-
>>> name);
>>> + gvt_vm_err("command address audit fail name %s\n",
>>> + s->info->name);
>>> return -EINVAL;
>>> }
>>>
>>> @@ -1367,7 +1368,7 @@ static inline int cmd_address_audit(struct
>>> parser_exec_state *s,
>>> }
>>> return 0;
>>> err:
>>> - gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
>>> + gvt_vm_err("cmd_parser: Malicious %s detected, addr=0x%lx,
>>> +len=%d!\n",
>>> s->info->name, guest_gma, op_size);
>>>
>>> pr_err("cmd dump: ");
>>> @@ -1412,7 +1413,7 @@ static int cmd_handler_mi_store_data_imm(struct
>>> parser_exec_state *s)
>>>
>>> static inline int unexpected_cmd(struct parser_exec_state *s) {
>>> - gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
>>> + gvt_vm_err("vgpu%d: Unexpected %s in command buffer!\n",
>>> s->vgpu->id, s->info->name);
>>> return -EINVAL;
>>> }
>>> @@ -1516,7 +1517,7 @@ static int copy_gma_to_hva(struct intel_vgpu
>>> *vgpu, struct intel_vgpu_mm *mm,
>>> while (gma != end_gma) {
>>> gpa = intel_vgpu_gma_to_gpa(mm, gma);
>>> if (gpa == INTEL_GVT_INVALID_ADDR) {
>>> - gvt_err("invalid gma address: %lx\n", gma);
>>> + gvt_vm_err("invalid gma address: %lx\n", gma);
>>> return -EFAULT;
>>> }
>>>
>>> @@ -1565,7 +1566,7 @@ static uint32_t find_bb_size(struct
>>> parser_exec_state *s)
>>>
>>> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
>>> if (info == NULL) {
>>> - gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
>>> + gvt_vm_err("unknown cmd 0x%x, opcode=0x%x\n",
>>> cmd, get_opcode(cmd, s->ring_id));
>>> return -EINVAL;
>>> }
>>> @@ -1574,7 +1575,7 @@ static uint32_t find_bb_size(struct
>> parser_exec_state *s)
>>> gma, gma + 4, &cmd);
>>> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
>>> if (info == NULL) {
>>> - gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
>>> + gvt_vm_err("unknown cmd 0x%x, opcode=0x%x\n",
>>> cmd, get_opcode(cmd, s->ring_id));
>>> return -EINVAL;
>>> }
>>> @@ -1633,7 +1634,7 @@ static int perform_bb_shadow(struct
>>> parser_exec_state *s)
>>>
>>> ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
>>> if (ret) {
>>> - gvt_err("failed to set shadow batch to CPU\n");
>>> + gvt_vm_err("failed to set shadow batch to CPU\n");
>>> goto unmap_src;
>>> }
>>>
>>> @@ -1645,7 +1646,7 @@ static int perform_bb_shadow(struct
>> parser_exec_state *s)
>>> gma, gma + bb_size,
>>> dst);
>>> if (ret) {
>>> - gvt_err("fail to copy guest ring buffer\n");
>>> + gvt_vm_err("fail to copy guest ring buffer\n");
>>> goto unmap_src;
>>> }
>>>
>>> @@ -1678,13 +1679,13 @@ static int
>>> cmd_handler_mi_batch_buffer_start(struct
>>> parser_exec_state *s)
>>> int ret = 0;
>>>
>>> if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
>>> - gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
>>> + gvt_vm_err("Found MI_BATCH_BUFFER_START in 2nd level
>> BB\n");
>>> return -EINVAL;
>>> }
>>>
>>> second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
>>> if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
>>> - gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
>>> + gvt_vm_err("Jumping to 2nd level BB from RB is not
>> allowed\n");
>>> return -EINVAL;
>>> }
>>>
>>> @@ -1702,7 +1703,7 @@ static int
>>> cmd_handler_mi_batch_buffer_start(struct
>>> parser_exec_state *s)
>>> if (batch_buffer_needs_scan(s)) {
>>> ret = perform_bb_shadow(s);
>>> if (ret < 0)
>>> - gvt_err("invalid shadow batch buffer\n");
>>> + gvt_vm_err("invalid shadow batch buffer\n");
>>> } else {
>>> /* emulate a batch buffer end to do return right */
>>> ret = cmd_handler_mi_batch_buffer_end(s);
>>> @@ -2436,7 +2437,7 @@ static int cmd_parser_exec(struct
>>> parser_exec_state *s)
>>>
>>> info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
>>> if (info == NULL) {
>>> - gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
>>> + gvt_vm_err("unknown cmd 0x%x, opcode=0x%x\n",
>>> cmd, get_opcode(cmd, s->ring_id));
>>> return -EINVAL;
>>> }
>>> @@ -2452,7 +2453,7 @@ static int cmd_parser_exec(struct
>> parser_exec_state *s)
>>> if (info->handler) {
>>> ret = info->handler(s);
>>> if (ret < 0) {
>>> - gvt_err("%s handler error\n", info->name);
>>> + gvt_vm_err("%s handler error\n", info->name);
>>> return ret;
>>> }
>>> }
>>> @@ -2463,7 +2464,7 @@ static int cmd_parser_exec(struct
>> parser_exec_state *s)
>>> if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
>>> ret = cmd_advance_default(s);
>>> if (ret) {
>>> - gvt_err("%s IP advance error\n", info->name);
>>> + gvt_vm_err("%s IP advance error\n", info->name);
>>> return ret;
>>> }
>>> }
>>> @@ -2497,7 +2498,7 @@ static int command_scan(struct parser_exec_state
>> *s,
>>> if (s->buf_type == RING_BUFFER_INSTRUCTION) {
>>> if (!(s->ip_gma >= rb_start) ||
>>> !(s->ip_gma < gma_bottom)) {
>>> - gvt_err("ip_gma %lx out of ring scope."
>>> + gvt_vm_err("ip_gma %lx out of ring scope."
>>> "(base:0x%lx, bottom: 0x%lx)\n",
>>> s->ip_gma, rb_start,
>>> gma_bottom);
>>> @@ -2505,7 +2506,7 @@ static int command_scan(struct parser_exec_state
>> *s,
>>> return -EINVAL;
>>> }
>>> if (gma_out_of_range(s->ip_gma, gma_head,
>> gma_tail)) {
>>> - gvt_err("ip_gma %lx out of range."
>>> + gvt_vm_err("ip_gma %lx out of range."
>>> "base 0x%lx head 0x%lx tail 0x%lx\n",
>>> s->ip_gma, rb_start,
>>> rb_head, rb_tail);
>>> @@ -2515,7 +2516,7 @@ static int command_scan(struct parser_exec_state
>> *s,
>>> }
>>> ret = cmd_parser_exec(s);
>>> if (ret) {
>>> - gvt_err("cmd parser error\n");
>>> + gvt_vm_err("cmd parser error\n");
>>> parser_exec_state_dump(s);
>>> break;
>>> }
>>> @@ -2639,7 +2640,7 @@ static int shadow_workload_ring_buffer(struct
>>> intel_vgpu_workload *workload)
>>> gma_head, gma_top,
>>> workload->shadow_ring_buffer_va);
>>> if (ret) {
>>> - gvt_err("fail to copy guest ring buffer\n");
>>> + gvt_vm_err("fail to copy guest ring buffer\n");
>>> return ret;
>>> }
>>> copy_len = gma_top - gma_head;
>>> @@ -2651,7 +2652,7 @@ static int shadow_workload_ring_buffer(struct
>>> intel_vgpu_workload *workload)
>>> gma_head, gma_tail,
>>> workload->shadow_ring_buffer_va + copy_len);
>>> if (ret) {
>>> - gvt_err("fail to copy guest ring buffer\n");
>>> + gvt_vm_err("fail to copy guest ring buffer\n");
>>> return ret;
>>> }
>>> ring->tail += workload->rb_len;
>>> @@ -2665,13 +2666,13 @@ int
>> intel_gvt_scan_and_shadow_workload(struct
>>> intel_vgpu_workload *workload)
>>>
>>> ret = shadow_workload_ring_buffer(workload);
>>> if (ret) {
>>> - gvt_err("fail to shadow workload ring_buffer\n");
>>> + gvt_vm_err("fail to shadow workload ring_buffer\n");
>>> return ret;
>>> }
>>>
>>> ret = scan_workload(workload);
>>> if (ret) {
>>> - gvt_err("scan workload error\n");
>>> + gvt_vm_err("scan workload error\n");
>>> return ret;
>>> }
>>> return 0;
>>> @@ -2695,14 +2696,14 @@ static int shadow_indirect_ctx(struct
>>> intel_shadow_wa_ctx
>>> *wa_ctx)
>>> /* get the va of the shadow batch buffer */
>>> map = i915_gem_object_pin_map(obj, I915_MAP_WB);
>>> if (IS_ERR(map)) {
>>> - gvt_err("failed to vmap shadow indirect ctx\n");
>>> + gvt_vm_err("failed to vmap shadow indirect ctx\n");
>>> ret = PTR_ERR(map);
>>> goto put_obj;
>>> }
>>>
>>> ret = i915_gem_object_set_to_cpu_domain(obj, false);
>>> if (ret) {
>>> - gvt_err("failed to set shadow indirect ctx to CPU\n");
>>> + gvt_vm_err("failed to set shadow indirect ctx to CPU\n");
>>> goto unmap_src;
>>> }
>>>
>>> @@ -2711,7 +2712,7 @@ static int shadow_indirect_ctx(struct
>>> intel_shadow_wa_ctx
>>> *wa_ctx)
>>> guest_gma, guest_gma + ctx_size,
>>> map);
>>> if (ret) {
>>> - gvt_err("fail to copy guest indirect ctx\n");
>>> + gvt_vm_err("fail to copy guest indirect ctx\n");
>>> goto unmap_src;
>>> }
>>>
>>> @@ -2751,7 +2752,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct
>>> intel_shadow_wa_ctx *wa_ctx)
>>>
>>> ret = shadow_indirect_ctx(wa_ctx);
>>> if (ret) {
>>> - gvt_err("fail to shadow indirect ctx\n");
>>> + gvt_vm_err("fail to shadow indirect ctx\n");
>>> return ret;
>>> }
>>>
>>> @@ -2759,7 +2760,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct
>>> intel_shadow_wa_ctx *wa_ctx)
>>>
>>> ret = scan_wa_ctx(wa_ctx);
>>> if (ret) {
>>> - gvt_err("scan wa ctx error\n");
>>> + gvt_vm_err("scan wa ctx error\n");
>>> return ret;
>>> }
>>>
>>> diff --git a/drivers/gpu/drm/i915/gvt/debug.h
>>> b/drivers/gpu/drm/i915/gvt/debug.h
>>> index 68cba7b..9c9e42a 100644
>>> --- a/drivers/gpu/drm/i915/gvt/debug.h
>>> +++ b/drivers/gpu/drm/i915/gvt/debug.h
>>> @@ -27,6 +27,9 @@
>>> #define gvt_err(fmt, args...) \
>>> DRM_ERROR("gvt: "fmt, ##args)
>>>
>>> +#define gvt_vm_err(fmt, args...) \
>>> + DRM_DEBUG_DRIVER("gvt: vm: "fmt, ##args)
>>> +
>>> #define gvt_dbg_core(fmt, args...) \
>>> DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gvt/edid.c
>>> b/drivers/gpu/drm/i915/gvt/edid.c index bda85df..6e166b5 100644
>>> --- a/drivers/gpu/drm/i915/gvt/edid.c
>>> +++ b/drivers/gpu/drm/i915/gvt/edid.c
>>> @@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu
>> *vgpu)
>>> unsigned char chr = 0;
>>>
>>> if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
>>> - gvt_err("Driver tries to read EDID without proper
>> sequence!\n");
>>> + gvt_vm_err("Driver tries to read EDID without proper
>> sequence!\n");
>>> return 0;
>>> }
>>> if (edid->current_edid_read >= EDID_SIZE) {
>>> - gvt_err("edid_get_byte() exceeds the size of EDID!\n");
>>> + gvt_vm_err("edid_get_byte() exceeds the size of EDID!\n");
>>> return 0;
>>> }
>>>
>>> if (!edid->edid_available) {
>>> - gvt_err("Reading EDID but EDID is not available!\n");
>>> + gvt_vm_err("Reading EDID but EDID is not available!\n");
>>> return 0;
>>> }
>>>
>>> @@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu
>> *vgpu)
>>> chr = edid_data->edid_block[edid->current_edid_read];
>>> edid->current_edid_read++;
>>> } else {
>>> - gvt_err("No EDID available during the reading?\n");
>>> + gvt_vm_err("No EDID available during the reading?\n");
>>> }
>>> return chr;
>>> }
>>> @@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu
>>> *vgpu, unsigned int offset,
>>> vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
>>> break;
>>> default:
>>> - gvt_err("Unknown/reserved GMBUS cycle
>> detected!\n");
>>> + gvt_vm_err("Unknown/reserved GMBUS cycle
>> detected!\n");
>>> break;
>>> }
>>> /*
>>> @@ -292,7 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu
>>> *vgpu, unsigned int offset,
>>> */
>>> } else {
>>> memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
>>> - gvt_err("vgpu%d: warning: gmbus3 read with nothing
>> returned\n",
>>> + gvt_vm_err("vgpu%d: warning: gmbus3 read with nothing
>> returned\n",
>>> vgpu->id);
>>> }
>>> return 0;
>>> diff --git a/drivers/gpu/drm/i915/gvt/execlist.c
>>> b/drivers/gpu/drm/i915/gvt/execlist.c
>>> index 46eb9fd..601cb5c 100644
>>> --- a/drivers/gpu/drm/i915/gvt/execlist.c
>>> +++ b/drivers/gpu/drm/i915/gvt/execlist.c
>>> @@ -183,7 +183,7 @@ static int emulate_execlist_ctx_schedule_out(
>>> gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
>>>
>>> if (WARN_ON(!same_context(ctx, execlist->running_context))) {
>>> - gvt_err("schedule out context is not running context,"
>>> + gvt_vm_err("schedule out context is not running context,"
>>> "ctx id %x running ctx id %x\n",
>>> ctx->context_id,
>>> execlist->running_context->context_id);
>>> @@ -254,7 +254,7 @@ static struct intel_vgpu_execlist_slot
>> *get_next_execlist_slot(
>>> status.udw = vgpu_vreg(vgpu, status_reg + 4);
>>>
>>> if (status.execlist_queue_full) {
>>> - gvt_err("virtual execlist slots are full\n");
>>> + gvt_vm_err("virtual execlist slots are full\n");
>>> return NULL;
>>> }
>>>
>>> @@ -274,7 +274,7 @@ static int emulate_execlist_schedule_in(struct
>>> intel_vgpu_execlist *execlist,
>>> gvt_dbg_el("emulate schedule-in\n");
>>>
>>> if (!slot) {
>>> - gvt_err("no available execlist slot\n");
>>> + gvt_vm_err("no available execlist slot\n");
>>> return -EINVAL;
>>> }
>>>
>>> @@ -375,7 +375,6 @@ static void prepare_shadow_batch_buffer(struct
>>> intel_vgpu_workload *workload)
>>>
>>> vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4,
>> 0);
>>> if (IS_ERR(vma)) {
>>> - gvt_err("Cannot pin\n");
>>> return;
>>> }
>>>
>>> @@ -428,7 +427,6 @@ static void prepare_shadow_wa_ctx(struct
>>> intel_shadow_wa_ctx
>>> *wa_ctx)
>>> vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
>>> 0, CACHELINE_BYTES, 0);
>>> if (IS_ERR(vma)) {
>>> - gvt_err("Cannot pin indirect ctx obj\n");
>>> return;
>>> }
>>>
>>> @@ -569,7 +567,7 @@ static int prepare_mm(struct intel_vgpu_workload
>> *workload)
>>> } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
>>> page_table_level = 4;
>>> } else {
>>> - gvt_err("Advanced Context mode(SVM) is not supported!\n");
>>> + gvt_vm_err("Advanced Context mode(SVM) is not
>> supported!\n");
>>> return -EINVAL;
>>> }
>>>
>>> @@ -583,7 +581,7 @@ static int prepare_mm(struct intel_vgpu_workload
>> *workload)
>>> mm = intel_vgpu_create_mm(workload->vgpu,
>> INTEL_GVT_MM_PPGTT,
>>> pdp, page_table_level, 0);
>>> if (IS_ERR(mm)) {
>>> - gvt_err("fail to create mm object.\n");
>>> + gvt_vm_err("fail to create mm object.\n");
>>> return PTR_ERR(mm);
>>> }
>>> }
>>> @@ -609,7 +607,7 @@ static int submit_context(struct intel_vgpu *vgpu, int
>> ring_id,
>>> ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
>>> (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
>>> if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
>>> - gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
>>> + gvt_vm_err("invalid guest context LRCA: %x\n", desc->lrca);
>>> return -EINVAL;
>>> }
>>>
>>> @@ -724,7 +722,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu
>> *vgpu, int ring_id)
>>> continue;
>>>
>>> if (!desc[i]->privilege_access) {
>>> - gvt_err("vgpu%d: unexpected GGTT elsp
>> submission\n",
>>> + gvt_vm_err("vgpu%d: unexpected GGTT elsp
>> submission\n",
>>> vgpu->id);
>>> return -EINVAL;
>>> }
>>> @@ -735,14 +733,14 @@ int intel_vgpu_submit_execlist(struct intel_vgpu
>>> *vgpu, int
>>> ring_id)
>>> }
>>>
>>> if (!valid_desc_bitmap) {
>>> - gvt_err("vgpu%d: no valid desc in a elsp submission\n",
>>> + gvt_vm_err("vgpu%d: no valid desc in a elsp submission\n",
>>> vgpu->id);
>>> return -EINVAL;
>>> }
>>>
>>> if (!test_bit(0, (void *)&valid_desc_bitmap) &&
>>> test_bit(1, (void *)&valid_desc_bitmap)) {
>>> - gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
>>> + gvt_vm_err("vgpu%d: weird elsp submission, desc 0 is not
>> valid\n",
>>> vgpu->id);
>>> return -EINVAL;
>>> }
>>> @@ -752,7 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu
>> *vgpu, int ring_id)
>>> ret = submit_context(vgpu, ring_id, &valid_desc[i],
>>> emulate_schedule_in);
>>> if (ret) {
>>> - gvt_err("vgpu%d: fail to schedule workload\n",
>>> + gvt_vm_err("vgpu%d: fail to schedule workload\n",
>>> vgpu->id);
>>> return ret;
>>> }
>>> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c
>>> b/drivers/gpu/drm/i915/gvt/gtt.c index 6a5ff23..1cca3e3 100644
>>> --- a/drivers/gpu/drm/i915/gvt/gtt.c
>>> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
>>> @@ -49,7 +49,7 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu
>>> *vgpu, u64 addr,
>>> u32 size)
>>> {
>>> if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
>>> && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
>>> - gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
>>> + gvt_vm_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
>>> vgpu->id, addr, size);
>>> return false;
>>> }
>>> @@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu,
>>> struct intel_gvt_gtt_entry *p,
>>>
>>> mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
>>> if (mfn == INTEL_GVT_INVALID_ADDR) {
>>> - gvt_err("fail to translate gfn: 0x%lx\n", gfn);
>>> + gvt_vm_err("fail to translate gfn: 0x%lx\n", gfn);
>>> return -ENXIO;
>>> }
>>>
>>> @@ -611,7 +611,7 @@ static inline int init_shadow_page(struct
>>> intel_vgpu *vgpu,
>>>
>>> daddr = dma_map_page(kdev, p->page, 0, 4096,
>> PCI_DMA_BIDIRECTIONAL);
>>> if (dma_mapping_error(kdev, daddr)) {
>>> - gvt_err("fail to map dma addr\n");
>>> + gvt_vm_err("fail to map dma addr\n");
>>> return -EINVAL;
>>> }
>>>
>>> @@ -735,7 +735,7 @@ static struct intel_vgpu_ppgtt_spt
>> *ppgtt_alloc_shadow_page(
>>> if (reclaim_one_mm(vgpu->gvt))
>>> goto retry;
>>>
>>> - gvt_err("fail to allocate ppgtt shadow page\n");
>>> + gvt_vm_err("fail to allocate ppgtt shadow page\n");
>>> return ERR_PTR(-ENOMEM);
>>> }
>>>
>>> @@ -750,14 +750,14 @@ static struct intel_vgpu_ppgtt_spt
>> *ppgtt_alloc_shadow_page(
>>> */
>>> ret = init_shadow_page(vgpu, &spt->shadow_page, type);
>>> if (ret) {
>>> - gvt_err("fail to initialize shadow page for spt\n");
>>> + gvt_vm_err("fail to initialize shadow page for spt\n");
>>> goto err;
>>> }
>>>
>>> ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
>>> gfn, ppgtt_write_protection_handler, NULL);
>>> if (ret) {
>>> - gvt_err("fail to initialize guest page for spt\n");
>>> + gvt_vm_err("fail to initialize guest page for spt\n");
>>> goto err;
>>> }
>>>
>>> @@ -776,7 +776,7 @@ static struct intel_vgpu_ppgtt_spt
>> *ppgtt_find_shadow_page(
>>> if (p)
>>> return shadow_page_to_ppgtt_spt(p);
>>>
>>> - gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
>>> + gvt_vm_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
>>> vgpu->id, mfn);
>>> return NULL;
>>> }
>>> @@ -827,7 +827,7 @@ static int
>>> ppgtt_invalidate_shadow_page_by_shadow_entry(struct
>>> intel_vgpu *vgpu,
>>> }
>>> s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
>>> if (!s) {
>>> - gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
>>> + gvt_vm_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
>>> vgpu->id, ops->get_pfn(e));
>>> return -ENXIO;
>>> }
>>> @@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct
>>> intel_vgpu_ppgtt_spt *spt)
>>>
>>> for_each_present_shadow_entry(spt, &e, index) {
>>> if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
>>> - gvt_err("GVT doesn't support pse bit for now\n");
>>> + gvt_vm_err("GVT doesn't support pse bit for now\n");
>>> return -EINVAL;
>>> }
>>> ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
>>> @@ -868,7 +868,7 @@ static int ppgtt_invalidate_shadow_page(struct
>>> intel_vgpu_ppgtt_spt *spt)
>>> ppgtt_free_shadow_page(spt);
>>> return 0;
>>> fail:
>>> - gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx
>> type %d\n",
>>> + gvt_vm_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type
>>> +%d\n",
>>> spt->vgpu->id, spt, e.val64, e.type);
>>> return ret;
>>> }
>>> @@ -914,7 +914,7 @@ static struct intel_vgpu_ppgtt_spt
>>> *ppgtt_populate_shadow_page_by_guest_entry(
>>> }
>>> return s;
>>> fail:
>>> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
>>> + gvt_vm_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type
>>> +%d\n",
>>> vgpu->id, s, we->val64, we->type);
>>> return ERR_PTR(ret);
>>> }
>>> @@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct
>>> intel_vgpu_ppgtt_spt *spt)
>>>
>>> for_each_present_guest_entry(spt, &ge, i) {
>>> if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
>>> - gvt_err("GVT doesn't support pse bit now\n");
>>> + gvt_vm_err("GVT doesn't support pse bit now\n");
>>> ret = -EINVAL;
>>> goto fail;
>>> }
>>> @@ -969,7 +969,7 @@ static int ppgtt_populate_shadow_page(struct
>>> intel_vgpu_ppgtt_spt *spt)
>>> }
>>> return 0;
>>> fail:
>>> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
>>> + gvt_vm_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type
>>> +%d\n",
>>> vgpu->id, spt, ge.val64, ge.type);
>>> return ret;
>>> }
>>> @@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct
>>> intel_vgpu_guest_page *gpt,
>>> struct intel_vgpu_ppgtt_spt *s =
>>> ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
>>> if (!s) {
>>> - gvt_err("fail to find guest page\n");
>>> + gvt_vm_err("fail to find guest page\n");
>>> ret = -ENXIO;
>>> goto fail;
>>> }
>>> @@ -1011,7 +1011,7 @@ static int
>>> ppgtt_handle_guest_entry_removal(struct
>>> intel_vgpu_guest_page *gpt,
>>> ppgtt_set_shadow_entry(spt, &e, index);
>>> return 0;
>>> fail:
>>> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
>>> + gvt_vm_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type
>>> +%d\n",
>>> vgpu->id, spt, e.val64, e.type);
>>> return ret;
>>> }
>>> @@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct
>>> intel_vgpu_guest_page *gpt,
>>> }
>>> return 0;
>>> fail:
>>> - gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
>>> - spt, we->val64, we->type);
>>> + gvt_vm_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n",
>>> + vgpu->id, spt, we->val64, we->type);
>>> return ret;
>>> }
>>>
>>> @@ -1250,7 +1250,7 @@ static int ppgtt_handle_guest_write_page_table(
>>> }
>>> return 0;
>>> fail:
>>> - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
>>> + gvt_vm_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type
>>> +%d.\n",
>>> vgpu->id, spt, we->val64, we->type);
>>> return ret;
>>> }
>>> @@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
>>>
>>> spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu,
>> &ge);
>>> if (IS_ERR(spt)) {
>>> - gvt_err("fail to populate guest root pointer\n");
>>> + gvt_vm_err("fail to populate guest root pointer\n");
>>> ret = PTR_ERR(spt);
>>> goto fail;
>>> }
>>> @@ -1566,7 +1566,7 @@ struct intel_vgpu_mm
>>> *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
>>>
>>> ret = gtt->mm_alloc_page_table(mm);
>>> if (ret) {
>>> - gvt_err("fail to allocate page table for mm\n");
>>> + gvt_vm_err("fail to allocate page table for mm\n");
>>> goto fail;
>>> }
>>>
>>> @@ -1584,7 +1584,7 @@ struct intel_vgpu_mm
>>> *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
>>> }
>>> return mm;
>>> fail:
>>> - gvt_err("fail to create mm\n");
>>> + gvt_vm_err("fail to create mm\n");
>>> if (mm)
>>> intel_gvt_mm_unreference(mm);
>>> return ERR_PTR(ret);
>>> @@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct
>>> intel_vgpu_mm *mm, unsigned long gma)
>>> mm->page_table_level, gma, gpa);
>>> return gpa;
>>> err:
>>> - gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
>>> + gvt_vm_err("invalid mm type: %d gma %lx\n", mm->type, gma);
>>> return INTEL_GVT_INVALID_ADDR;
>>> }
>>>
>>> @@ -1836,7 +1836,7 @@ static int emulate_gtt_mmio_write(struct
>>> intel_vgpu *vgpu, unsigned int off,
>>> if (ops->test_present(&e)) {
>>> ret = gtt_entry_p2m(vgpu, &e, &m);
>>> if (ret) {
>>> - gvt_err("vgpu%d: fail to translate guest gtt entry\n",
>>> + gvt_vm_err("vgpu%d: fail to translate guest gtt
>> entry\n",
>>> vgpu->id);
>>> return ret;
>>> }
>>> @@ -1893,14 +1893,14 @@ static int alloc_scratch_pages(struct
>>> intel_vgpu *vgpu,
>>>
>>> scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
>>> if (!scratch_pt) {
>>> - gvt_err("fail to allocate scratch page\n");
>>> + gvt_vm_err("fail to allocate scratch page\n");
>>> return -ENOMEM;
>>> }
>>>
>>> daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
>>> 4096, PCI_DMA_BIDIRECTIONAL);
>>> if (dma_mapping_error(dev, daddr)) {
>>> - gvt_err("fail to dmamap scratch_pt\n");
>>> + gvt_vm_err("fail to dmamap scratch_pt\n");
>>> __free_page(virt_to_page(scratch_pt));
>>> return -ENOMEM;
>>> }
>>> @@ -2003,7 +2003,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
>>> ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
>>> NULL, 1, 0);
>>> if (IS_ERR(ggtt_mm)) {
>>> - gvt_err("fail to create mm for ggtt.\n");
>>> + gvt_vm_err("fail to create mm for ggtt.\n");
>>> return PTR_ERR(ggtt_mm);
>>> }
>>>
>>> @@ -2076,7 +2076,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
>>> for (i = 0; i < preallocated_oos_pages; i++) {
>>> oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
>>> if (!oos_page) {
>>> - gvt_err("fail to pre-allocate oos page\n");
>>> ret = -ENOMEM;
>>> goto fail;
>>> }
>>> @@ -2166,7 +2165,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct
>> intel_vgpu *vgpu,
>>> mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
>>> pdp, page_table_level, 0);
>>> if (IS_ERR(mm)) {
>>> - gvt_err("fail to create mm\n");
>>> + gvt_vm_err("fail to create mm\n");
>>> return PTR_ERR(mm);
>>> }
>>> }
>>> @@ -2196,7 +2195,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct
>>> intel_vgpu *vgpu,
>>>
>>> mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
>>> if (!mm) {
>>> - gvt_err("fail to find ppgtt instance.\n");
>>> + gvt_vm_err("fail to find ppgtt instance.\n");
>>> return -EINVAL;
>>> }
>>> intel_gvt_mm_unreference(mm);
>>> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c
>>> b/drivers/gpu/drm/i915/gvt/handlers.c
>>> index 1934ea0..37591a5 100644
>>> --- a/drivers/gpu/drm/i915/gvt/handlers.c
>>> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
>>> @@ -116,7 +116,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
>>> info->offset = i;
>>> p = intel_gvt_find_mmio_info(gvt, info->offset);
>>> if (p)
>>> - gvt_err("dup mmio definition offset %x\n",
>>> + gvt_vm_err("dup mmio definition offset %x\n",
>>> info->offset);
>>> info->size = size;
>>> info->length = (i + 4) < end ? 4 : (end - i); @@ -180,9 +180,9
>> @@
>>> static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
>>>
>> GVT_FAILSAFE_UNSUPPORTED_GUEST);
>>>
>>> if (!vgpu->mmio.disable_warn_untrack) {
>>> - gvt_err("vgpu%d: found oob fence register access\n",
>>> + gvt_vm_err("vgpu%d: found oob fence register
>> access\n",
>>> vgpu->id);
>>> - gvt_err("vgpu%d: total fence %d, access fence %d\n",
>>> + gvt_vm_err("vgpu%d: total fence %d, access
>> fence %d\n",
>>> vgpu->id, vgpu_fence_sz(vgpu),
>>> fence_num);
>>> }
>>> @@ -248,7 +248,7 @@ static int mul_force_wake_write(struct intel_vgpu
>> *vgpu,
>>> break;
>>> default:
>>> /*should not hit here*/
>>> - gvt_err("invalid forcewake offset 0x%x\n", offset);
>>> + gvt_vm_err("invalid forcewake offset 0x%x\n", offset);
>>> return -EINVAL;
>>> }
>>> } else {
>>> @@ -451,7 +451,7 @@ static int force_nonpriv_write(struct intel_vgpu
>> *vgpu,
>>> int ret = -EINVAL;
>>>
>>> if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
>>> - gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
>>> + gvt_vm_err("vgpu(%d) Invalid FORCE_NONPRIV
>> offset %x(%dB)\n",
>>> vgpu->id, offset, bytes);
>>> return ret;
>>> }
>>> @@ -460,7 +460,7 @@ static int force_nonpriv_write(struct intel_vgpu
>> *vgpu,
>>> ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
>>> bytes);
>>> } else {
>>> - gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
>>> + gvt_vm_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
>>> vgpu->id, reg_nonpriv);
>>> }
>>> return ret;
>>> @@ -529,7 +529,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu
>> *vgpu,
>>> fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
>>> fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
>>> } else {
>>> - gvt_err("Invalid train pattern %d\n", train_pattern);
>>> + gvt_vm_err("Invalid train pattern %d\n", train_pattern);
>>> return -EINVAL;
>>> }
>>>
>>> @@ -587,7 +587,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu
>> *vgpu,
>>> else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
>>> index = FDI_RX_IMR_TO_PIPE(offset);
>>> else {
>>> - gvt_err("Unsupport registers %x\n", offset);
>>> + gvt_vm_err("Unsupport registers %x\n", offset);
>>> return -EINVAL;
>>> }
>>>
>>> @@ -817,7 +817,7 @@ static int dp_aux_ch_ctl_mmio_write(struct
>> intel_vgpu *vgpu,
>>> u32 data;
>>>
>>> if (!dpy_is_valid_port(port_index)) {
>>> - gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
>>> + gvt_vm_err("GVT(%d): Unsupported DP port access!\n", vgpu-
>>> id);
>>> return 0;
>>> }
>>>
>>> @@ -1015,7 +1015,7 @@ static void write_virtual_sbi_register(struct
>>> intel_vgpu *vgpu,
>>>
>>> if (i == num) {
>>> if (num == SBI_REG_MAX) {
>>> - gvt_err("vgpu%d: SBI caching meets maximum
>> limits\n",
>>> + gvt_vm_err("vgpu%d: SBI caching meets maximum
>> limits\n",
>>> vgpu->id);
>>> return;
>>> }
>>> @@ -1096,7 +1096,7 @@ static int pvinfo_mmio_read(struct intel_vgpu
>>> *vgpu, unsigned int offset,
>>> break;
>>> }
>>> if (invalid_read)
>>> - gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
>>> + gvt_vm_err("invalid pvinfo read: [%x:%x] = %x\n",
>>> offset, bytes, *(u32 *)p_data);
>>> vgpu->pv_notified = true;
>>> return 0;
>>> @@ -1124,7 +1124,7 @@ static int handle_g2v_notification(struct
>>> intel_vgpu *vgpu, int
>>> notification)
>>> case 1: /* Remove this in guest driver. */
>>> break;
>>> default:
>>> - gvt_err("Invalid PV notification %d\n", notification);
>>> + gvt_vm_err("Invalid PV notification %d\n", notification);
>>> }
>>> return ret;
>>> }
>>> @@ -1180,7 +1180,7 @@ static int pvinfo_mmio_write(struct intel_vgpu
>>> *vgpu, unsigned int offset,
>>> enter_failsafe_mode(vgpu,
>> GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
>>> break;
>>> default:
>>> - gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
>>> + gvt_vm_err("invalid pvinfo write offset %x bytes %x data %x\n",
>>> offset, bytes, data);
>>> break;
>>> }
>>> @@ -1414,7 +1414,8 @@ static int elsp_mmio_write(struct intel_vgpu
>>> *vgpu, unsigned int offset,
>>> if (execlist->elsp_dwords.index == 3) {
>>> ret = intel_vgpu_submit_execlist(vgpu, ring_id);
>>> if(ret)
>>> - gvt_err("fail submit workload on ring %d\n", ring_id);
>>> + gvt_vm_err("fail submit workload on ring %d\n",
>>> + ring_id);
>>> }
>>>
>>> ++execlist->elsp_dwords.index;
>>> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c
>>> b/drivers/gpu/drm/i915/gvt/kvmgt.c
>>> index f07cb8b..9bf08fb 100644
>>> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
>>> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
>>> @@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj,
>>> struct mdev_device *mdev)
>>>
>>> type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
>>> if (!type) {
>>> - gvt_err("failed to find type %s to create\n",
>>> + gvt_vm_err("failed to find type %s to create\n",
>>> kobject_name(kobj));
>>> ret = -EINVAL;
>>> goto out;
>>> @@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj,
>>> struct mdev_device *mdev)
>>> vgpu = intel_gvt_ops->vgpu_create(gvt, type);
>>> if (IS_ERR_OR_NULL(vgpu)) {
>>> ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
>>> - gvt_err("failed to create intel vgpu: %d\n", ret);
>>> + gvt_vm_err("failed to create intel vgpu: %d\n", ret);
>>> goto out;
>>> }
>>>
>>> @@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device
>> *mdev)
>>> ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
>> &events,
>>> &vgpu->vdev.iommu_notifier);
>>> if (ret != 0) {
>>> - gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
>>> + gvt_vm_err("vfio_register_notifier for iommu failed: %d\n",
>>> + ret);
>>> goto out;
>>> }
>>>
>>> @@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device
>> *mdev)
>>> ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
>> &events,
>>> &vgpu->vdev.group_notifier);
>>> if (ret != 0) {
>>> - gvt_err("vfio_register_notifier for group failed: %d\n", ret);
>>> + gvt_vm_err("vfio_register_notifier for group failed: %d\n",
>>> + ret);
>>> goto undo_iommu;
>>> }
>>>
>>> @@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device
>>> *mdev, char *buf,
>>>
>>>
>>> if (index >= VFIO_PCI_NUM_REGIONS) {
>>> - gvt_err("invalid index: %u\n", index);
>>> + gvt_vm_err("invalid index: %u\n", index);
>>> return -EINVAL;
>>> }
>>>
>>> @@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device
>>> *mdev, char *buf,
>>> case VFIO_PCI_VGA_REGION_INDEX:
>>> case VFIO_PCI_ROM_REGION_INDEX:
>>> default:
>>> - gvt_err("unsupported region: %u\n", index);
>>> + gvt_vm_err("unsupported region: %u\n", index);
>>> }
>>>
>>> return ret == 0 ? count : ret;
>>> @@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct
>>> intel_vgpu *vgpu,
>>>
>>> trigger = eventfd_ctx_fdget(fd);
>>> if (IS_ERR(trigger)) {
>>> - gvt_err("eventfd_ctx_fdget failed\n");
>>> + gvt_vm_err("eventfd_ctx_fdget failed\n");
>>> return PTR_ERR(trigger);
>>> }
>>> vgpu->vdev.msi_trigger = trigger;
>>> @@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device
>>> *mdev, unsigned int cmd,
>>> ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
>>> VFIO_PCI_NUM_IRQS,
>> &data_size);
>>> if (ret) {
>>> - gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
>>> + gvt_vm_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
>>> return -EINVAL;
>>> }
>>> if (data_size) {
>>> @@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device
>>> *mdev)
>>>
>>> kvm = vgpu->vdev.kvm;
>>> if (!kvm || kvm->mm != current->mm) {
>>> - gvt_err("KVM is required to use Intel vGPU\n");
>>> + gvt_vm_err("KVM is required to use Intel vGPU\n");
>>> return -ESRCH;
>>> }
>>>
>>> @@ -1338,7 +1340,7 @@ static int kvmgt_guest_init(struct mdev_device
>>> *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) {
>>> if (!info) {
>>> - gvt_err("kvmgt_guest_info invalid\n");
>>> + gvt_vm_err("kvmgt_guest_info invalid\n");
>>> return false;
>>> }
>>>
>>> @@ -1397,13 +1399,14 @@ static unsigned long
>> kvmgt_gfn_to_pfn(unsigned
>>> long handle, unsigned long gfn)
>>> dev = mdev_dev(info->vgpu->vdev.mdev);
>>> rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE,
>> &pfn);
>>> if (rc != 1) {
>>> - gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
>>> + gvt_vm_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
>>> + gfn, rc);
>>> return INTEL_GVT_INVALID_ADDR;
>>> }
>>> /* transfer to host iova for GFX to use DMA */
>>> rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
>>> if (rc) {
>>> - gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
>>> + gvt_vm_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
>>> vfio_unpin_pages(dev, &gfn, 1);
>>> return INTEL_GVT_INVALID_ADDR;
>>> }
>>> diff --git a/drivers/gpu/drm/i915/gvt/mmio.c
>>> b/drivers/gpu/drm/i915/gvt/mmio.c index 60b698c..347c6c7 100644
>>> --- a/drivers/gpu/drm/i915/gvt/mmio.c
>>> +++ b/drivers/gpu/drm/i915/gvt/mmio.c
>>> @@ -142,7 +142,7 @@ int intel_vgpu_emulate_mmio_read(struct
>> intel_vgpu
>>> *vgpu, uint64_t pa,
>>> ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
>>> p_data, bytes);
>>> if (ret) {
>>> - gvt_err("vgpu%d: guest page read error %d, "
>>> + gvt_vm_err("vgpu%d: guest page read
>> error %d, "
>>> "gfn 0x%lx, pa 0x%llx, var 0x%x,
>> len %d\n",
>>> vgpu->id, ret,
>>> gp->gfn, pa, *(u32 *)p_data, bytes);
>> @@ -200,14 +200,14 @@ int
>>> intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
>>> ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data,
>> bytes);
>>>
>>> if (!vgpu->mmio.disable_warn_untrack) {
>>> - gvt_err("vgpu%d: read untracked MMIO %x(%dB)
>> val %x\n",
>>> + gvt_vm_err("vgpu%d: read untracked MMIO %x(%dB)
>> val %x\n",
>>> vgpu->id, offset, bytes, *(u32 *)p_data);
>>>
>>> if (offset == 0x206c) {
>>> - gvt_err("------------------------------------------\n");
>>> - gvt_err("vgpu%d: likely triggers a gfx reset\n",
>>> + gvt_vm_err("------------------------------------------
>> \n");
>>> + gvt_vm_err("vgpu%d: likely triggers a gfx
>> reset\n",
>>> vgpu->id);
>>> - gvt_err("------------------------------------------\n");
>>> + gvt_vm_err("------------------------------------------
>> \n");
>>> vgpu->mmio.disable_warn_untrack = true;
>>> }
>>> }
>>> @@ -220,7 +220,7 @@ int intel_vgpu_emulate_mmio_read(struct
>> intel_vgpu
>>> *vgpu, uint64_t pa,
>>> mutex_unlock(&gvt->lock);
>>> return 0;
>>> err:
>>> - gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
>>> + gvt_vm_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
>>> vgpu->id, offset, bytes);
>>> mutex_unlock(&gvt->lock);
>>> return ret;
>>> @@ -259,7 +259,7 @@ int intel_vgpu_emulate_mmio_write(struct
>>> intel_vgpu *vgpu, uint64_t pa,
>>> if (gp) {
>>> ret = gp->handler(gp, pa, p_data, bytes);
>>> if (ret) {
>>> - gvt_err("vgpu%d: guest page write error %d, "
>>> + gvt_vm_err("vgpu%d: guest page write
>> error %d, "
>>> "gfn 0x%lx, pa 0x%llx, var 0x%x,
>> len %d\n",
>>> vgpu->id, ret,
>>> gp->gfn, pa, *(u32 *)p_data, bytes);
>> @@ -329,7 +329,7 @@ int
>>> intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
>>>
>>> /* all register bits are RO. */
>>> if (ro_mask == ~(u64)0) {
>>> - gvt_err("vgpu%d: try to write RO reg %x\n",
>>> + gvt_vm_err("vgpu%d: try to write RO
>> reg %x\n",
>>> vgpu->id, offset);
>>> ret = 0;
>>> goto out;
>>> @@ -360,7 +360,7 @@ int intel_vgpu_emulate_mmio_write(struct
>>> intel_vgpu *vgpu, uint64_t pa,
>>> mutex_unlock(&gvt->lock);
>>> return 0;
>>> err:
>>> - gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
>>> + gvt_vm_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
>>> vgpu->id, offset, bytes);
>>> mutex_unlock(&gvt->lock);
>>> return ret;
>>> diff --git a/drivers/gpu/drm/i915/gvt/opregion.c
>>> b/drivers/gpu/drm/i915/gvt/opregion.c
>>> index 5d1caf9..b5f275a 100644
>>> --- a/drivers/gpu/drm/i915/gvt/opregion.c
>>> +++ b/drivers/gpu/drm/i915/gvt/opregion.c
>>> @@ -67,14 +67,14 @@ static int map_vgpu_opregion(struct intel_vgpu
>> *vgpu, bool map)
>>> mfn =
>> intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
>>> + i * PAGE_SIZE);
>>> if (mfn == INTEL_GVT_INVALID_ADDR) {
>>> - gvt_err("fail to get MFN from VA\n");
>>> + gvt_vm_err("fail to get MFN from VA\n");
>>> return -EINVAL;
>>> }
>>> ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
>>> vgpu_opregion(vgpu)->gfn[i],
>>> mfn, 1, map);
>>> if (ret) {
>>> - gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
>>> + gvt_vm_err("fail to map GFN to MFN, errno: %d\n",
>> ret);
>>> return ret;
>>> }
>>> }
>>> @@ -287,7 +287,7 @@ int intel_vgpu_emulate_opregion_request(struct
>>> intel_vgpu *vgpu,
>>> u32 swsci)
>>> parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
>>>
>>> if (!(swsci & SWSCI_SCI_SELECT)) {
>>> - gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
>>> + gvt_vm_err("vgpu%d: requesting SMI service\n", vgpu->id);
>>> return 0;
>>> }
>>> /* ignore non 0->1 trasitions */
>>> @@ -300,7 +300,7 @@ int intel_vgpu_emulate_opregion_request(struct
>>> intel_vgpu *vgpu,
>>> u32 swsci)
>>> func = GVT_OPREGION_FUNC(*scic);
>>> subfunc = GVT_OPREGION_SUBFUNC(*scic);
>>> if (!querying_capabilities(*scic)) {
>>> - gvt_err("vgpu%d: requesting runtime service: func \"%s\","
>>> + gvt_vm_err("vgpu%d: requesting runtime service: func
>> \"%s\","
>>> " subfunc \"%s\"\n",
>>> vgpu->id,
>>> opregion_func_name(func),
>>> diff --git a/drivers/gpu/drm/i915/gvt/render.c
>>> b/drivers/gpu/drm/i915/gvt/render.c
>>> index 73f052a..9b35709 100644
>>> --- a/drivers/gpu/drm/i915/gvt/render.c
>>> +++ b/drivers/gpu/drm/i915/gvt/render.c
>>> @@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct
>>> intel_vgpu *vgpu, int
>>> ring_id)
>>> I915_WRITE_FW(reg, 0x1);
>>>
>>> if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
>>> - gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
>>> + gvt_vm_err("timeout in invalidate ring (%d) tlb\n", ring_id);
>>> else
>>> vgpu_vreg(vgpu, regs[ring_id]) = 0;
>>>
>>> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c
>>> b/drivers/gpu/drm/i915/gvt/scheduler.c
>>> index c576800..9417ff3 100644
>>> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
>>> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
>>> @@ -84,7 +84,7 @@ static int populate_shadow_context(struct
>>> intel_vgpu_workload
>>> *workload)
>>> (u32)((workload->ctx_desc.lrca + i) <<
>>> GTT_PAGE_SHIFT));
>>> if (context_gpa == INTEL_GVT_INVALID_ADDR) {
>>> - gvt_err("Invalid guest context descriptor\n");
>>> + gvt_vm_err("Invalid guest context descriptor\n");
>>> return -EINVAL;
>>> }
>>>
>>> @@ -180,7 +180,7 @@ static int dispatch_workload(struct
>>> intel_vgpu_workload
>>> *workload)
>>>
>>> rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
>>> if (IS_ERR(rq)) {
>>> - gvt_err("fail to allocate gem request\n");
>>> + gvt_vm_err("fail to allocate gem request\n");
>>> ret = PTR_ERR(rq);
>>> goto out;
>>> }
>>> @@ -313,7 +313,7 @@ static void update_guest_context(struct
>>> intel_vgpu_workload
>>> *workload)
>>> (u32)((workload->ctx_desc.lrca + i) <<
>>> GTT_PAGE_SHIFT));
>>> if (context_gpa == INTEL_GVT_INVALID_ADDR) {
>>> - gvt_err("invalid guest context descriptor\n");
>>> + gvt_vm_err("invalid guest context descriptor\n");
>>> return;
>>> }
>>>
>>> @@ -443,7 +443,7 @@ static int workload_thread(void *priv)
>>> mutex_unlock(&gvt->lock);
>>>
>>> if (ret) {
>>> - gvt_err("fail to dispatch workload, skip\n");
>>> + gvt_vm_err("fail to dispatch workload, skip\n");
>>> goto complete;
>>> }
>>>
>>> @@ -454,7 +454,7 @@ static int workload_thread(void *priv)
>>> 0, MAX_SCHEDULE_TIMEOUT);
>>> if (lret < 0) {
>>> workload->status = lret;
>>> - gvt_err("fail to wait workload, skip\n");
>>> + gvt_vm_err("fail to wait workload, skip\n");
>>> } else {
>>> workload->status = 0;
>>> }
>>> --
>>> 2.7.4
>>>
>>> _______________________________________________
>>> intel-gvt-dev mailing list
>>> intel-gvt-dev at lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev