[PATCH 8/8] drm/i915/gvt: Move clean_workloads() into scheduler.c
Wang, Zhi A
zhi.a.wang at intel.com
Fri Sep 15 06:28:26 UTC 2017
-----Original Message-----
From: intel-gvt-dev [mailto:intel-gvt-dev-bounces at lists.freedesktop.org] On Behalf Of Zhenyu Wang
Sent: Friday, September 15, 2017 9:18 AM
To: Wang, Zhi A <zhi.a.wang at intel.com>
Cc: intel-gvt-dev at lists.freedesktop.org
Subject: Re: [PATCH 8/8] drm/i915/gvt: Move clean_workloads() into scheduler.c
On 2017.09.14 02:50:48 +0800, Zhi Wang wrote:
> Move clean_workloads() into scheduler.c since it's not specific to
> execlist.
>
> Signed-off-by: Zhi Wang <zhi.a.wang at intel.com>
> ---
> drivers/gpu/drm/i915/gvt/execlist.c  | 41 +-----------------------------------
> drivers/gpu/drm/i915/gvt/scheduler.c | 39 ++++++++++++++++++++++++++++++++++
> 2 files changed, 40 insertions(+), 40 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
> index 5ce2a04..0c35c3b 100644
> --- a/drivers/gpu/drm/i915/gvt/execlist.c
> +++ b/drivers/gpu/drm/i915/gvt/execlist.c
> @@ -46,8 +46,6 @@
> #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
> ((a)->lrca == (b)->lrca))
>
> -static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
> -
> static int context_switch_events[] = {
> [RCS] = RCS_AS_CONTEXT_SWITCH,
> [BCS] = BCS_AS_CONTEXT_SWITCH,
> @@ -397,23 +395,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
> gvt_dbg_el("complete workload %p status %d\n", workload,
> workload->status);
>
> - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
> - /* if workload->status is not successful means HW GPU
> - * has occurred GPU hang or something wrong with i915/GVT,
> - * and GVT won't inject context switch interrupt to guest.
> - * So this error is a vGPU hang actually to the guest.
> - * According to this we should emulate a vGPU hang. If
> - * there are pending workloads which are already submitted
> - * from guest, we should clean them up like HW GPU does.
> - *
> - * if it is in middle of engine resetting, the pending
> - * workloads won't be submitted to HW GPU and will be
> - * cleaned up during the resetting process later, so doing
> - * the workload clean up here doesn't have any impact.
> - **/
> - clean_workloads(vgpu, ENGINE_MASK(ring_id));
> + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
> goto out;
> - }
>
> if (!list_empty(workload_q_head(vgpu, ring_id))) {
> struct execlist_ctx_descriptor_format *this_desc, *next_desc;
> @@ -534,32 +517,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
> vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
> }
>
> -static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
> -{
> - struct intel_vgpu_submission *s = &vgpu->submission;
> - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> - struct intel_engine_cs *engine;
> - struct intel_vgpu_workload *pos, *n;
> - unsigned int tmp;
> -
> - /* free the unsubmitted workloads in the queues. */
> - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
> - list_for_each_entry_safe(pos, n,
> - &s->workload_q_head[engine->id], list) {
> - list_del_init(&pos->list);
> - intel_vgpu_destroy_workload(pos);
> - }
> - clear_bit(engine->id, s->shadow_ctx_desc_updated);
> - }
> -}
> -
> void clean_execlist(struct intel_vgpu *vgpu)
> {
> enum intel_engine_id i;
> struct intel_engine_cs *engine;
>
> - clean_workloads(vgpu, ALL_ENGINES);
> -
> for_each_engine(engine, vgpu->gvt->dev_priv, i) {
> struct intel_vgpu_submission *s = &vgpu->submission;
>
> @@ -576,7 +538,6 @@ void reset_execlist(struct intel_vgpu *vgpu,
> struct intel_engine_cs *engine;
> unsigned int tmp;
>
> - clean_workloads(vgpu, engine_mask);
> for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
> init_vgpu_execlist(vgpu, engine->id);
> }
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index b0d3b65..1656de6 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -643,6 +643,25 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
> kunmap(page);
> }
>
> +static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
> +{
> + struct intel_vgpu_submission *s = &vgpu->submission;
> + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> + struct intel_engine_cs *engine;
> + struct intel_vgpu_workload *pos, *n;
> + unsigned int tmp;
> +
> + /* free the unsubmitted workloads in the queues. */
> + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
> + list_for_each_entry_safe(pos, n,
> + &s->workload_q_head[engine->id], list) {
> + list_del_init(&pos->list);
> + intel_vgpu_destroy_workload(pos);
> + }
> + clear_bit(engine->id, s->shadow_ctx_desc_updated);
> + }
> +}
> +
> static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
> {
> struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> @@ -706,6 +725,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
> release_shadow_wa_ctx(&workload->wa_ctx);
> }
>
> + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
> + /* if workload->status is not successful means HW GPU
> + * has occurred GPU hang or something wrong with i915/GVT,
> + * and GVT won't inject context switch interrupt to guest.
> + * So this error is a vGPU hang actually to the guest.
> + * According to this we should emulate a vGPU hang. If
> + * there are pending workloads which are already submitted
> + * from guest, we should clean them up like HW GPU does.
> + *
> + * if it is in middle of engine resetting, the pending
> + * workloads won't be submitted to HW GPU and will be
> + * cleaned up during the resetting process later, so doing
> + * the workload clean up here doesn't have any impact.
> + **/
> + clean_workloads(vgpu, ENGINE_MASK(ring_id));
> + }
> +
> workload->complete(workload);
>
> atomic_dec(&s->running_workload_num);
> @@ -903,6 +939,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
> if (!s->active)
> return;
>
> + clean_workloads(vgpu, engine_mask);
> s->ops->reset(vgpu, engine_mask);
> }
>
> @@ -980,6 +1017,8 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
> return -EINVAL;
>
> if (s->active) {
> + clean_workloads(vgpu, ALL_ENGINES);
> +
I don't think you still need to do the clean here; you can assume the reset has already done it, or add a warning if it hasn't. That seems cleaner.
Zhi: Thanks, let me find a better place.
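For illustration, the warning variant could look roughly like this (an untested sketch; it assumes the per-engine workload queues are guaranteed to be empty once reset has run):

	if (s->active) {
		struct intel_engine_cs *engine;
		enum intel_engine_id i;

		/* Reset is expected to have drained the workload queues
		 * already; warn instead of silently cleaning them again.
		 */
		for_each_engine(engine, vgpu->gvt->dev_priv, i)
			WARN_ON(!list_empty(&s->workload_q_head[engine->id]));

		s->ops->clean(vgpu);
		s->active = false;
	}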
So I think in the previous patch we may only need to call select submission ops with interface 0 for the dmlr case, instead of for the ALL_ENGINES case, as an engine reset is not supposed to change the submission mode.
Zhi: From the spec, SW can change the submission mode after a full vGPU reset...
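For clarity, what Zhenyu proposes would be roughly the following in the reset path (a sketch only; the dmlr flag of intel_gvt_reset_vgpu_locked() and the meaning of interface 0 as "no submission ops" are taken from this discussion, not from the patch):

	/* in intel_gvt_reset_vgpu_locked(), full device-model-level reset only */
	if (dmlr)
		/* a per-engine reset keeps the current submission mode */
		intel_vgpu_select_submission_ops(vgpu, 0);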
> s->ops->clean(vgpu);
> s->active = false;
> gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
> --
> 2.7.4
>
--
Open Source Technology Center, Intel ltd.
$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827