[PATCH][Security Check] drm/i915/gvt: Limit read hw reg to active vgpu
Christophe de Dinechin
christophe.de.dinechin at gmail.com
Mon Oct 30 09:21:32 UTC 2017
Xiong Zhang writes:
> mmio_read_from_hw() lets a vgpu read hw regs. If the vgpu's workload
> is running on hw, that is fine. Otherwise the vgpu will get another
> vgpu's reg val, which is unsafe.
>
> This patch limits such hw access to the active vgpu. If a vgpu isn't
> running on hw, a reg read for that vgpu will get the last active
> val, which is saved at schedule-out.
>
> Signed-off-by: Xiong Zhang <xiong.y.zhang at intel.com>
> ---
> drivers/gpu/drm/i915/gvt/handlers.c | 19 +++++++++++++++----
> drivers/gpu/drm/i915/gvt/scheduler.c | 23 +++++++++++++++++++++++
> 2 files changed, 38 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> index 4f8d470..eb8ef7e 100644
> --- a/drivers/gpu/drm/i915/gvt/handlers.c
> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> @@ -1471,11 +1471,22 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
> static int mmio_read_from_hw(struct intel_vgpu *vgpu,
> unsigned int offset, void *p_data, unsigned int bytes)
> {
> - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> + struct intel_gvt *gvt = vgpu->gvt;
> + struct drm_i915_private *dev_priv = gvt->dev_priv;
> + int ring_id;
> +
> + ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
> + /*
> + * Read the HW reg in two cases:
> + * a. the offset isn't a ring mmio
> + * b. the offset's ring is running on hw.
> + */
> + if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id]) {
If adding a security check, shouldn't you also test ring_id against an
upper limit (I915_NUM_ENGINES, I guess)? Otherwise an out-of-range
ring_id would index past the end of engine_owner[].
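Something like this, untested (whether an out-of-range id should fall
back to the HW read like a non-ring mmio, or be treated as "not the
owner", is your call; the point is that engine_owner[] is never
indexed out of bounds, thanks to the short-circuit evaluation):

	if (ring_id < 0 || ring_id >= I915_NUM_ENGINES ||
	    vgpu == gvt->scheduler.engine_owner[ring_id]) {
		mmio_hw_access_pre(dev_priv);
		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
		mmio_hw_access_post(dev_priv);
	}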
> + mmio_hw_access_pre(dev_priv);
> + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
> + mmio_hw_access_post(dev_priv);
> + }
>
> - mmio_hw_access_pre(dev_priv);
> - vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
> - mmio_hw_access_post(dev_priv);
> return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
> }
>
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 09d357c..f079ffd 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -131,6 +131,28 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
> return i915_gem_context_force_single_submission(req->ctx);
> }
>
> +static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
> +{
> + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> + uint32_t reg_base[I915_NUM_ENGINES] = {RENDER_RING_BASE,
> + BLT_RING_BASE,
> + GEN6_BSD_RING_BASE,
> + GEN8_BSD2_RING_BASE,
> + VEBOX_RING_BASE };
> + i915_reg_t reg;
> +
> + reg = RING_INSTDONE(reg_base[ring_id]);
Same reg_base[ring_id] everywhere; the code may be more readable if
you compute it only once.
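For example, untested:

static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	uint32_t reg_base[I915_NUM_ENGINES] = { RENDER_RING_BASE,
						BLT_RING_BASE,
						GEN6_BSD_RING_BASE,
						GEN8_BSD2_RING_BASE,
						VEBOX_RING_BASE };
	/* look the ring base up once instead of once per register */
	uint32_t base = reg_base[ring_id];
	i915_reg_t reg;

	reg = RING_INSTDONE(base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_TIMESTAMP(base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_TIMESTAMP_UDW(base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

(The array could also be static const, so it is not rebuilt on every
schedule-out.)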
> + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
> + reg = RING_TIMESTAMP(reg_base[ring_id]);
> + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
> + reg = RING_TIMESTAMP_UDW(reg_base[ring_id]);
> + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
> + reg = RING_ACTHD(reg_base[ring_id]);
> + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
> + reg = RING_ACTHD_UDW(reg_base[ring_id]);
> + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
> +}
> +
> static int shadow_context_status_change(struct notifier_block *nb,
> unsigned long action, void *data)
> {
> @@ -175,6 +197,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
> break;
> case INTEL_CONTEXT_SCHEDULE_OUT:
> case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
> + save_ring_hw_state(workload->vgpu, ring_id);
> atomic_set(&workload->shadow_ctx_active, 0);
> break;
> default:
--
Cheers,
Christophe de Dinechin (c3d)