[PATCH 3/6] drm/i915/gvt: Prevent invalid index access to vgpu->gtt.scratch_pt[]
Colin Xu
colin.xu at intel.com
Wed Mar 20 03:21:27 UTC 2019
An invalid index could overflow vgpu->gtt.scratch_pt[], so limit it to the
array size before using it. Although these accesses are protected by the
current gvt logic and won't be hit at runtime, they remain a potential
security risk in the future.
Also fix a mistake in the error path of ppgtt_populate_spt_by_guest_entry:
gvt_vgpu_err must not dereference spt after ppgtt_free_spt has released it.
Signed-off-by: Colin Xu <colin.xu at intel.com>
---
drivers/gpu/drm/i915/gvt/gtt.c | 50 ++++++++++++++++++++++++++++++----
1 file changed, 44 insertions(+), 6 deletions(-)
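
Note (not for the commit message): for reference, the bound check added in
each hunk follows the same pattern. Below is a minimal standalone sketch of
that pattern; the enum values, struct layout and function names here are
simplified stand-ins for illustration, not the real gvt structures in gtt.h.

/*
 * Sketch of the check added above: validate a page-table type before
 * using it as an index into a per-vGPU scratch_pt-style array.
 */
#include <stdio.h>

enum gtt_type {
	GTT_TYPE_INVALID = -1,
	GTT_TYPE_PPGTT_PTE_PT,
	GTT_TYPE_PPGTT_PDE_PT,
	GTT_TYPE_PPGTT_PDP_PT,
	GTT_TYPE_PPGTT_PML4_PT,
	GTT_TYPE_MAX,
};

struct vgpu_gtt {
	unsigned long scratch_pt_mfn[GTT_TYPE_MAX];
};

/* Return the scratch mfn for @type, or 0 if @type cannot index the array. */
static unsigned long scratch_mfn(struct vgpu_gtt *gtt, int type)
{
	if (type == GTT_TYPE_INVALID || type >= GTT_TYPE_MAX) {
		fprintf(stderr, "invalid ppgtt type %d\n", type);
		return 0;
	}
	return gtt->scratch_pt_mfn[type];
}

int main(void)
{
	struct vgpu_gtt gtt = { .scratch_pt_mfn = { 1, 2, 3, 4 } };

	printf("%lu\n", scratch_mfn(&gtt, GTT_TYPE_PPGTT_PDE_PT)); /* 2 */
	printf("%lu\n", scratch_mfn(&gtt, GTT_TYPE_MAX));          /* rejected, 0 */
	return 0;
}
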
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index f4c992d96087..340565338a2a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -937,6 +937,12 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
cur_pt_type = get_next_pt_type(e->type) + 1;
+ if (WARN_ON(cur_pt_type == GTT_TYPE_INVALID ||
+ cur_pt_type >= GTT_TYPE_MAX)) {
+ gvt_vgpu_err("fail: invalid ppgtt_spt type %d.\n",
+ cur_pt_type);
+ return -EINVAL;
+ }
if (ops->get_pfn(e) ==
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
@@ -961,6 +967,12 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
pfn = ops->get_pfn(entry);
type = spt->shadow_page.type;
+ if (WARN_ON(type == GTT_TYPE_INVALID || type >= GTT_TYPE_MAX)) {
+ gvt_dbg_mm("fail: invalid spt(%p) type %d.\n",
+ spt, type);
+ return;
+ }
+
/* Uninitialized spte or unshadowed spte. */
if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
@@ -1064,7 +1076,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
ret = ppgtt_populate_spt(spt);
if (ret) {
ppgtt_put_spt(spt);
- goto err;
+ goto err_populate;
}
}
} else {
@@ -1073,16 +1085,22 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
- goto err;
+ goto err_alloc_gfn;
}
ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
- if (ret)
+ if (ret) {
+ gvt_vgpu_err("fail: enable track shadow page %p guest entry 0x%llx type %d\n",
+ spt, we->val64, we->type);
goto err_free_spt;
+ }
ret = ppgtt_populate_spt(spt);
- if (ret)
+ if (ret) {
+ gvt_vgpu_err("fail: populate shadow page %p guest entry 0x%llx type %d\n",
+ spt, we->val64, we->type);
goto err_free_spt;
+ }
trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
spt->shadow_page.type);
@@ -1091,9 +1109,19 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
err_free_spt:
ppgtt_free_spt(spt);
-err:
- gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ goto err;
+
+err_populate:
+ gvt_vgpu_err("fail: populate shadow page %p guest entry 0x%llx type %d\n",
+ spt, we->val64, we->type);
+ goto err;
+
+err_alloc_gfn:
+ gvt_vgpu_err("fail: alloc gfn shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
+ goto err;
+
+err:
return ERR_PTR(ret);
}
@@ -1572,6 +1600,9 @@ static int ppgtt_handle_guest_write_page_table(
int new_present;
int i, ret;
+ if (WARN_ON(type == GTT_TYPE_INVALID || type >= GTT_TYPE_MAX))
+ gvt_vgpu_err("fail: invalid spt(%p) type %d.\n", spt, type);
+
new_present = ops->test_present(we);
/*
@@ -1714,6 +1745,13 @@ static int ppgtt_handle_guest_write_page_table_bytes(
if (!test_bit(index, spt->post_shadow_bitmap)) {
int type = spt->shadow_page.type;
+ if (WARN_ON(type == GTT_TYPE_INVALID ||
+ type >= GTT_TYPE_MAX)) {
+ gvt_vgpu_err("fail: invalid spt(%p) type %d.\n",
+ spt, type);
+ return -EINVAL;
+ }
+
ppgtt_get_shadow_entry(spt, &se, index);
ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
if (ret)
--
2.21.0