[PATCH] drm/i915/gvt: Use KVM r/w to access guest opregion
Zhang, Tina
tina.zhang at intel.com
Fri Jan 26 00:03:03 UTC 2018
> -----Original Message-----
> From: Zhenyu Wang [mailto:zhenyuw at linux.intel.com]
> Sent: Thursday, January 25, 2018 5:28 PM
> To: Zhang, Tina <tina.zhang at intel.com>
> Cc: zhenyuw at linux.intel.com; intel-gvt-dev at lists.freedesktop.org; Zhao, Yan Y
> <yan.y.zhao at intel.com>; Zhang, Xiong Y <xiong.y.zhang at intel.com>
> Subject: Re: [PATCH] drm/i915/gvt: Use KVM r/w to access guest opregion
>
> On 2018.01.25 16:40:21 +0800, Tina Zhang wrote:
> > For KVMGT, the guest opregion, which is handled by VFIO, is actually a
> > piece of guest memory which won't be accessed by devices. So, its mfn
> > shouldn't be obtained through the VFIO interface. This patch uses the KVM
> > r/w interface to access the data in the guest opregion.
> >
>
> gvt uses a virtualized opregion for the vgpu, so why not just r/w the vgpu opregion?
As the comment in "kvmgt.c" says:
static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose opregion to VFIO. And the
	 * other one created by VFIO later, is used by guest actually.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;
	...
Here "This one" means the vgpu opregion created and populated by device model.
"the other one" is actually used by guest VM.
BR,
Tina
>
> > Fix the guest opregion access issue seen when the host boots with "intel_iommu=on".
> >
> > Signed-off-by: Tina Zhang <tina.zhang at intel.com>
> > Cc: Yan Zhao <yan.y.zhao at intel.com>
> > Cc: Xiong Zhang <xiong.y.zhang at intel.com>
> > ---
> >  drivers/gpu/drm/i915/gvt/gvt.h      |  1 -
> >  drivers/gpu/drm/i915/gvt/opregion.c | 95 ++++++++++++++++++++++++++-----------
> >  2 files changed, 68 insertions(+), 28 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> > index 7dc7a80..205d88d 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.h
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> > @@ -127,7 +127,6 @@ struct intel_vgpu_irq {
> >  struct intel_vgpu_opregion {
> >  	bool mapped;
> >  	void *va;
> > -	void *va_gopregion;
> >  	u32 gfn[INTEL_GVT_OPREGION_PAGES];
> >  };
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
> > index 8420d1f..4030233 100644
> > --- a/drivers/gpu/drm/i915/gvt/opregion.c
> > +++ b/drivers/gpu/drm/i915/gvt/opregion.c
> > @@ -299,20 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
> >  {
> >
> >  	int i, ret = 0;
> > -	unsigned long pfn;
> >
> >  	gvt_dbg_core("emulate opregion from kernel\n");
> >
> >  	switch (intel_gvt_host.hypervisor_type) {
> >  	case INTEL_GVT_HYPERVISOR_KVM:
> > -		pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
> > -		vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
> > -						INTEL_GVT_OPREGION_SIZE,
> > -						MEMREMAP_WB);
> > -		if (!vgpu_opregion(vgpu)->va_gopregion) {
> > -			gvt_vgpu_err("failed to map guest opregion\n");
> > -			ret = -EFAULT;
> > -		}
> > +		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
> > +			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
> >  		vgpu_opregion(vgpu)->mapped = true;
> >  		break;
> >  	case INTEL_GVT_HYPERVISOR_XEN:
> > @@ -352,10 +345,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
> >  		if (vgpu_opregion(vgpu)->mapped)
> >  			map_vgpu_opregion(vgpu, false);
> >  	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
> > -		if (vgpu_opregion(vgpu)->mapped) {
> > -			memunmap(vgpu_opregion(vgpu)->va_gopregion);
> > -			vgpu_opregion(vgpu)->va_gopregion = NULL;
> > -		}
> > +		/* Guest opregion is released by VFIO */
> >  	}
> >  	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
> >  			get_order(INTEL_GVT_OPREGION_SIZE));
> > @@ -480,19 +470,38 @@ static bool querying_capabilities(u32 scic)
> >   */
> >  int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
> >  {
> > -	u32 *scic, *parm;
> > +	u32 scic, parm;
> >  	u32 func, subfunc;
> > +	u64 scic_pa = 0, parm_pa = 0;
> > +	int ret;
> >
> >  	switch (intel_gvt_host.hypervisor_type) {
> >  	case INTEL_GVT_HYPERVISOR_XEN:
> > -		scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
> > -		parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
> > +		scic = *((u32 *)vgpu_opregion(vgpu)->va +
> > +					INTEL_GVT_OPREGION_SCIC);
> > +		parm = *((u32 *)vgpu_opregion(vgpu)->va +
> > +					INTEL_GVT_OPREGION_PARM);
> >  		break;
> >  	case INTEL_GVT_HYPERVISOR_KVM:
> > -		scic = vgpu_opregion(vgpu)->va_gopregion +
> > -				INTEL_GVT_OPREGION_SCIC;
> > -		parm = vgpu_opregion(vgpu)->va_gopregion +
> > -				INTEL_GVT_OPREGION_PARM;
> > +		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + INTEL_GVT_OPREGION_SCIC;
> > +		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + INTEL_GVT_OPREGION_PARM;
> > +
> > +		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
> > +				&scic, sizeof(scic));
> > +		if (ret) {
> > +			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
> > +					ret, scic_pa, sizeof(scic));
> > +			return ret;
> > +		}
> > +
> > +		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
> > +				&parm, sizeof(parm));
> > +		if (ret) {
> > +			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
> > +					ret, parm_pa, sizeof(parm));
> > +			return ret;
> > +		}
> > +
> >  		break;
> >  	default:
> >  		gvt_vgpu_err("not supported hypervisor\n");
> > @@ -510,9 +519,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
> >  		return 0;
> >  	}
> >
> > -	func = GVT_OPREGION_FUNC(*scic);
> > -	subfunc = GVT_OPREGION_SUBFUNC(*scic);
> > -	if (!querying_capabilities(*scic)) {
> > +	func = GVT_OPREGION_FUNC(scic);
> > +	subfunc = GVT_OPREGION_SUBFUNC(scic);
> > +	if (!querying_capabilities(scic)) {
> >  		gvt_vgpu_err("requesting runtime service: func \"%s\","
> >  				" subfunc \"%s\"\n",
> >  				opregion_func_name(func),
> > @@ -521,11 +530,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
> >  		 * emulate exit status of function call, '0' means
> >  		 * "failure, generic, unsupported or unknown cause"
> >  		 */
> > -		*scic &= ~OPREGION_SCIC_EXIT_MASK;
> > -		return 0;
> > +		scic &= ~OPREGION_SCIC_EXIT_MASK;
> > +		goto out;
> > +	}
> > +
> > +	scic = 0;
> > +	parm = 0;
> > +
> > +out:
> > +	switch (intel_gvt_host.hypervisor_type) {
> > +	case INTEL_GVT_HYPERVISOR_XEN:
> > +		*((u32 *)vgpu_opregion(vgpu)->va +
> > +				INTEL_GVT_OPREGION_SCIC) = scic;
> > +		*((u32 *)vgpu_opregion(vgpu)->va +
> > +				INTEL_GVT_OPREGION_PARM) = parm;
> > +		break;
> > +	case INTEL_GVT_HYPERVISOR_KVM:
> > +		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
> > +				&scic, sizeof(scic));
> > +		if (ret) {
> > +			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
> > +					ret, scic_pa, sizeof(scic));
> > +			return ret;
> > +		}
> > +
> > +		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
> > +				&parm, sizeof(parm));
> > +		if (ret) {
> > +			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
> > +					ret, parm_pa, sizeof(parm));
> > +			return ret;
> > +		}
> > +
> > +		break;
> > +	default:
> > +		gvt_vgpu_err("not supported hypervisor\n");
> > +		return -EINVAL;
> >  	}
> >
> > -	*scic = 0;
> > -	*parm = 0;
> >  	return 0;
> >  }
> > --
> > 2.7.4
> >
>
> --
> Open Source Technology Center, Intel ltd.
>
> $gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827