[PATCH 1/2] drm/i915/gvt: Select appropriate mmio list at initialization time
changbin.du at intel.com
Wed Nov 29 07:35:56 UTC 2017
From: Changbin Du <changbin.du at intel.com>
Select the appropriate mmio list at initialization time, so that we don't
need to repeat the same selection wherever the mmio list is required.
Signed-off-by: Changbin Du <changbin.du at intel.com>
---
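[Note, not part of the patch: the sketch below only illustrates the pattern
this change adopts - pick the per-platform table once at device init, cache
the pointer and count, and let the context-switch paths simply read the
cache instead of re-checking the platform each time. All names and types
here are placeholders, not the actual driver code.]

/* Minimal standalone sketch of the "select once at init, reuse later"
 * pattern. Placeholder types; stands in for IS_SKYLAKE/IS_KABYLAKE and
 * the gen8/gen9 engine mmio tables.
 */
#include <stddef.h>
#include <stdio.h>

struct engine_mmio { int ring_id; unsigned int reg; };

static struct engine_mmio gen8_list[] = { {0, 0x229c}, {0, 0x2098} };
static struct engine_mmio gen9_list[] = { {0, 0x229c}, {0, 0x2248}, {0, 0x2098} };

struct gvt {
	int is_gen9;                    /* placeholder for the platform check */
	struct engine_mmio *mmio_list;  /* cached once at init */
	size_t nr_mmio;
};

static void init_engine_mmio_context(struct gvt *gvt)
{
	/* The platform check happens exactly once, here. */
	if (gvt->is_gen9) {
		gvt->mmio_list = gen9_list;
		gvt->nr_mmio = sizeof(gen9_list) / sizeof(gen9_list[0]);
	} else {
		gvt->mmio_list = gen8_list;
		gvt->nr_mmio = sizeof(gen8_list) / sizeof(gen8_list[0]);
	}
}

static void switch_mmio(struct gvt *gvt, int ring_id)
{
	/* No platform branching here any more: just walk the cached list. */
	for (size_t i = 0; i < gvt->nr_mmio; i++)
		if (gvt->mmio_list[i].ring_id == ring_id)
			printf("save/restore reg 0x%x\n", gvt->mmio_list[i].reg);
}

int main(void)
{
	struct gvt gvt = { .is_gen9 = 1 };

	init_engine_mmio_context(&gvt); /* once, at device init */
	switch_mmio(&gvt, 0);           /* later callers reuse the cache */
	return 0;
}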
drivers/gpu/drm/i915/gvt/gvt.c | 2 ++
drivers/gpu/drm/i915/gvt/gvt.h | 5 +++
drivers/gpu/drm/i915/gvt/render.c | 65 ++++++++++++++++++++-------------------
drivers/gpu/drm/i915/gvt/render.h | 9 ++++++
4 files changed, 50 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9a5dce3..643bb96 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -386,6 +386,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_idr;
+ intel_gvt_init_engine_mmio_context(gvt);
+
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 77df9ba..62ed012 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -310,6 +310,11 @@ struct intel_gvt {
wait_queue_head_t service_thread_wq;
unsigned long service_request;
+ struct {
+ struct engine_mmio *mmio_list;
+ int nr_mmio;
+ } engine_mmio_context;
+
struct dentry *debugfs_root;
};
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 8de5a2a..2c1184f 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -37,14 +37,6 @@
#include "gvt.h"
#include "trace.h"
-struct render_mmio {
- int ring_id;
- i915_reg_t reg;
- u32 mask;
- bool in_context;
- u32 value;
-};
-
/**
* Defined in Intel Open Source PRM.
* Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
@@ -59,7 +51,7 @@ struct render_mmio {
#define VF_GUARDBAND _MMIO(0x83a4)
/* Raw offset is appended to each line for convenience. */
-static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
+static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
@@ -90,7 +82,7 @@ static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
};
-static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
+static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
@@ -282,21 +274,17 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
i915_reg_t last_reg = _MMIO(0);
- struct render_mmio *mmio;
+ struct engine_mmio *mmio;
+ int i, nr_mmio;
u32 v;
- int i, array_size;
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
load_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
- for (i = 0; i < array_size; i++, mmio++) {
+ mmio = vgpu->gvt->engine_mmio_context.mmio_list;
+ nr_mmio = vgpu->gvt->engine_mmio_context.nr_mmio;
+
+ for (i = 0; i < nr_mmio; i++, mmio++) {
if (mmio->ring_id != ring_id)
continue;
@@ -335,21 +323,18 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct render_mmio *mmio;
i915_reg_t last_reg = _MMIO(0);
+ struct engine_mmio *mmio;
+ int i, nr_mmio;
u32 v;
- int i, array_size;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
restore_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
- for (i = 0; i < array_size; i++, mmio++) {
+ mmio = vgpu->gvt->engine_mmio_context.mmio_list;
+ nr_mmio = vgpu->gvt->engine_mmio_context.nr_mmio;
+
+ for (i = 0; i < nr_mmio; i++, mmio++) {
if (mmio->ring_id != ring_id)
continue;
@@ -418,3 +403,21 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
+
+/**
+ * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
+ * @gvt: GVT device
+ *
+ */
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+{
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+ gvt->engine_mmio_context.mmio_list = gen9_engine_mmio_list;
+ gvt->engine_mmio_context.nr_mmio =
+ ARRAY_SIZE(gen9_engine_mmio_list);
+ } else {
+ gvt->engine_mmio_context.mmio_list = gen8_engine_mmio_list;
+ gvt->engine_mmio_context.nr_mmio =
+ ARRAY_SIZE(gen8_engine_mmio_list);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
index 91db1d3..ca2c6a7 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -36,8 +36,17 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+struct engine_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id);
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
#endif
--
2.7.4