[Intel-gfx] [RFC 22/29] drm/i915: gvt: Full display virtualization
Zhi Wang
zhi.a.wang at intel.com
Thu Jan 28 02:21:44 PST 2016
From: Bing Niu <bing.niu at intel.com>
This patch introduces the GVT-g full display virtualization subsystem.
It consists of a collection of display MMIO handlers, such as the power
well register handler, the pipe register handler and the plane register
handler, which emulate the behavior of all display MMIOs to support the
virtual mode-setting sequence for the guest.
Signed-off-by: Bing Niu <bing.niu at intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang at intel.com>
---
drivers/gpu/drm/i915/gvt/Makefile | 2 +-
drivers/gpu/drm/i915/gvt/display.c | 233 +++++++++
drivers/gpu/drm/i915/gvt/display.h | 129 +++++
drivers/gpu/drm/i915/gvt/edid.c | 493 ++++++++++++++++++
drivers/gpu/drm/i915/gvt/edid.h | 184 +++++++
drivers/gpu/drm/i915/gvt/gvt.c | 6 +
drivers/gpu/drm/i915/gvt/gvt.h | 12 +
drivers/gpu/drm/i915/gvt/handlers.c | 974 ++++++++++++++++++++++++++++++++++-
drivers/gpu/drm/i915/gvt/instance.c | 4 +
drivers/gpu/drm/i915/gvt/interrupt.c | 26 +-
drivers/gpu/drm/i915/gvt/interrupt.h | 7 +
drivers/gpu/drm/i915/gvt/reg.h | 3 +
12 files changed, 2069 insertions(+), 4 deletions(-)
create mode 100644 drivers/gpu/drm/i915/gvt/display.c
create mode 100644 drivers/gpu/drm/i915/gvt/display.h
create mode 100644 drivers/gpu/drm/i915/gvt/edid.c
create mode 100644 drivers/gpu/drm/i915/gvt/edid.h
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index b0a3a1a..c146c57 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,6 +1,6 @@
GVT_SOURCE := gvt.o params.o aperture_gm.o mmio.o handlers.o instance.o \
trace_points.o interrupt.o gtt.o cfg_space.o opregion.o utility.o \
- fb_decoder.o
+ fb_decoder.o display.o edid.o
ccflags-y += -I$(src) -I$(src)/.. -Wall -Werror -Wno-unused-function
i915_gvt-y := $(GVT_SOURCE)
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
new file mode 100644
index 0000000..c951150
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "gvt.h"
+
+/* Return the pipe currently selected as input of the eDP transcoder,
+ * decoded from the instance's virtual TRANS_DDI_FUNC_CTL(EDP) register;
+ * returns I915_MAX_PIPES when no pipe is routed to eDP.
+ */
+int gvt_get_edp_pipe(struct vgt_device *vgt)
+{
+ u32 data = __vreg(vgt, _TRANS_DDI_FUNC_CTL_EDP);
+ int pipe = I915_MAX_PIPES;
+
+ switch (data & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+ return pipe;
+}
+
+/* True when both the virtual eDP pipe (PIPE_EDP_CONF) and the virtual
+ * eDP transcoder (TRANS_DDI_FUNC_CTL) have their enable bits set.
+ */
+bool gvt_edp_pipe_is_enabled(struct vgt_device *vgt)
+{
+ if (!(__vreg(vgt, _REG_PIPE_EDP_CONF) & PIPECONF_ENABLE))
+ return false;
+
+ if (!(__vreg(vgt, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
+ return false;
+
+ return true;
+}
+
+/* A pipe counts as enabled if its own virtual PIPECONF enable bit is set,
+ * or if it is the pipe currently driven by the enabled eDP transcoder.
+ */
+bool gvt_pipe_is_enabled(struct vgt_device *vgt, int pipe)
+{
+ ASSERT(pipe >= PIPE_A && pipe < I915_MAX_PIPES);
+
+ if (__vreg(vgt, GVT_PIPECONF(pipe)) & PIPECONF_ENABLE)
+ return true;
+
+ if (gvt_edp_pipe_is_enabled(vgt) &&
+ gvt_get_edp_pipe(vgt) == pipe)
+ return true;
+
+ return false;
+}
+
+/* 256-byte EDID (base block plus one extension block) describing the
+ * emulated DP monitor. Note setup_virtual_dp_monitor() copies only the
+ * first EDID_SIZE (128) bytes into the per-port EDID — the extension
+ * block here is currently unused; TODO confirm that is intentional.
+ */
+static const unsigned char virtual_dp_monitor_edid[] = {
+ 0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x22,0xf0,0x54,0x29,
+ 0x00,0x00,0x00,0x00,0x04,0x17,0x01,0x04,0xa5,0x34,0x20,0x78,
+ 0x23,0xfc,0x81,0xa4,0x55,0x4d,0x9d,0x25,0x12,0x50,0x54,0x21,
+ 0x08,0x00,0xd1,0xc0,0x81,0xc0,0x81,0x40,0x81,0x80,0x95,0x00,
+ 0xa9,0x40,0xb3,0x00,0x01,0x01,0x28,0x3c,0x80,0xa0,0x70,0xb0,
+ 0x23,0x40,0x30,0x20,0x36,0x00,0x06,0x44,0x21,0x00,0x00,0x1a,
+ 0x00,0x00,0x00,0xfd,0x00,0x18,0x3c,0x18,0x50,0x11,0x00,0x0a,
+ 0x20,0x20,0x20,0x20,0x20,0x20,0x00,0x00,0x00,0xfc,0x00,0x48,
+ 0x50,0x20,0x5a,0x52,0x32,0x34,0x34,0x30,0x77,0x0a,0x20,0x20,
+ 0x00,0x00,0x00,0xff,0x00,0x43,0x4e,0x34,0x33,0x30,0x34,0x30,
+ 0x44,0x58,0x51,0x0a,0x20,0x20,0x01,0x44,0x02,0x03,0x19,0xc1,
+ 0x4c,0x90,0x1f,0x05,0x14,0x04,0x13,0x03,0x02,0x07,0x06,0x12,
+ 0x01,0x23,0x09,0x07,0x07,0x83,0x01,0x00,0x00,0x02,0x3a,0x80,
+ 0x18,0x71,0x38,0x2d,0x40,0x58,0x2c,0x45,0x00,0x06,0x44,0x21,
+ 0x00,0x00,0x1e,0x02,0x3a,0x80,0xd0,0x72,0x38,0x2d,0x40,0x10,
+ 0x2c,0x45,0x80,0x06,0x44,0x21,0x00,0x00,0x1e,0x01,0x1d,0x00,
+ 0x72,0x51,0xd0,0x1e,0x20,0x6e,0x28,0x55,0x00,0x06,0x44,0x21,
+ 0x00,0x00,0x1e,0x01,0x1d,0x00,0xbc,0x52,0xd0,0x1e,0x20,0xb8,
+ 0x28,0x55,0x40,0x06,0x44,0x21,0x00,0x00,0x1e,0x8c,0x0a,0xd0,
+ 0x8a,0x20,0xe0,0x2d,0x10,0x10,0x3e,0x96,0x00,0x06,0x44,0x21,
+ 0x00,0x00,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x7b,
+};
+
+#define DPCD_HEADER_SIZE 0xb
+
+/* Fixed first DPCD_HEADER_SIZE receiver-capability bytes presented to
+ * the guest; presumably DPCD rev / max link rate / max lane count per
+ * the DP spec field layout — TODO confirm against the standard.
+ */
+u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+ 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Refresh the virtual hot-plug status bits (SDEISR for ports B-D,
+ * DE_PORT_ISR for port A) so they match which ports currently have a
+ * virtual monitor attached.
+ */
+static void emulate_monitor_status_change(struct vgt_device *vgt)
+{
+ __vreg(vgt, _SDEISR) &= ~(_REGBIT_DP_B_HOTPLUG |
+ _REGBIT_DP_C_HOTPLUG |
+ _REGBIT_DP_D_HOTPLUG);
+
+ if (dpy_has_monitor_on_port(vgt, PORT_B))
+ __vreg(vgt, _SDEISR) |= _REGBIT_DP_B_HOTPLUG;
+
+ if (dpy_has_monitor_on_port(vgt, PORT_C))
+ __vreg(vgt, _SDEISR) |= _REGBIT_DP_C_HOTPLUG;
+
+ if (dpy_has_monitor_on_port(vgt, PORT_D))
+ __vreg(vgt, _SDEISR) |= _REGBIT_DP_D_HOTPLUG;
+
+ if (dpy_has_monitor_on_port(vgt, PORT_A))
+ __vreg(vgt, _GEN8_DE_PORT_ISR) |= GEN8_PORT_DP_A_HOTPLUG;
+}
+
+/* Release the virtual EDID and DPCD blocks attached to virtual port A.
+ * Safe on a partially initialized port: kfree(NULL) is a no-op, so the
+ * redundant NULL guards are dropped; pointers are cleared to guard
+ * against double free on repeated cleanup.
+ */
+static void clean_virtual_dp_monitor(struct vgt_device *vgt)
+{
+ struct gt_port *port = gvt_vport(vgt, PORT_A);
+
+ kfree(port->edid);
+ port->edid = NULL;
+
+ kfree(port->dpcd);
+ port->dpcd = NULL;
+}
+
+/* Allocate and populate the virtual EDID and DPCD for port A, mark the
+ * port as DP-A, and raise the matching hot-plug status bits. Returns
+ * false after freeing any partial allocations on failure.
+ */
+static bool setup_virtual_dp_monitor(struct vgt_device *vgt)
+{
+ struct gt_port *port = gvt_vport(vgt, PORT_A);
+
+ port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
+ if (!port->edid)
+ goto err;
+
+ port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
+ if (!port->dpcd)
+ goto err;
+
+ /* Only the 128-byte base EDID block is exposed to the guest. */
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ EDID_SIZE);
+ port->edid->data_valid = true;
+
+ memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
+ port->dpcd->data_valid = true;
+
+ port->type = GVT_DP_A;
+
+ emulate_monitor_status_change(vgt);
+ return true;
+err:
+ clean_virtual_dp_monitor(vgt);
+ return false;
+}
+
+/* (Re)arm the display-event emulation timer: cancel it, then restart it
+ * only if at least one online instance has an enabled pipe. Caller must
+ * hold pdev->lock.
+ *
+ * Fix: have_enabled_pipe was overwritten on every instance iteration, so
+ * a later instance with all pipes disabled could clear a hit found on an
+ * earlier instance; stop scanning at the first enabled pipe instead.
+ */
+bool gvt_update_display_events_emulation(struct pgt_device *pdev)
+{
+ struct gvt_irq_state *irq = &pdev->irq_state;
+ struct vgt_device *vgt;
+ bool have_enabled_pipe = false;
+ int pipe, id;
+
+ ASSERT(mutex_is_locked(&pdev->lock));
+
+ hrtimer_cancel(&irq->dpy_timer.timer);
+
+ for_each_online_instance(pdev, vgt, id) {
+ for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
+ if (gvt_pipe_is_enabled(vgt, pipe)) {
+ have_enabled_pipe = true;
+ break;
+ }
+ }
+ if (have_enabled_pipe)
+ break;
+ }
+
+ if (have_enabled_pipe)
+ hrtimer_start(&irq->dpy_timer.timer,
+ ktime_add_ns(ktime_get(), irq->dpy_timer.period),
+ HRTIMER_MODE_ABS);
+ return true;
+}
+
+/* Inject the per-pipe vblank virtual interrupt event, but only when the
+ * pipe is currently enabled for this instance.
+ */
+static void emulate_vblank_on_pipe(struct vgt_device *vgt, int pipe)
+{
+ int vblank_event[] = {
+ [PIPE_A] = PIPE_A_VBLANK,
+ [PIPE_B] = PIPE_B_VBLANK,
+ [PIPE_C] = PIPE_C_VBLANK,
+ };
+
+ ASSERT(pipe >= PIPE_A && pipe <= PIPE_C);
+
+ if (gvt_pipe_is_enabled(vgt, pipe))
+ gvt_trigger_virtual_event(vgt, vblank_event[pipe]);
+}
+
+/* Emulate a vblank on every pipe of one instance. */
+static void emulate_vblank_for_instance(struct vgt_device *vgt)
+{
+ int pipe;
+
+ for (pipe = 0; pipe < I915_MAX_PIPES; pipe++)
+ emulate_vblank_on_pipe(vgt, pipe);
+}
+
+/* Periodic display-event entry point (driven by the service thread):
+ * emulate vblanks for all online instances. Caller must hold pdev->lock.
+ */
+void gvt_emulate_display_events(struct pgt_device *pdev)
+{
+ struct vgt_device *vgt;
+ int id;
+
+ ASSERT(mutex_is_locked(&pdev->lock));
+
+ for_each_online_instance(pdev, vgt, id)
+ emulate_vblank_for_instance(vgt);
+}
+
+/* Tear down per-instance virtual display state (the virtual DP monitor). */
+void gvt_clean_virtual_display_state(struct vgt_device *vgt)
+{
+ clean_virtual_dp_monitor(vgt);
+}
+
+/* Initialize per-instance virtual display state: reset the I2C/EDID state
+ * machine, then attach the virtual DP monitor on port A.
+ */
+bool gvt_init_virtual_display_state(struct vgt_device *vgt)
+{
+ gvt_init_i2c_edid(vgt);
+ return setup_virtual_dp_monitor(vgt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
new file mode 100644
index 0000000..3aa8aaa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_DISPLAY_H_
+#define _GVT_DISPLAY_H_
+
+#define SBI_REG_MAX 20
+#define DPCD_SIZE 0x700
+
+#define dpy_is_valid_port(port) \
+ (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
+
+#define gvt_vport(vgt, port) \
+ (&(vgt)->state.display.ports[port])
+
+#define dpy_has_monitor_on_port(vgt, port) \
+ (vgt && dpy_is_valid_port(port) && \
+ gvt_vport(vgt, port)->edid && gvt_vport(vgt, port)->edid->data_valid)
+
+#define dpy_port_is_dp(vgt, port) \
+ ((vgt) && dpy_is_valid_port(port) \
+ && ((gvt_vport(vgt, port)->type == GVT_DP_A) || \
+ (gvt_vport(vgt, port)->type == GVT_DP_B) || \
+ (gvt_vport(vgt, port)->type == GVT_DP_C) || \
+ (gvt_vport(vgt, port)->type == GVT_DP_D)))
+
+#define GVT_MAX_UEVENT_VARS 3
+/* DPCD start (DPCD_SIZE is defined once, above) */
+
+/* DPCD addresses */
+#define DPCD_REV 0x000
+#define DPCD_MAX_LINK_RATE 0x001
+#define DPCD_MAX_LANE_COUNT 0x002
+
+#define DPCD_TRAINING_PATTERN_SET 0x102
+#define DPCD_SINK_COUNT 0x200
+#define DPCD_LANE0_1_STATUS 0x202
+#define DPCD_LANE2_3_STATUS 0x203
+#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DPCD_SINK_STATUS 0x205
+
+/* link training */
+#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
+#define DPCD_LINK_TRAINING_DISABLED 0x00
+#define DPCD_TRAINING_PATTERN_1 0x01
+#define DPCD_TRAINING_PATTERN_2 0x02
+
+#define DPCD_CP_READY_MASK (1 << 6)
+
+/* lane status */
+#define DPCD_LANES_CR_DONE 0x11
+#define DPCD_LANES_EQ_DONE 0x22
+#define DPCD_SYMBOL_LOCKED 0x44
+
+#define DPCD_INTERLANE_ALIGN_DONE 0x01
+
+#define DPCD_SINK_IN_SYNC 0x03
+
+/* DPCD end */
+
+struct sbi_register {
+ unsigned int offset;
+ u32 value;
+};
+
+struct sbi_registers {
+ int number;
+ struct sbi_register registers[SBI_REG_MAX];
+};
+
+enum gvt_plane_type {
+ PRIMARY_PLANE = 0,
+ CURSOR_PLANE,
+ SPRITE_PLANE,
+ MAX_PLANE
+};
+
+struct gvt_dpcd_data {
+ bool data_valid;
+ u8 data[DPCD_SIZE];
+};
+
+enum gvt_port_type {
+ GVT_CRT = 0,
+ GVT_DP_A,
+ GVT_DP_B,
+ GVT_DP_C,
+ GVT_DP_D,
+ GVT_HDMI_B,
+ GVT_HDMI_C,
+ GVT_HDMI_D,
+ GVT_PORT_MAX
+};
+
+struct gt_port {
+ struct gvt_edid_data_t *edid; /* per display EDID information */
+ struct gvt_dpcd_data *dpcd; /* per display DPCD information */
+ enum gvt_port_type type;
+};
+
+extern int gvt_get_edp_pipe(struct vgt_device *vgt);
+extern bool gvt_edp_pipe_is_enabled(struct vgt_device *vgt);
+extern bool gvt_pipe_is_enabled(struct vgt_device *vgt, int pipe);
+
+bool gvt_init_virtual_display_state(struct vgt_device *vgt);
+void gvt_clean_virtual_display_state(struct vgt_device *vgt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
new file mode 100644
index 0000000..dd10ba3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "gvt.h"
+
+/* Return the next EDID byte for the monitor on the currently selected
+ * port and advance the read pointer; returns 0 (with a warning) when no
+ * transfer has been properly set up, the 128-byte EDID is exhausted, or
+ * no EDID is available.
+ */
+static unsigned char edid_get_byte(struct vgt_device *vgt)
+{
+ unsigned char chr = 0;
+ struct gvt_i2c_edid_t *edid = &vgt->state.display.gvt_i2c_edid;
+
+ /* Reads are only valid after the EDID slave address was selected. */
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ gvt_warn("Driver tries to read EDID without proper sequence!\n");
+ return 0;
+ }
+ if (edid->current_edid_read >= EDID_SIZE) {
+ gvt_warn("edid_get_byte() exceeds the size of EDID!\n");
+ return 0;
+ }
+
+ if (!edid->edid_available) {
+ gvt_warn("Reading EDID but EDID is not available!"
+ " Will return 0.\n");
+ return 0;
+ }
+
+ if (dpy_has_monitor_on_port(vgt, edid->port)) {
+ struct gvt_edid_data_t *edid_data = gvt_vport(vgt, edid->port)->edid;
+ chr = edid_data->edid_block[edid->current_edid_read];
+ gvt_dbg(GVT_DBG_EDID,
+ "edid_get_byte with offset %d and value %d\n",
+ edid->current_edid_read, chr);
+ edid->current_edid_read ++;
+ } else {
+ gvt_warn("No EDID available during the reading?\n");
+ }
+
+ return chr;
+}
+
+/* Map the GMBUS0 pin-pair select field to the display port it serves;
+ * unsupported or disabled pin pairs yield I915_MAX_PORTS.
+ */
+static inline enum port gvt_get_port_from_gmbus0(u32 gmbus0)
+{
+ switch (gmbus0 & _GMBUS_PIN_SEL_MASK) {
+ case 2:
+ return PORT_E;
+ case 4:
+ return PORT_C;
+ case 5:
+ return PORT_B;
+ case 6:
+ return PORT_D;
+ default:
+ return I915_MAX_PORTS;
+ }
+}
+
+/* Reset virtual GMBUS status to "hardware ready", flagging a slave
+ * timeout (SATOER) when no EDID is available, and return the virtual
+ * controller to the idle phase.
+ */
+void gvt_reset_gmbus_controller(struct vgt_device *vgt)
+{
+ __vreg(vgt, _PCH_GMBUS2) = GMBUS_HW_RDY;
+ if (!vgt->state.display.gvt_i2c_edid.edid_available) {
+ __vreg(vgt, _PCH_GMBUS2) |= GMBUS_SATOER;
+ }
+ vgt->state.display.gvt_i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+}
+
+
+/* GMBUS0 */
+/* GMBUS0 (pin select) write handler: every write restarts the virtual
+ * I2C state machine, then selects the target port and reflects EDID
+ * availability (or a slave timeout) in the virtual GMBUS2 status.
+ *
+ * NOTE(review): wvalue is never stored into __vreg(vgt, offset), and the
+ * final memcpy copies the existing vreg back into p_data — confirm the
+ * written pin-select value should not be latched for later reads.
+ */
+static bool gvt_gmbus0_mmio_write(struct vgt_device *vgt,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 wvalue = *(u32 *)p_data;
+ enum port port = I915_MAX_PORTS;
+ int pin_select = wvalue & _GMBUS_PIN_SEL_MASK;
+
+ gvt_init_i2c_edid(vgt);
+
+ /* Pin select 0 means the pair is disabled; nothing to emulate. */
+ if (pin_select == 0)
+ return true;
+
+ vgt->state.display.gvt_i2c_edid.state = I2C_GMBUS;
+ port = gvt_get_port_from_gmbus0(pin_select);
+ if (!dpy_is_valid_port(port)) {
+ gvt_dbg(GVT_DBG_EDID,
+ "VM(%d): Driver tries GMBUS write not on valid port!\n"
+ "gmbus write value is: 0x%x\n", vgt->id, wvalue);
+ return true;
+ }
+
+ vgt->state.display.gvt_i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+
+ /* FIXME: never clear GMBUS_HW_WAIT_PHASE */
+ __vreg(vgt, _PCH_GMBUS2) &= ~ GMBUS_ACTIVE;
+ __vreg(vgt, _PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+
+ /* GMBUS serves EDID only for non-DP ports; DP EDID goes over AUX. */
+ if (dpy_has_monitor_on_port(vgt, port) && !dpy_port_is_dp(vgt, port)) {
+ vgt->state.display.gvt_i2c_edid.port = port;
+ vgt->state.display.gvt_i2c_edid.edid_available = true;
+ __vreg(vgt, _PCH_GMBUS2) &= ~GMBUS_SATOER;
+ } else {
+ __vreg(vgt, _PCH_GMBUS2) |= GMBUS_SATOER;
+ }
+
+ memcpy(p_data, (char *)vgt->state.mmio.vreg + offset, bytes);
+ return true;
+}
+
+/* GMBUS1 (command/status) write handler: emulates the SW_CLR_INT
+ * handshake, latches total byte count / slave address / cycle type from
+ * the written value, and advances the virtual GMBUS phase accordingly.
+ * Only the EDID slave address is served; other slaves are ignored.
+ */
+static bool gvt_gmbus1_mmio_write(struct vgt_device *vgt, unsigned int offset,
+void *p_data, unsigned int bytes)
+{
+ u32 slave_addr;
+ struct gvt_i2c_edid_t *i2c_edid = &vgt->state.display.gvt_i2c_edid;
+
+ u32 wvalue = *(u32 *)p_data;
+ if (__vreg(vgt, offset) & GMBUS_SW_CLR_INT) {
+ if (!(wvalue & GMBUS_SW_CLR_INT)) {
+ __vreg(vgt, offset) &= ~GMBUS_SW_CLR_INT;
+ gvt_reset_gmbus_controller(vgt);
+ }
+ /* TODO: "This bit is cleared to zero when an event
+ * causes the HW_RDY bit transition to occur "*/
+ } else {
+ /* per bspec setting this bit can cause:
+ 1) INT status bit cleared
+ 2) HW_RDY bit asserted
+ */
+ if (wvalue & GMBUS_SW_CLR_INT) {
+ __vreg(vgt, _PCH_GMBUS2) &= ~GMBUS_INT;
+ __vreg(vgt, _PCH_GMBUS2) |= GMBUS_HW_RDY;
+ }
+
+ /* For virtualization, we suppose that HW is always ready,
+ * so GMBUS_SW_RDY should always be cleared
+ */
+ if (wvalue & GMBUS_SW_RDY)
+ wvalue &= ~GMBUS_SW_RDY;
+
+ i2c_edid->gmbus.total_byte_count =
+ gmbus1_total_byte_count(wvalue);
+ slave_addr = gmbus1_slave_addr(wvalue);
+
+ /* vgt gmbus only support EDID */
+ if (slave_addr == EDID_ADDR) {
+ i2c_edid->slave_selected = true;
+ } else if (slave_addr != 0) {
+ gvt_dbg(GVT_DBG_DPY,
+ "vGT(%d): unsupported gmbus slave addr(0x%x)\n"
+ " gmbus operations will be ignored.\n",
+ vgt->id, slave_addr);
+ }
+
+ /* Index cycle: start the EDID read at the written index. */
+ if (wvalue & GMBUS_CYCLE_INDEX) {
+ i2c_edid->current_edid_read = gmbus1_slave_index(wvalue);
+ }
+
+ i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
+ switch (gmbus1_bus_cycle(wvalue)) {
+ case GMBUS_NOCYCLE:
+ break;
+ case GMBUS_STOP:
+ /* From spec:
+ This can only cause a STOP to be generated
+ if a GMBUS cycle is generated, the GMBUS is
+ currently in a data/wait/idle phase, or it is in a
+ WAIT phase
+ */
+ if (gmbus1_bus_cycle(__vreg(vgt, offset)) != GMBUS_NOCYCLE) {
+ gvt_init_i2c_edid(vgt);
+ /* After the 'stop' cycle, hw state would become
+ * 'stop phase' and then 'idle phase' after a few
+ * milliseconds. In emulation, we just set it as
+ * 'idle phase' ('stop phase' is not
+ * visible in gmbus interface)
+ */
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ /*
+ FIXME: never clear GMBUS_WAIT
+ __vreg(vgt, _PCH_GMBUS2) &=
+ ~(GMBUS_ACTIVE | GMBUS_HW_WAIT_PHASE);
+ */
+ __vreg(vgt, _PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ }
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ case NIDX_STOP:
+ case IDX_STOP:
+ /* From hw spec the GMBUS phase
+ * transition like this:
+ * START (-->INDEX) -->DATA
+ */
+ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
+ __vreg(vgt, _PCH_GMBUS2) |= GMBUS_ACTIVE;
+ /* FIXME: never clear GMBUS_WAIT */
+ //__vreg(vgt, _PCH_GMBUS2) &= ~GMBUS_HW_WAIT_PHASE;
+ break;
+ default:
+ gvt_err("Unknown/reserved GMBUS cycle detected!");
+ break;
+ }
+ /* From hw spec the WAIT state will be
+ * cleared:
+ * (1) in a new GMBUS cycle
+ * (2) by generating a stop
+ */
+ /* FIXME: never clear GMBUS_WAIT
+ if (gmbus1_bus_cycle(wvalue) != GMBUS_NOCYCLE)
+ __vreg(vgt, _PCH_GMBUS2) &= ~GMBUS_HW_WAIT_PHASE;
+ */
+
+ __vreg(vgt, offset) = wvalue;
+ }
+ return true;
+}
+
+/* GMBUS3 (data) write handler: guest data writes are not expected —
+ * only EDID reads are virtualized — so trap into an assertion for debug.
+ */
+bool gvt_gmbus3_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ ASSERT_VM(0, vgt);
+ return true;
+}
+
+/* GMBUS3 (data) read handler: while a GMBUS slave read is in flight,
+ * pack up to 4 EDID bytes little-endian into the returned dword, advance
+ * the transfer, and finish the cycle once all requested bytes have been
+ * consumed; otherwise just return the last value held in the vreg.
+ *
+ * Fixes: restore "&reg_data" (was mangled to a stray (R) character),
+ * and correct the "recevied"/"retuned" typos.
+ */
+bool gvt_gmbus3_mmio_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int i;
+ unsigned char byte_data;
+ struct gvt_i2c_edid_t *i2c_edid = &vgt->state.display.gvt_i2c_edid;
+ int byte_left = i2c_edid->gmbus.total_byte_count -
+ i2c_edid->current_edid_read;
+ int byte_count = byte_left;
+ u32 reg_data = 0;
+
+ /* Data can only be received if previous settings correct */
+ if (__vreg(vgt, _PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (byte_left <= 0) {
+ memcpy((char *)p_data, (char *)vgt->state.mmio.vreg + offset, bytes);
+ return true;
+ }
+
+ /* At most 4 bytes fit in one GMBUS3 dword. */
+ if (byte_count > 4)
+ byte_count = 4;
+ for (i = 0; i < byte_count; i++) {
+ byte_data = edid_get_byte(vgt);
+ reg_data |= (byte_data << (i << 3));
+ }
+
+ memcpy((char *)p_data, (char *)&reg_data, byte_count);
+ memcpy((char *)vgt->state.mmio.vreg + offset, (char *)&reg_data, byte_count);
+
+ if (byte_left <= 4) {
+ switch (i2c_edid->gmbus.cycle_type) {
+ case NIDX_STOP:
+ case IDX_STOP:
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ default:
+ i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
+ break;
+ }
+ gvt_init_i2c_edid(vgt);
+ }
+
+ /* Read GMBUS3 during send operation, return the latest written value */
+ } else {
+ memcpy((char *)p_data, (char *)vgt->state.mmio.vreg + offset, bytes);
+ printk("vGT(%d): warning: gmbus3 read with nothing returned\n",
+ vgt->id);
+ }
+
+ return true;
+}
+
+/* GMBUS2 (status) read handler: return the status value sampled before
+ * setting INUSE, then mark the controller as in-use — presumably
+ * mirroring hardware's read-to-acquire INUSE semantics (TODO confirm
+ * against bspec).
+ */
+static bool gvt_gmbus2_mmio_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = __vreg(vgt, offset);
+ if (!(__vreg(vgt, offset) & GMBUS_INUSE)) {
+ __vreg(vgt, offset) |= GMBUS_INUSE;
+ }
+
+ memcpy(p_data, (void *)&value, bytes);
+ return true;
+}
+
+/* GMBUS2 (status) write handler: writing 1 to the INUSE bit releases the
+ * controller; every other bit is treated as read-only and ignored.
+ */
+bool gvt_gmbus2_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 wvalue = *(u32 *)p_data;
+ if (wvalue & GMBUS_INUSE)
+ __vreg(vgt, offset) &= ~GMBUS_INUSE;
+ /* All other bits are read-only */
+ return true;
+}
+
+/* Dispatch a guest GMBUS MMIO read to the per-register emulation;
+ * registers with no special handling read straight from the vregs.
+ */
+bool gvt_i2c_handle_gmbus_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ ASSERT(bytes <= 8 && !(offset & (bytes - 1)));
+ switch (offset) {
+ case _PCH_GMBUS2:
+ return gvt_gmbus2_mmio_read(vgt, offset, p_data, bytes);
+ case _PCH_GMBUS3:
+ return gvt_gmbus3_mmio_read(vgt, offset, p_data, bytes);
+ default:
+ memcpy(p_data, (char *)vgt->state.mmio.vreg + offset, bytes);
+ }
+ return true;
+}
+
+/* Dispatch a guest GMBUS MMIO write to the per-register emulation;
+ * GMBUS3 data writes are unimplemented (BUG for debug), anything else
+ * without special handling is stored straight into the vregs.
+ */
+bool gvt_i2c_handle_gmbus_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ ASSERT(bytes <= 8 && !(offset & (bytes - 1)));
+ switch (offset) {
+ case _PCH_GMBUS0:
+ return gvt_gmbus0_mmio_write(vgt, offset, p_data, bytes);
+ case _PCH_GMBUS1:
+ return gvt_gmbus1_mmio_write(vgt, offset, p_data, bytes);
+ case _PCH_GMBUS2:
+ return gvt_gmbus2_mmio_write(vgt, offset, p_data, bytes);
+ /* TODO: */
+ case _PCH_GMBUS3:
+ BUG();
+ return false;
+ default:
+ memcpy((char *)vgt->state.mmio.vreg + offset, p_data, bytes);
+ }
+ return true;
+}
+
+/* Decode which AUX channel register (CTL or DATA1..DATA5) an MMIO offset
+ * addresses: six consecutive dwords at +0x10..+0x24 within the per-port
+ * window. Anything else yields AUX_CH_INV.
+ */
+static inline AUX_CH_REGISTERS gvt_get_aux_ch_reg(unsigned int offset)
+{
+ unsigned int low = offset & 0xff;
+
+ if (low < 0x10 || low > 0x24 || (low & 3))
+ return AUX_CH_INV;
+
+ /* AUX_CH_CTL..AUX_CH_DATA5 are 0..5, one per dword above +0x10. */
+ return (AUX_CH_REGISTERS)((low - 0x10) >> 2);
+}
+
+#define AUX_CTL_MSG_LENGTH(reg) \
+ ((reg & _DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
+ _DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
+
+/* Emulate a guest write to a DP AUX channel register. Non-CTL registers
+ * are simply latched into the vregs. A CTL write with SEND_BUSY kicks a
+ * virtual I2C-over-AUX transaction: a 3-byte message is address-only
+ * (start/stop/EDID slave select), otherwise a single EDID byte is served
+ * for I2C reads; writes are ignored (EDID-read-only support). The reply
+ * (ACK, plus the data byte for reads) is placed in AUX_CH_DATA1 and CTL
+ * is marked DONE with the expected reply size.
+ */
+void gvt_i2c_handle_aux_ch_write(struct vgt_device *vgt,
+ enum port port_idx,
+ unsigned int offset,
+ void *p_data)
+{
+ struct gvt_i2c_edid_t *i2c_edid = &vgt->state.display.gvt_i2c_edid;
+ int msg_length, ret_msg_size;
+ int msg, addr, ctrl, op;
+ int value = *(int *)p_data;
+ int aux_data_for_write = 0;
+ AUX_CH_REGISTERS reg = gvt_get_aux_ch_reg(offset);
+
+ if (reg != AUX_CH_CTL) {
+ __vreg(vgt, offset) = value;
+ return;
+ }
+
+ msg_length = AUX_CTL_MSG_LENGTH(value);
+ // check the msg in DATA register.
+ msg = __vreg(vgt, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24)& 0xff;
+ op = ctrl >> 4;
+ if (!(value & _REGBIT_DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* The ctl write to clear some states */
+ return;
+ }
+
+ /* Always set the wanted value for vms. */
+ ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+ __vreg(vgt, offset) =
+ _REGBIT_DP_AUX_CH_CTL_DONE |
+ ((ret_msg_size << _DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
+ _DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
+
+ if (msg_length == 3) {
+ if (!(op & GVT_AUX_I2C_MOT)) {
+ /* stop */
+ gvt_dbg(GVT_DBG_EDID,
+ "AUX_CH: stop. reset I2C!\n");
+ gvt_init_i2c_edid(vgt);
+ } else {
+ /* start or restart */
+ gvt_dbg(GVT_DBG_EDID,
+ "AUX_CH: start or restart I2C!\n");
+ i2c_edid->aux_ch.i2c_over_aux_ch = true;
+ i2c_edid->aux_ch.aux_ch_mot = true;
+ if (addr == 0) {
+ /* reset the address */
+ gvt_dbg(GVT_DBG_EDID,
+ "AUX_CH: reset I2C!\n");
+ gvt_init_i2c_edid(vgt);
+ } else if (addr == EDID_ADDR) {
+ gvt_dbg(GVT_DBG_EDID,
+ "AUX_CH: setting EDID_ADDR!\n");
+ i2c_edid->state = I2C_AUX_CH;
+ i2c_edid->port = port_idx;
+ i2c_edid->slave_selected = true;
+ if (dpy_has_monitor_on_port(vgt, port_idx) &&
+ dpy_port_is_dp(vgt, port_idx))
+ i2c_edid->edid_available = true;
+ } else {
+ gvt_dbg(GVT_DBG_EDID,
+ "Not supported address access [0x%x]with I2C over AUX_CH!\n",
+ addr);
+ }
+ }
+ } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+ /* TODO
+ * We only support EDID reading from I2C_over_AUX. And
+ * we do not expect the index mode to be used. Right now
+ * the WRITE operation is ignored. It is good enough to
+ * support the gfx driver to do EDID access.
+ */
+ } else {
+ ASSERT((op & 0x1) == GVT_AUX_I2C_READ);
+ ASSERT(msg_length == 4);
+ if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ unsigned char val = edid_get_byte(vgt);
+ aux_data_for_write = (val << 16);
+ }
+ }
+
+ /* write the return value in AUX_CH_DATA reg which includes:
+ * ACK of I2C_WRITE
+ * returned byte if it is READ
+ */
+ aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
+ __vreg(vgt, offset + 4) = aux_data_for_write;
+
+ return;
+}
+
+/* Reset the per-instance virtual I2C/EDID state machine to its idle
+ * state: no bus selected, no slave selected, no EDID exposed, read
+ * pointer at 0, and both GMBUS and AUX-CH sub-states cleared.
+ */
+void gvt_init_i2c_edid(struct vgt_device *vgt)
+{
+ struct gvt_i2c_edid_t *edid = &vgt->state.display.gvt_i2c_edid;
+
+ edid->state = I2C_NOT_SPECIFIED;
+
+ edid->port = I915_MAX_PORTS;
+ edid->slave_selected = false;
+ edid->edid_available = false;
+ edid->current_edid_read = 0;
+
+ memset(&edid->gmbus, 0, sizeof(struct gvt_i2c_gmbus_t));
+
+ edid->aux_ch.i2c_over_aux_ch = false;
+ edid->aux_ch.aux_ch_mot = false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
new file mode 100644
index 0000000..aa80ffd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_EDID_H_
+#define _GVT_EDID_H_
+
+#define EDID_SIZE 128
+#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
+
+#define GVT_AUX_NATIVE_WRITE 0x8
+#define GVT_AUX_NATIVE_READ 0x9
+#define GVT_AUX_I2C_WRITE 0x0
+#define GVT_AUX_I2C_READ 0x1
+#define GVT_AUX_I2C_STATUS 0x2
+#define GVT_AUX_I2C_MOT 0x4
+#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
+
+#define _REGBIT_DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define _REGBIT_DP_AUX_CH_CTL_DONE (1 << 30)
+#define _DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define _DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+
+struct gvt_edid_data_t{
+ bool data_valid;
+ unsigned char edid_block[EDID_SIZE];
+};
+
+enum gmbus_cycle_type_t{
+ GMBUS_NOCYCLE = 0x0,
+ NIDX_NS_W = 0x1,
+ IDX_NS_W = 0x3,
+ GMBUS_STOP = 0x4,
+ NIDX_STOP = 0x5,
+ IDX_STOP = 0x7
+};
+
+/*
+ * States of GMBUS
+ *
+ * GMBUS0-3 could be related to the EDID virtualization. Another two GMBUS
+ * registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
+ * not considered here. Below describes the usage of GMBUS registers that are
+ * cared by the EDID virtualization
+ *
+ * GMBUS0:
+ * R/W
+ * port selection. value of bit0 - bit2 corresponds to the GPIO registers.
+ *
+ * GMBUS1:
+ * R/W Protect
+ * Command and Status.
+ * bit0 is the direction bit: 1 is read; 0 is write.
+ * bit1 - bit7 is slave 7-bit address.
+ * bit16 - bit24 total byte count (ignore?)
+ *
+ * GMBUS2:
+ * Most of bits are read only except bit 15 (IN_USE)
+ * Status register
+ * bit0 - bit8 current byte count
+ * bit 11: hardware ready;
+ *
+ * GMBUS3:
+ * Read/Write
+ * Data for transfer
+ */
+
+/* From hw specs, Other phases like START, ADDRESS, INDEX
+ * are invisible to GMBUS MMIO interface. So no definitions
+ * in below enum types
+ */
+enum gvt_gmbus_phase_t{
+ GMBUS_IDLE_PHASE = 0,
+ GMBUS_DATA_PHASE,
+ GMBUS_WAIT_PHASE,
+ //GMBUS_STOP_PHASE,
+ GMBUS_MAX_PHASE
+};
+
+struct gvt_i2c_gmbus_t {
+ unsigned total_byte_count; /* from GMBUS1 */
+ enum gmbus_cycle_type_t cycle_type;
+ enum gvt_gmbus_phase_t phase;
+};
+
+struct gvt_i2c_aux_ch_t{
+ bool i2c_over_aux_ch;
+ bool aux_ch_mot;
+};
+
+enum i2c_state_t {
+ I2C_NOT_SPECIFIED = 0,
+ I2C_GMBUS = 1,
+ I2C_AUX_CH = 2
+};
+
+/* I2C sequences cannot interleave.
+ * GMBUS and AUX_CH sequences cannot interleave.
+ */
+struct gvt_i2c_edid_t {
+ enum i2c_state_t state;
+
+ unsigned port;
+ bool slave_selected;
+ bool edid_available;
+ unsigned current_edid_read;
+
+ struct gvt_i2c_gmbus_t gmbus;
+ struct gvt_i2c_aux_ch_t aux_ch;
+};
+
+void gvt_init_i2c_edid(struct vgt_device *vgt);
+
+bool gvt_i2c_handle_gmbus_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+bool gvt_i2c_handle_gmbus_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+void gvt_i2c_handle_aux_ch_write(struct vgt_device *vgt,
+ enum port port_idx,
+ unsigned int offset,
+ void *p_data);
+
+bool gvt_is_edid_valid(u8 *raw_edid);
+
+#define AUX_REGISTER_NUM 6
+typedef enum {
+ AUX_CH_INV = -1,
+ AUX_CH_CTL = 0,
+ AUX_CH_DATA1,
+ AUX_CH_DATA2,
+ AUX_CH_DATA3,
+ AUX_CH_DATA4,
+ AUX_CH_DATA5
+}AUX_CH_REGISTERS;
+
+/* Map an AUX channel MMIO offset to its DP port. DP-A AUX registers live
+ * at _PCH_DPA_AUX_CH_CTL; ports B-D sit in consecutive 0x100-byte
+ * windows whose (offset >> 8) byte is 0x41 + (port - PORT_B) — TODO
+ * confirm these offsets hold on non-PCH platforms.
+ *
+ * Fix: the DP-A range check used <=, which wrongly claimed the first
+ * offset just past the six AUX registers; use a half-open range.
+ */
+static inline enum port gvt_get_dp_port_idx(unsigned int offset)
+{
+ enum port port_idx;
+
+ if (offset >= _PCH_DPA_AUX_CH_CTL
+ && offset < _PCH_DPA_AUX_CH_CTL +
+ AUX_REGISTER_NUM * sizeof(u32)) {
+ return PORT_A;
+ }
+
+ switch (((offset & 0xff00) >> 8) - 0x41) {
+ case 0:
+ port_idx = PORT_B;
+ break;
+ case 1:
+ port_idx = PORT_C;
+ break;
+ case 2:
+ port_idx = PORT_D;
+ break;
+ default:
+ port_idx = I915_MAX_PORTS;
+ break;
+ }
+ return port_idx;
+}
+
+#endif /*_GVT_EDID_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 84549a0..ea871cd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -242,6 +242,12 @@ static int gvt_service_thread(void *data)
gvt_dpy_ready_uevent_handler(pdev);
}
+ if (test_and_clear_bit(GVT_REQUEST_EMUL_DPY_EVENTS,
+ (void *)&pdev->service_request)) {
+ mutex_lock(&pdev->lock);
+ gvt_emulate_display_events(pdev);
+ mutex_unlock(&pdev->lock);
+ }
if (r) {
gvt_warn("service thread is waken up by unexpected signal.");
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62cbb62..b44b5b5 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -40,6 +40,8 @@
#include "cfg_space.h"
#include "opregion.h"
#include "fb_decoder.h"
+#include "edid.h"
+#include "display.h"
#define GVT_MAX_VGPU 8
@@ -123,11 +125,18 @@ struct gvt_virtual_gm_state {
struct gvt_gm_node node;
};
+struct gvt_virtual_display_state {
+ struct gvt_i2c_edid_t gvt_i2c_edid;
+ struct gt_port ports[I915_MAX_PORTS];
+ struct sbi_registers sbi_regs;
+};
+
struct gvt_virtual_device_state {
struct gvt_virtual_gm_state gm;
struct gvt_virtual_mmio_state mmio;
struct gvt_virtual_cfg_state cfg;
struct gvt_virtual_opregion_state opregion;
+ struct gvt_virtual_display_state display;
};
struct gvt_uevent {
@@ -676,6 +685,9 @@ bool gvt_default_mmio_write(struct vgt_device *vgt, unsigned int offset, void *p
bool register_mmio_handler(struct pgt_device *pdev, unsigned int start, int bytes,
gvt_mmio_handler_t read, gvt_mmio_handler_t write);
+bool gvt_update_display_events_emulation(struct pgt_device *pdev);
+void gvt_emulate_display_events(struct pgt_device *pdev);
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index ba29c9c..638a295 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -39,6 +39,18 @@ static bool mmio_not_allow_write(struct vgt_device *vgt,
return true;
}
+/* GMBUS register read handler: delegate to the virtual I2C/EDID engine. */
+static bool gmbus_mmio_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return gvt_i2c_handle_gmbus_read(vgt, offset, p_data, bytes);
+}
+
+/* GMBUS register write handler: delegate to the virtual I2C/EDID engine. */
+static bool gmbus_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return gvt_i2c_handle_gmbus_write(vgt, offset, p_data, bytes);
+}
+
/* Fence MMIO handlers. */
static bool check_fence_mmio_access(struct vgt_device *vgt,
unsigned int off, void *p_data, unsigned int bytes)
@@ -138,12 +150,777 @@ static bool gdrst_mmio_write(struct vgt_device *vgt, unsigned int offset,
return true;
}
+/* Emulate a guest write to PCH_PP_CONTROL.
+ *
+ * The requested panel power state is reflected immediately into the
+ * virtual PCH_PP_STATUS: no power sequencing delay is emulated, so the
+ * sequencing/cycle-delay fields are always reported as idle.
+ * (Original version computed an unused "reg" local and duplicated the
+ * status-field assignments in both branches.)
+ */
+static bool pch_pp_control_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 data = *(u32 *)p_data;
+	union _PCH_PP_CTL pp_control;
+	union _PCH_PP_STAUTS pp_status;
+
+	__vreg(vgt, _PCH_PP_CONTROL) = data;
+
+	pp_control.data = data;
+	pp_status.data = __vreg(vgt, _PCH_PP_STATUS);
+
+	/* Panel power tracks the request; sequencing completes instantly. */
+	pp_status.panel_powere_on_statue =
+		(pp_control.power_state_target == 1) ? 1 : 0;
+	pp_status.power_sequence_progress = 0;
+	pp_status.power_cycle_delay_active = 0;
+
+	__vreg(vgt, _PCH_PP_STATUS) = pp_status.data;
+
+	return true;
+}
+
+/* Emulate a guest write to a transcoder configuration register.
+ *
+ * The read-only transcoder state bit is made to track the guest's
+ * enable request, so the transcoder appears to switch instantly.
+ */
+static bool transaconf_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	union _TRANS_CONFIG cfg;
+
+	cfg.data = *(u32 *)p_data;
+	cfg.transcoder_state = cfg.transcoder_enable;
+	__vreg(vgt, offset) = cfg.data;
+
+	return true;
+}
+
+/*
+ * Emulate a guest write to the south hotplug control register.
+ *
+ * The DP B/C/D status bits are sticky (write-1-to-clear): a written 1
+ * clears the corresponding virtual bit, a written 0 leaves it alone.
+ * All other bits take the written value directly.
+ *
+ * TODO: Check the hotplug bit definitions on BDW+
+ */
+static bool shotplug_ctl_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 val = *(u32 *)p_data;
+ u32 sticky_mask = _REGBIT_DP_B_STATUS |
+ _REGBIT_DP_C_STATUS |
+ _REGBIT_DP_D_STATUS;
+
+ /* non-sticky bits take the written value, sticky bits keep theirs... */
+ __vreg(vgt, offset) = (val & ~sticky_mask) |
+ (__vreg(vgt, offset) & sticky_mask);
+ /* ...then clear the sticky bits the guest wrote 1 to */
+ __vreg(vgt, offset) &= ~(val & sticky_mask);
+
+ __sreg(vgt, offset) = val;
+
+ return true;
+}
+
+/* Emulate a guest write to LCPLL_CTL.
+ *
+ * The PLL lock and CDclk-from-FCLK "done" status bits are forced to
+ * track the corresponding request bits, so any requested transition
+ * appears to complete immediately.
+ */
+static bool lcpll_ctl_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	/* Lock status is the inverse of the disable request. */
+	v = (v & LCPLL_PLL_DISABLE) ? (v & ~LCPLL_PLL_LOCK)
+				    : (v | LCPLL_PLL_LOCK);
+
+	/* FCLK source switch completes instantly in emulation. */
+	v = (v & LCPLL_CD_SOURCE_FCLK) ? (v | LCPLL_CD_SOURCE_FCLK_DONE)
+				       : (v & ~LCPLL_CD_SOURCE_FCLK_DONE);
+
+	return gvt_default_mmio_write(vgt, offset, &v, bytes);
+}
+
+/* Read handler for 0xE651C-family display registers: always report
+ * bit 17 set. NOTE(review): the meaning of bit 17 is not visible here —
+ * presumably a "ready/present" status expected by guest mode setting;
+ * confirm against the hardware spec. */
+static bool dpy_reg_mmio_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32*)p_data = (1<<17);
+
+ return true;
+}
+
+/* Read handler for 0xE6C04: always return the constant 3.
+ * NOTE(review): constant's meaning is not derivable from this file —
+ * confirm against the register spec. */
+static bool dpy_reg_mmio_read_2(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32*)p_data = 3;
+
+ return true;
+}
+
+/* Read handler for 0xE6E1C: always return 0x2F in bits 23:16.
+ * NOTE(review): constant's meaning is not derivable from this file —
+ * confirm against the register spec. */
+static bool dpy_reg_mmio_read_3(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32*)p_data = (0x2F << 16);
+
+ return true;
+}
+
static bool ring_mode_write(struct vgt_device *vgt, unsigned int off,
void *p_data, unsigned int bytes)
{
return true;
}
+/* Emulate a guest write to a PIPECONF register.
+ *
+ * The read-only pipe-active status bit is made to follow the guest's
+ * enable request, then display event emulation is re-evaluated for the
+ * new pipe configuration.
+ */
+static bool pipe_conf_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 wr_data = *((u32 *)p_data);
+
+	/* vreg status will be updated when reading hardware status */
+	if (wr_data & PIPECONF_ENABLE)
+		wr_data |= I965_PIPECONF_ACTIVE;
+	else
+		/* Fix: original "wr_data &= I965_PIPECONF_ACTIVE" cleared
+		 * every configuration bit except ACTIVE on disable; the
+		 * intent is to clear only the ACTIVE status bit. */
+		wr_data &= ~I965_PIPECONF_ACTIVE;
+
+	if (!gvt_default_mmio_write(vgt, offset, &wr_data, bytes))
+		return false;
+
+	return gvt_update_display_events_emulation(vgt->pdev);
+}
+
+/* Emulate a guest write to a DDI_BUF_CTL register.
+ *
+ * The port-detected bit is fully virtualized and read-only: it is
+ * preserved across guest writes. The idle status is made to track the
+ * buffer enable bit, and disabling DDI E also clears the FDI
+ * auto-training done status.
+ */
+static bool ddi_buf_ctl_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	bool rc;
+	u32 reg_val = *(u32 *)p_data;
+
+	/* keep the fully-virtualized RO detect bit at its current value */
+	reg_val = (reg_val & ~_DDI_BUFCTL_DETECT_MASK)
+		| (__vreg(vgt, offset) & _DDI_BUFCTL_DETECT_MASK);
+
+	/* Fix: original line read "®_val" — mis-encoded "&reg_val". */
+	rc = gvt_default_mmio_write(vgt, offset, &reg_val, bytes);
+
+	/* update idle status when enabling/disabling the DDI buffer */
+	reg_val = __vreg(vgt, offset);
+	if (reg_val & _REGBIT_DDI_BUF_ENABLE)
+		reg_val &= ~_REGBIT_DDI_BUF_IS_IDLE;
+	else
+		reg_val |= _REGBIT_DDI_BUF_IS_IDLE;
+	__vreg(vgt, offset) = reg_val;
+
+	/* disabling DDI E clears the FDI auto-training done bit */
+	if ((offset == _REG_DDI_BUF_CTL_E) &&
+	    (!(reg_val & _REGBIT_DDI_BUF_ENABLE))) {
+		__vreg(vgt, _REG_DP_TP_STATUS_E) &=
+			~DP_TP_STATUS_AUTOTRAIN_DONE;
+	}
+
+	return rc;
+}
+
+/* Emulate a guest write to an FDI RX IIR register.
+ *
+ * IIR registers are write-1-to-clear: a written 1 clears the pending
+ * bit, a written 1 to an idle bit must leave it 0.
+ */
+static bool fdi_rx_iir_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	unsigned int reg;
+	u32 wr_data, old_iir;
+	bool rc;
+
+	reg = offset & ~(bytes - 1);
+	wr_data = *(u32 *)p_data;
+	old_iir = __vreg(vgt, reg);
+
+	rc = gvt_default_mmio_write(vgt, offset, p_data, bytes);
+
+	/* FIXME: sreg will be updated only when reading hardware status happened,
+	 * so when dumping sreg space, the "hardware status" related bits may not
+	 * be trusted */
+	/* Fix: original used "old_iir ^ wr_data", which *sets* idle bits
+	 * the guest writes 1 to; W1C semantics require clearing only. */
+	__vreg(vgt, reg) = old_iir & ~wr_data;
+	return rc;
+}
+
+#define FDI_LINK_TRAIN_PATTERN1 0
+#define FDI_LINK_TRAIN_PATTERN2 1
+
+/* Report whether the guest has fully armed FDI auto-training:
+ * DDI E buffer enabled, FDI receiver enabled with auto-train, and the
+ * DP transport on E enabled with auto-train. */
+static bool fdi_auto_training_started(struct vgt_device *vgt)
+{
+	u32 buf_ctl = __vreg(vgt, _REG_DDI_BUF_CTL_E);
+	u32 rx = __vreg(vgt, _FDI_RXA_CTL);
+	u32 tx = __vreg(vgt, _REG_DP_TP_CTL_E);
+
+	return (buf_ctl & _REGBIT_DDI_BUF_ENABLE) &&
+		(rx & FDI_RX_ENABLE) &&
+		(rx & _REGBIT_FDI_RX_FDI_AUTO_TRAIN_ENABLE) &&
+		(tx & DP_TP_CTL_ENABLE) &&
+		(tx & _REGBIT_DP_TP_FDI_AUTO_TRAIN_ENABLE);
+}
+
+/* FIXME: this function is highly platform-dependent (SNB + CPT) */
+/*
+ * Check whether the guest has programmed the FDI RX/TX pair of @pipe
+ * for the given link-training pattern and left the matching IIR bit
+ * unmasked: i.e. the conditions under which real hardware would report
+ * bit/symbol lock for that pattern.
+ */
+static bool check_fdi_rx_train_status(struct vgt_device *vgt,
+ enum pipe pipe, unsigned int train_pattern)
+{
+ unsigned int fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
+ unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
+ unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
+ unsigned int fdi_iir_check_bits;
+ fdi_rx_imr = GVT_FDI_RX_IMR(pipe);
+ fdi_tx_ctl = GVT_FDI_TX_CTL(pipe);
+ fdi_rx_ctl = GVT_FDI_RX_CTL(pipe);
+
+ /* pattern 1 trains CR (bit lock); pattern 2 trains EQ (symbol lock) */
+ if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
+ fdi_rx_train_bits =FDI_LINK_TRAIN_PATTERN_1_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
+ fdi_iir_check_bits = _REGBIT_FDI_RX_BIT_LOCK;
+ } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
+ fdi_iir_check_bits = _REGBIT_FDI_RX_SYMBOL_LOCK;
+ } else {
+ /* callers only pass the two patterns defined above */
+ BUG();
+ }
+
+ fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
+ fdi_tx_check_bits = _REGBIT_FDI_TX_ENABLE | fdi_tx_train_bits;
+
+ /* If imr bit not been masked */
+ if (((__vreg(vgt, fdi_rx_imr) & fdi_iir_check_bits) == 0)
+ && ((__vreg(vgt, fdi_tx_ctl)
+ & fdi_tx_check_bits) == fdi_tx_check_bits)
+ && ((__vreg(vgt, fdi_rx_ctl)
+ & fdi_rx_check_bits) == fdi_rx_check_bits))
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Write handler for the FDI RX/TX CTL and RX IMR registers.
+ *
+ * After latching the write, re-evaluate the virtual FDI link-training
+ * state for the affected pipe and set the corresponding lock bits in
+ * the virtual FDI RX IIR, so the guest's training loop completes. A
+ * write on pipe A additionally completes DDI E auto-training when all
+ * its preconditions are met.
+ */
+static bool update_fdi_rx_iir_status(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ enum pipe pipe;
+ unsigned int reg, fdi_rx_iir;
+ bool rc;
+ reg = offset & ~(bytes - 1);
+
+ /* map the written register to its pipe */
+ switch (offset) {
+ case _FDI_RXA_CTL:
+ case _FDI_TXA_CTL:
+ case _FDI_RXA_IMR:
+ pipe = PIPE_A;
+ break;
+
+ case _FDI_RXB_CTL:
+ case _FDI_TXB_CTL:
+ case _FDI_RXB_IMR:
+ pipe = PIPE_B;
+ break;
+
+ case _REG_FDI_RXC_CTL:
+ case _REG_FDI_TXC_CTL:
+ case _REG_FDI_RXC_IMR:
+ pipe = PIPE_C;
+ break;
+
+ default:
+ /* handler is only registered on the offsets above */
+ BUG();
+ }
+
+ fdi_rx_iir = GVT_FDI_RX_IIR(pipe);
+
+ rc = gvt_default_mmio_write(vgt, offset, p_data, bytes);
+ /* report bit/symbol lock as soon as training is fully programmed */
+ if (check_fdi_rx_train_status(vgt, pipe, FDI_LINK_TRAIN_PATTERN1))
+ __vreg(vgt, fdi_rx_iir) |= _REGBIT_FDI_RX_BIT_LOCK;
+ if (check_fdi_rx_train_status(vgt, pipe, FDI_LINK_TRAIN_PATTERN2))
+ __vreg(vgt, fdi_rx_iir) |= _REGBIT_FDI_RX_SYMBOL_LOCK;
+ if (offset == _FDI_RXA_CTL) {
+ if (fdi_auto_training_started(vgt))
+ __vreg(vgt, _REG_DP_TP_STATUS_E) |=
+ DP_TP_STATUS_AUTOTRAIN_DONE;
+ }
+ return rc;
+}
+
+#define DP_TP_CTL_10_8_MASK 0x00000700
+#define DP_TP_CTL_8_SHIFT 0x8
+#define DP_TP_STATUS_25_SHIFT 25
+
+/*
+ * Write handler for the DP_TP_CTL registers.
+ *
+ * Bits 10:8 select the link-training pattern. When the guest programs
+ * value 0x2 (presumably the idle/auto-train pattern — confirm against
+ * the DP_TP_CTL spec), the auto-train done bit (25) is set in the
+ * corresponding virtual DP_TP_STATUS.
+ */
+static bool dp_tp_ctl_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ enum port port;
+ unsigned int dp_tp_status_reg, val;
+ u32 ctl_val;
+ bool rc;
+ rc = gvt_default_mmio_write(vgt, offset, p_data, bytes);
+
+ port = DP_TP_PORT(offset);
+ ctl_val = __vreg(vgt, offset);
+ val = (ctl_val & DP_TP_CTL_10_8_MASK) >> DP_TP_CTL_8_SHIFT;
+
+ if (val == 0x2) {
+ dp_tp_status_reg = i915_mmio_reg_offset(DP_TP_STATUS(port));
+ __vreg(vgt, dp_tp_status_reg) |= (1 << DP_TP_STATUS_25_SHIFT);
+ __sreg(vgt, dp_tp_status_reg) = __vreg(vgt, dp_tp_status_reg);
+ }
+ return rc;
+}
+
+#define BIT_27 27
+#define BIT_26 26
+#define BIT_24 24
+
+/* Write handler for the DP_TP_STATUS registers.
+ *
+ * Bits 24, 26 and 27 are sticky (write-1-to-clear); all other bits
+ * take the written value. The shadow register records the raw write.
+ */
+static bool dp_tp_status_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 new_val = *((u32 *)p_data);
+	u32 w1c = (1 << BIT_27) | (1 << BIT_26) | (1 << BIT_24);
+	u32 vreg = __vreg(vgt, offset);
+
+	/* non-sticky bits take the write; sticky bits are cleared where
+	 * the guest wrote 1 and kept otherwise */
+	vreg = (new_val & ~w1c) | (vreg & w1c & ~new_val);
+
+	__vreg(vgt, offset) = vreg;
+	__sreg(vgt, offset) = new_val;
+
+	return true;
+}
+
+/* Write handler for PCH_ADPA (analog CRT port control).
+ *
+ * A forced hotplug-detect trigger completes instantly and reports no
+ * monitor attached; otherwise the read-only monitor status bits keep
+ * their current virtual value.
+ */
+static bool pch_adpa_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 cur = __vreg(vgt, offset);
+	u32 req = *(u32 *)p_data;
+
+	if (req & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) {
+		/* forced detection: finish it and report nothing attached */
+		req &= ~(ADPA_CRT_HOTPLUG_FORCE_TRIGGER |
+			ADPA_CRT_HOTPLUG_MONITOR_MASK);
+	} else {
+		/* monitor status is read-only: preserve the current bits */
+		req = (req & ~ADPA_CRT_HOTPLUG_MONITOR_MASK) |
+			(cur & ADPA_CRT_HOTPLUG_MONITOR_MASK);
+	}
+
+	return gvt_default_mmio_write(vgt, offset, &req, bytes);
+}
+
+/* Write handler for the primary plane surface registers.
+ *
+ * Latches the new surface address, mirrors it into the SURFLIVE
+ * register (so the flip appears complete), bumps the virtual flip
+ * counter, injects a flip-done interrupt and notifies framebuffer
+ * listeners.
+ */
+static bool pri_surf_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	enum pipe pipe = GVT_DSPSURFPIPE(offset);
+	struct gvt_fb_notify_msg msg;
+
+	if (!gvt_default_mmio_write(vgt, offset, p_data, bytes))
+		return false;
+	if (!gvt_default_mmio_write(vgt, GVT_DSPSURFLIVE(pipe), p_data, bytes))
+		return false;
+
+	__vreg(vgt, GVT_PIPE_FLIPCOUNT(pipe))++;
+	gvt_inject_flip_done(vgt, pipe);
+
+	msg.vm_id = vgt->vm_id;
+	msg.plane_id = PRIMARY_PLANE;
+	msg.pipe_id = pipe;
+	gvt_fb_notifier_call_chain(FB_DISPLAY_FLIP, &msg);
+
+	return true;
+}
+
+/* Write handler for the sprite plane surface registers.
+ *
+ * Latches the new surface address, mirrors it into SPRSURFLIVE and
+ * notifies framebuffer listeners of the flip.
+ */
+static bool spr_surf_mmio_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	enum pipe pipe = GVT_SPRSURFPIPE(offset);
+	struct gvt_fb_notify_msg msg;
+
+	if (!gvt_default_mmio_write(vgt, offset, p_data, bytes))
+		return false;
+	if (!gvt_default_mmio_write(vgt, GVT_SPRSURFLIVE(pipe), p_data, bytes))
+		return false;
+
+	msg.vm_id = vgt->vm_id;
+	msg.plane_id = SPRITE_PLANE;
+	msg.pipe_id = pipe;
+	gvt_fb_notifier_call_chain(FB_DISPLAY_FLIP, &msg);
+
+	return true;
+}
+
+/* Write handler for SOUTH_CHICKEN2: an MPHY IOSF sideband reset
+ * request is reflected back as instantly completed via the status bit. */
+static bool south_chicken2_write(struct vgt_device *vgt, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	v &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
+	if (v & FDI_MPHY_IOSFSB_RESET_CTL)
+		v |= FDI_MPHY_IOSFSB_RESET_STATUS;
+
+	return gvt_default_mmio_write(vgt, offset, &v, bytes);
+}
+
+/* Raise the AUX-channel-done virtual interrupt for @reg if the guest
+ * enabled interrupt delivery in the control value it wrote. */
+static void dp_aux_ch_trigger_interrupt_on_done(struct vgt_device *vgt, u32 value,
+	unsigned int reg)
+{
+	enum gvt_event_type event;
+
+	switch (reg) {
+	case _PCH_DPA_AUX_CH_CTL:
+		event = AUX_CHANNEL_A;
+		break;
+	case _PCH_DPB_AUX_CH_CTL:
+		event = AUX_CHENNEL_B;
+		break;
+	case _PCH_DPC_AUX_CH_CTL:
+		event = AUX_CHENNEL_C;
+		break;
+	case _PCH_DPD_AUX_CH_CTL:
+		event = AUX_CHENNEL_D;
+		break;
+	default:
+		event = GVT_EVENT_MAX;
+		break;
+	}
+
+	if (event != GVT_EVENT_MAX && (DP_AUX_CH_CTL_INTERRUPT & value))
+		gvt_trigger_virtual_event(vgt, event);
+}
+
+/* Complete an emulated AUX transaction on @reg.
+ *
+ * Sets DONE, clears BUSY/RECEIVE_ERROR, reports a timeout when the
+ * transaction data was invalid, records the reply size of @len bytes
+ * and raises the done interrupt if the guest enabled it.
+ */
+static void dp_aux_ch_ctl_trans_done(struct vgt_device *vgt, u32 value,
+		unsigned int reg, int len, bool data_valid)
+{
+	/* mark transaction done */
+	value |= _REGBIT_DP_AUX_CH_CTL_DONE;
+	value &= ~_REGBIT_DP_AUX_CH_CTL_SEND_BUSY;
+	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
+
+	if (data_valid)
+		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
+	else
+		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
+
+	/*
+	 * Message size field is bits 24:20 (5 bits, up to 20 bytes).
+	 * Fix: the original mask (0xf << 20) left bit 24 uncleared, so
+	 * reply sizes >= 16 could merge with a stale bit 24.
+	 */
+	value &= ~(0x1f << 20);
+	value |= (len << 20);
+	__vreg(vgt, reg) = value;
+
+	dp_aux_ch_trigger_interrupt_on_done(vgt, value, reg);
+}
+
+/*
+ * React to a guest write of @t to DPCD_TRAINING_PATTERN_SET: update the
+ * virtual DPCD lane/sink status so each requested training phase
+ * appears to succeed immediately.
+ */
+static void dp_aux_ch_ctl_link_training(struct gvt_dpcd_data *dpcd, uint8_t t)
+{
+ if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+
+ /* training pattern 1 for CR */
+ /* set LANE0_CR_DONE, LANE1_CR_DONE */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+ /* set LANE2_CR_DONE, LANE3_CR_DONE */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
+
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_TRAINING_PATTERN_2) {
+
+ /* training pattern 2 for EQ */
+
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* set INTERLANE_ALIGN_DONE */
+ dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
+ DPCD_INTERLANE_ALIGN_DONE;
+
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_LINK_TRAINING_DISABLED) {
+
+ /* finish link training */
+ /* set sink status as synchronized */
+ dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+ }
+}
+
+/*
+ * Write handler for all DP AUX channel registers.
+ *
+ * Writes to the data registers are only latched. A write to a control
+ * register with SEND_BUSY set starts an emulated AUX transaction:
+ * native DPCD reads/writes are served from the port's virtual DPCD
+ * cache, and I2C-over-AUX traffic (EDID reads) is forwarded to the
+ * virtual I2C engine. Always returns true.
+ */
+static bool dp_aux_ch_ctl_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int reg = 0;
+ u32 value = *(u32 *)p_data;
+ int msg, addr, ctrl, op, len;
+ struct gvt_dpcd_data *dpcd = NULL;
+ enum port port_idx = gvt_get_dp_port_idx(offset);
+ struct gt_port *port = NULL;
+ ASSERT(bytes == 4);
+ ASSERT((offset & (bytes - 1)) == 0);
+
+ reg = offset & ~(bytes - 1);
+
+ gvt_default_mmio_write(vgt, offset, p_data, bytes);
+
+ if (reg != _PCH_DPA_AUX_CH_CTL &&
+ reg != _PCH_DPB_AUX_CH_CTL &&
+ reg != _PCH_DPC_AUX_CH_CTL &&
+ reg != _PCH_DPD_AUX_CH_CTL) {
+ /* write to the data registers */
+ return true;
+ }
+
+ if (!(value & _REGBIT_DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* just want to clear the sticky bits */
+ __vreg(vgt, reg) = 0;
+ return true;
+ }
+
+ if (!dpy_is_valid_port(port_idx)) {
+ gvt_warn("vGT(%d): Unsupported DP port access!\n",
+ vgt->id);
+ return true;
+ }
+
+ port = gvt_vport(vgt, port_idx);
+
+ /* dpcd stays NULL (-> replies report a timeout) if no virtual port */
+ if (port) {
+ dpcd = port->dpcd;
+ }
+
+ /* read out message from DATA1 register */
+ msg = __vreg(vgt, reg + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ len = msg & 0xff;
+ op = ctrl >> 4;
+
+ if (op == GVT_AUX_NATIVE_WRITE) {
+ int t;
+ uint8_t buf[16];
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Write request exceeds what we supported,
+ * DCPD spec: When a Source Device is writing a DPCD
+ * address not supported by the Sink Device, the Sink
+ * Device shall reply with AUX NACK and "M" equal to zero.
+ */
+
+ /* NAK the write */
+ __vreg(vgt, reg + 4) = AUX_NATIVE_REPLY_NAK;
+
+ dp_aux_ch_ctl_trans_done(vgt, value, reg, 2, true);
+
+ return true;
+ }
+
+ /*
+ * Write request format: (command + address) occupies
+ * 3 bytes, followed by (len + 1) bytes of data.
+ */
+ /* NOTE(review): if AUX_BURST_SIZE is 20, len == 16 passes this
+ * check but the copy loop below would read buf[16], one past
+ * the 16-byte buffer — confirm the bound. */
+ ASSERT((len + 4) <= AUX_BURST_SIZE);
+
+ /* unpack data from vreg to buf */
+ for (t = 0; t < 4; t ++) {
+ u32 r = __vreg(vgt, reg + 8 + t*4);
+
+ buf[t*4] = (r >> 24) & 0xff;
+ buf[t*4 + 1] = (r >> 16) & 0xff;
+ buf[t*4 + 2] = (r >> 8) & 0xff;
+ buf[t*4 + 3] = r & 0xff;
+ }
+
+ /* write to virtual DPCD */
+ if (dpcd && dpcd->data_valid) {
+ for (t = 0; t <= len; t ++) {
+ int p = addr + t;
+
+ dpcd->data[p] = buf[t];
+
+ /* check for link training */
+ if (p == DPCD_TRAINING_PATTERN_SET)
+ dp_aux_ch_ctl_link_training(dpcd, buf[t]);
+ }
+ }
+
+ /* ACK the write */
+ __vreg(vgt, reg + 4) = 0;
+
+ dp_aux_ch_ctl_trans_done(vgt, value, reg, 1, dpcd && dpcd->data_valid);
+
+ return true;
+ }
+
+ if (op == GVT_AUX_NATIVE_READ) {
+ int idx, i, ret = 0;
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * read request exceeds what we supported
+ * DPCD spec: A Sink Device receiving a Native AUX CH
+ * read request for an unsupported DPCD address must
+ * reply with an AUX ACK and read data set equal to
+ * zero instead of replying with AUX NACK.
+ */
+
+ /* ACK the READ*/
+ __vreg(vgt, reg + 4) = 0;
+ __vreg(vgt, reg + 8) = 0;
+ __vreg(vgt, reg + 12) = 0;
+ __vreg(vgt, reg + 16) = 0;
+ __vreg(vgt, reg + 20) = 0;
+
+ dp_aux_ch_ctl_trans_done(vgt ,value, reg, len + 2, true);
+
+ return true;
+ }
+
+ for (idx = 1; idx <= 5; idx ++) {
+ /* clear the data registers */
+ __vreg(vgt, reg + 4 * idx) = 0;
+ }
+
+ /*
+ * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
+ */
+ ASSERT((len + 2) <= AUX_BURST_SIZE);
+
+ /* read from virtual DPCD to vreg */
+ /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
+ if (dpcd && dpcd->data_valid) {
+ for (i = 1; i <= (len + 1); i ++) {
+ int t;
+
+ /* pack bytes big-endian into successive data dwords */
+ t = dpcd->data[addr + i - 1];
+ t <<= (24 - 8*(i%4));
+ ret |= t;
+
+ if ((i%4 == 3) || (i == (len + 1))) {
+ __vreg(vgt, reg + (i/4 + 1)*4) = ret;
+ ret = 0;
+ }
+ }
+ }
+
+ dp_aux_ch_ctl_trans_done(vgt, value, reg, len + 2, dpcd && dpcd->data_valid);
+
+ return true;
+ }
+
+ /* i2c transaction starts */
+ gvt_i2c_handle_aux_ch_write(vgt, port_idx, offset, p_data);
+
+ dp_aux_ch_trigger_interrupt_on_done(vgt, value, reg);
+ return true;
+}
+
+/* Record a display-ready state change and ask the service thread to
+ * deliver the corresponding uevent. */
+static void gvt_dpy_stat_notify(struct vgt_device *vgt)
+{
+	gvt_set_dpy_uevent(vgt);
+	gvt_raise_request(vgt->pdev, GVT_REQUEST_UEVENT);
+}
+
+/* Write handler for the VGA control register: latch the value and log
+ * whether the guest enabled or disabled legacy VGA mode. */
+static bool vga_control_w(struct vgt_device *vgt, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	u32 vreg_val;
+
+	gvt_default_mmio_write(vgt, offset, p_data, bytes);
+
+	vreg_val = __vreg(vgt, offset);
+	gvt_info("VM(%d): %s VGA mode %x\n", vgt->vm_id,
+		(vreg_val & _REGBIT_VGA_DISPLAY_DISABLE) ? "Disable" : "Enable",
+		(unsigned int)vreg_val);
+	return true;
+}
+
+/* Look up @sbi_offset in the per-vGPU SBI write-back cache maintained
+ * by cache_sbi_reg_value(); warn and return 0 on a miss. */
+static u32 get_sbi_reg_cached_value(struct vgt_device *vgt,
+	unsigned int sbi_offset)
+{
+	struct sbi_registers *regs = &vgt->state.display.sbi_regs;
+	int i;
+
+	for (i = 0; i < regs->number; i++) {
+		if (regs->registers[i].offset == sbi_offset)
+			return regs->registers[i].value;
+	}
+
+	gvt_warn("vGT(%d): SBI reading did not find the cached value"
+		" for offset 0x%x. 0 will be returned!\n",
+		vgt->id, sbi_offset);
+	return 0;
+}
+
+/*
+ * Insert or update @sbi_offset -> @value in the per-vGPU SBI cache.
+ * An existing entry is overwritten in place; otherwise a new entry is
+ * appended, unless the cache already holds SBI_REG_MAX entries (then
+ * the value is dropped with a warning).
+ */
+static void cache_sbi_reg_value(struct vgt_device *vgt, unsigned int sbi_offset,
+ u32 value)
+{
+ int i;
+ int num = vgt->state.display.sbi_regs.number;
+ for (i = 0; i < num; ++ i) {
+ if (vgt->state.display.sbi_regs.registers[i].offset == sbi_offset)
+ break;
+ }
+
+ if (i == num) {
+ /* not cached yet: append at index i (== the old count) */
+ if (num < SBI_REG_MAX) {
+ vgt->state.display.sbi_regs.number ++;
+ } else {
+ gvt_warn("vGT(%d): SBI caching meets maximum limits!\n",
+ vgt->id);
+ return;
+ }
+ }
+
+ vgt->state.display.sbi_regs.registers[i].offset = sbi_offset;
+ vgt->state.display.sbi_regs.registers[i].value = value;
+}
+
+/*
+ * Read handler for SBI_DATA: when the guest has programmed a CRRD
+ * (sideband register read) opcode in SBI_CTL_STAT, serve the value
+ * from the per-vGPU SBI cache instead of the default vreg contents.
+ */
+static bool sbi_mmio_data_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool rc = 0;
+ rc = gvt_default_mmio_read(vgt, offset, p_data, bytes);
+
+ if (((__vreg(vgt, _SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
+ unsigned int sbi_offset = (__vreg(vgt, _SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ u32 val = get_sbi_reg_cached_value(vgt, sbi_offset);
+ *(u32 *)p_data = val;
+ }
+ return rc;
+}
+
+/*
+ * Write handler for SBI_CTL_STAT: latch the write, then immediately
+ * report the sideband transaction as ready and successful. A CRWR
+ * (sideband register write) opcode additionally stores SBI_DATA into
+ * the per-vGPU SBI cache, to be returned by later CRRD reads.
+ */
+static bool sbi_mmio_ctl_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool rc = 0;
+ u32 data;
+ rc = gvt_default_mmio_write(vgt, offset, p_data, bytes);
+
+ data = __vreg(vgt, offset);
+
+ /* report the transaction as completed successfully */
+ data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
+ data |= SBI_READY;
+
+ data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data |= SBI_RESPONSE_SUCCESS;
+
+ __vreg(vgt, offset) = data;
+
+ if (((__vreg(vgt, _SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
+ unsigned int sbi_offset = (__vreg(vgt, _SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ u32 val = __vreg(vgt, _SBI_DATA);
+ cache_sbi_reg_value(vgt, sbi_offset, val);
+ }
+
+ return rc;
+}
+
static bool pvinfo_read(struct vgt_device *vgt, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -193,6 +970,7 @@ static bool pvinfo_write(struct vgt_device *vgt, unsigned int offset,
u32 val = *(u32 *)p_data;
u32 min;
bool rc = true;
+ bool invalid_event = false;
switch (offset) {
case _vgtif_reg(min_low_gmadr):
@@ -223,8 +1001,30 @@ static bool pvinfo_write(struct vgt_device *vgt, unsigned int offset,
rc = false;
}
break;
+ case _vgtif_reg(display_ready):
+ switch (val) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ invalid_event = true;
+ gvt_warn("invalid display event: %d\n", val);
+ break;
+ }
+
+ if (!invalid_event)
+ gvt_dpy_stat_notify(vgt);
+
+ break;
case _vgtif_reg(g2v_notify):
- if (val == VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE) {
+ if (val == VGT_G2V_SET_POINTER_SHAPE) {
+ struct gvt_fb_notify_msg msg;
+ msg.vm_id = vgt->vm_id;
+ msg.plane_id = CURSOR_PLANE;
+ msg.pipe_id = 0;
+ gvt_fb_notifier_call_chain(FB_DISPLAY_FLIP, &msg);
+ } else if (val == VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE) {
rc = gvt_g2v_create_ppgtt_mm(vgt, 3);
} else if (val == VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY) {
rc = gvt_g2v_destroy_ppgtt_mm(vgt, 3);
@@ -236,6 +1036,17 @@ static bool pvinfo_write(struct vgt_device *vgt, unsigned int offset,
gvt_warn("Invalid PV notification. %x\n", val);
}
break;
+ case _vgtif_reg(xhot):
+ case _vgtif_reg(yhot):
+ {
+ struct gvt_fb_notify_msg msg;
+ msg.vm_id = vgt->vm_id;
+ msg.plane_id = CURSOR_PLANE;
+ msg.pipe_id = 0;
+ gvt_fb_notifier_call_chain(FB_DISPLAY_FLIP, &msg);
+ }
+ break;
+
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
@@ -262,6 +1073,22 @@ static bool pvinfo_write(struct vgt_device *vgt, unsigned int offset,
return rc;
}
+/* Write handler for the HSW power well control register: latch the
+ * guest write into the vreg, then reflect the requested power-well
+ * state back as already established. */
+static bool power_well_ctl_write(struct vgt_device *vgt, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	u32 value = *(u32 *)p_data;
+
+	memcpy((char *)vgt->state.mmio.vreg + offset, p_data, bytes);
+
+	if (value & HSW_PWR_WELL_ENABLE_REQUEST)
+		__vreg(vgt, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+	else
+		__vreg(vgt, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+
+	return true;
+}
+
bool fpga_dbg_write(struct vgt_device *vgt, unsigned int reg,
void *p_data, unsigned int bytes)
{
@@ -408,6 +1235,26 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_DERRMR, 4, F_VIRT, 0, D_ALL, NULL, NULL},
+ {GVT_PIPEDSL(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPECONF(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, pipe_conf_mmio_write},
+ {GVT_PIPESTAT(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPE_FRMCOUNT(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPE_FLIPCOUNT(PIPE_A), 4, F_VIRT, 0, D_ALL, NULL, NULL},
+
+ {GVT_PIPEDSL(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPECONF(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, pipe_conf_mmio_write},
+ {GVT_PIPESTAT(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPE_FRMCOUNT(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPE_FLIPCOUNT(PIPE_B), 4, F_VIRT, 0, D_ALL, NULL, NULL},
+
+ {GVT_PIPEDSL(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPECONF(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, pipe_conf_mmio_write},
+ {GVT_PIPESTAT(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPE_FRMCOUNT(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_PIPE_FLIPCOUNT(PIPE_C), 4, F_VIRT, 0, D_ALL, NULL, NULL},
+
+ {_REG_PIPE_EDP_CONF, 4, F_DPY, 0, D_ALL, NULL, pipe_conf_mmio_write},
+
{GVT_CURSURF(PIPE_A), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL, NULL, NULL},
{GVT_CURCNTR(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
{GVT_CURPOS(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
@@ -431,6 +1278,57 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_REG_CURAPALET_2, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_REG_CURAPALET_3, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPCNTR(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSURF(PIPE_A), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL, NULL,
+ pri_surf_mmio_write},
+ {GVT_DSPSURFLIVE(PIPE_A), 4, F_DPY_HWSTS_ADRFIX, 0xFFFFF000, D_ALL, NULL,
+ mmio_not_allow_write},
+ {GVT_DSPPOS(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPLINOFF(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSTRIDE(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSIZE(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPTILEOFF(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {GVT_DSPCNTR(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSURF(PIPE_B), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL, NULL,
+ pri_surf_mmio_write},
+ {GVT_DSPSURFLIVE(PIPE_B), 4, F_DPY_HWSTS_ADRFIX, 0xFFFFF000, D_ALL, NULL,
+ mmio_not_allow_write},
+ {GVT_DSPPOS(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPLINOFF(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSTRIDE(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSIZE(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPTILEOFF(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {GVT_DSPCNTR(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSURF(PIPE_C), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL, NULL,
+ pri_surf_mmio_write},
+ {GVT_DSPSURFLIVE(PIPE_C), 4, F_DPY_HWSTS_ADRFIX, 0xFFFFF000, D_ALL, NULL,
+ mmio_not_allow_write},
+ {GVT_DSPPOS(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPLINOFF(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSTRIDE(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPSIZE(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {GVT_DSPTILEOFF(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {GVT_SPRSURF(PIPE_A), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL,
+ NULL, spr_surf_mmio_write},
+ {GVT_SPRSURFLIVE(PIPE_A), 4, F_DPY_HWSTS_ADRFIX, 0xFFFFF000, D_ALL,
+ NULL, mmio_not_allow_write},
+ {GVT_SPRCNTR(PIPE_A), 4, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {GVT_SPRSURF(PIPE_B), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL,
+ NULL, spr_surf_mmio_write},
+ {GVT_SPRSURFLIVE(PIPE_B), 4, F_DPY_HWSTS_ADRFIX, 0xFFFFF000, D_ALL,
+ NULL, mmio_not_allow_write},
+ {GVT_SPRCNTR(PIPE_B), 4, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {GVT_SPRSURF(PIPE_C), 4, F_DPY_ADRFIX, 0xFFFFF000, D_ALL,
+ NULL, spr_surf_mmio_write},
+ {GVT_SPRSURFLIVE(PIPE_C), 4, F_DPY_HWSTS_ADRFIX, 0xFFFFF000, D_ALL,
+ NULL, mmio_not_allow_write},
+ {GVT_SPRCNTR(PIPE_C), 4, F_DPY, 0, D_ALL, NULL, NULL},
+
{_LGC_PALETTE_A, 4 * 256, F_DPY, 0, D_ALL, NULL, NULL},
{_LGC_PALETTE_B, 4 * 256, F_DPY, 0, D_ALL, NULL, NULL},
{_REG_LGC_PALETTE_C, 4 * 256, F_DPY, 0, D_ALL, NULL, NULL},
@@ -521,6 +1419,31 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_BLC_PWM_PCH_CTL1, 4, F_DOM0, 0, D_ALL, NULL, NULL},
{_BLC_PWM_PCH_CTL2, 4, F_DOM0, 0, D_ALL, NULL, NULL},
+ {_PCH_GMBUS0, 4*4, F_DPY, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write},
+ {_PCH_GPIOA, 6*4, F_VIRT, 0, D_ALL, NULL, NULL},
+
+ {_REG_DP_BUFTRANS, 0x28, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {_PCH_DPB_AUX_CH_CTL, 6*4, F_DPY, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write},
+ {_PCH_DPC_AUX_CH_CTL, 6*4, F_DPY, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write},
+ {_PCH_DPD_AUX_CH_CTL, 6*4, F_DPY, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write},
+
+ {_PCH_ADPA, 4, F_DPY, 0, D_ALL, NULL, pch_adpa_mmio_write},
+ {_PCH_TRANSACONF, 4, F_DPY, 0, D_ALL, NULL, transaconf_mmio_write},
+ {_PCH_TRANSBCONF, 4, F_DPY, 0, D_ALL, NULL, transaconf_mmio_write},
+ {_FDI_RXA_IIR, 4, F_DPY, 0, D_ALL, NULL, fdi_rx_iir_mmio_write},
+ {_FDI_RXB_IIR, 4, F_DPY, 0, D_ALL, NULL, fdi_rx_iir_mmio_write},
+ {_REG_FDI_RXC_IIR, 4, F_DPY, 0, D_ALL, NULL, fdi_rx_iir_mmio_write},
+ {_FDI_RXA_CTL, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_FDI_RXB_CTL, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_REG_FDI_RXC_CTL, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_FDI_TXA_CTL, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_FDI_TXB_CTL, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_REG_FDI_TXC_CTL, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_FDI_RXA_IMR, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_FDI_RXB_IMR, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+ {_REG_FDI_RXC_IMR, 4, F_DPY, 0, D_ALL, NULL, update_fdi_rx_iir_status},
+
{_PCH_TRANS_HTOTAL_A, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_PCH_TRANS_HBLANK_A, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_PCH_TRANS_HSYNC_A, 4, F_DPY, 0, D_ALL, NULL, NULL},
@@ -566,6 +1489,9 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_FDI_RXB_TUSIZE1, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_FDI_RXB_TUSIZE2, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_PCH_PP_CONTROL, 4, F_DPY, 0, D_ALL, NULL, pch_pp_control_mmio_write},
+ {_PCH_PP_DIVISOR, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_PCH_PP_STATUS, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_PCH_LVDS, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_PCH_DPLL_A, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_PCH_DPLL_B, 4, F_DPY, 0, D_ALL, NULL, NULL},
@@ -582,6 +1508,16 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_PCH_PP_ON_DELAYS, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_PCH_PP_OFF_DELAYS, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {0xE651C, 4, F_DPY, 0, D_ALL, dpy_reg_mmio_read, NULL},
+ {0xE661C, 4, F_DPY, 0, D_ALL, dpy_reg_mmio_read, NULL},
+ {0xE671C, 4, F_DPY, 0, D_ALL, dpy_reg_mmio_read, NULL},
+ {0xE681C, 4, F_DPY, 0, D_ALL, dpy_reg_mmio_read, NULL},
+ {0xE6C04, 4, F_DPY, 0, D_ALL,
+ dpy_reg_mmio_read_2, NULL},
+ {0xE6E1C, 4, F_DPY, 0, D_ALL,
+ dpy_reg_mmio_read_3, NULL},
+ {_PCH_PORT_HOTPLUG, 4, F_VIRT, 0, D_ALL, NULL, shotplug_ctl_mmio_write},
+ {_LCPLL_CTL, 4, F_DPY, 0, D_ALL, NULL, lcpll_ctl_mmio_write},
{_FUSE_STRAP, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_DIGITAL_PORT_HOTPLUG_CNTRL, 4, F_DPY, 0, D_ALL, NULL, NULL},
@@ -592,6 +1528,8 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_ILK_DISPLAY_CHICKEN2, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_ILK_DSPCLK_GATE_D, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_SOUTH_CHICKEN1, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_SOUTH_CHICKEN2, 4, F_DPY, 0, D_ALL, NULL, south_chicken2_write},
{_TRANSA_CHICKEN1, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_TRANSB_CHICKEN1, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_SOUTH_DSPCLK_GATE_D, 4, F_DPY, 0, D_ALL, NULL, NULL},
@@ -685,8 +1623,30 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_SFUSE_STRAP, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_SBI_ADDR, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_SBI_DATA, 4, F_DPY, 0, D_ALL, sbi_mmio_data_read, NULL},
+ {_SBI_CTL_STAT, 4, F_DPY, 0, D_ALL, NULL, sbi_mmio_ctl_write},
{_PIXCLK_GATE, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_PCH_DPA_AUX_CH_CTL, 6*4, F_DPY, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write},
+
+ {_DDI_BUF_CTL_A, 4, F_DPY, 0, D_ALL, NULL, ddi_buf_ctl_mmio_write},
+ {_DDI_BUF_CTL_B, 4, F_DPY, 0, D_ALL, NULL, ddi_buf_ctl_mmio_write},
+ {_REG_DDI_BUF_CTL_C, 4, F_DPY, 0, D_ALL, NULL, ddi_buf_ctl_mmio_write},
+ {_REG_DDI_BUF_CTL_D, 4, F_DPY, 0, D_ALL, NULL, ddi_buf_ctl_mmio_write},
+ {_REG_DDI_BUF_CTL_E, 4, F_DPY, 0, D_ALL, NULL, ddi_buf_ctl_mmio_write},
+
+ {_DP_TP_CTL_A, 4, F_DPY, 0, D_ALL, NULL, dp_tp_ctl_mmio_write},
+ {_DP_TP_CTL_B, 4, F_DPY, 0, D_ALL, NULL, dp_tp_ctl_mmio_write},
+ {_REG_DP_TP_CTL_C, 4, F_DPY, 0, D_ALL, NULL, dp_tp_ctl_mmio_write},
+ {_REG_DP_TP_CTL_D, 4, F_DPY, 0, D_ALL, NULL, dp_tp_ctl_mmio_write},
+ {_REG_DP_TP_CTL_E, 4, F_DPY, 0, D_ALL, NULL, NULL},
+
+ {_DP_TP_STATUS_A, 4, F_DPY, 0, D_ALL, NULL, dp_tp_status_mmio_write},
+ {_DP_TP_STATUS_B, 4, F_DPY, 0, D_ALL, NULL, dp_tp_status_mmio_write},
+ {_REG_DP_TP_STATUS_C, 4, F_DPY, 0, D_ALL, NULL, dp_tp_status_mmio_write},
+ {_REG_DP_TP_STATUS_D, 4, F_DPY, 0, D_ALL, NULL, dp_tp_status_mmio_write},
+ {_REG_DP_TP_STATUS_E, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_DDI_BUF_TRANS_A, 0x50, F_DPY, 0, D_ALL, NULL, NULL},
{0x64E60, 0x50, F_DPY, 0, D_ALL, NULL, NULL},
{0x64Ec0, 0x50, F_DPY, 0, D_ALL, NULL, NULL},
{0x64F20, 0x50, F_DPY, 0, D_ALL, NULL, NULL},
@@ -738,10 +1698,17 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{_GEN6_RC6p_THRESHOLD, 4, F_DOM0, 0, D_ALL, NULL, NULL},
{_GEN6_RC6pp_THRESHOLD, 4, F_DOM0, 0, D_ALL, NULL, NULL},
{_GEN6_PMINTRMSK, 4, F_DOM0, 0, D_ALL, NULL, NULL},
- {_GEN6_GDRST, 4, F_DOM0, 0, D_ALL, NULL, gdrst_mmio_write},
+ {_HSW_PWR_WELL_BIOS, 4, F_DOM0, 0, D_ALL, NULL, power_well_ctl_write},
+ {_HSW_PWR_WELL_DRIVER, 4, F_DOM0, 0, D_ALL, NULL, power_well_ctl_write},
+ {_HSW_PWR_WELL_KVMR, 4, F_DOM0, 0, D_ALL, NULL, power_well_ctl_write},
+ {_HSW_PWR_WELL_DEBUG, 4, F_DOM0, 0, D_ALL, NULL, power_well_ctl_write},
+ {_HSW_PWR_WELL_CTL5, 4, F_DOM0, 0, D_ALL, NULL, power_well_ctl_write},
+ {_HSW_PWR_WELL_CTL6, 4, F_DOM0, 0, D_ALL, NULL, power_well_ctl_write},
+ {_GEN6_GDRST, 4, F_DOM0, 0, D_ALL, NULL, gdrst_mmio_write},
{0x100000, 0x80, F_VIRT, 0, D_ALL, fence_mmio_read, fence_mmio_write},
{VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_VIRT, 0, D_ALL, pvinfo_read, pvinfo_write},
+ {_CPU_VGACNTRL, 4, F_DOM0, 0, D_ALL, NULL, vga_control_w},
/* TODO: MCHBAR, suppose read-only */
{MCHBAR_MIRROR_BASE_SNB, 0x40000, F_VIRT, 0, D_ALL, NULL, NULL},
@@ -792,6 +1759,9 @@ struct gvt_reg_info gvt_general_reg_info[] = {
{0xe6704, 4, F_VIRT, 0, D_ALL, NULL, NULL},
{0xe6800, 4, F_VIRT, 0, D_ALL, NULL, NULL},
{0xe6804, 4, F_VIRT, 0, D_ALL, NULL, NULL},
+ /* FIXME: the GMBUS handler does not yet cover the GMBUS4/GMBUS5 ports */
+ {_PCH_GMBUS4, 4, F_DPY, 0, D_ALL, NULL, NULL},
+ {_PCH_GMBUS5, 4, F_DPY, 0, D_ALL, NULL, NULL},
{_REG_SUPER_QUEUE_CONFIG, 4, F_VIRT, 0, D_ALL, NULL, NULL},
{0xec008, 4, F_VIRT, 0, D_ALL, NULL, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/instance.c b/drivers/gpu/drm/i915/gvt/instance.c
index 0bf62e4..3015fdf 100644
--- a/drivers/gpu/drm/i915/gvt/instance.c
+++ b/drivers/gpu/drm/i915/gvt/instance.c
@@ -162,6 +162,7 @@ static bool create_virtual_gm_state(struct vgt_device *vgt,
static void destroy_virtual_device_state(struct vgt_device *vgt)
{
+ gvt_clean_virtual_display_state(vgt);
gvt_clean_vgtt(vgt);
destroy_virtual_mmio_state(vgt);
destroy_virtual_gm_state(vgt);
@@ -179,6 +180,9 @@ static bool create_virtual_device_state(struct vgt_device *vgt,
init_virtual_cfg_space_state(vgt, info);
+ if (!gvt_init_virtual_display_state(vgt))
+ return false;
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 23d40ce..d0cc1a6 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -598,12 +598,29 @@ static void gvt_init_events(
}
}
+static enum hrtimer_restart gvt_dpy_timer_fn(struct hrtimer *data)
+{
+ struct gvt_emul_timer *dpy_timer;
+ struct gvt_irq_state *state;
+ struct pgt_device *pdev;
+
+ dpy_timer = container_of(data, struct gvt_emul_timer, timer);
+ state = container_of(dpy_timer, struct gvt_irq_state, dpy_timer);
+ pdev = gvt_irq_state_to_pdev(state);
+
+ gvt_raise_request(pdev, GVT_REQUEST_EMUL_DPY_EVENTS);
+
+ hrtimer_add_expires_ns(&dpy_timer->timer, dpy_timer->period);
+ return HRTIMER_RESTART;
+}
+
/*
* Do interrupt initialization for vGT driver
*/
bool gvt_irq_init(struct pgt_device *pdev)
{
struct gvt_irq_state *state = &pdev->irq_state;
+ struct gvt_emul_timer *dpy_timer;
gvt_dbg_core("init irq framework");
@@ -623,12 +640,19 @@ bool gvt_irq_init(struct pgt_device *pdev)
gvt_irq_map_init(state);
+ dpy_timer = &state->dpy_timer;
+ hrtimer_init(&dpy_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dpy_timer->timer.function = gvt_dpy_timer_fn;
+ dpy_timer->period = GVT_DPY_EMUL_PERIOD;
+
return true;
}
void gvt_irq_exit(struct pgt_device *pdev)
{
- return;
+ struct gvt_irq_state *state = &pdev->irq_state;
+
+ hrtimer_cancel(&state->dpy_timer.timer);
}
void gvt_inject_flip_done(struct vgt_device *vgt, int pipe)
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 3142ed6..ff125fc 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -165,6 +165,7 @@ enum gvt_irq_type {
};
#define GVT_IRQ_BITWIDTH 32
+#define GVT_DPY_EMUL_PERIOD 16000000 /* 16 ms for now */
/* device specific interrupt bit definitions */
struct gvt_irq_info {
@@ -199,6 +200,11 @@ struct gvt_irq_map {
u32 down_irq_bitmask;
};
+struct gvt_emul_timer {
+ struct hrtimer timer;
+ u64 period;
+};
+
/* structure containing device specific IRQ state */
struct gvt_irq_state {
struct gvt_irq_ops *ops;
@@ -206,6 +212,7 @@ struct gvt_irq_state {
DECLARE_BITMAP(irq_info_bitmap, GVT_IRQ_INFO_MAX);
struct gvt_event_info events[GVT_EVENT_MAX];
DECLARE_BITMAP(pending_events, GVT_EVENT_MAX);
+ struct gvt_emul_timer dpy_timer;
struct gvt_irq_map *irq_map;
};
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 1758092..1186da5 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -571,6 +571,9 @@ union _TRANS_CONFIG
#define _PCH_GMBUS0 0xc5100
#define _PCH_GMBUS1 0xc5104
#define _PCH_GMBUS2 0xc5108
+#define _PCH_GMBUS3 0xc510c
+#define _PCH_GMBUS4 0xc5110
+#define _PCH_GMBUS5 0xc5120
#define _GEN6_GDRST 0x941c
#define _GEN6_GT_THREAD_STATUS_REG 0x13805c
--
1.9.1
More information about the Intel-gfx
mailing list