[RFC PATCH v2 3/3] drm/i915/gvt: Enable vGPU direct flip
Tina Zhang
tina.zhang at intel.com
Fri May 10 04:15:28 UTC 2019
When a vGPU performs a page flip, the display model flips the
corresponding shadow framebuffer onto the assigned hardware plane
through the DRM display framework.
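
Roughly, the trap-and-emulate flow added here (a sketch; the GVT-side
names are the functions introduced by this patch, the drm_atomic_*
calls are core DRM API):

    guest writes DSPSURF
      -> pri_surf_mmio_write()
         -> handle_vgpu_page_flip()
            -> vgpu_get_plane_info()        /* decode guest plane state */
            -> drm_atomic_get_plane_state() /* build a host atomic update */
            -> drm_atomic_commit()          /* flip on the assigned plane */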
Signed-off-by: Tina Zhang <tina.zhang at intel.com>
---
drivers/gpu/drm/i915/gvt/dmabuf.c | 8 +-
drivers/gpu/drm/i915/gvt/dmabuf.h | 6 +
drivers/gpu/drm/i915/gvt/handlers.c | 332 ++++++++++++++++++++++++++++++++++++
3 files changed, 345 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index e9adcd4..b6d9317 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -201,7 +201,7 @@ static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
return false;
}
-static int vgpu_get_plane_info(struct drm_device *dev,
+int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
int plane_id)
@@ -221,6 +221,8 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->height = p.height;
info->stride = p.stride;
info->drm_format = p.drm_format;
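+ /* Guest-programmed source offsets, consumed by the direct-flip plane state. */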
+ info->src_x = p.x_offset;
+ info->src_y = p.y_offset;
switch (p.tiled) {
case PLANE_CTL_TILED_LINEAR:
@@ -289,6 +291,8 @@ static int vgpu_get_plane_info(struct drm_device *dev,
return -EFAULT;
}
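+ /* Record the owning vGPU so a host plane's fb can be traced back to it. */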
+ info->vgpu_id = vgpu->id;
+
return 0;
}
@@ -406,6 +410,8 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
mutex_unlock(&vgpu->dmabuf_lock);
+ fb_info.vgpu_id = vgpu->id;
+
/* Need to allocate a new one */
dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
if (unlikely(!dmabuf_obj)) {
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
index cf97eb4..184445b 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.h
@@ -46,6 +46,8 @@ struct intel_vgpu_fb_info {
__u32 x_hot; /* horizontal position of cursor hotspot */
__u32 y_hot; /* vertical position of cursor hotspot */
__u32 vgpu_id;
+ __u32 src_x;
+ __u32 src_y;
struct intel_vgpu_dmabuf_obj *obj;
};
@@ -66,4 +68,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct intel_vgpu_fb_info *info);
+int vgpu_get_plane_info(struct drm_device *dev,
+ struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *info,
+ int plane_id);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 18f01ee..698a72d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -36,6 +36,8 @@
*/
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
@@ -743,6 +745,334 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
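+/*
+ * Describe the vGPU's current plane state as a drm_mode_fb_cmd2 and
+ * refresh the shadow object's fb_info, so the shadow framebuffer can
+ * be re-filled before it is flipped onto the assigned host plane.
+ */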
+static int vgpu_fb_update(struct drm_device *dev,
+ struct intel_vgpu *vgpu,
+ int plane_id,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_vgpu_fb_info info;
+ int ret;
+
+ ret = vgpu_get_plane_info(dev, vgpu, &info, plane_id);
+ if (ret)
+ return ret;
+ if (!info.size || !info.drm_format)
+ return -EINVAL;
+
+ if (plane_id == DRM_PLANE_TYPE_PRIMARY)
+ obj = vgpu->display.shadow_fbs.shadow_obj[PRIMARY_PLANE];
+ else if (plane_id == DRM_PLANE_TYPE_CURSOR)
+ obj = vgpu->display.shadow_fbs.shadow_obj[CURSOR_PLANE];
+ else
+ obj = vgpu->display.shadow_fbs.shadow_obj[SPRITE_PLANE];
+
+ if (obj == NULL)
+ return -ENODEV;
+
+ memcpy(obj->gvt_info, &info, sizeof(info));
+
+ mode_cmd->pixel_format = info.drm_format;
+ mode_cmd->width = info.width;
+ mode_cmd->height = info.height;
+ mode_cmd->handles[0] = 0;
+ mode_cmd->offsets[0] = 0;
+ mode_cmd->pitches[0] = info.stride;
+ mode_cmd->flags = DRM_MODE_FB_MODIFIERS;
+ mode_cmd->modifier[0] = info.drm_format_mod;
+
+ return ret;
+}
+
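+/*
+ * Scan the host planes for one whose framebuffer wraps a proxy GEM
+ * object belonging to this vGPU. On success, return the CRTC, the
+ * assigned primary plane and that CRTC's cursor plane.
+ */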
+static int check_vgpu_plane_assignment_status(struct intel_vgpu *vgpu,
+ enum plane_id plane_id,
+ struct drm_crtc **crtc,
+ struct drm_plane **drm_plane,
+ struct drm_plane **cur_drm_plane)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ struct drm_mode_object *obj_fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_gem_object *gem_obj;
+ struct drm_i915_gem_object *i915_gem_obj;
+ struct intel_vgpu_fb_info *fb_info;
+ u32 fb_id = 0;
+ int ret = 0;
+
+ *crtc = NULL;
+ *drm_plane = NULL;
+ *cur_drm_plane = NULL;
+
+ drm_for_each_plane(plane, dev) {
+ intel_plane = to_intel_plane(plane);
+ if (intel_plane->id == plane_id) {
+ if (plane->state && plane->state->fb)
+ fb_id = plane->state->fb->base.id;
+ else if (!plane->state && plane->fb)
+ fb_id = plane->fb->base.id;
+ else
+ continue;
+
+ obj_fb = idr_find(&dev->mode_config.object_idr, fb_id);
+ if (!obj_fb)
+ continue;
+ intel_fb = to_intel_framebuffer(obj_to_fb(obj_fb));
+ gem_obj = intel_fb->base.obj[0];
+ i915_gem_obj = to_intel_bo(gem_obj);
+ fb_info = i915_gem_obj->gvt_info;
+
+ if (i915_gem_object_is_proxy(i915_gem_obj) &&
+ fb_info && fb_info->vgpu_id == vgpu->id &&
+ plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ if (plane->state && plane->state->crtc) {
+ *crtc = plane->state->crtc;
+ *drm_plane = plane;
+ *cur_drm_plane =
+ plane->state->crtc->cursor;
+ break;
+ } else if (!plane->state && plane->crtc) {
+ *crtc = plane->crtc;
+ *drm_plane = plane;
+ *cur_drm_plane = plane->crtc->cursor;
+ break;
+ }
+ }
+ }
+ }
+
+ if (fb_id == 0 || *crtc == NULL || *drm_plane == NULL)
+ ret = -ENODEV;
+
+ return ret;
+}
+
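+/*
+ * The two helpers below appear to duplicate the static helpers of the
+ * same name in i915_gem.c, which are not exported: they drop an
+ * object's current backing pages so they can be re-acquired after the
+ * guest changes the scanout buffer.
+ */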
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ rcu_read_unlock();
+}
+
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *pages;
+
+ pages = fetch_and_zero(&obj->mm.pages);
+ if (IS_ERR_OR_NULL(pages))
+ return pages;
+
+ spin_lock(&i915->mm.obj_lock);
+ list_del(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+
+ if (obj->mm.mapping) {
+ void *ptr;
+
+ ptr = page_mask_bits(obj->mm.mapping);
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ obj->mm.mapping = NULL;
+ }
+
+ __i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
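+/*
+ * Replay a guest page flip as an atomic commit on the host plane
+ * assigned to this vGPU. Called from the display surface MMIO write
+ * handlers once the guest has programmed a new scanout address.
+ */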
+static int handle_vgpu_page_flip(struct intel_vgpu *vgpu, int pipe,
+ enum plane_id plane_id)
+{
+ struct drm_device *drm = &vgpu->gvt->dev_priv->drm;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane, *cur_plane;
+ struct drm_framebuffer *pri_fb, *cur_fb;
+ struct drm_i915_gem_object *pri_obj, *cur_obj;
+ struct drm_atomic_state *state = NULL;
+ struct drm_plane_state *plane_state = NULL, *cur_plane_state = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct intel_vgpu_fb_info *fb_info;
+ int ret = 0;
+
+ if (!vgpu->display.shadow_fbs.enable_direct_flip)
+ return ret;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+retry:
+ ret = drm_modeset_lock_all_ctx(drm, &ctx);
+ if (ret)
+ goto fail;
+
+ /* 1. Check assignment status */
+ ret = check_vgpu_plane_assignment_status(vgpu, plane_id, &crtc,
+ &plane, &cur_plane);
+ if (ret) {
+ DRM_DEBUG_KMS("%s, vgpu plane %d has no assgined plane\n",
+ __func__, plane_id);
+ ret = -EINVAL;
+ goto fail;
+ }
+ /* 2. Update shadow primary plane */
+ if (plane_id == PLANE_PRIMARY) {
+ pri_fb = vgpu->display.shadow_fbs.shadow_fb[PRIMARY_PLANE];
+ pri_obj = vgpu->display.shadow_fbs.shadow_obj[PRIMARY_PLANE];
+
+ fb_info = pri_obj->gvt_info;
+
+ state = drm_atomic_state_alloc(drm);
+ if (!state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ state->acquire_ctx = &ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto out;
+ }
+
+ ret = vgpu_fb_update(drm, vgpu, DRM_PLANE_TYPE_PRIMARY, &mode_cmd);
+ if (ret) {
+ DRM_DEBUG_KMS("vgpu_fb_update failed\n");
+ goto out;
+ }
+
+ drm_helper_mode_fill_fb_struct(drm, pri_fb, &mode_cmd);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto out;
+ if (plane_state->fb != pri_fb)
+ drm_atomic_set_fb_for_plane(plane_state, pri_fb);
+
+ plane_state->src_x = fb_info->src_x << 16;
+ plane_state->src_y = fb_info->src_y << 16;
+ plane_state->src_w = mode_cmd.width << 16;
+ plane_state->src_h = mode_cmd.height << 16;
+
+ /* Don't allow full modeset */
+ state->allow_modeset = false;
+
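+ /*
+ * The flip may point the plane at different guest memory: drop
+ * the shadow object's backing pages, re-acquire them and rebind
+ * the VMA in the GGTT with PIN_UPDATE before committing.
+ */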
+ if (i915_gem_object_has_pinned_pages(pri_obj)) {
+ struct sg_table *pages;
+ struct i915_vma *vma;
+ struct i915_address_space *vm =
+ &vgpu->gvt->dev_priv->ggtt.vm;
+
+ pages = __i915_gem_object_unset_pages(pri_obj);
+ if (!IS_ERR(pages))
+ pri_obj->ops->put_pages(pri_obj, pages);
+ ret = pri_obj->ops->get_pages(pri_obj);
+ if (ret)
+ goto out;
+
+ vma = i915_vma_instance(pri_obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ vma->ops->set_pages(vma);
+
+ ret = i915_vma_bind(vma, vma->obj ?
+ pri_obj->cache_level : 0,
+ PIN_UPDATE);
+ if (ret)
+ goto out;
+ }
+
+ /* 3. Update shadow cursor plane */
+ if (cur_plane) {
+ ret = vgpu_fb_update(drm, vgpu, DRM_PLANE_TYPE_CURSOR,
+ &mode_cmd);
+ if (ret) {
+ DRM_DEBUG_KMS("cursor vgpu_fb_update failed\n");
+ goto flip;
+ }
+ cur_fb = vgpu->display.shadow_fbs.shadow_fb[CURSOR_PLANE];
+ cur_obj = vgpu->display.shadow_fbs.shadow_obj[CURSOR_PLANE];
+ cur_plane_state = drm_atomic_get_plane_state(state,
+ cur_plane);
+ if (IS_ERR(cur_plane_state)) {
+ ret = PTR_ERR(cur_plane_state);
+ goto flip;
+ }
+
+ fb_info = cur_obj->gvt_info;
+ drm_helper_mode_fill_fb_struct(drm, cur_fb, &mode_cmd);
+ ret = drm_atomic_set_crtc_for_plane(cur_plane_state,
+ crtc);
+ if (ret != 0)
+ goto flip;
+ if (cur_plane_state->fb != cur_fb)
+ drm_atomic_set_fb_for_plane(cur_plane_state,
+ cur_fb);
+
+ cur_plane_state->crtc_x = fb_info->x_pos;
+ cur_plane_state->crtc_y = fb_info->y_pos;
+ cur_plane_state->crtc_w = mode_cmd.width;
+ cur_plane_state->crtc_h = mode_cmd.height;
+ cur_plane_state->src_x = 0;
+ cur_plane_state->src_y = 0;
+ cur_plane_state->src_w = mode_cmd.width << 16;
+ cur_plane_state->src_h = mode_cmd.height << 16;
+
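+ /* Refresh the cursor object's backing pages, as for the primary. */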
+ if (i915_gem_object_has_pinned_pages(cur_obj)) {
+ struct sg_table *pages;
+ struct i915_vma *vma;
+ struct i915_address_space *vm =
+ &vgpu->gvt->dev_priv->ggtt.vm;
+
+ pages = __i915_gem_object_unset_pages(cur_obj);
+ if (!IS_ERR(pages))
+ cur_obj->ops->put_pages(cur_obj, pages);
+ ret = cur_obj->ops->get_pages(cur_obj);
+ if (ret)
+ goto out;
+
+ vma = i915_vma_instance(cur_obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ vma->ops->set_pages(vma);
+
+ ret = i915_vma_bind(vma, vma->obj ?
+ cur_obj->cache_level : 0,
+ PIN_UPDATE);
+ if (ret)
+ goto out;
+ }
+ }
+flip:
+
+ /* 4. Commit drm_framebuffer */
+ ret = drm_atomic_commit(state);
+ if (plane_state->fb != pri_fb)
+ drm_framebuffer_put(pri_fb);
+ if (cur_plane_state && cur_plane_state->fb != cur_fb)
+ drm_framebuffer_put(cur_fb);
+ } else {
+ DRM_DEBUG_KMS("Assigning sprite plane hasn't been supported\n");
+ }
+
+out:
+ if (state)
+ drm_atomic_state_put(state);
+fail:
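+ /* Standard modeset-lock backoff: drop the locks and retry on deadlock. */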
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
#define DSPSURF_TO_PIPE(offset) \
calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
@@ -758,6 +1088,8 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
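+ /* Replay the guest flip onto the assigned host plane. */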
+ handle_vgpu_page_flip(vgpu, pipe, PLANE_PRIMARY);
+
if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
--
2.7.4