[PATCH v2 2/3] drm/virtio: never kick in virtio_gpu_queue_ctrl_sgs
Chia-I Wu
olvaffe at gmail.com
Fri Feb 7 22:13:58 UTC 2020
virtio_gpu_queue_ctrl_sgs now only queues the commands; it never kicks
the virtqueue. virtio_gpu_commit_ctrl must be called explicitly to
notify the host. This, however, means that we need to grab the
spinlock twice.
Signed-off-by: Chia-I Wu <olvaffe at gmail.com>
---
drivers/gpu/drm/virtio/virtgpu_vq.c | 29 ++++++++++++++++++++++-------
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 0c9be2142ba1a..d76b24f2b2bff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -367,13 +367,7 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
- if (!vgdev->disable_notify)
- notify = virtqueue_kick_prepare(vq);
-
spin_unlock(&vgdev->ctrlq.qlock);
-
- if (notify)
- virtqueue_notify(vq);
}
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -434,6 +428,9 @@ static void virtio_gpu_commit_ctrl(struct virtio_gpu_device *vgdev)
struct virtqueue *vq = vgdev->ctrlq.vq;
bool notify;
+ if (vgdev->disable_notify)
+ return;
+
spin_lock(&vgdev->ctrlq.qlock);
notify = virtqueue_kick_prepare(vq);
spin_unlock(&vgdev->ctrlq.qlock);
@@ -525,6 +522,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->height = cpu_to_le32(params->height);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
bo->created = true;
}
@@ -541,6 +539,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
cmd_p->resource_id = cpu_to_le32(resource_id);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
@@ -557,6 +556,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgde
cmd_p->resource_id = cpu_to_le32(resource_id);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -579,6 +579,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
@@ -600,6 +601,7 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -632,6 +634,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
}
static void
@@ -655,6 +658,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
vbuf->data_size = sizeof(*ents) * nents;
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -787,6 +791,7 @@ int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
vgdev->display_info_pending = true;
cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
return 0;
}
@@ -810,6 +815,7 @@ int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
cmd_p->capset_index = cpu_to_le32(idx);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
return 0;
}
@@ -885,6 +891,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
cmd_p->capset_version = cpu_to_le32(version);
*cache_p = cache_ent;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
return 0;
}
@@ -912,6 +919,7 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
cmd_p->scanout = cpu_to_le32(scanout);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
return 0;
@@ -932,6 +940,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
@@ -946,6 +955,7 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -964,7 +974,7 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -983,6 +993,7 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_commit_ctrl(vgdev);
}
void
@@ -1014,6 +1025,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->flags = cpu_to_le32(params->flags);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
bo->created = true;
}
@@ -1047,6 +1059,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->level = cpu_to_le32(level);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1073,6 +1086,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->level = cpu_to_le32(level);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1096,6 +1110,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
cmd_p->size = cpu_to_le32(data_size);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+ virtio_gpu_commit_ctrl(vgdev);
}
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
--
2.25.0.341.g760bfbb309-goog
More information about the dri-devel
mailing list