<div dir="ltr"><div dir="ltr">On Thu, Jun 26, 2025 at 4:12 PM Ian Forbes <<a href="mailto:ian.forbes@broadcom.com">ian.forbes@broadcom.com</a>> wrote:</div><div class="gmail_quote gmail_quote_container"><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Replace vmwgfx's ad hoc userspace fence tracking with drm_syncobj.<br>
They are nearly identical and it is possible to maintain compatibility<br>
with the old IOCTLs.<br>
<br>
Signed-off-by: Ian Forbes <<a href="mailto:ian.forbes@broadcom.com" target="_blank">ian.forbes@broadcom.com</a>><br>
---<br>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 3 +-<br>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 +-<br>
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 10 +-<br>
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 230 ++++++------------------<br>
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 3 +-<br>
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 +-<br>
6 files changed, 67 insertions(+), 183 deletions(-)<br>
<br>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c<br>
index bc0342c58b4b..33e5e90ce63d 100644<br>
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c<br>
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c<br>
@@ -1582,7 +1582,8 @@ static const struct file_operations vmwgfx_driver_fops = {<br>
<br>
static const struct drm_driver driver = {<br>
.driver_features =<br>
- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,<br>
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC |<br>
+ DRIVER_GEM | DRIVER_CURSOR_HOTSPOT | DRIVER_SYNCOBJ,<br>
.ioctls = vmw_ioctls,<br>
.num_ioctls = ARRAY_SIZE(vmw_ioctls),<br>
.master_set = vmw_master_set,<br>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h<br>
index eda5b6f8f4c4..6df5f66aecf7 100644<br>
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h<br>
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h<br>
@@ -983,7 +983,7 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,<br>
struct vmw_fence_obj **p_fence,<br>
uint32_t *p_handle);<br>
extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,<br>
- struct vmw_fpriv *vmw_fp,<br>
+ struct drm_file *,<br>
int ret,<br>
struct drm_vmw_fence_rep __user<br>
*user_fence_rep,<br>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c<br>
index 819704ac675d..f83c35861bf7 100644<br>
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c<br>
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c<br>
@@ -14,6 +14,7 @@<br>
<br>
#include <drm/ttm/ttm_bo.h><br>
#include <drm/ttm/ttm_placement.h><br>
+#include <drm/drm_syncobj.h><br>
<br>
#include <linux/sync_file.h><br>
#include <linux/hashtable.h><br>
@@ -3859,7 +3860,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,<br>
*/<br>
int<br>
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,<br>
- struct vmw_fpriv *vmw_fp, int ret,<br>
+ struct drm_file *file_priv, int ret,<br>
struct drm_vmw_fence_rep __user *user_fence_rep,<br>
struct vmw_fence_obj *fence, uint32_t fence_handle,<br>
int32_t out_fence_fd)<br>
@@ -3894,7 +3895,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,<br>
* handle.<br>
*/<br>
if (unlikely(ret != 0) && (fence_rep.error == 0)) {<br>
- ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);<br>
+ drm_syncobj_destroy(file_priv, fence_handle);<br>
VMW_DEBUG_USER("Fence copy error. Syncing.\n");<br>
(void) vmw_fence_obj_wait(fence, false, false,<br>
VMW_FENCE_WAIT_TIMEOUT);<br>
@@ -4236,8 +4237,9 @@ int vmw_execbuf_process(struct drm_file *file_priv,<br>
}<br>
}<br>
<br>
- ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,<br>
- user_fence_rep, fence, handle, out_fence_fd);<br>
+ ret = vmw_execbuf_copy_fence_user(dev_priv, file_priv, ret,<br>
+ user_fence_rep, fence, handle,<br>
+ out_fence_fd);<br>
<br>
if (sync_file) {<br>
if (ret) {<br>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c<br>
index c2294abbe753..de81d95268c3 100644<br>
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c<br>
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c<br>
@@ -7,6 +7,7 @@<br>
**************************************************************************/<br>
<br>
#include "vmwgfx_drv.h"<br>
+#include <drm/drm_syncobj.h><br>
<br>
#define VMW_FENCE_WRAP (1 << 31)<br>
<br>
@@ -18,11 +19,6 @@ struct vmw_fence_manager {<br>
u64 ctx;<br>
};<br>
<br>
-struct vmw_user_fence {<br>
- struct ttm_base_object base;<br>
- struct vmw_fence_obj fence;<br>
-};<br>
-<br>
/**<br>
* struct vmw_event_fence_action - fence callback that delivers a DRM event.<br>
*<br>
@@ -74,7 +70,7 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)<br>
vmw_seqno_waiter_remove(fman->dev_priv);<br>
spin_unlock(&fman->lock);<br>
}<br>
- fence->destroy(fence);<br>
+ dma_fence_free(f);<br>
}<br>
<br>
static const char *vmw_fence_get_driver_name(struct dma_fence *f)<br>
@@ -156,14 +152,12 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)<br>
}<br>
<br>
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,<br>
- struct vmw_fence_obj *fence, u32 seqno,<br>
- void (*destroy) (struct vmw_fence_obj *fence))<br>
+ struct vmw_fence_obj *fence, u32 seqno)<br>
{<br>
int ret = 0;<br>
<br>
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,<br>
fman->ctx, seqno);<br>
- fence->destroy = destroy;<br>
<br>
spin_lock(&fman->lock);<br>
if (unlikely(fman->fifo_down)) {<br>
@@ -239,11 +233,6 @@ int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,<br>
return ret;<br>
}<br>
<br>
-static void vmw_fence_destroy(struct vmw_fence_obj *fence)<br>
-{<br>
- dma_fence_free(&fence->base);<br>
-}<br>
-<br>
int vmw_fence_create(struct vmw_fence_manager *fman,<br>
uint32_t seqno,<br>
struct vmw_fence_obj **p_fence)<br>
@@ -255,7 +244,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,<br>
if (unlikely(!fence))<br>
return -ENOMEM;<br>
<br>
- ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);<br>
+ ret = vmw_fence_obj_init(fman, fence, seqno);<br>
if (unlikely(ret != 0))<br>
goto out_err_init;<br>
<br>
@@ -267,77 +256,31 @@ int vmw_fence_create(struct vmw_fence_manager *fman,<br>
return ret;<br>
}<br>
<br>
-<br>
-static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)<br>
-{<br>
- struct vmw_user_fence *ufence =<br>
- container_of(fence, struct vmw_user_fence, fence);<br>
-<br>
- ttm_base_object_kfree(ufence, base);<br>
-}<br>
-<br>
-static void vmw_user_fence_base_release(struct ttm_base_object **p_base)<br>
-{<br>
- struct ttm_base_object *base = *p_base;<br>
- struct vmw_user_fence *ufence =<br>
- container_of(base, struct vmw_user_fence, base);<br>
- struct vmw_fence_obj *fence = &ufence->fence;<br>
-<br>
- *p_base = NULL;<br>
- vmw_fence_obj_unreference(&fence);<br>
-}<br>
-<br>
int vmw_user_fence_create(struct drm_file *file_priv,<br>
struct vmw_fence_manager *fman,<br>
uint32_t seqno,<br>
struct vmw_fence_obj **p_fence,<br>
uint32_t *p_handle)<br>
{<br>
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;<br>
- struct vmw_user_fence *ufence;<br>
+ struct drm_syncobj *ufence;<br>
struct vmw_fence_obj *tmp;<br>
int ret;<br>
<br>
- ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);<br>
- if (unlikely(!ufence)) {<br>
- ret = -ENOMEM;<br>
- goto out_no_object;<br>
- }<br>
-<br>
- ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,<br>
- vmw_user_fence_destroy);<br>
- if (unlikely(ret != 0)) {<br>
- kfree(ufence);<br>
- goto out_no_object;<br>
- }<br>
-<br>
- /*<br>
- * The base object holds a reference which is freed in<br>
- * vmw_user_fence_base_release.<br>
- */<br>
- tmp = vmw_fence_obj_reference(&ufence->fence);<br>
-<br>
- ret = ttm_base_object_init(tfile, &ufence->base, false,<br>
- VMW_RES_FENCE,<br>
- &vmw_user_fence_base_release);<br>
-<br>
+ ret = vmw_fence_create(fman, seqno, &tmp);<br>
+ if (ret != 0)<br>
+ return ret;<br>
<br>
- if (unlikely(ret != 0)) {<br>
- /*<br>
- * Free the base object's reference<br>
- */<br>
+ ret = drm_syncobj_create(&ufence, 0, &tmp->base);<br>
+ if (ret != 0) {<br>
vmw_fence_obj_unreference(&tmp);<br>
- goto out_err;<br>
+ return ret;<br>
}<br>
<br>
- *p_fence = &ufence->fence;<br>
- *p_handle = ufence->base.handle;<br>
-<br>
- return 0;<br>
-out_err:<br>
- tmp = &ufence->fence;<br>
- vmw_fence_obj_unreference(&tmp);<br>
-out_no_object:<br>
+ ret = drm_syncobj_get_handle(file_priv, ufence, p_handle);<br>
+ drm_syncobj_put(ufence);<br>
+ if (ret != 0)<br>
+ vmw_fence_obj_unreference(&tmp);<br>
+ *p_fence = tmp;<br>
return ret;<br>
}<br>
<br>
@@ -385,51 +328,13 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)<br>
spin_unlock(&fman->lock);<br>
}<br>
<br>
-<br>
-/**<br>
- * vmw_fence_obj_lookup - Look up a user-space fence object<br>
- *<br>
- * @tfile: A struct ttm_object_file identifying the caller.<br>
- * @handle: A handle identifying the fence object.<br>
- * @return: A struct vmw_user_fence base ttm object on success or<br>
- * an error pointer on failure.<br>
- *<br>
- * The fence object is looked up and type-checked. The caller needs<br>
- * to have opened the fence object first, but since that happens on<br>
- * creation and fence objects aren't shareable, that's not an<br>
- * issue currently.<br>
- */<br>
-static struct ttm_base_object *<br>
-vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)<br>
-{<br>
- struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);<br>
-<br>
- if (!base) {<br>
- pr_err("Invalid fence object handle 0x%08lx.\n",<br>
- (unsigned long)handle);<br>
- return ERR_PTR(-EINVAL);<br>
- }<br>
-<br>
- if (base->refcount_release != vmw_user_fence_base_release) {<br>
- pr_err("Invalid fence object handle 0x%08lx.\n",<br>
- (unsigned long)handle);<br>
- ttm_base_object_unref(&base);<br>
- return ERR_PTR(-EINVAL);<br>
- }<br>
-<br>
- return base;<br>
-}<br>
-<br>
-<br>
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,<br>
struct drm_file *file_priv)<br>
{<br>
struct drm_vmw_fence_wait_arg *arg =<br>
(struct drm_vmw_fence_wait_arg *)data;<br>
unsigned long timeout;<br>
- struct ttm_base_object *base;<br>
struct vmw_fence_obj *fence;<br>
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;<br>
int ret;<br>
uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);<br>
<br>
@@ -446,11 +351,10 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,<br>
arg->kernel_cookie = jiffies + wait_timeout;<br>
}<br>
<br>
- base = vmw_fence_obj_lookup(tfile, arg->handle);<br>
- if (IS_ERR(base))<br>
- return PTR_ERR(base);<br>
-<br>
- fence = &(container_of(base, struct vmw_user_fence, base)->fence);<br>
+ ret = drm_syncobj_find_fence(file_priv, arg->handle, 0, 0,<br>
+ (struct dma_fence **)&fence);<br>
+ if (ret != 0)<br>
+ return ret;<br>
<br>
timeout = jiffies;<br>
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {<br>
@@ -464,14 +368,14 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,<br>
ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);<br>
<br>
out:<br>
- ttm_base_object_unref(&base);<br>
+ vmw_fence_obj_unreference(&fence); // from find_fence<br>
<br>
/*<br>
* Optionally unref the fence object.<br>
*/<br>
<br>
if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))<br>
- return ttm_ref_object_base_unref(tfile, arg->handle);<br>
+ ret = drm_syncobj_destroy(file_priv, arg->handle);<br>
return ret;<br>
}<br>
<br>
@@ -480,36 +384,33 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,<br>
{<br>
struct drm_vmw_fence_signaled_arg *arg =<br>
(struct drm_vmw_fence_signaled_arg *) data;<br>
- struct ttm_base_object *base;<br>
struct vmw_fence_obj *fence;<br>
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;<br>
struct vmw_private *dev_priv = vmw_priv(dev);<br>
<br>
- base = vmw_fence_obj_lookup(tfile, arg->handle);<br>
- if (IS_ERR(base))<br>
- return PTR_ERR(base);<br>
+ int ret = drm_syncobj_find_fence(file_priv, arg->handle, 0, 0,<br>
+ (struct dma_fence **)&fence);<br>
+ if (ret != 0)<br>
+ return ret;<br>
<br>
- fence = &(container_of(base, struct vmw_user_fence, base)->fence);<br>
<br>
arg->signaled = vmw_fence_obj_signaled(fence);<br>
<br>
arg->signaled_flags = arg->flags;<br>
arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);<br>
<br>
- ttm_base_object_unref(&base);<br>
+ vmw_fence_obj_unreference(&fence); // from find_fence<br>
<br>
return 0;<br>
}<br>
<br>
<br>
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,<br>
- struct drm_file *file_priv)<br>
+ struct drm_file *file_priv)<br>
{<br>
struct drm_vmw_fence_arg *arg =<br>
(struct drm_vmw_fence_arg *) data;<br>
<br>
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,<br>
- arg->handle);<br>
+ return drm_syncobj_destroy(file_priv, arg->handle);<br>
}<br>
<br>
/**<br>
@@ -660,50 +561,34 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,<br>
struct vmw_private *dev_priv = vmw_priv(dev);<br>
struct drm_vmw_fence_event_arg *arg =<br>
(struct drm_vmw_fence_event_arg *) data;<br>
+ struct drm_syncobj *ufence = NULL;<br>
struct vmw_fence_obj *fence = NULL;<br>
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);<br>
- struct ttm_object_file *tfile = vmw_fp->tfile;<br>
struct drm_vmw_fence_rep __user *user_fence_rep =<br>
- (struct drm_vmw_fence_rep __user *)(unsigned long)<br>
- arg->fence_rep;<br>
- uint32_t handle;<br>
+ (struct drm_vmw_fence_rep __user *)(unsigned long)arg->fence_rep;<br>
+ u32 handle = 0;<br>
int ret;<br>
<br>
/*<br>
- * Look up an existing fence object,<br>
- * and if user-space wants a new reference,<br>
- * add one.<br>
+ * Look up an existing fence object and if user-space<br>
+ * wants a new reference add one.<br>
*/<br>
if (arg->handle) {<br>
- struct ttm_base_object *base =<br>
- vmw_fence_obj_lookup(tfile, arg->handle);<br>
+ ufence = drm_syncobj_find(file_priv, arg->handle);<br>
+ if (!ufence)<br>
+ return -ENOENT;<br>
<br>
- if (IS_ERR(base))<br>
- return PTR_ERR(base);<br>
-<br>
- fence = &(container_of(base, struct vmw_user_fence,<br>
- base)->fence);<br>
- (void) vmw_fence_obj_reference(fence);<br>
+ fence = container_of(drm_syncobj_fence_get(ufence),<br>
+ typeof(*fence), base);<br>
<br>
if (user_fence_rep != NULL) {<br>
- ret = ttm_ref_object_add(vmw_fp->tfile, base,<br>
- NULL, false);<br>
- if (unlikely(ret != 0)) {<br>
- DRM_ERROR("Failed to reference a fence "<br>
- "object.\n");<br>
- goto out_no_ref_obj;<br>
+ ret = drm_syncobj_get_handle(file_priv, ufence, &handle);<br></blockquote><div><br></div><div> This bit looks suspicious. So we're allocating a new handle here, but userspace is not aware of this handle and will never release it, so each call to vmw_fence_event_ioctl will create another handle and increase the refcount on the syncobj. You probably want:</div><div>if (handle)</div><div> drm_syncobj_destroy(file_priv, handle);</div><div>executed on all paths, but even then the behavior won't match the old code. In the old code the refcount on the original handle was increased; in the new code we create a new handle. If we unconditionally delete it, we're back to the original refcount; and if we don't, userspace will never know about it and won't release it either.</div><div><br></div><div>z</div></div></div>