[PATCH v2 1/3] drm/vmwgfx: Introduce userspace managed surfaces
Zack Rusin
zack.rusin at broadcom.com
Tue Nov 19 19:38:26 UTC 2024
On Fri, Oct 18, 2024 at 5:01 PM Maaz Mombasawala
<maaz.mombasawala at broadcom.com> wrote:
>
> The kernel currently exposes both mobs and surfaces to userspace through
> ioctls. We would like to move to a model where the kernel only exposes
> mobs and userspace manages surfaces. This would simplify the kernel paths
> for surfaces, since these userspace-managed surfaces will not support prime
> transfer.
>
> Allow userspace submission of surface create and destroy commands.
> Userspace submits its own surface id, which is mapped to a ttm base object
> and a resource with their corresponding ids.
>
> Signed-off-by: Maaz Mombasawala <maaz.mombasawala at broadcom.com>
> ---
> drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 23 ++-
> drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 234 +++++++++++++++++++++---
> drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3 +
> drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 157 +++++++++++++++-
> include/uapi/drm/vmwgfx_drm.h | 4 +
> 5 files changed, 390 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> index 3f4719b3c268..67f75d366f9d 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> @@ -1,8 +1,8 @@
> /* SPDX-License-Identifier: GPL-2.0 OR MIT */
> /**************************************************************************
> *
> - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
> - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
> + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
> + * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
> *
> * Permission is hereby granted, free of charge, to any person obtaining a
> * copy of this software and associated documentation files (the
> @@ -196,7 +196,8 @@ enum vmw_res_type {
> enum vmw_cmdbuf_res_type {
> vmw_cmdbuf_res_shader,
> vmw_cmdbuf_res_view,
> - vmw_cmdbuf_res_streamoutput
> + vmw_cmdbuf_res_streamoutput,
> + vmw_cmdbuf_res_surface
> };
>
> struct vmw_cmdbuf_res_manager;
> @@ -263,6 +264,11 @@ struct vmw_surface {
> struct list_head view_list;
> };
>
> +struct vmw_cmdbuf_surface {
> + struct vmw_surface surface;
> + struct ttm_base_object base;
> +};
> +
> struct vmw_fifo_state {
> unsigned long reserved_size;
> u32 *dynamic_buffer;
> @@ -1199,6 +1205,17 @@ int vmw_dumb_create(struct drm_file *file_priv,
> struct drm_device *dev,
> struct drm_mode_create_dumb *args);
>
> +struct vmw_cmdbuf_surface *vmw_res_to_cmdbuf_srf(struct vmw_resource *res);
> +
> +int vmw_cmdbuf_surface_define(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + struct vmw_surface_metadata *metadata,
> + uint32 user_key);
> +
> +int vmw_cmdbuf_surface_destroy(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + uint32 user_key);
> +
> /*
> * Shader management - vmwgfx_shader.c
> */
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> index 2e52d73eba48..0468c9f4f293 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> @@ -1,7 +1,8 @@
> // SPDX-License-Identifier: GPL-2.0 OR MIT
> /**************************************************************************
> *
> - * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
> + * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
> + * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
> *
> * Permission is hereby granted, free of charge, to any person obtaining a
> * copy of this software and associated documentation files (the
> @@ -614,28 +615,77 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
> return ret;
> }
>
> -/**
> - * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
> - * resource validate list unless it's already there.
> - *
> - * @dev_priv: Pointer to a device private structure.
> - * @sw_context: Pointer to the software context.
> - * @res_type: Resource type.
> - * @dirty: Whether to change dirty status.
> - * @converter: User-space visible type specific information.
> - * @id_loc: Pointer to the location in the command buffer currently being parsed
> - * from where the user-space resource id handle is located.
> - * @p_res: Pointer to pointer to resource validation node. Populated on
> - * exit.
> - */
> static int
> -vmw_cmd_res_check(struct vmw_private *dev_priv,
> - struct vmw_sw_context *sw_context,
> - enum vmw_res_type res_type,
> - u32 dirty,
> - const struct vmw_user_resource_conv *converter,
> - uint32_t *id_loc,
> - struct vmw_resource **p_res)
> +vmw_cmd_cmdbuf_surface_check(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + u32 dirty,
> + uint32_t *id_loc,
> + struct vmw_resource **p_res)
> +{
> + struct vmw_res_cache_entry *rcache =
> + &sw_context->res_cache[vmw_res_surface];
> + struct vmw_resource *res;
> + int ret = 0;
> + bool needs_unref = false;
> + uint32 handle;
> +
> + res = vmw_cmdbuf_res_lookup(sw_context->man, vmw_cmdbuf_res_surface,
> + *id_loc);
> + if (!IS_ERR(res)) {
> + kref_get(&res->kref);
> + needs_unref = true;
> + handle = vmw_res_to_cmdbuf_srf(res)->base.handle;
> + } else {
> + VMW_DEBUG_USER("Could not find resource 0x%08x.\n", *id_loc);
> + return -EINVAL;
> + }
> +
> + if (likely(rcache->valid_handle && handle == rcache->handle)) {
> + res = rcache->res;
> + if (dirty)
> + vmw_validation_res_set_dirty(sw_context->ctx,
> + rcache->private, dirty);
> + } else {
> + unsigned int size =
> + vmw_execbuf_res_size(dev_priv, vmw_res_surface);
> +
> + ret = vmw_validation_preload_res(sw_context->ctx, size);
> + if (unlikely(ret != 0))
> + goto cmdbuf_check_done;
> +
> + ret = vmw_execbuf_res_val_add(sw_context, res, dirty,
> + vmw_val_add_flag_none);
> + if (unlikely(ret != 0))
> + goto cmdbuf_check_done;
> +
> + if (rcache->valid && rcache->res == res) {
> + rcache->valid_handle = true;
> + rcache->handle = handle;
> + }
> + }
> +
> + ret = vmw_resource_relocation_add(sw_context, res,
> + vmw_ptr_diff(sw_context->buf_start,
> + id_loc),
> + vmw_res_rel_normal);
> + if (p_res)
> + *p_res = res;
> +
> +cmdbuf_check_done:
> + if (needs_unref)
> + vmw_resource_unreference(&res);
> +
> + return ret;
> +}
> +
> +static int
> +vmw_cmd_gen_res_check(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + enum vmw_res_type res_type,
> + u32 dirty,
> + const struct vmw_user_resource_conv *converter,
> + uint32_t *id_loc,
> + struct vmw_resource **p_res)
> {
> struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
> struct vmw_resource *res;
> @@ -698,6 +748,40 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
> return ret;
> }
>
> +/**
> + * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
> + * resource validate list unless it's already there.
> + *
> + * @dev_priv: Pointer to a device private structure.
> + * @sw_context: Pointer to the software context.
> + * @res_type: Resource type.
> + * @dirty: Whether to change dirty status.
> + * @converter: User-space visible type specific information. This is optional,
> + * it is not used for userspace managed surfaces.
> + * @id_loc: Pointer to the location in the command buffer currently being parsed
> + * from where the user-space resource id handle is located.
> + * @p_res: Pointer to pointer to resource validation node. Populated on
> + * exit.
> + */
> +static int
> +vmw_cmd_res_check(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + enum vmw_res_type res_type,
> + u32 dirty,
> + const struct vmw_user_resource_conv *converter,
> + uint32_t *id_loc,
> + struct vmw_resource **p_res)
> +{
> + if (res_type == vmw_res_surface &&
> + (*id_loc < VMWGFX_NUM_GB_SURFACE + 1)) {
> + return vmw_cmd_cmdbuf_surface_check(dev_priv, sw_context, dirty,
> + id_loc, p_res);
> + } else {
> + return vmw_cmd_gen_res_check(dev_priv, sw_context, res_type,
> + dirty, converter, id_loc, p_res);
> + }
> +}
> +
> /**
> * vmw_rebind_all_dx_query - Rebind DX query associated with the context
> *
> @@ -1759,6 +1843,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
> if (ret)
> return ret;
>
> + /**
> + * Userspace managed surface can be unbound by putting SVGA_ID_INVALID
> + * as mob id, so this particular case is valid.
> + */
> + if ((res_type == vmw_res_surface) &&
> + (*res_id < VMWGFX_NUM_GB_SURFACE + 1) &&
> + (*buf_id == SVGA_ID_INVALID))
> + return 0;
> +
> return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
> backup_offset);
> }
> @@ -3212,6 +3305,95 @@ static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
> &cmd->body.argsBufferSid, NULL);
> }
>
> +static inline int
> +vmw_cmd_get_expected_surface_version(struct vmw_private *dev_priv,
> + uint32 array_size,
> + uint32 *cmd_id)
> +{
> + if (array_size > 0) {
> + if (has_sm5_context(dev_priv))
> + *cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
> + else if (has_sm4_1_context(dev_priv))
> + *cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
> + else if (has_sm4_context(dev_priv))
> + *cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
> + } else if (array_size == 0)
> + *cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
> + else
> + return -EINVAL;
> + return 0;
> +}
> +
> +static int vmw_cmd_define_gb_surface_v4(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + SVGA3dCmdHeader *header)
> +{
> + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineGBSurface_v4);
> + uint32 expected_cmd_id;
> + struct vmw_surface_metadata metadata = {0};
> + int ret;
> +
> + cmd = container_of(header, typeof(*cmd), header);
> +
> + ret = vmw_cmd_get_expected_surface_version(dev_priv,
> + cmd->body.arraySize,
> + &expected_cmd_id);
> + if (ret || (expected_cmd_id != header->id))
> + return -EINVAL;
> +
> + if (cmd->body.sid >= VMWGFX_NUM_GB_SURFACE)
> + return -EINVAL;
> +
> + metadata.flags = cmd->body.surfaceFlags;
> + metadata.format = cmd->body.format;
> + metadata.mip_levels[0] = cmd->body.numMipLevels;
> + metadata.multisample_count = cmd->body.multisampleCount;
> + metadata.multisample_pattern = cmd->body.multisamplePattern;
> + metadata.quality_level = cmd->body.qualityLevel;
> + metadata.autogen_filter = cmd->body.autogenFilter;
> + metadata.array_size = cmd->body.arraySize;
> + metadata.buffer_byte_stride = cmd->body.bufferByteStride;
> + metadata.num_sizes = 1;
> + metadata.base_size.width = cmd->body.size.width;
> + metadata.base_size.height = cmd->body.size.height;
> + metadata.base_size.depth = cmd->body.size.depth;
> +
> + ret = vmw_cmdbuf_surface_define(dev_priv, sw_context, &metadata,
> + cmd->body.sid);
> + if (unlikely(ret))
> + return ret;
> +
> + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
> + VMW_RES_DIRTY_NONE, NULL, &cmd->body.sid, NULL);
> +
> + return ret;
> +}
> +
> +static int vmw_cmd_destroy_gb_surface(struct vmw_private *dev_priv,
> + struct vmw_sw_context *sw_context,
> + SVGA3dCmdHeader *header)
> +{
> + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyGBSurface);
> + int ret;
> +
> + cmd = container_of(header, typeof(*cmd), header);
> +
> + if (cmd->body.sid >= VMWGFX_NUM_GB_SURFACE)
> + return -EINVAL;
> +
> + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
> + VMW_RES_DIRTY_NONE, NULL, &cmd->body.sid, NULL);
> + if (unlikely(ret))
> + return ret;
> +
> + ret = vmw_cmdbuf_surface_destroy(dev_priv, sw_context, cmd->body.sid);
> + if (unlikely(ret))
> + return ret;
> +
> + return 0;
> +}
> +
> +
> static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
> struct vmw_sw_context *sw_context,
> void *buf, uint32_t *size)
> @@ -3350,8 +3532,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
> false, false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
> false, false, true),
> - VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
> - false, false, true),
> + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_destroy_gb_surface,
> + true, false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
> true, false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
> @@ -3602,6 +3784,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
> VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
> &vmw_cmd_dispatch_indirect, true, false, true),
> + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
> + &vmw_cmd_define_gb_surface_v4, true, false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
> false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
> @@ -3612,8 +3796,6 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
> &vmw_cmd_dx_bind_streamoutput, true, false, true),
> VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
> &vmw_cmd_dx_so_define, true, false, true),
> - VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
> - &vmw_cmd_invalid, false, false, true),
> };
>
> bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
> index 835d1eed8dd9..cfa14a34a679 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
> @@ -112,6 +112,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
> case DRM_VMW_PARAM_DEVICE_ID:
> param->value = to_pci_dev(dev_priv->drm.dev)->device;
> break;
> + case DRM_VMW_PARAM_USER_SRF:
> + param->value = 1;
> + break;
We should probably guard this with has_sm5_context() and
SVGA_CAP_GBOBJECTS, right? Otherwise the commands will start failing.
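Roughly something like this (untested sketch, assuming the existing
dev_priv->capabilities field carries SVGA_CAP_GBOBJECTS the way it does
elsewhere in the driver):

	case DRM_VMW_PARAM_USER_SRF:
		/*
		 * Only advertise user-managed surfaces when the device can
		 * actually run the surface commands.
		 */
		param->value = has_sm5_context(dev_priv) &&
			       (dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
		break;

That way the param reads 0 on configurations that can't support the new
path, instead of userspace finding out at submission time.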
z