[RFC 08/34] drm/xe: Runtime PM wake on every IOCTL

Matthew Auld matthew.auld at intel.com
Mon Feb 5 09:39:27 UTC 2024


On 26/01/2024 20:30, Rodrigo Vivi wrote:
> Let's ensure our PCI device is awake on every IOCTL entry.
> Let's increase the runtime_pm protection and start moving
> that to the outer bounds.
> 
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> ---
>   drivers/gpu/drm/xe/xe_device.c | 32 ++++++++++++++++++++++++++++++--
>   drivers/gpu/drm/xe/xe_pm.c     | 15 +++++++++++++++
>   drivers/gpu/drm/xe/xe_pm.h     |  1 +
>   3 files changed, 46 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index 01db34f06a7d..ab41202ecaf8 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -141,15 +141,43 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
>   			  DRM_RENDER_ALLOW),
>   };
>   
> +static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> +{
> +	struct drm_file *file_priv = file->private_data;
> +	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
> +	long ret;
> +
> +	ret = xe_pm_runtime_get_sync(xe);
> +	if (ret >= 0)
> +		ret = drm_ioctl(file, cmd, arg);
> +	xe_pm_runtime_put(xe);
> +
> +	return ret;
> +}
> +
> +static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> +{
> +	struct drm_file *file_priv = file->private_data;
> +	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
> +	long ret;
> +
> +	ret = xe_pm_runtime_get_sync(xe);
> +	if (ret >= 0)
> +		ret = drm_compat_ioctl(file, cmd, arg);
> +	xe_pm_runtime_put(xe);
> +
> +	return ret;
> +}
> +
>   static const struct file_operations xe_driver_fops = {
>   	.owner = THIS_MODULE,
>   	.open = drm_open,
>   	.release = drm_release_noglobal,
> -	.unlocked_ioctl = drm_ioctl,
> +	.unlocked_ioctl = xe_drm_ioctl,
>   	.mmap = drm_gem_mmap,
>   	.poll = drm_poll,
>   	.read = drm_read,
> -	.compat_ioctl = drm_compat_ioctl,
> +	.compat_ioctl = xe_drm_compat_ioctl,
>   	.llseek = noop_llseek,
>   #ifdef CONFIG_PROC_FS
>   	.show_fdinfo = drm_show_fdinfo,
> diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
> index 3ef14937d5d2..d98f4bb3ad02 100644
> --- a/drivers/gpu/drm/xe/xe_pm.c
> +++ b/drivers/gpu/drm/xe/xe_pm.c
> @@ -429,6 +429,21 @@ void xe_pm_runtime_put(struct xe_device *xe)
>   	}
>   }
>   
> +/**
> + * xe_pm_runtime_get_sync - Get a runtime_pm reference and resume synchronously
> + * @xe: xe device instance
> + *
> + * Returns: Any number grater than or equal to 0 for success, negative error

s/grater/greater/

> + * code otherwise.
> + */
> +int xe_pm_runtime_get_sync(struct xe_device *xe)

AFAICT we now have the following API:

xe_pm_runtime_get()
xe_pm_runtime_get_sync()

Which one should users pick? Having a _sync() variant makes 
xe_pm_runtime_get() sound like it's async. Is xe_pm_runtime_get_sync() 
meant just for the ioctl level? Maybe we should add an _ioctl suffix or 
something to make that clear? Something like the sketch below.
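
For example (just a sketch, the body is identical to 
xe_pm_runtime_get_sync() in this patch; xe_pm_runtime_get_ioctl is only 
a placeholder name to illustrate the idea, not something that exists):

int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	/* Same guard as in the patch: refuse a recursive wake from inside
	 * the runtime_pm callbacks themselves.
	 */
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	/* Synchronous resume; pm_runtime_get_sync() raises the usage
	 * counter even on failure, so the caller still needs a matching
	 * xe_pm_runtime_put().
	 */
	return pm_runtime_get_sync(xe->drm.dev);
}

That way the ioctl entry points advertise what they are for, and the 
plain xe_pm_runtime_get() keeps its current meaning.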

Otherwise I think LGTM.

> +{
> +	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
> +		return -ELOOP;
> +
> +	return pm_runtime_get_sync(xe->drm.dev);
> +}
> +
>   /**
>    * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
>    * @xe: xe device instance
> diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
> index a672adffd0e1..7d8cf87b95d2 100644
> --- a/drivers/gpu/drm/xe/xe_pm.h
> +++ b/drivers/gpu/drm/xe/xe_pm.h
> @@ -27,6 +27,7 @@ bool xe_pm_runtime_suspended(struct xe_device *xe);
>   int xe_pm_runtime_suspend(struct xe_device *xe);
>   int xe_pm_runtime_resume(struct xe_device *xe);
>   void xe_pm_runtime_get(struct xe_device *xe);
> +int xe_pm_runtime_get_sync(struct xe_device *xe);
>   void xe_pm_runtime_put(struct xe_device *xe);
>   int xe_pm_runtime_get_if_active(struct xe_device *xe);
>   bool xe_pm_runtime_get_if_in_use(struct xe_device *xe);

