[RFC 07/20] drm/xe: Runtime PM wake on every IOCTL
Gupta, Anshuman
anshuman.gupta at intel.com
Tue Jan 2 11:30:31 UTC 2024
> -----Original Message-----
> From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of Rodrigo
> Vivi
> Sent: Thursday, December 28, 2023 7:42 AM
> To: intel-xe at lists.freedesktop.org
> Cc: Vivi, Rodrigo <rodrigo.vivi at intel.com>
> Subject: [RFC 07/20] drm/xe: Runtime PM wake on every IOCTL
>
> Let's ensure our PCI device is awake on every IOCTL entry.
> Let's increase the runtime_pm protection and start moving it to the
> outer bounds.
IMO we need to decouple DC9 from runtime suspend, since the previous patch
"[RFC,05/20] drm/xe: Prepare display for D3Cold" coupled them. Let DC9 be
entered whenever all displays are off. Otherwise, blocking runtime suspend
on every IOCTL will also block DC9 unnecessarily (see the sketch below).
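
Roughly what I have in mind; xe_display_all_off() and
xe_display_enable_dc9() are made-up names for illustration, not existing
helpers:

static void xe_display_try_dc9(struct xe_device *xe)
{
	/* Enter DC9 as soon as every display is off, regardless of
	 * whether the device is runtime suspended.
	 */
	if (xe_display_all_off(xe))
		xe_display_enable_dc9(xe);
}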
Thanks,
Anshuman Gupta.
>
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_device.c | 32 ++++++++++++++++++++++++++++++--
> drivers/gpu/drm/xe/xe_pm.c | 15 +++++++++++++++
> drivers/gpu/drm/xe/xe_pm.h | 1 +
> 3 files changed, 46 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index dc3721bb37b1e..ee9b6612eec43 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -140,15 +140,43 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> DRM_RENDER_ALLOW),
> };
>
> +static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> +{
> + struct drm_file *file_priv = file->private_data;
> + struct xe_device *xe = to_xe_device(file_priv->minor->dev);
> + long ret;
> +
> + ret = xe_pm_runtime_get_sync(xe);
> + if (ret >= 0)
> + ret = drm_ioctl(file, cmd, arg);
> + xe_pm_runtime_put(xe);
> +
> + return ret;
> +}
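
(Side note: pm_runtime_get_sync() raises the usage counter even when it
fails, so the unconditional xe_pm_runtime_put() above is the correct
pairing. If you ever want to drop the put from the error path instead, a
sketch with pm_runtime_resume_and_get(), which releases the reference on
failure by itself:

	ret = pm_runtime_resume_and_get(xe->drm.dev);
	if (ret < 0)
		return ret;		/* no put needed on failure */
	ret = drm_ioctl(file, cmd, arg);
	pm_runtime_put(xe->drm.dev);
	return ret;
)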
> +
> +static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> +{
> + struct drm_file *file_priv = file->private_data;
> + struct xe_device *xe = to_xe_device(file_priv->minor->dev);
> + long ret;
> +
> + ret = xe_pm_runtime_get_sync(xe);
> + if (ret >= 0)
> + ret = drm_compat_ioctl(file, cmd, arg);
> + xe_pm_runtime_put(xe);
> +
> + return ret;
> +}
> +
> static const struct file_operations xe_driver_fops = {
> .owner = THIS_MODULE,
> .open = drm_open,
> .release = drm_release_noglobal,
> - .unlocked_ioctl = drm_ioctl,
> + .unlocked_ioctl = xe_drm_ioctl,
> .mmap = drm_gem_mmap,
> .poll = drm_poll,
> .read = drm_read,
> - .compat_ioctl = drm_compat_ioctl,
> + .compat_ioctl = xe_drm_compat_ioctl,
> .llseek = noop_llseek,
> #ifdef CONFIG_PROC_FS
> .show_fdinfo = drm_show_fdinfo,
> diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
> index 45114e4e76a5a..f599707413f18 100644
> --- a/drivers/gpu/drm/xe/xe_pm.c
> +++ b/drivers/gpu/drm/xe/xe_pm.c
> @@ -411,6 +411,21 @@ void xe_pm_runtime_put(struct xe_device *xe)
> pm_runtime_put(xe->drm.dev);
> }
>
> +/**
> + * xe_pm_runtime_get_sync - Get a runtime_pm reference and resume synchronously
> + * @xe: xe device instance
> + *
> + * Returns: Any number greater than or equal to 0 for success, negative error
> + * code otherwise.
> + */
> +int xe_pm_runtime_get_sync(struct xe_device *xe)
> +{
> + if (WARN_ON(xe_pm_read_callback_task(xe) == current))
> + return -ELOOP;
> +
> +	return pm_runtime_get_sync(xe->drm.dev);
> +}
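
(For context: the WARN_ON above presumably guards against acquiring the
wakeref from inside the runtime PM callbacks themselves, where
pm_runtime_get_sync() would wait on the very transition the current task
is executing. A hypothetical illustration:

	/* If called from within xe_pm_runtime_resume()/_suspend(), the
	 * callback task is current, so this returns -ELOOP with a WARN
	 * splat instead of deadlocking.
	 */
	ret = xe_pm_runtime_get_sync(xe);
)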
> +
> /**
> * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
> * @xe: xe device instance
> diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
> index 67a9bf3dd379b..d0e6011a80688 100644
> --- a/drivers/gpu/drm/xe/xe_pm.h
> +++ b/drivers/gpu/drm/xe/xe_pm.h
> @@ -26,6 +26,7 @@ bool xe_pm_runtime_suspended(struct xe_device *xe);
> int xe_pm_runtime_suspend(struct xe_device *xe);
> int xe_pm_runtime_resume(struct xe_device *xe);
> void xe_pm_runtime_get(struct xe_device *xe);
> +int xe_pm_runtime_get_sync(struct xe_device *xe);
> void xe_pm_runtime_put(struct xe_device *xe);
> int xe_pm_runtime_get_if_active(struct xe_device *xe);
> bool xe_pm_runtime_get_if_in_use(struct xe_device *xe);
> --
> 2.43.0