[Intel-xe] [PATCH v4 4/5] xe/drm/pm: Toggle d3cold_allowed using vram_usages

Rodrigo Vivi rodrigo.vivi at intel.com
Fri Jul 7 19:37:07 UTC 2023


On Thu, Jul 06, 2023 at 05:32:07PM +0530, Anshuman Gupta wrote:
> Adding support to control d3cold by using vram_usages metric from
> ttm resource manager.
> When root port is capable of d3cold but xe has disallowed d3cold
> due to vram_usages above vram_d3cold_threshold. It is required to
                                               ^ typos fixed (was "vrame_usages" / "vram_d3ccold_threshol")

> disable d3cold to avoid any resume failure because root port can
> still transition to d3cold when all of pcie endpoints and
> {upstream, virtual} switch ports will transition to d3hot.
> Also cleaning up the TODO code comment.
> 
> v2:
> - Modify d3cold.allowed in xe_pm_d3cold_allowed_toggle. [Riana]
> - Cond changed (total_vram_used_mb < xe->d3cold.vram_threshold)
>   according to doc comment.
> 
> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
> Signed-off-by: Anshuman Gupta <anshuman.gupta at intel.com>
> Reviewed-by: Badal Nilawar <badal.nilawar at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_pci.c | 27 ++++++++++++++++++++++++---
>  drivers/gpu/drm/xe/xe_pm.c  | 26 ++++++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_pm.h  |  1 +
>  3 files changed, 51 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
> index ce4bdfcbc46d..8585b090ff0e 100644
> --- a/drivers/gpu/drm/xe/xe_pci.c
> +++ b/drivers/gpu/drm/xe/xe_pci.c
> @@ -754,6 +754,24 @@ static int xe_pci_resume(struct device *dev)
>  	return 0;
>  }
>  
> +static void d3cold_toggle(struct pci_dev *pdev, bool enable)
> +{
> +	struct xe_device *xe = pdev_to_xe_device(pdev);
> +	struct pci_dev *root_pdev;
> +
> +	if (!xe->d3cold.capable)
> +		return;
> +
> +	root_pdev = pcie_find_root_port(pdev);
> +	if (!root_pdev)
> +		return;
> +
> +	if (enable)
> +		pci_d3cold_enable(root_pdev);
> +	else
> +		pci_d3cold_disable(root_pdev);
> +}
> +
>  static int xe_pci_runtime_suspend(struct device *dev)
>  {
>  	struct pci_dev *pdev = to_pci_dev(dev);
> @@ -771,6 +789,7 @@ static int xe_pci_runtime_suspend(struct device *dev)
>  		pci_ignore_hotplug(pdev);
>  		pci_set_power_state(pdev, PCI_D3cold);
>  	} else {
> +		d3cold_toggle(pdev, false);
>  		pci_set_power_state(pdev, PCI_D3hot);
>  	}
>  
> @@ -795,6 +814,8 @@ static int xe_pci_runtime_resume(struct device *dev)
>  			return err;
>  
>  		pci_set_master(pdev);
> +	} else {
> +		d3cold_toggle(pdev, true);
>  	}
>  
>  	return xe_pm_runtime_resume(xe);
> @@ -808,15 +829,15 @@ static int xe_pci_runtime_idle(struct device *dev)
>  	if (!xe->d3cold.capable) {
>  		xe->d3cold.allowed = false;
>  	} else {
> +		xe_pm_d3cold_allowed_toggle(xe);
> +
>  		/*
>  		 * TODO: d3cold should be allowed (true) if
>  		 * (IS_DGFX(xe) && !xe_device_mem_access_ongoing(xe))
>  		 * but maybe include some other conditions. So, before
>  		 * we can re-enable the D3cold, we need to:
>  		 * 1. rewrite the VRAM save / restore to avoid buffer object locks
> -		 * 2. block D3cold if we have a big amount of device memory in use
> -		 *    in order to reduce the latency.
> -		 * 3. at resume, detect if we really lost power and avoid memory
> +		 * 2. at resume, detect if we really lost power and avoid memory
>  		 *    restoration if we were only up to d3cold
>  		 */
>  		xe->d3cold.allowed = false;
> diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
> index 07e204990aa9..74a9bccb78c7 100644
> --- a/drivers/gpu/drm/xe/xe_pm.c
> +++ b/drivers/gpu/drm/xe/xe_pm.c
> @@ -292,3 +292,29 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
>  
>  	return 0;
>  }
> +
> +void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
> +{
> +	struct ttm_resource_manager *man;
> +	u32 total_vram_used_mb = 0;
> +	u64 vram_used;
> +	int i;
> +
> +	/* TODO: Extend the logic to beyond XE_PL_VRAM1 */

why? this looks like the max we have there.
or should we change that enum to have the XE_PL_MAX?
anyway, it doesn't look like here is the best place for this TODO.

anyway:
Acked-by: Rodrigo Vivi <rodrigo.vivi at intel.com>

> +	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
> +		man = ttm_manager_type(&xe->ttm, i);
> +		if (man) {
> +			vram_used = ttm_resource_manager_usage(man);
> +			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
> +		}
> +	}
> +
> +	mutex_lock(&xe->d3cold.lock);
> +
> +	if (total_vram_used_mb < xe->d3cold.vram_threshold)
> +		xe->d3cold.allowed = true;
> +	else
> +		xe->d3cold.allowed = false;
> +
> +	mutex_unlock(&xe->d3cold.lock);
> +}
> diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
> index bbd91a5855cd..ee30cf025f64 100644
> --- a/drivers/gpu/drm/xe/xe_pm.h
> +++ b/drivers/gpu/drm/xe/xe_pm.h
> @@ -25,5 +25,6 @@ bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe);
>  int xe_pm_runtime_get_if_active(struct xe_device *xe);
>  void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
>  int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
> +void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
>  
>  #endif
> -- 
> 2.38.0
> 


More information about the Intel-xe mailing list