[Intel-gfx] [PATCH v2 4/4] drm/i915: Handle all GTs on driver (un)load paths
Matt Roper
matthew.d.roper at intel.com
Thu Sep 15 23:26:54 UTC 2022
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
This, along with the changes already landed in commit 1c66a12ab431
("drm/i915: Handle each GT on init/release and suspend/resume"), makes
the engines from all GTs actually known to the driver.

To accomplish this we need to sprinkle for_each_gt calls in a number of
places, but the change is otherwise pretty uneventful.
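For reference, the iteration pattern relied on throughout the patch is
sketched below (an illustrative fragment only, mirroring the
i915_gem_init() hunk further down; it assumes the existing
for_each_gt() iterator from gt/intel_gt.h):

  struct intel_gt *gt;
  unsigned int i;
  int ret;

  /* Walk every GT (tile) of the device, not just the primary GT. */
  for_each_gt(gt, dev_priv, i) {
          ret = intel_gt_init(gt);
          if (ret)
                  goto err_unlock;  /* error path unwinds all GTs */
  }

The same loop shape replaces the previous single-GT to_gt(dev_priv)
calls on the remove/release and wedging paths.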
v2:
- Consolidate adjacent GT loops in a couple places. (Daniele)
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
---
drivers/gpu/drm/i915/i915_driver.c | 3 ++-
drivers/gpu/drm/i915/i915_gem.c | 43 +++++++++++++++++++++---------
2 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c459eb362c47..9d1fc2477f80 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -1661,7 +1661,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_gt_runtime_resume(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_resume(gt);
enable_rpm_wakeref_asserts(rpm);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f18cc6270b2b..e936e67f3411 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1128,6 +1128,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915)
int i915_gem_init(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt;
+ unsigned int i;
int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
@@ -1158,9 +1160,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_gt_init(to_gt(dev_priv));
- if (ret)
- goto err_unlock;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_gt_init(gt);
+ if (ret)
+ goto err_unlock;
+ }
return 0;
@@ -1173,8 +1177,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_unlock:
i915_gem_drain_workqueue(dev_priv);
- if (ret != -EIO)
- intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
+ if (ret != -EIO) {
+ for_each_gt(gt, dev_priv, i) {
+ intel_gt_driver_remove(gt);
+ intel_gt_driver_release(gt);
+ intel_uc_cleanup_firmwares(&gt->uc);
+ }
+ }
if (ret == -EIO) {
/*
@@ -1182,10 +1191,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!intel_gt_is_wedged(to_gt(dev_priv))) {
- i915_probe_error(dev_priv,
- "Failed to initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i) {
+ if (!intel_gt_is_wedged(gt)) {
+ i915_probe_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ intel_gt_set_wedged(gt);
+ }
}
/* Minimal basic recovery for KMS */
@@ -1213,10 +1224,14 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt;
+ unsigned int i;
+
intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_gt_driver_remove(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_remove(gt);
dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
@@ -1227,9 +1242,13 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- intel_gt_driver_release(to_gt(dev_priv));
+ struct intel_gt *gt;
+ unsigned int i;
- intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
+ for_each_gt(gt, dev_priv, i) {
+ intel_gt_driver_release(gt);
+ intel_uc_cleanup_firmwares(&gt->uc);
+ }
i915_gem_drain_freed_objects(dev_priv);
--
2.37.3