[PATCH v3 67/71] drm/vc4: Switch to drmm_mutex_init
Maxime Ripard
maxime at cerno.tech
Wed Jun 29 12:35:06 UTC 2022
Every call to mutex_init() is supposed to be balanced by a call to
mutex_destroy(), which the vc4 driver never did.

Since a DRM-managed variant of mutex_init() has been introduced
(drmm_mutex_init()), let's just switch to it so the mutexes are torn
down automatically when the DRM device is released.
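
For context, the general before/after pattern looks roughly like the
sketch below. This is not taken from the vc4 sources; foo_dev and
foo_init are made-up names used purely for illustration:

    #include <linux/mutex.h>

    #include <drm/drm_device.h>
    #include <drm/drm_managed.h>

    /* Hypothetical driver-private structure, for illustration only. */
    struct foo_dev {
            struct drm_device base;
            struct mutex lock;
    };

    static int foo_init(struct foo_dev *foo)
    {
            /*
             * Before: a bare mutex_init(), which strictly ought to be
             * paired with mutex_destroy() on teardown:
             *
             *      mutex_init(&foo->lock);
             *
             * After: the DRM-managed variant registers a release action
             * on the drm_device that calls mutex_destroy() for us, so no
             * explicit teardown path is needed. It can fail, so the
             * return value has to be checked.
             */
            return drmm_mutex_init(&foo->base, &foo->lock);
    }

Because drmm_mutex_init() can fail, each conversion in the diff below
also gains an error path where the plain mutex_init() had none.
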
Signed-off-by: Maxime Ripard <maxime at cerno.tech>
---
drivers/gpu/drm/vc4/vc4_bo.c | 15 +++++++++++++--
drivers/gpu/drm/vc4/vc4_drv.c | 4 +++-
drivers/gpu/drm/vc4/vc4_gem.c | 10 ++++++++--
drivers/gpu/drm/vc4/vc4_hdmi.c | 5 ++++-
4 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 9706fec47bcd..2ccf96b764db 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -394,6 +394,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
@@ -404,7 +405,11 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
- mutex_init(&bo->madv_lock);
+
+ ret = drmm_mutex_init(dev, &bo->madv_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
@@ -1005,6 +1010,7 @@ static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
int i;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1024,7 +1030,12 @@ int vc4_bo_cache_init(struct drm_device *dev)
for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
vc4->bo_labels[i].name = bo_type_names[i];
- mutex_init(&vc4->bo_lock);
+ ret = drmm_mutex_init(dev, &vc4->bo_lock);
+ if (ret) {
+ kfree(vc4->bo_labels);
+ return ret;
+ }
+
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 942b5442b843..ff6eb879b469 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -324,7 +324,9 @@ static int vc4_drm_bind(struct device *dev)
INIT_LIST_HEAD(&vc4->debugfs_list);
if (!is_vc5) {
- mutex_init(&vc4->bin_bo_lock);
+ ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
+ if (ret)
+ return ret;
ret = vc4_bo_cache_init(drm);
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index fe10d9c3fff8..7acb43972e69 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -1308,6 +1308,7 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
@@ -1325,10 +1326,15 @@ int vc4_gem_init(struct drm_device *dev)
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
- mutex_init(&vc4->power_lock);
+ ret = drmm_mutex_init(dev, &vc4->power_lock);
+ if (ret)
+ return ret;
INIT_LIST_HEAD(&vc4->purgeable.list);
- mutex_init(&vc4->purgeable.lock);
+
+ ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
+ if (ret)
+ return ret;
return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4acc865576f0..6f61a1b8a1a3 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -3270,7 +3270,10 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (!vc4_hdmi)
return -ENOMEM;
- mutex_init(&vc4_hdmi->mutex);
+ ret = drmm_mutex_init(drm, &vc4_hdmi->mutex);
+ if (ret)
+ return ret;
+
spin_lock_init(&vc4_hdmi->hw_lock);
INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
--
2.36.1