[Intel-gfx] [PATCH v15 16/23] drm/shmem-helper: Use kref for vmap_use_count
Dmitry Osipenko
dmitry.osipenko at collabora.com
Sun Aug 27 17:54:42 UTC 2023
Use the kref helper for vmap_use_count to make the refcounting consistent
with pages_use_count and pages_pin_count, which already use kref. This will
make it possible to optimize unlocked vmappings by skipping the reservation
lock when the refcount is greater than 1.
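
For readers unfamiliar with the pattern, below is a minimal, self-contained
sketch of the kref usage this patch switches to (the demo_* names and the
stand-in mapping logic are purely illustrative, not part of the patch): the
fast path takes a reference with kref_get_unless_zero(), the first successful
mapping starts the count with kref_init(), and kref_put() invokes the release
callback once the last user drops its reference.

#include <linux/container_of.h>
#include <linux/kref.h>

/* Illustrative object; only the fields needed to show the pattern. */
struct demo_object {
	struct kref vmap_use_count;	/* zero-initialized: no mapping yet */
	void *vaddr;
};

/* Release callback, invoked by kref_put() when the count drops to zero. */
static void demo_vunmap_release(struct kref *kref)
{
	struct demo_object *obj = container_of(kref, struct demo_object,
					       vmap_use_count);

	/* Last user is gone: tear down the mapping here. */
	obj->vaddr = NULL;
}

static int demo_vmap(struct demo_object *obj)
{
	/* Fast path: only succeeds if a mapping already exists (count > 0). */
	if (kref_get_unless_zero(&obj->vmap_use_count))
		return 0;

	/* Slow path: create the mapping (stand-in), then start the count at 1. */
	obj->vaddr = obj;
	kref_init(&obj->vmap_use_count);

	return 0;
}

static void demo_vunmap(struct demo_object *obj)
{
	/* Drop one reference; the release callback runs on the last put. */
	kref_put(&obj->vmap_use_count, demo_vunmap_release);
}

Since the fast path only needs kref_get_unless_zero(), a later change can
attempt it before taking the reservation lock, which is the optimization
mentioned above.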
Suggested-by: Boris Brezillon <boris.brezillon at collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko at collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 37 ++++++++++++++------------
 include/drm/drm_gem_shmem_helper.h     |  2 +-
 2 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 17a0177acb5d..d96fee3d6166 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -144,7 +144,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 	} else if (!shmem->imported_sgt) {
 		dma_resv_lock(shmem->base.resv, NULL);
 
-		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+		drm_WARN_ON(obj->dev, kref_read(&shmem->vmap_use_count));
 
 		if (shmem->sgt) {
 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -359,23 +359,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 
 		dma_resv_assert_held(shmem->base.resv);
 
-		if (shmem->vmap_use_count++ > 0) {
+		if (kref_get_unless_zero(&shmem->vmap_use_count)) {
 			iosys_map_set_vaddr(map, shmem->vaddr);
 			return 0;
 		}
 
 		ret = drm_gem_shmem_pin_locked(shmem);
 		if (ret)
-			goto err_zero_use;
+			return ret;
 
 		if (shmem->map_wc)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
-		if (!shmem->vaddr)
+		if (!shmem->vaddr) {
 			ret = -ENOMEM;
-		else
+		} else {
 			iosys_map_set_vaddr(map, shmem->vaddr);
+			kref_init(&shmem->vmap_use_count);
+		}
 	}
 
 	if (ret) {
@@ -388,13 +390,22 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 err_put_pages:
 	if (!obj->import_attach)
 		drm_gem_shmem_unpin_locked(shmem);
-err_zero_use:
-	shmem->vmap_use_count = 0;
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
 
+static void drm_gem_shmem_kref_vunmap(struct kref *kref)
+{
+	struct drm_gem_shmem_object *shmem;
+
+	shmem = container_of(kref, struct drm_gem_shmem_object,
+			     vmap_use_count);
+
+	vunmap(shmem->vaddr);
+	drm_gem_shmem_unpin_locked(shmem);
+}
+
 /*
  * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
@@ -416,15 +427,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
 		dma_resv_assert_held(shmem->base.resv);
-
-		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-			return;
-
-		if (--shmem->vmap_use_count > 0)
-			return;
-
-		vunmap(shmem->vaddr);
-		drm_gem_shmem_unpin_locked(shmem);
+		kref_put(&shmem->vmap_use_count, drm_gem_shmem_kref_vunmap);
 	}
 
 	shmem->vaddr = NULL;
@@ -663,7 +666,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 		return;
 
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", kref_read(&shmem->pages_use_count));
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", kref_read(&shmem->vmap_use_count));
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 400ecd63f45f..0e0ccd380f66 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -81,7 +81,7 @@ struct drm_gem_shmem_object {
 	 * Reference count on the virtual address.
 	 * The address are un-mapped when the count reaches zero.
 	 */
-	unsigned int vmap_use_count;
+	struct kref vmap_use_count;
 
 	/**
	 * @got_sgt:
--
2.41.0