[PATCH 06/10] drm/msm: wire up vmap shrinker
Rob Clark
robdclark at gmail.com
Thu Jun 16 21:22:31 UTC 2016
Signed-off-by: Rob Clark <robdclark at gmail.com>
---
drivers/gpu/drm/msm/msm_drv.h | 2 ++
drivers/gpu/drm/msm/msm_gem.c | 25 ++++++++++++++++-----
drivers/gpu/drm/msm/msm_gem.h | 10 +++++++++
drivers/gpu/drm/msm/msm_gem_shrinker.c | 40 ++++++++++++++++++++++++++++++++++
4 files changed, 72 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 2c16385..3ab10a1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -149,6 +149,7 @@ struct msm_drm_private {
struct drm_mm mm;
} vram;
+ struct notifier_block vmap_notifier;
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
@@ -202,6 +203,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c05fc1d..886cfe0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -421,6 +421,7 @@ void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM);
}
+ msm_obj->vmap_count++;
return msm_obj->vaddr;
}
@@ -435,13 +436,17 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- /* no-op for now */
+ WARN_ON(msm_obj->vmap_count < 1);
+ msm_obj->vmap_count--;
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
- /* no-op for now */
+ mutex_lock(&obj->dev->struct_mutex);
+ msm_gem_put_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
}
/* Update madvise status, returns true if not purged, else
@@ -470,8 +475,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova(obj);
- vunmap(msm_obj->vaddr);
- msm_obj->vaddr = NULL;
+ msm_gem_vunmap(obj);
put_pages(obj);
@@ -491,6 +495,17 @@ void msm_gem_purge(struct drm_gem_object *obj)
0, (loff_t)-1);
}
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+ return;
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+}
+
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
@@ -694,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
- vunmap(msm_obj->vaddr);
+ msm_gem_vunmap(obj);
put_pages(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 631dab5..7a4da81 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -34,6 +34,11 @@ struct msm_gem_object {
*/
uint8_t madv;
+ /**
+ * count of active vmap'ing
+ */
+ uint8_t vmap_count;
+
/* An object is either:
* inactive - on priv->inactive_list
* active - on one of the gpu's active_list.. well, at
@@ -83,6 +88,11 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
+
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 2bbd6d0..8d13b9e 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -100,6 +100,42 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return freed;
}
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct msm_drm_private *priv =
+ container_of(nb, struct msm_drm_private, vmap_notifier);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned unmapped = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return NOTIFY_DONE;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_vunmapable(msm_obj)) {
+ msm_gem_vunmap(&msm_obj->base);
+ /* since we don't know any better, let's bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+ if (++unmapped >= 5)
+ break;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ *(unsigned long *)ptr += unmapped;
+
+ if (unmapped > 0)
+ pr_info("Purging %u vmaps\n", unmapped);
+
+ return NOTIFY_DONE;
+}
+
/**
* msm_gem_shrinker_init - Initialize msm shrinker
* @dev_priv: msm device
@@ -113,6 +149,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
WARN_ON(register_shrinker(&priv->shrinker));
+
+ priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
/**
@@ -124,5 +163,6 @@ void msm_gem_shrinker_init(struct drm_device *dev)
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
unregister_shrinker(&priv->shrinker);
}
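
Note (not part of the patch): with vmap_count in place, msm_gem_get_vaddr() and
msm_gem_put_vaddr() become a strictly paired reference count. The vmap purge
notifier only tears down an object's kernel mapping once that count has dropped
back to zero (is_vunmapable()), so a caller that forgets the put will keep the
buffer pinned in vmap space forever. A minimal caller-side sketch of the pairing;
the helper name copy_into_bo() is invented purely for illustration:

static int copy_into_bo(struct drm_gem_object *obj, const void *src, size_t len)
{
	void *vaddr = msm_gem_get_vaddr(obj);	/* takes struct_mutex, bumps vmap_count */

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);

	msm_gem_put_vaddr(obj);			/* drops vmap_count so the notifier may vunmap later */
	return 0;
}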
--
2.5.5
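
Background on the hook being used here: when alloc_vmap_area() runs out of vmap
address space, the kernel calls the blocking notifier chain registered through
register_vmap_purge_notifier(), passing a pointer to an unsigned long accumulator.
Each callback releases whatever kernel mappings it can, adds the count to that
accumulator, and a non-zero total tells vmalloc a retry of the failed allocation
is worthwhile (hence the *(unsigned long *)ptr += unmapped above). A bare-bones
sketch of that contract for a hypothetical driver; all foo_* names are invented
for illustration:

#include <linux/bug.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>

static int foo_vmap_purge(struct notifier_block *nb, unsigned long event, void *ptr)
{
	unsigned long unmapped = 0;

	/* ... driver-specific vunmap() work goes here, counting into 'unmapped' ... */

	/* report what was freed; a non-zero total lets vmalloc retry the allocation */
	*(unsigned long *)ptr += unmapped;
	return NOTIFY_DONE;
}

static struct notifier_block foo_vmap_nb = {
	.notifier_call = foo_vmap_purge,
};

static int foo_driver_init(void)
{
	return register_vmap_purge_notifier(&foo_vmap_nb);
}

static void foo_driver_fini(void)
{
	WARN_ON(unregister_vmap_purge_notifier(&foo_vmap_nb));
}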