<div dir="ltr">Thanks for the feedback Christian. I am still digging into this one. Daniel suggested leveraging the Shrinker API for the functionality of this commit in RFC v3 but I am still trying to figure out how/if ttm fits with the shrinker (though the idea behind the shrinker API seems fairly straightforward as far as I understand it currently.)<div><br></div><div>Regards,</div><div>Kenny</div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Thu, Aug 29, 2019 at 3:08 AM Koenig, Christian <<a href="mailto:Christian.Koenig@amd.com">Christian.Koenig@amd.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Am 29.08.19 um 08:05 schrieb Kenny Ho:<br>
> Allow DRM TTM memory manager to register a work_struct, such that, when<br>
> a drmcgrp is under memory pressure, memory reclaiming can be triggered<br>
> immediately.<br>
><br>
> Change-Id: I25ac04e2db9c19ff12652b88ebff18b44b2706d8<br>
> Signed-off-by: Kenny Ho <<a href="mailto:Kenny.Ho@amd.com" target="_blank">Kenny.Ho@amd.com</a>><br>
> ---<br>
> drivers/gpu/drm/ttm/ttm_bo.c | 49 +++++++++++++++++++++++++++++++++<br>
> include/drm/drm_cgroup.h | 16 +++++++++++<br>
> include/drm/ttm/ttm_bo_driver.h | 2 ++<br>
> kernel/cgroup/drm.c | 30 ++++++++++++++++++++<br>
> 4 files changed, 97 insertions(+)<br>
><br>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c<br>
> index d7e3d3128ebb..72efae694b7e 100644<br>
> --- a/drivers/gpu/drm/ttm/ttm_bo.c<br>
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c<br>
> @@ -1590,6 +1590,46 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)<br>
> }<br>
> EXPORT_SYMBOL(ttm_bo_evict_mm);<br>
> <br>
> +static void ttm_bo_reclaim_wq(struct work_struct *work)<br>
> +{<br>
> + struct ttm_operation_ctx ctx = {<br>
> + .interruptible = false,<br>
> + .no_wait_gpu = false,<br>
> + .flags = TTM_OPT_FLAG_FORCE_ALLOC<br>
> + };<br>
> + struct ttm_mem_type_manager *man =<br>
> + container_of(work, struct ttm_mem_type_manager, reclaim_wq);<br>
> + struct ttm_bo_device *bdev = man->bdev;<br>
> + struct dma_fence *fence;<br>
> + int mem_type;<br>
> + int ret;<br>
> +<br>
> + for (mem_type = 0; mem_type < TTM_NUM_MEM_TYPES; mem_type++)<br>
> + if (&bdev->man[mem_type] == man)<br>
> + break;<br>
> +<br>
> + WARN_ON(mem_type >= TTM_NUM_MEM_TYPES);<br>
> + if (mem_type >= TTM_NUM_MEM_TYPES)<br>
> + return;<br>
> +<br>
> + if (!drmcg_mem_pressure_scan(bdev, mem_type))<br>
> + return;<br>
> +<br>
> + ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx, NULL);<br>
> + if (ret)<br>
> + return;<br>
> +<br>
> + spin_lock(&man->move_lock);<br>
> + fence = dma_fence_get(man->move);<br>
> + spin_unlock(&man->move_lock);<br>
> +<br>
> + if (fence) {<br>
> + ret = dma_fence_wait(fence, false);<br>
> + dma_fence_put(fence);<br>
> + }<br>
<br>
Why do you want to block for the fence here? That is a rather bad idea <br>
and would break pipe-lining.<br>
<br>
Apart from that I don't think we should put that into TTM.<br>
<br>
Instead drmcg_register_device_mm() should get a function pointer which <br>
is called from a work item when the group is under pressure.<br>
<br>
TTM can then provide the function which can be called, but the actual <br>
registration is the job of the device and not TTM.<br>
<br>
Regards,<br>
Christian.<br>
<br>
> +<br>
> +}<br>
> +<br>
> int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,<br>
> unsigned long p_size)<br>
> {<br>
> @@ -1624,6 +1664,13 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,<br>
> INIT_LIST_HEAD(&man->lru[i]);<br>
> man->move = NULL;<br>
> <br>
> + pr_err("drmcg %p type %d\n", bdev->ddev, type);<br>
> +<br>
> + if (type <= TTM_PL_VRAM) {<br>
> + INIT_WORK(&man->reclaim_wq, ttm_bo_reclaim_wq);<br>
> + drmcg_register_device_mm(bdev->ddev, type, &man->reclaim_wq);<br>
> + }<br>
> +<br>
> return 0;<br>
> }<br>
> EXPORT_SYMBOL(ttm_bo_init_mm);<br>
> @@ -1701,6 +1748,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)<br>
> man = &bdev->man[i];<br>
> if (man->has_type) {<br>
> man->use_type = false;<br>
> + drmcg_unregister_device_mm(bdev->ddev, i);<br>
> + cancel_work_sync(&man->reclaim_wq);<br>
> if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {<br>
> ret = -EBUSY;<br>
> pr_err("DRM memory manager type %d is not clean\n",<br>
> diff --git a/include/drm/drm_cgroup.h b/include/drm/drm_cgroup.h<br>
> index c11df388fdf2..6d9707e1eb72 100644<br>
> --- a/include/drm/drm_cgroup.h<br>
> +++ b/include/drm/drm_cgroup.h<br>
> @@ -5,6 +5,7 @@<br>
> #define __DRM_CGROUP_H__<br>
> <br>
> #include <linux/cgroup_drm.h><br>
> +#include <linux/workqueue.h><br>
> #include <drm/ttm/ttm_bo_api.h><br>
> #include <drm/ttm/ttm_bo_driver.h><br>
> <br>
> @@ -25,12 +26,17 @@ struct drmcg_props {<br>
> s64 mem_bw_avg_bytes_per_us_default;<br>
> <br>
> s64 mem_highs_default[TTM_PL_PRIV+1];<br>
> +<br>
> + struct work_struct *mem_reclaim_wq[TTM_PL_PRIV];<br>
> };<br>
> <br>
> #ifdef CONFIG_CGROUP_DRM<br>
> <br>
> void drmcg_device_update(struct drm_device *device);<br>
> void drmcg_device_early_init(struct drm_device *device);<br>
> +void drmcg_register_device_mm(struct drm_device *dev, unsigned int type,<br>
> + struct work_struct *wq);<br>
> +void drmcg_unregister_device_mm(struct drm_device *dev, unsigned int type);<br>
> bool drmcg_try_chg_bo_alloc(struct drmcg *drmcg, struct drm_device *dev,<br>
> size_t size);<br>
> void drmcg_unchg_bo_alloc(struct drmcg *drmcg, struct drm_device *dev,<br>
> @@ -53,6 +59,16 @@ static inline void drmcg_device_early_init(struct drm_device *device)<br>
> {<br>
> }<br>
> <br>
> +static inline void drmcg_register_device_mm(struct drm_device *dev,<br>
> + unsigned int type, struct work_struct *wq)<br>
> +{<br>
> +}<br>
> +<br>
> +static inline void drmcg_unregister_device_mm(struct drm_device *dev,<br>
> + unsigned int type)<br>
> +{<br>
> +}<br>
> +<br>
> static inline void drmcg_try_chg_bo_alloc(struct drmcg *drmcg,<br>
> struct drm_device *dev, size_t size)<br>
> {<br>
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h<br>
> index e1a805d65b83..529cef92bcf6 100644<br>
> --- a/include/drm/ttm/ttm_bo_driver.h<br>
> +++ b/include/drm/ttm/ttm_bo_driver.h<br>
> @@ -205,6 +205,8 @@ struct ttm_mem_type_manager {<br>
> * Protected by @move_lock.<br>
> */<br>
> struct dma_fence *move;<br>
> +<br>
> + struct work_struct reclaim_wq;<br>
> };<br>
> <br>
> /**<br>
> diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c<br>
> index 04fb9a398740..0ea7f0619e25 100644<br>
> --- a/kernel/cgroup/drm.c<br>
> +++ b/kernel/cgroup/drm.c<br>
> @@ -804,6 +804,29 @@ void drmcg_device_early_init(struct drm_device *dev)<br>
> }<br>
> EXPORT_SYMBOL(drmcg_device_early_init);<br>
> <br>
> +void drmcg_register_device_mm(struct drm_device *dev, unsigned int type,<br>
> + struct work_struct *wq)<br>
> +{<br>
> + if (dev == NULL || type >= TTM_PL_PRIV)<br>
> + return;<br>
> +<br>
> + mutex_lock(&drmcg_mutex);<br>
> + dev->drmcg_props.mem_reclaim_wq[type] = wq;<br>
> + mutex_unlock(&drmcg_mutex);<br>
> +}<br>
> +EXPORT_SYMBOL(drmcg_register_device_mm);<br>
> +<br>
> +void drmcg_unregister_device_mm(struct drm_device *dev, unsigned int type)<br>
> +{<br>
> + if (dev == NULL || type >= TTM_PL_PRIV)<br>
> + return;<br>
> +<br>
> + mutex_lock(&drmcg_mutex);<br>
> + dev->drmcg_props.mem_reclaim_wq[type] = NULL;<br>
> + mutex_unlock(&drmcg_mutex);<br>
> +}<br>
> +EXPORT_SYMBOL(drmcg_unregister_device_mm);<br>
> +<br>
> /**<br>
> * drmcg_try_chg_bo_alloc - charge GEM buffer usage for a device and cgroup<br>
> * @drmcg: the DRM cgroup to be charged to<br>
> @@ -1013,6 +1036,13 @@ void drmcg_mem_track_move(struct ttm_buffer_object *old_bo, bool evict,<br>
> <br>
> ddr->mem_bw_stats[DRMCG_MEM_BW_ATTR_BYTE_CREDIT]<br>
> -= move_in_bytes;<br>
> +<br>
> + if (dev->drmcg_props.mem_reclaim_wq[new_mem_type]<br>
> + != NULL &&<br>
> + ddr->mem_stats[new_mem_type] ><br>
> + ddr->mem_highs[new_mem_type])<br>
> + schedule_work(dev-><br>
> + drmcg_props.mem_reclaim_wq[new_mem_type]);<br>
> }<br>
> mutex_unlock(&dev->drmcg_mutex);<br>
> }<br>
<br>
</blockquote></div>