[PATCH 42/59] drm/vmwgfx/ttm: use wrapper to access memory manager
daniel at ffwll.ch
Wed Aug 5 09:22:39 UTC 2020
On Tue, Aug 04, 2020 at 12:56:15PM +1000, Dave Airlie wrote:
> From: Dave Airlie <airlied at redhat.com>
>
> Signed-off-by: Dave Airlie <airlied at redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>
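
For context: ttm_manager_type() is introduced earlier in this series, and
unless I misremember the earlier patch it is just a thin inline wrapper
over the existing man[] array, roughly:

  /*
   * Sketch of the wrapper as added earlier in the series (assumed to
   * live in include/drm/ttm/ttm_bo_driver.h); not copied verbatim.
   */
  static inline struct ttm_mem_type_manager *
  ttm_manager_type(struct ttm_bo_device *bdev, int mem_type)
  {
          return &bdev->man[mem_type];
  }

so the conversion here is purely mechanical. The payoff comes later: once
every driver goes through the accessor, the storage behind it can be
reorganized without touching the drivers again.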
> ---
> drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 23 +++++++++++--------
> drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4 ++--
> drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 4 ++--
> 3 files changed, 18 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> index f368a9cc0c2a..6ed92f38b54b 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> @@ -634,7 +634,7 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv)
> ret = ttm_range_man_init(&dev_priv->bdev, man,
> dev_priv->vram_size >> PAGE_SHIFT);
> #endif
> - dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
> + ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)->use_type = false;
> return ret;
> }
>
> @@ -644,7 +644,7 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
> vmw_thp_fini(dev_priv);
> #else
> ttm_bo_man_fini(&dev_priv->bdev,
> - &dev_priv->bdev.man[TTM_PL_VRAM]);
> + ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM));
> #endif
> }
>
> @@ -887,7 +887,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
> DRM_ERROR("Failed initializing TTM buffer object driver.\n");
> goto out_no_bdev;
> }
> - dev_priv->bdev.man[TTM_PL_SYSTEM].available_caching =
> + ttm_manager_type(&dev_priv->bdev, TTM_PL_SYSTEM)->available_caching =
> TTM_PL_FLAG_CACHED;
>
> /*
> @@ -1194,10 +1194,12 @@ static void vmw_master_drop(struct drm_device *dev,
> */
> static void __vmw_svga_enable(struct vmw_private *dev_priv)
> {
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
> +
> spin_lock(&dev_priv->svga_lock);
> - if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
> + if (!man->use_type) {
> vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
> - dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
> + man->use_type = true;
> }
> spin_unlock(&dev_priv->svga_lock);
> }
> @@ -1223,9 +1225,11 @@ void vmw_svga_enable(struct vmw_private *dev_priv)
> */
> static void __vmw_svga_disable(struct vmw_private *dev_priv)
> {
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
> +
> spin_lock(&dev_priv->svga_lock);
> - if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
> - dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
> + if (man->use_type) {
> + man->use_type = false;
> vmw_write(dev_priv, SVGA_REG_ENABLE,
> SVGA_REG_ENABLE_HIDE |
> SVGA_REG_ENABLE_ENABLE);
> @@ -1242,6 +1246,7 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
> */
> void vmw_svga_disable(struct vmw_private *dev_priv)
> {
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
> /*
> * Disabling SVGA will turn off device modesetting capabilities, so
> * notify KMS about that so that it doesn't cache atomic state that
> @@ -1257,8 +1262,8 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
> vmw_kms_lost_device(dev_priv->dev);
> ttm_write_lock(&dev_priv->reservation_sem, false);
> spin_lock(&dev_priv->svga_lock);
> - if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
> - dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
> + if (man->use_type) {
> + man->use_type = false;
> spin_unlock(&dev_priv->svga_lock);
> if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
> DRM_ERROR("Failed evicting VRAM buffers.\n");
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> index ec1b5bb01a93..54c85a59dd8b 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> @@ -98,7 +98,7 @@ static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
>
> int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
> {
> - struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type);
> struct vmwgfx_gmrid_man *gman =
> kzalloc(sizeof(*gman), GFP_KERNEL);
>
> @@ -135,7 +135,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
>
> void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
> {
> - struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type);
> struct vmwgfx_gmrid_man *gman =
> (struct vmwgfx_gmrid_man *)man->priv;
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> index 548f152b9963..720a24214c74 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> @@ -117,7 +117,7 @@ static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
>
> int vmw_thp_init(struct vmw_private *dev_priv)
> {
> - struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
> struct vmw_thp_manager *rman;
> man->available_caching = TTM_PL_FLAG_CACHED;
> man->default_caching = TTM_PL_FLAG_CACHED;
> @@ -137,7 +137,7 @@ int vmw_thp_init(struct vmw_private *dev_priv)
>
> void vmw_thp_fini(struct vmw_private *dev_priv)
> {
> - struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
> + struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
> struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
> struct drm_mm *mm = &rman->mm;
> int ret;
> --
> 2.26.2
>
> _______________________________________________
> dri-devel mailing list
> dri-devel at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch