[PATCH 02/10] drm/ttm: flip over the range manager to self allocated nodes

Thomas Hellström (Intel) thomas_os at shipmail.org
Wed Jun 2 11:44:45 UTC 2021


On 6/2/21 12:09 PM, Christian König wrote:
> Start with the range manager to make the resource object the base
> class for the allocated nodes.
>
> While at it cleanup a lot of the code around that.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Matthew Auld <matthew.auld at intel.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  1 +
>   drivers/gpu/drm/drm_gem_vram_helper.c   |  2 +
>   drivers/gpu/drm/nouveau/nouveau_ttm.c   |  2 +
>   drivers/gpu/drm/qxl/qxl_ttm.c           |  1 +
>   drivers/gpu/drm/radeon/radeon_ttm.c     |  1 +
>   drivers/gpu/drm/ttm/ttm_range_manager.c | 56 ++++++++++++++++++-------
>   drivers/gpu/drm/ttm/ttm_resource.c      | 26 ++++++++----
>   include/drm/ttm/ttm_bo_driver.h         | 26 ------------
>   include/drm/ttm/ttm_range_manager.h     | 43 +++++++++++++++++++
>   include/drm/ttm/ttm_resource.h          |  3 ++
>   10 files changed, 111 insertions(+), 50 deletions(-)
>   create mode 100644 include/drm/ttm/ttm_range_manager.h
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 69db89261650..df1f185faae9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -45,6 +45,7 @@
>   #include <drm/ttm/ttm_bo_api.h>
>   #include <drm/ttm/ttm_bo_driver.h>
>   #include <drm/ttm/ttm_placement.h>
> +#include <drm/ttm/ttm_range_manager.h>
>   
>   #include <drm/amdgpu_drm.h>
>   
> diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
> index 83e7258c7f90..17a4c5d47b6a 100644
> --- a/drivers/gpu/drm/drm_gem_vram_helper.c
> +++ b/drivers/gpu/drm/drm_gem_vram_helper.c
> @@ -17,6 +17,8 @@
>   #include <drm/drm_prime.h>
>   #include <drm/drm_simple_kms_helper.h>
>   
> +#include <drm/ttm/ttm_range_manager.h>
> +
>   static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
>   
>   /**
> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> index 65430912ff72..b08b8efeefba 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> @@ -26,6 +26,8 @@
>   #include <linux/limits.h>
>   #include <linux/swiotlb.h>
>   
> +#include <drm/ttm/ttm_range_manager.h>
> +
>   #include "nouveau_drv.h"
>   #include "nouveau_gem.h"
>   #include "nouveau_mem.h"
> diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
> index 8aa87b8edb9c..19fd39d9a00c 100644
> --- a/drivers/gpu/drm/qxl/qxl_ttm.c
> +++ b/drivers/gpu/drm/qxl/qxl_ttm.c
> @@ -32,6 +32,7 @@
>   #include <drm/ttm/ttm_bo_api.h>
>   #include <drm/ttm/ttm_bo_driver.h>
>   #include <drm/ttm/ttm_placement.h>
> +#include <drm/ttm/ttm_range_manager.h>
>   
>   #include "qxl_drv.h"
>   #include "qxl_object.h"
> diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
> index cdffa9b65108..ad2a5a791bba 100644
> --- a/drivers/gpu/drm/radeon/radeon_ttm.c
> +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
> @@ -45,6 +45,7 @@
>   #include <drm/ttm/ttm_bo_api.h>
>   #include <drm/ttm/ttm_bo_driver.h>
>   #include <drm/ttm/ttm_placement.h>
> +#include <drm/ttm/ttm_range_manager.h>
>   
>   #include "radeon_reg.h"
>   #include "radeon.h"
> diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
> index b9d5da6e6a81..ce5d07ca384c 100644
> --- a/drivers/gpu/drm/ttm/ttm_range_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
> @@ -29,12 +29,13 @@
>    * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
>    */
>   
> -#include <drm/ttm/ttm_bo_driver.h>
> +#include <drm/ttm/ttm_device.h>
>   #include <drm/ttm/ttm_placement.h>
> +#include <drm/ttm/ttm_range_manager.h>
> +#include <drm/ttm/ttm_bo_api.h>
>   #include <drm/drm_mm.h>
>   #include <linux/slab.h>
>   #include <linux/spinlock.h>
> -#include <linux/module.h>
>   
>   /*
>    * Currently we use a spinlock for the lock, but a mutex *may* be
> @@ -60,8 +61,8 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
>   			       struct ttm_resource *mem)
>   {
>   	struct ttm_range_manager *rman = to_range_manager(man);
> +	struct ttm_range_mgr_node *node;
>   	struct drm_mm *mm = &rman->mm;
> -	struct drm_mm_node *node;
>   	enum drm_mm_insert_mode mode;
>   	unsigned long lpfn;
>   	int ret;
> @@ -70,7 +71,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
>   	if (!lpfn)
>   		lpfn = man->size;
>   
> -	node = kzalloc(sizeof(*node), GFP_KERNEL);
> +	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);

I'm still a bit confused about the situation where a driver wants to 
attach private data to a struct ttm_resource without having to 
re-implement its own range manager.

That could be cached sg-tables, a list of GPU bindings etc. It wouldn't 
work with the above unless we add a void *driver_private member to 
struct ttm_resource. Is that the plan going forward here, or is the 
driver actually expected to do the re-implementation?
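
To make it concrete, what I have in mind is roughly the following 
(purely illustrative, the struct and member names are made up):

struct my_driver_resource {
	struct ttm_resource base;	/* embedded base object */
	struct sg_table *cached_sgt;	/* driver-private: cached sg-table */
	struct list_head bindings;	/* driver-private: GPU bindings */
};

static inline struct my_driver_resource *
to_my_driver_resource(struct ttm_resource *res)
{
	return container_of(res, struct my_driver_resource, base);
}

But with self-allocated nodes that only seems possible if the driver 
also provides its own alloc()/free() callbacks instead of reusing the 
range manager as-is.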

Thanks,

Thomas



