[PATCH 4/6] drm/msm: refactor how we handle vram carveout buffers

Jordan Crouse jcrouse at codeaurora.org
Tue Jun 13 19:35:36 UTC 2017


On Tue, Jun 13, 2017 at 02:49:46PM -0400, Rob Clark wrote:
> Pull some of the logic out into msm_gem_new() (since we don't need to
> care about the imported-bo case), and don't defer allocating pages.  The
> latter is generally a good idea, since if we are using the VRAM carveout to
> allocate contiguous buffers (i.e. no IOMMU), the allocation is more
> likely to fail.  So failing at allocation time is the saner option.
> Plus this simplifies things in the next patch.
> 
> Signed-off-by: Rob Clark <robdclark at gmail.com>
> ---
>  drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++++++++++++---------------------
>  1 file changed, 26 insertions(+), 21 deletions(-)
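Just to spell out the fail-early argument for anyone skimming: with
deferred allocation a fragmented carveout doesn't show up at object
creation, only at first use.  A sketch of the caller-visible difference
(illustrative only, not code from the patch; variable setup omitted):

	/* before: pages were allocated lazily, so creation could succeed
	 * and the contiguous carveout allocation fail much later:
	 */
	obj = msm_gem_new(dev, size, MSM_BO_STOLEN);    /* succeeded anyway */
	ret = msm_gem_get_iova_locked(obj, id, &iova);  /* ...failed here */

	/* after: get_pages() runs inside msm_gem_new(), so the same
	 * failure is reported where the buffer is created:
	 */
	obj = msm_gem_new(dev, size, MSM_BO_STOLEN);    /* ERR_PTR() right away */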
> 
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 2f470a6..754432c 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -324,12 +324,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
>  		if (IS_ERR(pages))
>  			return PTR_ERR(pages);
>  
> -		if (iommu_present(&platform_bus_type)) {
> -			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
> -					msm_obj->sgt, obj->size >> PAGE_SHIFT);
> -		} else {
> -			msm_obj->domain[id].iova = physaddr(obj);
> -		}
> +		ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
> +				msm_obj->sgt, obj->size >> PAGE_SHIFT);
>  	}
>  
>  	if (!ret)
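Side note: with the carveout iova now filled in at allocation time, this
mapping branch is only reachable for IOMMU-backed objects; carveout BOs
fail the !iova check and just return the precomputed address.
Reconstructed from the hunk above plus the surrounding function (a
paraphrase for context, not part of the patch), the whole thing ends up
as:

	int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
			uint64_t *iova)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		int ret = 0;

		/* carveout BOs have domain[0].iova pre-set to physaddr(obj)
		 * at allocation, so they never enter this branch:
		 */
		if (!msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			struct page **pages = get_pages(obj);

			if (IS_ERR(pages))
				return PTR_ERR(pages);

			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
					msm_obj->sgt, obj->size >> PAGE_SHIFT);
		}

		if (!ret)
			*iova = msm_obj->domain[id].iova;

		return ret;
	}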
> @@ -765,7 +761,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
>  {
>  	struct msm_drm_private *priv = dev->dev_private;
>  	struct msm_gem_object *msm_obj;
> -	bool use_vram = false;
>  
>  	switch (flags & MSM_BO_CACHE_MASK) {
>  	case MSM_BO_UNCACHED:
> @@ -778,21 +773,10 @@ static int msm_gem_new_impl(struct drm_device *dev,
>  		return -EINVAL;
>  	}
>  
> -	if (!iommu_present(&platform_bus_type))
> -		use_vram = true;
> -	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
> -		use_vram = true;
> -
> -	if (WARN_ON(use_vram && !priv->vram.size))
> -		return -EINVAL;
> -
>  	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
>  	if (!msm_obj)
>  		return -ENOMEM;
>  
> -	if (use_vram)
> -		msm_obj->vram_node = &msm_obj->domain[0].node;
> -
>  	msm_obj->flags = flags;
>  	msm_obj->madv = MSM_MADV_WILLNEED;
>  
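The "imported-bo" point from the commit message, spelled out:
msm_gem_new_impl() is shared with the dma-buf import path, and an
imported buffer can never live in the carveout, so the use_vram decision
only makes sense in msm_gem_new().  Roughly (sketch, not code from the
patch):

	/*
	 * Both constructors share msm_gem_new_impl(), but only native
	 * allocations can end up in the VRAM carveout:
	 *
	 *   msm_gem_new()    -> msm_gem_new_impl() -> get_pages()/carveout
	 *   msm_gem_import() -> msm_gem_new_impl() -> caller-provided sg_table
	 */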
> @@ -814,13 +798,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
>  struct drm_gem_object *msm_gem_new(struct drm_device *dev,
>  		uint32_t size, uint32_t flags)
>  {
> +	struct msm_drm_private *priv = dev->dev_private;
>  	struct drm_gem_object *obj = NULL;
> +	bool use_vram = false;
>  	int ret;
>  
>  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
>  
>  	size = PAGE_ALIGN(size);
>  
> +	if (!iommu_present(&platform_bus_type))
> +		use_vram = true;
> +	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
> +		use_vram = true;
> +
> +	if (WARN_ON(use_vram && !priv->vram.size))
> +		return ERR_PTR(-EINVAL);
> +

I would say WARN_ONCE here but I guess if the first allocation fails we aren't
making any more. This might be worth a nastygram to the log.
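i.e. something like this (message wording purely illustrative):

	if (WARN_ONCE(use_vram && !priv->vram.size,
		      "vram carveout required but not configured\n"))
		return ERR_PTR(-EINVAL);

WARN_ONCE() takes a printf-style format, so the one-time backtrace comes
with a human-readable hint attached.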

>  	/* Disallow zero sized objects as they make the underlying
>  	 * infrastructure grumpy
>  	 */
> @@ -831,12 +825,23 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
>  	if (ret)
>  		goto fail;
>  
> -	if (use_pages(obj)) {
> +	if (use_vram) {
> +		struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +		struct page **pages;
> +
> +		msm_obj->vram_node = &msm_obj->domain[0].node;
> +		drm_gem_private_object_init(dev, obj, size);
> +
> +		pages = get_pages(obj);
> +		if (IS_ERR(pages)) {
> +			ret = PTR_ERR(pages);
> +			goto fail;
> +		}
> +		msm_obj->domain[0].iova = physaddr(obj);
> +	} else {
>  		ret = drm_gem_object_init(dev, obj, size);
>  		if (ret)
>  			goto fail;
> -	} else {
> -		drm_gem_private_object_init(dev, obj, size);
>  	}
>  
>  	return obj;
> -- 
> 2.9.4
> 
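One more bit of context for anyone reading along without the tree open:
physaddr() above is the existing helper in msm_gem.c that turns the BO's
drm_mm node (carved out of priv->vram.mm by get_pages_vram()) into a
physical address.  Paraphrased for reference, not part of this patch:

	static dma_addr_t physaddr(struct drm_gem_object *obj)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		struct msm_drm_private *priv = obj->dev->dev_private;

		/* vram_node->start is a page offset into the carveout */
		return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
				priv->vram.paddr;
	}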

Otherwise,
Acked-by: Jordan Crouse <jcrouse at codeaurora.org>
-- 
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project

