[PATCH] DRM: add drm gem CMA helper

Lars-Peter Clausen lars at metafoo.de
Tue May 29 07:46:36 PDT 2012


On 05/29/2012 04:10 PM, Sascha Hauer wrote:
> Many embedded drm devices do not have a IOMMU and no dedicated
> memory for graphics. These devices use CMA (Contiguous Memory
> Allocator) backed graphics memory. This patch provides helper
> functions to be able to share the code.
> 
> Signed-off-by: Sascha Hauer <s.hauer at pengutronix.de>
> ---
> 
> Lars-Peter, please let me know if this fits your needs or if you are missing
> something.
> 

Awesome :) The overall structure looks basically like what I had, so I can
just use these functions as drop-in replacement. I had some minor
differences in the implementation though. Comments inline.

>  drivers/gpu/drm/Kconfig              |    6 +
>  drivers/gpu/drm/Makefile             |    1 +
>  drivers/gpu/drm/drm_gem_cma_helper.c |  321 ++++++++++++++++++++++++++++++++++
>  include/drm/drm_gem_cma_helper.h     |   47 +++++
>  4 files changed, 375 insertions(+)
>  create mode 100644 drivers/gpu/drm/drm_gem_cma_helper.c
>  create mode 100644 include/drm/drm_gem_cma_helper.h
> 
> diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> index e354bc0..f62717e 100644
> --- a/drivers/gpu/drm/Kconfig
> +++ b/drivers/gpu/drm/Kconfig
> @@ -53,6 +53,12 @@ config DRM_TTM
>  	  GPU memory types. Will be enabled automatically if a device driver
>  	  uses it.
>  
> +config DRM_GEM_CMA_HELPER
> +	tristate
> +	depends on DRM
> +	help
> +	  Choose this if you need the GEM cma helper functions

This shouldn't have a help text as it should be selected by the driver and
not by the user. Also the 'depends on DRM' can go away, since it becomes
meaningless if the symbol is not user-selectable.

> +
>  config DRM_TDFX
>  	tristate "3dfx Banshee/Voodoo3+"
>  	depends on DRM && PCI
> diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> index c20da5b..9a0d98a 100644
> --- a/drivers/gpu/drm/Makefile
> +++ b/drivers/gpu/drm/Makefile
> @@ -15,6 +15,7 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
>  		drm_trace_points.o drm_global.o drm_prime.o
>  
>  drm-$(CONFIG_COMPAT) += drm_ioc32.o
> +drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
>  
>  drm-usb-y   := drm_usb.o
>  
> diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
> new file mode 100644
> index 0000000..75534ff
> --- /dev/null
> +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> @@ -0,0 +1,321 @@
> [...]
> +/*
> + * drm_gem_cma_create - allocate an object with the given size
> + *
> + * returns a struct drm_gem_cma_obj* on success or ERR_PTR values
> + * on failure.
> + */
> +struct drm_gem_cma_obj *drm_gem_cma_create(struct drm_device *drm,
> +		unsigned int size)
> +{
> +	struct drm_gem_cma_obj *cma_obj;
> +	struct drm_gem_object *gem_obj;
> +	int ret;
> +
> +	size = roundup(size, PAGE_SIZE);
> +
> +	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
> +	if (!cma_obj)
> +		return ERR_PTR(-ENOMEM);
> +
> +	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
> +			(dma_addr_t *)&cma_obj->paddr,

The cast shouldn't be necessary.


? [...]
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_create);
>[...]
> +
> +static int drm_gem_cma_mmap_buffer(struct file *filp,
> +		struct vm_area_struct *vma)
> +{
> +	struct drm_gem_object *gem_obj = filp->private_data;
> +	struct drm_gem_cma_obj *cma_obj = to_dma_alloc_gem_obj(gem_obj);
> +	unsigned long pfn, vm_size;
> +
> +	vma->vm_flags |= VM_IO | VM_RESERVED;
> +
> +	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
> +	vma->vm_file = filp;
> +
> +	vm_size = vma->vm_end - vma->vm_start;
> +
> +	/* check if user-requested size is valid. */
> +	if (vm_size > gem_obj->size)
> +		return -EINVAL;
> +
> +	/*
> +	 * get page frame number to physical memory to be mapped
> +	 * to user space.
> +	 */
> +	pfn = cma_obj->paddr >> PAGE_SHIFT;
> +
> +	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
> +				vma->vm_page_prot)) {
> +		dev_err(gem_obj->dev->dev, "failed to remap pfn range.\n");
> +		return -EAGAIN;
> +	}
> +
> +	return 0;
> +}
> +
> +static const struct file_operations drm_gem_cma_fops = {
> +	.mmap = drm_gem_cma_mmap_buffer,
> +};

This and the function above seem to be unused. I think it's a relic from
the old Exynos code.

> [...]
> +/*
> + * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
> + * function
> + *
> + * This aligns the pitch and size arguments to the minimum required. wrap
> + * this into your own function if you need bigger alignment.
> + */
> +int drm_gem_cma_dumb_create(struct drm_file *file_priv,
> +		struct drm_device *dev, struct drm_mode_create_dumb *args)
> +{
> +	struct drm_gem_cma_obj *cma_obj;
> +
> +	if (args->pitch < args->width * args->bpp >> 3)
> +		args->pitch = args->width * args->bpp >> 3;

I used DIV_ROUND_UP(args->bpp, 8) here. I think it makes more sense for the
generic case.

> +
> +	if (args->size < args->pitch * args->height)
> +		args->size = args->pitch * args->height;
> +
> +	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
> +			args->size, &args->handle);
> +	if (IS_ERR(cma_obj))
> +		return PTR_ERR(cma_obj);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
> +
> +/*
> + * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
> + * function
> + */
> +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
> +		struct drm_device *drm, uint32_t handle, uint64_t *offset)
> +{
> +	struct drm_gem_cma_obj *cma_obj;
> +	struct drm_gem_object *gem_obj;
> +
> +	mutex_lock(&drm->struct_mutex);
> +
> +	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
> +	if (!gem_obj) {
> +		dev_err(drm->dev, "failed to lookup gem object\n");
> +		mutex_unlock(&drm->struct_mutex);
> +		return -EINVAL;
> +	}
> +
> +	cma_obj = to_dma_alloc_gem_obj(gem_obj);
> +
> +	*offset = get_gem_mmap_offset(&cma_obj->base);

Just use get_gem_mmap_offset(gem_obj) here; that also means the
to_dma_alloc_gem_obj call can go away.

> +
> +	drm_gem_object_unreference(gem_obj);
> +
> +	mutex_unlock(&drm->struct_mutex);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
> +
> +/*
> + * drm_gem_cma_fault - (struct vm_operations_struct)->fault callback function
> + */
> +int drm_gem_cma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> +{
> +	struct drm_gem_object *gem_obj = vma->vm_private_data;
> +	struct drm_gem_cma_obj *cma_obj = to_dma_alloc_gem_obj(gem_obj);
> +	struct drm_device *dev = gem_obj->dev;
> +	unsigned long pfn;
> +	pgoff_t page_offset;
> +	int ret;
> +
> +	page_offset = ((unsigned long)vmf->virtual_address -
> +			vma->vm_start) >> PAGE_SHIFT;
> +
> +	mutex_lock(&dev->struct_mutex);
> +
> +	pfn = (cma_obj->paddr >> PAGE_SHIFT) + page_offset;
> +
> +	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
> +
> +	mutex_unlock(&dev->struct_mutex);
> +
> +	return convert_to_vm_err_msg(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_fault);
> +

Do you think it makes sense to have a generic vm_operations struct as well?
I think it will look the same for most drivers:

struct vm_operations_struct drm_gem_cma_vm_ops = {
    .fault = drm_gem_cma_fault,
    .open = drm_gem_vm_open,
    .close = drm_gem_vm_close,
};

> +/*
> + * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
> + */
> +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	int ret;
> +
> +	ret = drm_gem_mmap(filp, vma);
> +	if (ret)
> +		return ret;
> +
> +	vma->vm_flags &= ~VM_PFNMAP;
> +	vma->vm_flags |= VM_MIXEDMAP;
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

This might be worth being made a more generic helper function, since we are
not the only ones who want a mixed mapping. But that can wait for later.

> +
> +/*
> + * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
> + */
> +int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
> +		struct drm_device *drm, unsigned int handle)
> +{
> +	return drm_gem_handle_delete(file_priv, handle);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
> diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
> new file mode 100644
> index 0000000..53b007c
> --- /dev/null
> +++ b/include/drm/drm_gem_cma_helper.h
> @@ -0,0 +1,47 @@
> +#ifndef __DRM_GEM_DMA_ALLOC_HELPER_H__
> +#define __DRM_GEM_DMA_ALLOC_HELPER_H__
> +
> +struct drm_gem_cma_obj {
> +	struct drm_gem_object base;
> +	dma_addr_t paddr;
> +	void __iomem *vaddr;

__iomem?

> +};
> +
> +#define to_dma_alloc_gem_obj(x) \
> +	container_of(x,	struct drm_gem_cma_obj, base)
> +
> +/* free gem object. */
> +void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
> +
> +/* create memory region for drm framebuffer. */
> +int drm_gem_cma_dumb_create(struct drm_file *file_priv,
> +		struct drm_device *drm, struct drm_mode_create_dumb *args);
> +
> +/* map memory region for drm framebuffer to user space. */
> +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
> +		struct drm_device *drm, uint32_t handle, uint64_t *offset);
> +
> +/* page fault handler and mmap fault address(virtual) to physical memory. */
> +int drm_gem_cma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
> +
> +/* set vm_flags and we can change the vm attribute to other one at here. */
> +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
> +
> +/*
> + * destroy memory region allocated.
> + *	- a gem handle and physical memory region pointed by a gem object
> + *	would be released by drm_gem_handle_delete().
> + */
> +int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
> +		struct drm_device *drm, unsigned int handle);
> +
> +/* allocate physical memory. */
> +struct drm_gem_cma_obj *drm_gem_cma_create(struct drm_device *drm,
> +		unsigned int size);
> +
> +struct drm_gem_cma_obj *drm_gem_cma_create_with_handle(
> +		struct drm_file *file_priv,
> +		struct drm_device *drm, unsigned int size,
> +		unsigned int *handle);
> +
> +#endif

Thanks for taking care of this,
- Lars



More information about the dri-devel mailing list