[PATCH] udmabuf: Add support for mapping hugepages (v3)
Dongwon Kim
dongwon.kim at intel.com
Tue Jun 8 17:35:58 UTC 2021
I see the number of entries in the list often exceeds list_limit, which
is currently hardcoded to 1024, for a full HD scanout resource
(1920*1080*4 = 8,294,400 bytes, i.e. 2025 pages at 4K). Can we include
a change in this patch to increase it to something like 4096 or higher?
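
For reference, a quick sketch of what I have in mind (the new value is
only a suggestion, not something we have settled on):

  /* udmabuf_create_list->count limit; a full HD scanout submitted as
   * one 4K page per entry already needs 1920*1080*4 / 4096 = 2025
   * entries, so 1024 is too small. */
  static const u32 list_limit = 4096;
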
On Fri, Jun 04, 2021 at 01:59:39PM -0700, Vivek Kasireddy wrote:
> If the VMM's (QEMU) memory backend is backed by memfd + hugepages
> (hugetlbfs and not THP), we have to first find the hugepage(s) where
> the Guest allocations are located and then extract the regular 4k
> sized subpages from them.
>
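
A note for readers: the hugepage/subpage split described above is just
struct page pointer arithmetic within the compound page. A minimal
sketch of the offset math, using the same kernel helpers the patch
uses (the variable names here are illustrative, not from the patch):

  struct hstate *h = hstate_file(memfd);
  /* which hugepage the byte offset falls in */
  pgoff_t hp_idx  = off >> huge_page_shift(h);
  /* which 4K subpage inside that hugepage */
  pgoff_t sub_idx = (off & ~huge_page_mask(h)) >> PAGE_SHIFT;
  /* e.g. with 2MB hugepages, off = 3MB -> hp_idx = 1, sub_idx = 256 */
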
> v2: Ensure that the subpage and hugepage offsets are calculated correctly
> when the range of subpage allocations cuts across multiple hugepages.
>
> v3: Instead of repeatedly looking up the hugepage for each subpage,
> only do it when the subpage allocation crosses over into a different
> hugepage. (suggested by Gerd and DW)
>
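
To make the v3 optimization concrete: with 2MB hugepages there are 512
4K subpages per hugepage (maxsubpgs = 512). A short trace of the loop
below, assuming the requested range starts at subpage 510 of the first
hugepage (the numbers are illustrative):

  /* pgidx 0: hpage = find_get_page_flags(..., pgoff=0); page = hpage + 510
   * pgidx 1: page = hpage + 511; subpgoff reaches maxsubpgs, so
   *          put_page(hpage), hpage = NULL, subpgoff = 0, pgoff -> 1
   * pgidx 2: hpage = find_get_page_flags(..., pgoff=1); page = hpage + 0
   * => one page-cache lookup per hugepage crossed, not per 4K subpage. */
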
> Cc: Gerd Hoffmann <kraxel at redhat.com>
> Signed-off-by: Vivek Kasireddy <vivek.kasireddy at intel.com>
> Signed-off-by: Dongwon Kim <dongwon.kim at intel.com>
> ---
> drivers/dma-buf/udmabuf.c | 51 +++++++++++++++++++++++++++++++++------
> 1 file changed, 44 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> index db732f71e59a..2e02bbfe30fd 100644
> --- a/drivers/dma-buf/udmabuf.c
> +++ b/drivers/dma-buf/udmabuf.c
> @@ -11,6 +11,7 @@
> #include <linux/shmem_fs.h>
> #include <linux/slab.h>
> #include <linux/udmabuf.h>
> +#include <linux/hugetlb.h>
>
> static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */
> static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
> @@ -163,7 +164,9 @@ static long udmabuf_create(struct miscdevice *device,
> struct udmabuf *ubuf;
> struct dma_buf *buf;
> pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
> - struct page *page;
> + struct page *page, *hpage = NULL;
> + pgoff_t subpgoff, maxsubpgs;
> + struct hstate *hpstate;
> int seals, ret = -EINVAL;
> u32 i, flags;
>
> @@ -194,7 +197,8 @@ static long udmabuf_create(struct miscdevice *device,
> memfd = fget(list[i].memfd);
> if (!memfd)
> goto err;
> - if (!shmem_mapping(file_inode(memfd)->i_mapping))
> + if (!shmem_mapping(file_inode(memfd)->i_mapping) &&
> + !is_file_hugepages(memfd))
> goto err;
> seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
> if (seals == -EINVAL)
> @@ -205,17 +209,50 @@ static long udmabuf_create(struct miscdevice *device,
> goto err;
> pgoff = list[i].offset >> PAGE_SHIFT;
> pgcnt = list[i].size >> PAGE_SHIFT;
> + if (is_file_hugepages(memfd)) {
> + hpstate = hstate_file(memfd);
> + pgoff = list[i].offset >> huge_page_shift(hpstate);
> + subpgoff = (list[i].offset &
> + ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
> + maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
> + }
> for (pgidx = 0; pgidx < pgcnt; pgidx++) {
> - page = shmem_read_mapping_page(
> - file_inode(memfd)->i_mapping, pgoff + pgidx);
> - if (IS_ERR(page)) {
> - ret = PTR_ERR(page);
> - goto err;
> + if (is_file_hugepages(memfd)) {
> + if (!hpage) {
> + hpage = find_get_page_flags(
> + file_inode(memfd)->i_mapping,
> + pgoff, FGP_ACCESSED);
> +				if (!hpage) {
> +					ret = -EINVAL;
> +					goto err;
> +				}
> + }
> + page = hpage + subpgoff;
> + get_page(page);
> + subpgoff++;
> + if (subpgoff == maxsubpgs) {
> + put_page(hpage);
> + hpage = NULL;
> + subpgoff = 0;
> + pgoff++;
> + }
> + } else {
> + page = shmem_read_mapping_page(
> + file_inode(memfd)->i_mapping,
> + pgoff + pgidx);
> + if (IS_ERR(page)) {
> + ret = PTR_ERR(page);
> + goto err;
> + }
> }
> ubuf->pages[pgbuf++] = page;
> }
> fput(memfd);
> memfd = NULL;
> + if (hpage) {
> + put_page(hpage);
> + hpage = NULL;
> + }
> }
>
> exp_info.ops = &udmabuf_ops;
> --
> 2.30.2
>
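
In case it helps with testing: a minimal userspace sketch of the new
path, assuming a single 2MB hugepage has been reserved via
vm.nr_hugepages. Error handling is omitted, and the buffer is touched
first since find_get_page_flags() only finds pages already present in
the page cache:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>
  #include <linux/memfd.h>
  #include <linux/udmabuf.h>

  int main(void)
  {
          size_t size = 2 * 1024 * 1024;          /* one 2MB hugepage */
          int memfd = memfd_create("scanout",
                                   MFD_ALLOW_SEALING | MFD_HUGETLB);
          ftruncate(memfd, size);

          /* fault the backing pages in so the page-cache lookup succeeds */
          void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_SHARED, memfd, 0);
          memset(buf, 0, size);

          /* udmabuf requires F_SEAL_SHRINK (and rejects F_SEAL_WRITE) */
          fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

          struct udmabuf_create create = {
                  .memfd  = memfd,
                  .flags  = UDMABUF_FLAGS_CLOEXEC,
                  .offset = 0,
                  .size   = size,
          };
          int devfd = open("/dev/udmabuf", O_RDWR);
          int dmabuf = ioctl(devfd, UDMABUF_CREATE, &create);

          /* dmabuf is now a dma-buf fd backed by the hugepage's subpages */
          return dmabuf < 0;
  }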