[v2 22/31] drm/xe/svm: implement functions to allocate and free device memory
Matthew Brost
matthew.brost at intel.com
Wed Apr 17 20:55:29 UTC 2024
On Tue, Apr 09, 2024 at 04:17:33PM -0400, Oak Zeng wrote:
> Function xe_devm_alloc_pages allocates pages from drm buddy and performs
> housekeeping work for all the pages allocated, such as getting a page
> refcount, keeping a bitmap of all pages to denote whether a page is in
> use, and putting pages on a drm lru list for eviction purposes.
>
> Function xe_devm_free_blocks returns a list of memory blocks to the drm
> buddy allocator.
>
> Function xe_devm_page_free is a callback function from the hmm layer. It
> is called whenever a page's refcount drops to 1. This function clears
> the bit of this page in the bitmap. If all the bits in the bitmap are
> cleared, it means all the pages have been freed, so we return all the
> pages in this memory block back to drm buddy.
>
> Signed-off-by: Oak Zeng <oak.zeng at intel.com>
> Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Cc: Thomas Hellström <thomas.hellstrom at intel.com>
> Cc: Brian Welty <brian.welty at intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.h | 7 ++
> drivers/gpu/drm/xe/xe_svm_devmem.c | 147 ++++++++++++++++++++++++++++-
> 2 files changed, 152 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 624c1581f8ba..92a3ee90d5a7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -46,4 +46,11 @@ static inline struct xe_mem_region *xe_page_to_mem_region(struct page *page)
> return container_of(page->pgmap, struct xe_mem_region, pagemap);
> }
>
> +int xe_devm_alloc_pages(struct xe_tile *tile,
> + unsigned long npages,
> + struct list_head *blocks,
> + unsigned long *pfn);
> +
> +void xe_devm_free_blocks(struct list_head *blocks);
> +void xe_devm_page_free(struct page *page);
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_svm_devmem.c b/drivers/gpu/drm/xe/xe_svm_devmem.c
> index 31af56e8285a..5ba0cd9a70b0 100644
> --- a/drivers/gpu/drm/xe/xe_svm_devmem.c
> +++ b/drivers/gpu/drm/xe/xe_svm_devmem.c
> @@ -5,18 +5,161 @@
>
> #include <linux/mm_types.h>
> #include <linux/sched/mm.h>
> -
> +#include <linux/gfp.h>
> +#include <linux/migrate.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/dma-fence.h>
> +#include <linux/bitops.h>
> +#include <linux/bitmap.h>
> +#include <drm/drm_buddy.h>
> #include "xe_device_types.h"
> #include "xe_svm.h"
> +#include "xe_migrate.h"
> +#include "xe_ttm_vram_mgr_types.h"
> +#include "xe_assert.h"
>
> +/**
> + * struct xe_svm_block_meta - svm uses this data structure to manage each
> + * block allocated from drm buddy. This will be set to the drm_buddy_block's
> + * private field.
> + *
> + * @lru: used to link this block to drm's lru lists. This will be replaced
> + * with struct drm_lru_entity later.
> + * @tile: tile from which we allocated this block
> + * @bitmap: a bitmap of each page in this block. 1 means this page is used,
> + * 0 means this page is idle. When all bits of this block are 0, it is time
> + * to return this block to the drm buddy subsystem.
> + */
> +struct xe_svm_block_meta {
> + struct list_head lru;
> + struct xe_tile *tile;
> + unsigned long bitmap[];
> +};
>
> static vm_fault_t xe_devm_migrate_to_ram(struct vm_fault *vmf)
> {
> return 0;
> }
>
> -static void xe_devm_page_free(struct page *page)
> +static u64 block_offset_to_pfn(struct xe_mem_region *mr, u64 offset)
> +{
> + /* DRM buddy's block offset is 0-based */
> + offset += mr->hpa_base;
> +
> + return PHYS_PFN(offset);
> +}
> +
> +/* FIXME: we locked the pages by calling zone_device_page_init
> + * in xe_devm_alloc_pages. Should we unlock the pages here?
> + */
> +static void free_block(struct drm_buddy_block *block)
> +{
> + struct xe_svm_block_meta *meta =
> + (struct xe_svm_block_meta *)block->private;
> + struct xe_tile *tile = meta->tile;
> + struct drm_buddy *mm = &tile->mem.vram_mgr->mm;
> +
> + kfree(block->private);
> + drm_buddy_free_block(mm, block);
> +}
> +
> +void xe_devm_page_free(struct page *page)
> +{
> + struct drm_buddy_block *block =
> + (struct drm_buddy_block *)page->zone_device_data;
> + struct xe_svm_block_meta *meta =
> + (struct xe_svm_block_meta *)block->private;
> + struct xe_tile *tile = meta->tile;
> + struct xe_mem_region *mr = &tile->mem.vram;
> + struct drm_buddy *mm = &tile->mem.vram_mgr->mm;
> + u64 size = drm_buddy_block_size(mm, block);
> + u64 pages_per_block = size >> PAGE_SHIFT;
> + u64 block_pfn_first =
> + block_offset_to_pfn(mr, drm_buddy_block_offset(block));
> + u64 page_pfn = page_to_pfn(page);
> + u64 i = page_pfn - block_pfn_first;
> +
> + xe_assert(tile->xe, i < pages_per_block);
> + clear_bit(i, meta->bitmap);
> + if (bitmap_empty(meta->bitmap, pages_per_block))
> + free_block(block);
> +}
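For readers following along, this callback presumably stays wired into
the pagemap ops further down (a sketch on my part, the actual hookup is
outside this hunk):

	static const struct dev_pagemap_ops xe_devm_pagemap_ops = {
		.page_free = xe_devm_page_free,
		.migrate_to_ram = xe_devm_migrate_to_ram,
	};

so the core mm calls xe_devm_page_free once the last reference to a
device page is dropped, and clearing the last bit hands the whole block
back to buddy.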
> +
> +/**
> + * xe_devm_alloc_pages() - allocate device pages from buddy allocator
> + *
> + * @tile: which tile to allocate device memory from
> + * @npages: how many pages to allocate
> + * @blocks: used to return the allocated blocks
> + * @pfn: used to return the pfns of all allocated pages. Must be big enough
> + * to hold at least @npages entries.
> + *
> + * This function allocates blocks of memory from the drm buddy allocator, and
> + * performs initialization work: set struct page::zone_device_data to point
> + * to the memory block; set/initialize the drm_buddy_block::private field;
> + * lock_page for each page allocated; add the memory block to the lru
> + * manager's lru list - this is TBD.
> + *
> + * Return: 0 on success,
> + * error code otherwise
> + */
> +int xe_devm_alloc_pages(struct xe_tile *tile,
> + unsigned long npages,
> + struct list_head *blocks,
> + unsigned long *pfn)
> +{
> + struct drm_buddy *mm = &tile->mem.vram_mgr->mm;
> + struct drm_buddy_block *block, *tmp;
> + u64 size = npages << PAGE_SHIFT;
> + int ret = 0, i, j = 0;
> +
> + ret = drm_buddy_alloc_blocks(mm, 0, mm->size, size, PAGE_SIZE,
> + blocks, DRM_BUDDY_TOPDOWN_ALLOCATION);
Realized this while discussing ref counting off the list: the buddy
allocation size can be either PAGE_SIZE or SZ_64K depending on the
platform too. We store this in the VM via the XE_VM_FLAG_64K flag.
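If a VM pointer is reachable from this path, the alignment could be
picked along these lines (untested sketch on my part, assuming a vm is
available here):

	u64 align = (vm->flags & XE_VM_FLAG_64K) ? SZ_64K : PAGE_SIZE;

	ret = drm_buddy_alloc_blocks(mm, 0, mm->size, size, align,
				     blocks, DRM_BUDDY_TOPDOWN_ALLOCATION);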
Matt
> +
> + if (unlikely(ret))
> + return ret;
> +
> + list_for_each_entry_safe(block, tmp, blocks, link) {
> + struct xe_mem_region *mr = &tile->mem.vram;
> + u64 block_pfn_first, pages_per_block;
> + struct xe_svm_block_meta *meta;
> + u32 meta_size;
> +
> + size = drm_buddy_block_size(mm, block);
> + pages_per_block = size >> PAGE_SHIFT;
> + meta_size = BITS_TO_BYTES(pages_per_block) +
> + sizeof(struct xe_svm_block_meta);
> + meta = kzalloc(meta_size, GFP_KERNEL);
> + bitmap_fill(meta->bitmap, pages_per_block);
> + meta->tile = tile;
> + block->private = meta;
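Two small things on the meta allocation: kzalloc() isn't checked for
failure, and since clear_bit()/bitmap_empty() operate on whole unsigned
longs, the bitmap storage probably wants rounding up to longs rather
than bytes. Something like (a sketch, using struct_size() from
<linux/overflow.h>; the unwind label is hypothetical):

	meta = kzalloc(struct_size(meta, bitmap,
				   BITS_TO_LONGS(pages_per_block)),
		       GFP_KERNEL);
	if (!meta)
		goto err_free_blocks;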
> + block_pfn_first =
> + block_offset_to_pfn(mr, drm_buddy_block_offset(block));
> + for (i = 0; i < pages_per_block; i++) {
> + struct page *page;
> +
> + pfn[j++] = block_pfn_first + i;
> + page = pfn_to_page(block_pfn_first + i);
> + /* Lock page per hmm requirement, see hmm.rst. */
> + zone_device_page_init(page);
> + page->zone_device_data = block;
> + }
> + }
> +
> + return ret;
> +}
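A minimal caller sketch for reference (hypothetical, not part of this
series):

	LIST_HEAD(blocks);
	unsigned long *pfns;
	int err;

	pfns = kvcalloc(npages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return -ENOMEM;

	err = xe_devm_alloc_pages(tile, npages, &blocks, pfns);
	if (err)
		goto out;

	/* ... map the pfns, use the memory ... */

	/* teardown path: hand every block back to buddy */
	xe_devm_free_blocks(&blocks);
out:
	kvfree(pfns);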
> +
> +/**
> + * xe_devm_free_blocks() - free all memory blocks
> + *
> + * @blocks: memory blocks list head
> + */
> +void xe_devm_free_blocks(struct list_head *blocks)
> {
> + struct drm_buddy_block *block, *tmp;
> +
> + list_for_each_entry_safe(block, tmp, blocks, link)
> + free_block(block);
> }
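One more nit here: drm_buddy_free_block() doesn't unlink the block from
the caller's list (compare drm_buddy_free_list(), which does a list_del
and reinitializes the head), so the list is left with dangling entries
after this loop; an INIT_LIST_HEAD(blocks) at the end might be worth it.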
>
> static const struct dev_pagemap_ops xe_devm_pagemap_ops = {
> --
> 2.26.3
>