[PATCH v14 2/8] drm/ttm: Provide a shmem backup implementation
Thomas Hellström
thomas.hellstrom at linux.intel.com
Wed Nov 20 11:20:24 UTC 2024
On Wed, 2024-11-20 at 10:24 +0100, Christian König wrote:
>
[SNIP]
> > > Just that I sleep better: This can never return a folio larger
> > > than a
> > > page, doesn't it?
> > The interface definitely allows for returning larger folios, but
> > the
> > individual page in the folio is selected by folio_file_page(folio,
> > idx).
>
> Ah, yeah completely missed that and was really wondering why that
> would
> work.
One remaining slight concern, though, is that if we repeatedly call -
>writepage() for *each* page in a folio, that might degrade
performance. Not sure if the filesystem is supposed to coalesce such
requests if they happen, or whether things will start to crawl.
Right now we can't really tell, since shmem typically only allocates
single page folios (unless told otherwise, like i915 does), and in
addition shmem writepage() also splits any large folios. I've seen
patches floating around to remove that split, though.
/Thomas
>
> >
> > /Thomas
> >
> >
> > > Apart from those background questions looks good to me.
> > >
> > > Regards,
> > > Christian.
> > >
> > > > +
> > > > + folio_mark_accessed(to_folio);
> > > > + folio_lock(to_folio);
> > > > + folio_mark_dirty(to_folio);
> > > > + copy_highpage(folio_file_page(to_folio, idx), page);
> > > > + handle = ttm_backup_shmem_idx_to_handle(idx);
> > > > +
> > > > + if (writeback && !folio_mapped(to_folio) &&
> > > > + folio_clear_dirty_for_io(to_folio)) {
> > > > + struct writeback_control wbc = {
> > > > + .sync_mode = WB_SYNC_NONE,
> > > > + .nr_to_write = SWAP_CLUSTER_MAX,
> > > > + .range_start = 0,
> > > > + .range_end = LLONG_MAX,
> > > > + .for_reclaim = 1,
> > > > + };
> > > > + folio_set_reclaim(to_folio);
> > > > + ret = mapping->a_ops-
> > > > > writepage(folio_file_page(to_folio, idx), &wbc);
> > > > + if (!folio_test_writeback(to_folio))
> > > > + folio_clear_reclaim(to_folio);
> > > > + /* If writepage succeeds, it unlocks the folio
> > > > */
> > > > + if (ret)
> > > > + folio_unlock(to_folio);
>
> The code ignores the error and potentially deserves an explanation
> for that.
>
> Regards,
> Christian.
>
> > > > + } else {
> > > > + folio_unlock(to_folio);
> > > > + }
> > > > +
> > > > + folio_put(to_folio);
> > > > +
> > > > + return handle;
> > > > +}
> > > > +
> > > > +/**
> > > > + * ttm_backup_fini() - Free the struct backup resources after
> > > > last
> > > > use.
> > > > + * @backup: Pointer to the struct backup whose resources to
> > > > free.
> > > > + *
> > > > + * After a call to this function, it's illegal to use the
> > > > @backup
> > > > pointer.
> > > > + */
> > > > +void ttm_backup_fini(struct ttm_backup *backup)
> > > > +{
> > > > + fput(ttm_backup_to_file(backup));
> > > > +}
> > > > +
> > > > +/**
> > > > + * ttm_backup_bytes_avail() - Report the approximate number of
> > > > bytes of backup space
> > > > + * left for backup.
> > > > + *
> > > > + * This function is intended also for driver use to indicate
> > > > whether a
> > > > + * backup attempt is meaningful.
> > > > + *
> > > > + * Return: An approximate size of backup space available.
> > > > + */
> > > > +u64 ttm_backup_bytes_avail(void)
> > > > +{
> > > > + /*
> > > > + * The idea behind backing up to shmem is that shmem
> > > > objects may
> > > > + * eventually be swapped out. So no point swapping out
> > > > if
> > > > there
> > > > + * is no or low swap-space available. But the accuracy
> > > > of
> > > > this
> > > > + * number also depends on shmem actually swapping out
> > > > backed-up
> > > > + * shmem objects without too much buffering.
> > > > + */
> > > > + return (u64)get_nr_swap_pages() << PAGE_SHIFT;
> > > > +}
> > > > +EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
> > > > +
> > > > +/**
> > > > + * ttm_backup_shmem_create() - Create a shmem-based struct
> > > > backup.
> > > > + * @size: The maximum size (in bytes) to back up.
> > > > + *
> > > > + * Create a backup utilizing shmem objects.
> > > > + *
> > > > + * Return: A pointer to a struct ttm_backup on success,
> > > > + * an error pointer on error.
> > > > + */
> > > > +struct ttm_backup *ttm_backup_shmem_create(loff_t size)
> > > > +{
> > > > + struct file *filp;
> > > > +
> > > > + filp = shmem_file_setup("ttm shmem backup", size, 0);
> > > > +
> > > > + return ttm_file_to_backup(filp);
> > > > +}
> > > > diff --git a/include/drm/ttm/ttm_backup.h
> > > > b/include/drm/ttm/ttm_backup.h
> > > > new file mode 100644
> > > > index 000000000000..20609da7e281
> > > > --- /dev/null
> > > > +++ b/include/drm/ttm/ttm_backup.h
> > > > @@ -0,0 +1,74 @@
> > > > +/* SPDX-License-Identifier: MIT */
> > > > +/*
> > > > + * Copyright © 2024 Intel Corporation
> > > > + */
> > > > +
> > > > +#ifndef _TTM_BACKUP_H_
> > > > +#define _TTM_BACKUP_H_
> > > > +
> > > > +#include <linux/mm_types.h>
> > > > +#include <linux/shmem_fs.h>
> > > > +
> > > > +struct ttm_backup;
> > > > +
> > > > +/**
> > > > + * ttm_backup_handle_to_page_ptr() - Convert handle to struct
> > > > page
> > > > pointer
> > > > + * @handle: The handle to convert.
> > > > + *
> > > > + * Converts an opaque handle received from the
> > > > + * struct ttm_backoup_ops::backup_page() function to an
> > > > (invalid)
> > > > + * struct page pointer suitable for a struct page array.
> > > > + *
> > > > + * Return: An (invalid) struct page pointer.
> > > > + */
> > > > +static inline struct page *
> > > > +ttm_backup_handle_to_page_ptr(unsigned long handle)
> > > > +{
> > > > + return (struct page *)(handle << 1 | 1);
> > > > +}
> > > > +
> > > > +/**
> > > > + * ttm_backup_page_ptr_is_handle() - Whether a struct page
> > > > pointer
> > > > is a handle
> > > > + * @page: The struct page pointer to check.
> > > > + *
> > > > + * Return: true if the struct page pointer is a handld
> > > > returned
> > > > from
> > > > + * ttm_backup_handle_to_page_ptr(). False otherwise.
> > > > + */
> > > > +static inline bool ttm_backup_page_ptr_is_handle(const struct
> > > > page
> > > > *page)
> > > > +{
> > > > + return (unsigned long)page & 1;
> > > > +}
> > > > +
> > > > +/**
> > > > + * ttm_backup_page_ptr_to_handle() - Convert a struct page
> > > > pointer
> > > > to a handle
> > > > + * @page: The struct page pointer to convert
> > > > + *
> > > > + * Return: The handle that was previously used in
> > > > + * ttm_backup_handle_to_page_ptr() to obtain a struct page
> > > > pointer, suitable
> > > > + * for use as argument in the struct ttm_backup_ops drop() or
> > > > + * copy_backed_up_page() functions.
> > > > + */
> > > > +static inline unsigned long
> > > > +ttm_backup_page_ptr_to_handle(const struct page *page)
> > > > +{
> > > > + WARN_ON(!ttm_backup_page_ptr_is_handle(page));
> > > > + return (unsigned long)page >> 1;
> > > > +}
> > > > +
> > > > +void ttm_backup_drop(struct ttm_backup *backup, pgoff_t
> > > > handle);
> > > > +
> > > > +int ttm_backup_copy_page(struct ttm_backup *backup, struct
> > > > page
> > > > *dst,
> > > > + pgoff_t handle, bool intr);
> > > > +
> > > > +unsigned long
> > > > +ttm_backup_backup_page(struct ttm_backup *backup, struct page
> > > > *page,
> > > > + bool writeback, pgoff_t idx, gfp_t
> > > > page_gfp,
> > > > + gfp_t alloc_gfp);
> > > > +
> > > > +void ttm_backup_fini(struct ttm_backup *backup);
> > > > +
> > > > +u64 ttm_backup_bytes_avail(void);
> > > > +
> > > > +struct ttm_backup *ttm_backup_shmem_create(loff_t size);
> > > > +
> > > > +#endif
More information about the dri-devel
mailing list