[RFC PATCH v1 07/10] mm: Introduce struct folio_owner_ops

Fuad Tabba tabba at google.com
Fri Nov 8 16:20:37 UTC 2024


Introduce struct folio_owner_ops, a method table of callbacks into
the owners of folios that need special handling for certain
operations. For now it contains a single callback, free(), which is
called as soon as the folio refcount drops to 0.
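
For illustration only, the free() callback of a hypothetical owner
that hands folios out from its own pool might look roughly like the
sketch below; the my_* names and my_allocator_reclaim_folio() are
made up for this example and are not part of this patch:

#include <linux/mm_types.h>

/* Made-up helper, assumed to exist in the owner's custom allocator. */
void my_allocator_reclaim_folio(struct folio *folio);

/*
 * Hypothetical owner callback: by the time free() runs, the refcount
 * has already dropped to 0, so the folio belongs exclusively to the
 * owner again and can be returned to its custom allocator rather than
 * to the page allocator.
 */
static void my_owner_free_folio(struct folio *folio)
{
	my_allocator_reclaim_folio(folio);
}

static const struct folio_owner_ops my_owner_ops = {
	.free = my_owner_free_folio,
};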

Add a pointer to this struct, overlaid on struct page
compound_head, pgmap, and struct page/folio lru. The users of this
struct either never use lru (e.g., zone device), or can easily tell
when lru is in use (e.g., hugetlb) and handle it accordingly, as
sketched below. While folios are isolated they cannot be freed, so
it does not matter that their owner_ops are unstable during that
window. This is sufficient for the current use case of returning
these folios to a custom allocator.
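
As a sketch of the latter case (assuming the hypothetical
my_owner_ops table from the previous example; none of the my_* names
are part of this patch), an owner that also keeps folios on a list
via lru would drop owner_ops around the list usage and restore it
afterwards:

#include <linux/list.h>
#include <linux/mm_types.h>

/* owner_ops and lru share storage, so give up owner_ops while listed. */
static void my_owner_add_to_list(struct folio *folio, struct list_head *list)
{
	folio_clear_owner_ops(folio);
	list_add(&folio->lru, list);
}

/* Once the folio is off the list, lru is free again; re-tag the folio. */
static void my_owner_remove_from_list(struct folio *folio)
{
	list_del_init(&folio->lru);
	folio_set_owner_ops(folio, &my_owner_ops);
}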

To identify that a folio has owner_ops, we set bit 1 of the field,
similar to how bit 0 of compound_head is used to identify the tail
pages of a compound page.
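
Putting the pieces together, a hypothetical owner would then tag each
folio once and let the normal put path invoke the callback (the
my_owner_* names are made up for illustration; folio_alloc() and the
folio_*_owner_ops() helpers added by this patch are the only real
interfaces used):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct folio *my_owner_alloc_folio(void)
{
	struct folio *folio = folio_alloc(GFP_KERNEL, 0);

	if (!folio)
		return NULL;

	/* Stores the tagged pointer (bit 1 set), marking the folio as owned. */
	folio_set_owner_ops(folio, &my_owner_ops);
	return folio;
}

/*
 * The last folio_put() then ends up in __folio_put(), which finds the
 * tagged pointer via folio_get_owner_ops() and calls my_owner_ops.free()
 * instead of freeing the folio back to the page allocator.
 */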

Signed-off-by: Fuad Tabba <tabba at google.com>
---
 include/linux/mm_types.h | 64 +++++++++++++++++++++++++++++++++++++---
 mm/swap.c                | 19 ++++++++++++
 2 files changed, 79 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 365c73be0bb4..6e06286f44f1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -41,10 +41,12 @@ struct mem_cgroup;
  *
  * If you allocate the page using alloc_pages(), you can use some of the
  * space in struct page for your own purposes.  The five words in the main
- * union are available, except for bit 0 of the first word which must be
- * kept clear.  Many users use this word to store a pointer to an object
- * which is guaranteed to be aligned.  If you use the same storage as
- * page->mapping, you must restore it to NULL before freeing the page.
+ * union are available, except for bit 0 (used to identify compound
+ * pages) and bit 1 (used to identify owner_ops) of the first word,
+ * both of which must be kept clear.  Many users use this word to
+ * store a pointer to an object which is guaranteed to be aligned.  If
+ * you use the same storage as page->mapping, you must restore it to
+ * NULL before freeing the page.
  *
  * The mapcount field must not be used for own purposes.
  *
@@ -283,10 +285,16 @@ typedef struct {
 	unsigned long val;
 } swp_entry_t;
 
+struct folio_owner_ops;
+
 /**
  * struct folio - Represents a contiguous set of bytes.
  * @flags: Identical to the page flags.
  * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @owner_ops: Pointer to the callback operations of the folio owner. Only
+ *    valid if bit 1 of the field is set.
+ *    NOTE: Overlaid with @lru, so the two cannot be used at the same time.
+ *          To use @lru, clear owner_ops first and restore it once done.
  * @mlock_count: Number of times this folio has been pinned by mlock().
  * @mapping: The file this page belongs to, or refers to the anon_vma for
  *    anonymous memory.
@@ -330,6 +338,7 @@ struct folio {
 			unsigned long flags;
 			union {
 				struct list_head lru;
+				const struct folio_owner_ops *owner_ops; /* Bit 1 is set */
 	/* private: avoid cluttering the output */
 				struct {
 					void *__filler;
@@ -417,6 +426,7 @@ FOLIO_MATCH(flags, flags);
 FOLIO_MATCH(lru, lru);
 FOLIO_MATCH(mapping, mapping);
 FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(compound_head, owner_ops);
 FOLIO_MATCH(index, index);
 FOLIO_MATCH(private, private);
 FOLIO_MATCH(_mapcount, _mapcount);
@@ -452,6 +462,13 @@ FOLIO_MATCH(flags, _flags_3);
 FOLIO_MATCH(compound_head, _head_3);
 #undef FOLIO_MATCH
 
+struct folio_owner_ops {
+	/*
+	 * Called once the folio refcount reaches 0.
+	 */
+	void (*free)(struct folio *folio);
+};
+
 /**
  * struct ptdesc -    Memory descriptor for page tables.
  * @__page_flags:     Same as page flags. Powerpc only.
@@ -560,6 +577,45 @@ static inline void *folio_get_private(struct folio *folio)
 	return folio->private;
 }
 
+/*
+ * owner_ops is overlaid with compound_head, whose bit 0 already marks the
+ * tail pages of a compound page, so use bit 1 to tag owner_ops pointers.
+ */
+#define FOLIO_OWNER_OPS_BIT    1UL
+#define FOLIO_OWNER_OPS        (1UL << FOLIO_OWNER_OPS_BIT)
+
+/*
+ * Set the folio owner_ops as well as bit 1 of the pointer to indicate that the
+ * folio has owner_ops.
+ */
+static inline void folio_set_owner_ops(struct folio *folio, const struct folio_owner_ops *owner_ops)
+{
+	owner_ops = (const struct folio_owner_ops *)((unsigned long)owner_ops | FOLIO_OWNER_OPS);
+	folio->owner_ops = owner_ops;
+}
+
+/*
+ * Clear the folio owner_ops including bit 1 of the pointer.
+ */
+static inline void folio_clear_owner_ops(struct folio *folio)
+{
+	folio->owner_ops = NULL;
+}
+
+/*
+ * Return the folio's owner_ops if it has them, otherwise return NULL.
+ */
+static inline const struct folio_owner_ops *folio_get_owner_ops(struct folio *folio)
+{
+	const struct folio_owner_ops *owner_ops = folio->owner_ops;
+
+	if (!((unsigned long)owner_ops & FOLIO_OWNER_OPS))
+		return NULL;
+
+	owner_ops = (const struct folio_owner_ops *)((unsigned long)owner_ops & ~FOLIO_OWNER_OPS);
+	return owner_ops;
+}
+
 struct page_frag_cache {
 	void * va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
diff --git a/mm/swap.c b/mm/swap.c
index 638a3f001676..767ff6d8f47b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -110,6 +110,13 @@ static void page_cache_release(struct folio *folio)
 
 void __folio_put(struct folio *folio)
 {
+	const struct folio_owner_ops *owner_ops = folio_get_owner_ops(folio);
+
+	if (unlikely(owner_ops)) {
+		owner_ops->free(folio);
+		return;
+	}
+
 	if (unlikely(folio_is_zone_device(folio))) {
 		free_zone_device_folio(folio);
 		return;
@@ -929,10 +936,22 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 	for (i = 0, j = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		unsigned int nr_refs = refs ? refs[i] : 1;
+		const struct folio_owner_ops *owner_ops;
 
 		if (is_huge_zero_folio(folio))
 			continue;
 
+		owner_ops = folio_get_owner_ops(folio);
+		if (unlikely(owner_ops)) {
+			if (lruvec) {
+				unlock_page_lruvec_irqrestore(lruvec, flags);
+				lruvec = NULL;
+			}
+			if (folio_ref_sub_and_test(folio, nr_refs))
+				owner_ops->free(folio);
+			continue;
+		}
+
 		if (folio_is_zone_device(folio)) {
 			if (lruvec) {
 				unlock_page_lruvec_irqrestore(lruvec, flags);
-- 
2.47.0.277.g8800431eea-goog


