[PATCH v3 1/4] iov_iter: Provide copy_iomem_to|from_iter()
Michal Wajdeczko
michal.wajdeczko at intel.com
Thu Nov 14 14:12:38 UTC 2024
Define simple copy helpers that work on I/O memory. This will
allow reuse of the existing iov_iter framework functions in new
use cases, like copying data between a device's MMIO region and
the user or kernel buffers described by an iterator. Since I/O
memory must be accessed with memcpy_fromio() and memcpy_toio()
rather than plain memcpy(), user backed iterators are handled
separately, bouncing the data through a small on-stack buffer.
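For example, a driver could use this to expose a device scratch
area through a read(2)-style interface (a minimal sketch only;
struct foo_device and its mmio_base/mmio_size members are made up
here for illustration):

	struct foo_device {
		void __iomem *mmio_base;	/* from ioremap() */
		size_t mmio_size;
	};

	/* pull up to iov_iter_count(to) bytes of MMIO into the iterator */
	static ssize_t foo_read_mmio(struct foo_device *fd, struct iov_iter *to)
	{
		size_t n = min(iov_iter_count(to), fd->mmio_size);

		return copy_iomem_to_iter(fd->mmio_base, n, to);
	}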
Signed-off-by: Michal Wajdeczko <michal.wajdeczko at intel.com>
Cc: Matthew Wilcox <willy at infradead.org>
---
Cc: Alexander Viro <viro at zeniv.linux.org.uk>
Cc: Andrew Morton <akpm at linux-foundation.org>
Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
Cc: linux-kernel at vger.kernel.org
Cc: linux-fsdevel at vger.kernel.org
---
v2: use iterate_and_advance to treat user_iter separately (Matthew)
v3: add explicit casts to make sparse happy (kernel test robot)
---
 include/linux/uio.h |   3 +
 lib/iov_iter.c      | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 853f9de5aa05..354a6b2d1e94 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -183,6 +183,9 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 
+size_t copy_iomem_to_iter(const void __iomem *from, size_t bytes, struct iov_iter *i);
+size_t copy_iomem_from_iter(void __iomem *to, size_t bytes, struct iov_iter *i);
+
static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
size_t bytes, struct iov_iter *i)
{
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 908e75a28d90..a6eae4063366 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -323,6 +323,112 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
 
+static __always_inline
+size_t memcpy_iomem_to_iter(void *iter_to, size_t progress, size_t len,
+			    void *from, void *priv2)
+{
+	memcpy_fromio(iter_to, (const void __iomem *)from + progress, len);
+	return 0;
+}
+
+static __always_inline
+size_t memcpy_iomem_from_iter(void *iter_from, size_t progress, size_t len,
+			      void *to, void *priv2)
+{
+	memcpy_toio((void __iomem *)to + progress, iter_from, len);
+	return 0;
+}
+
+static __always_inline
+size_t copy_iomem_to_user_iter(void __user *iter_to, size_t progress,
+			       size_t len, void *from, void *priv2)
+{
+	unsigned char buf[SMP_CACHE_BYTES];
+	size_t copied = 0;
+
+	/* bounce via a small stack buffer as we can't copy directly from io */
+	while (copied < len) {
+		size_t chunk = min(len - copied, sizeof(buf));
+		size_t remain;
+
+		memcpy_fromio(buf, (const void __iomem *)from + progress + copied,
+			      chunk);
+		remain = copy_to_user_iter(iter_to + copied, 0, chunk, buf,
+					   priv2);
+		copied += chunk - remain;
+		if (remain)
+			break;
+	}
+	return len - copied;
+}
+
+static __always_inline
+size_t copy_iomem_from_user_iter(void __user *iter_from, size_t progress,
+				 size_t len, void *to, void *priv2)
+{
+	unsigned char buf[SMP_CACHE_BYTES];
+	size_t copied = 0;
+
+	/* bounce via a small stack buffer as we can't copy directly to io */
+	while (copied < len) {
+		size_t chunk = min(len - copied, sizeof(buf));
+		size_t remain;
+
+		remain = copy_from_user_iter(iter_from + copied, 0, chunk, buf,
+					     priv2);
+		memcpy_toio((void __iomem *)to + progress + copied, buf,
+			    chunk - remain);
+		copied += chunk - remain;
+		if (remain)
+			break;
+	}
+	return len - copied;
+}
+
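+/**
+ * copy_iomem_to_iter - copy data from I/O memory into an iov_iter
+ * @from: source I/O memory
+ * @bytes: number of bytes to copy
+ * @i: destination iterator
+ *
+ * Return: number of bytes copied; may be less than @bytes if a fault
+ * happens on a user backed iterator.
+ */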
+size_t copy_iomem_to_iter(const void __iomem *from, size_t bytes, struct iov_iter *i)
+{
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
+	if (user_backed_iter(i))
+		might_fault();
+
+	return iterate_and_advance(i, bytes, (void __force *)from,
+				   copy_iomem_to_user_iter,
+				   memcpy_iomem_to_iter);
+}
+EXPORT_SYMBOL(copy_iomem_to_iter);
+
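+/**
+ * copy_iomem_from_iter - copy data from an iov_iter into I/O memory
+ * @to: destination I/O memory
+ * @bytes: number of bytes to copy
+ * @i: source iterator
+ *
+ * Return: number of bytes copied; may be less than @bytes if a fault
+ * happens on a user backed iterator.
+ */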
+size_t copy_iomem_from_iter(void __iomem *to, size_t bytes, struct iov_iter *i)
+{
+	if (WARN_ON_ONCE(!i->data_source))
+		return 0;
+	if (user_backed_iter(i))
+		might_fault();
+
+	return iterate_and_advance(i, bytes, (void __force *)to,
+				   copy_iomem_from_user_iter,
+				   memcpy_iomem_from_iter);
+}
+EXPORT_SYMBOL(copy_iomem_from_iter);
+
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
--
2.43.0