[pulseaudio-discuss] [PATCH v2 06/12] pulsecore: memexport/memimport: Support memfd blocks
Ahmed S. Darwish
darwish.07 at gmail.com
Fri Feb 12 00:15:10 UTC 2016
Add support for transferring memfd-backed blocks without passing their
file descriptor every time, thus minimizing overhead and the
possibility of fd leaks.
To accomplish this, a new 'permanent' segment type is introduced.
Suggested-by: David Henningsson <david.henningsson at canonical.com>
Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
---
src/pulsecore/memblock.c | 134 +++++++++++++++++++++++++++++++++++++++++++----
src/pulsecore/memblock.h | 7 ++-
src/pulsecore/shm.c | 7 +--
src/pulsecore/shm.h | 2 +-
4 files changed, 132 insertions(+), 18 deletions(-)
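For context (not part of the diff): a minimal sketch of how a receiving
node might use the new permanent-mapping API. recv_memfd_announcement()
is a hypothetical stand-in for the transport step that delivers the
memfd fd as SCM_RIGHTS ancillary data together with its SHM ID, and a
read-only mapping is assumed.

#include <unistd.h>

#include <pulsecore/memblock.h>

/* Hypothetical transport helper: receives the memfd fd plus its SHM ID. */
int recv_memfd_announcement(uint32_t *shm_id, int *memfd_fd);

static int register_memfd_segment(pa_memimport *imp) {
    uint32_t shm_id;
    int memfd_fd;

    if (recv_memfd_announcement(&shm_id, &memfd_fd) < 0)
        return -1;

    /* Done once, very early on: map the memfd region permanently. The
     * segment now lives as long as the memimport itself. */
    if (pa_memimport_add_permanent_shmid_to_memfd_mapping(imp, shm_id, memfd_fd, false) < 0) {
        close(memfd_fd);
        return -1;
    }

    /* The shm.c attach comment leaves fd ownership with the caller;
     * assuming the region is mapped by now, our copy can be closed. */
    close(memfd_fd);
    return 0;
}

/* Later blocks reference that region by SHM ID only: pa_memimport_get()
 * finds the cached permanent segment in the segments hash, so no fd
 * needs to travel with the block. */
static pa_memblock* resolve_block(pa_memimport *imp, uint32_t block_id,
                                  uint32_t shm_id, size_t offset, size_t size) {
    return pa_memimport_get(imp, block_id, shm_id, offset, size, false);
}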
diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index 154bd67..959453e 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -100,6 +100,19 @@ struct pa_memimport_segment {
pa_memtrap *trap;
unsigned n_blocks;
bool writable;
+ /* If true, this segment's lifetime will not be limited by the
+ * number of active blocks (n_blocks) using its shared memory.
+ * Rather, it will exist for the full lifetime of the memimport.
+ *
+ * This is done to support the transport of memfd-backed SHM blocks.
+ *
+ * To transfer memfd-backed blocks without passing their fd every
+ * time, thus minimizing overhead and the possibility of fd leaks,
+ * a packet carrying the memfd file descriptor as ancillary data is
+ * sent very early on.
+ * This packet has an ID that identifies the memfd region. Once
+ * received, a permanent mapping is added to the memimport's
+ * segments hash. */
+ bool permanent;
};
/* A collection of multiple segments */
@@ -926,12 +939,46 @@ int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
}
/* No lock necessary */
-bool pa_mempool_is_shared(pa_mempool *p) {
+bool pa_mempool_is_shared(const pa_mempool *p) {
pa_assert(p);
return p->shared;
}
+/* No lock necessary */
+bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
+ pa_assert(p);
+
+ return pa_mempool_is_shared(p) &&
+ (p->per_type.shm.type == PA_MEM_TYPE_SHARED_MEMFD);
+}
+
+/* Self-locked
+ *
+ * Check the comments above pa_shm->per_type.memfd.fd for context.
+ *
+ * After this method's return, the caller owns the file descriptor
+ * and is responsible for closing it at the appropriate time. This
+ * should only be called once during a mempool's lifetime. */
+int pa_mempool_get_and_reset_shm_memfd_fd(pa_mempool *p) {
+ pa_shm *memory;
+ int memfd_fd;
+
+ pa_assert(pa_mempool_is_shared(p));
+ pa_assert(pa_mempool_is_memfd_backed(p));
+
+ pa_mutex_lock(p->mutex);
+
+ memory = &p->per_type.shm;
+ memfd_fd = memory->per_type.memfd.fd;
+ memory->per_type.memfd.fd = -1;
+
+ pa_mutex_unlock(p->mutex);
+
+ pa_assert(memfd_fd != -1);
+ return memfd_fd;
+}
+
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
pa_memimport *i;
@@ -957,15 +1004,17 @@ pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
-static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id, bool writable) {
+static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
+ int memfd_fd, bool writable) {
pa_memimport_segment* seg;
+ pa_assert(pa_mem_type_is_shared(type));
if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
return NULL;
seg = pa_xnew0(pa_memimport_segment, 1);
- if (pa_shm_attach(&seg->memory, shm_id, writable) < 0) {
+ if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
pa_xfree(seg);
return NULL;
}
@@ -981,6 +1030,7 @@ static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id, bo
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
pa_assert(seg);
+ pa_assert(seg->n_blocks == ((seg->permanent) ? 1 : 0));
pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
pa_shm_free(&seg->memory);
@@ -995,6 +1045,8 @@ static void segment_detach(pa_memimport_segment *seg) {
void pa_memimport_free(pa_memimport *i) {
pa_memexport *e;
pa_memblock *b;
+ pa_memimport_segment *seg;
+ void *state = NULL;
pa_assert(i);
@@ -1003,6 +1055,15 @@ void pa_memimport_free(pa_memimport *i) {
while ((b = pa_hashmap_first(i->blocks)))
memblock_replace_import(b);
+ /* Permanent segments exist for the lifetime of the memimport. Now
+ * that we're freeing the memimport itself, clean them all up.
+ *
+ * Careful! segment_detach() removes the segment from the memimport's
+ * segments hash; that is the same hash we are iterating over here. */
+ PA_HASHMAP_FOREACH(seg, i->segments, state) {
+ if (seg->permanent)
+ segment_detach(seg);
+ }
pa_assert(pa_hashmap_size(i->segments) == 0);
pa_mutex_unlock(i->mutex);
@@ -1025,9 +1086,38 @@ void pa_memimport_free(pa_memimport *i) {
pa_xfree(i);
}
+/* Introduce a new mapping entry: SHM ID to memory mapped memfd region.
+ * For further details, check comments at `pa_shm->per_type.memfd.fd'
+ * and on top of `pa_memimport_segment.permanent' for context. */
+int pa_memimport_add_permanent_shmid_to_memfd_mapping(pa_memimport *i, uint32_t shm_id, int memfd_fd,
+ bool writable)
+{
+ pa_memimport_segment *seg;
+ int ret = -1;
+
+ pa_assert(i);
+ pa_assert(memfd_fd != -1);
+
+ pa_mutex_lock(i->mutex);
+
+ if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
+ goto finish;
+
+ /* n_blocks acts as a segment reference count. To avoid the segment
+ * being deleted when receiving silent memchunks, etc., mark our
+ * permanent presence by incrementing that refcount. */
+ seg->n_blocks++;
+ seg->permanent = true;
+ ret = 0;
+
+finish:
+ pa_mutex_unlock(i->mutex);
+ return ret;
+}
+
/* Self-locked */
-pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id,
- size_t offset, size_t size, bool writable) {
+static pa_memblock* NEW_API_pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
+ size_t offset, size_t size, bool writable) {
pa_memblock *b = NULL;
pa_memimport_segment *seg;
@@ -1043,12 +1133,17 @@ pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_i
if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
goto finish;
- if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
- if (!(seg = segment_attach(i, shm_id, writable)))
+ if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
+ if (type == PA_MEM_TYPE_SHARED_MEMFD) {
+ pa_log("Bailing out! Memfd segment with ID %u should've been earlier cached", shm_id);
goto finish;
+ }
+ if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
+ goto finish;
+ }
- if (writable != seg->writable) {
- pa_log("Cannot open segment - writable status changed!");
+ if (writable && !seg->writable) {
+ pa_log("Cannot import cached segment in write mode - previously mapped as read-only!");
goto finish;
}
@@ -1082,6 +1177,11 @@ finish:
return b;
}
+pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id,
+ size_t offset, size_t size, bool writable) {
+ return NEW_API_pa_memimport_get(i, PA_MEM_TYPE_SHARED_POSIX, block_id, shm_id, offset, size, writable);
+}
+
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
pa_memblock *b;
int ret = 0;
@@ -1238,7 +1338,8 @@ static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
}
/* Self-locked */
-int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
+static int NEW_API_pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
+ uint32_t *shm_id, size_t *offset, size_t * size) {
pa_shm *memory;
struct memexport_slot *slot;
void *data;
@@ -1282,12 +1383,14 @@ int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32
} else {
pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
pa_assert(b->pool);
+ pa_assert(pa_mempool_is_shared(b->pool));
memory = &b->pool->per_type.shm;
}
pa_assert(data >= memory->mem.ptr);
pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->mem.ptr + memory->mem.size);
+ *type = memory->type;
*shm_id = memory->id;
*offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->mem.ptr);
*size = b->length;
@@ -1299,3 +1402,14 @@ int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32
return 0;
}
+
+int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id,
+ size_t *offset, size_t *size) {
+ pa_mem_type_t type;
+ int ret;
+
+ if (!(ret = NEW_API_pa_memexport_put(e, b, &type, block_id, shm_id, offset, size)))
+ pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
+
+ return ret;
+}
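For illustration only (again, not part of the diff): a sender-side
sketch of the one-time fd handoff, built on the mempool helpers added
above. send_shm_id_with_fd() is a hypothetical placeholder for the
transport step that ships the ID plus the fd as ancillary data; the
real packet handling belongs to the transport layer, not to this patch.

#include <unistd.h>

#include <pulsecore/memblock.h>

/* Hypothetical transport helper: sends the SHM ID and the memfd fd. */
int send_shm_id_with_fd(uint32_t shm_id, int memfd_fd);

static int announce_memfd_pool(pa_mempool *pool) {
    uint32_t shm_id;
    int memfd_fd, r;

    if (!pa_mempool_is_memfd_backed(pool))
        return 0;                       /* nothing to announce */

    if (pa_mempool_get_shm_id(pool, &shm_id) < 0)
        return -1;

    /* Ownership of the fd moves to the caller and the pool's copy is
     * reset, so this announcement can happen only once per pool. */
    memfd_fd = pa_mempool_get_and_reset_shm_memfd_fd(pool);

    r = send_shm_id_with_fd(shm_id, memfd_fd);

    /* The receiver gets its own duplicate of the fd over the socket. */
    close(memfd_fd);
    return r;
}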
diff --git a/src/pulsecore/memblock.h b/src/pulsecore/memblock.h
index a760b6f..776b017 100644
--- a/src/pulsecore/memblock.h
+++ b/src/pulsecore/memblock.h
@@ -127,14 +127,19 @@ void pa_mempool_free(pa_mempool *p);
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
void pa_mempool_vacuum(pa_mempool *p);
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id);
-bool pa_mempool_is_shared(pa_mempool *p);
+bool pa_mempool_is_shared(const pa_mempool *p);
+bool pa_mempool_is_memfd_backed(const pa_mempool *p);
bool pa_mempool_is_remote_writable(pa_mempool *p);
void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable);
size_t pa_mempool_block_size_max(pa_mempool *p);
+int pa_mempool_get_and_reset_shm_memfd_fd(pa_mempool *p);
+
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata);
void pa_memimport_free(pa_memimport *i);
+int pa_memimport_add_permanent_shmid_to_memfd_mapping(pa_memimport *i, uint32_t shm_id, int memfd_fd,
+ bool writable);
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id,
size_t offset, size_t size, bool writable);
int pa_memimport_process_revoke(pa_memimport *i, uint32_t block_id);
diff --git a/src/pulsecore/shm.c b/src/pulsecore/shm.c
index 0ca4fb0..7ebe32a 100644
--- a/src/pulsecore/shm.c
+++ b/src/pulsecore/shm.c
@@ -351,7 +351,7 @@ fail:
/* Note: In case of attaching memfd SHM areas, the caller maintains ownership
* of the passed fd and has the responsibility of closing it when appropriate. */
-static int NEW_API_pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable) {
+int pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable) {
#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
return shm_attach(m, type, id, memfd_fd, writable, false);
#else
@@ -359,11 +359,6 @@ static int NEW_API_pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int
#endif
}
-/* Compatibility version until the new API is used in external sources */
-int pa_shm_attach(pa_shm *m, unsigned id, bool writable) {
- return NEW_API_pa_shm_attach(m, PA_MEM_TYPE_SHARED_POSIX, id, -1, writable);
-}
-
int pa_shm_cleanup(void) {
#ifdef HAVE_SHM_OPEN
diff --git a/src/pulsecore/shm.h b/src/pulsecore/shm.h
index 1c8ce83..e83a36c 100644
--- a/src/pulsecore/shm.h
+++ b/src/pulsecore/shm.h
@@ -50,7 +50,7 @@ typedef struct pa_shm {
} pa_shm;
int pa_shm_create(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode);
-int pa_shm_attach(pa_shm *m, unsigned id, bool writable);
+int pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable);
void pa_shm_punch(pa_shm *m, size_t offset, size_t size);
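For reference (not part of the diff): a tiny sketch of the widened
pa_shm_attach() call for both memory types, assuming read-only
attachments and that the pa_mem_type_t constants are already visible
through shm.h.

#include <pulsecore/shm.h>

/* Classic POSIX SHM: no fd is involved, so -1 is passed. */
static int attach_posix(pa_shm *m, unsigned shm_id) {
    return pa_shm_attach(m, PA_MEM_TYPE_SHARED_POSIX, shm_id, -1, false);
}

/* Memfd-backed SHM: the caller supplies the fd it received over the
 * socket and, per the comment above, keeps ownership of that fd. */
static int attach_memfd(pa_shm *m, unsigned shm_id, int memfd_fd) {
    return pa_shm_attach(m, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, false);
}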
Regards,
--
Darwish
http://darwish.chasingpointers.com