[pulseaudio-discuss] [PATCH v2 08/12] pulsecore: Specially mark global mempools
David Henningsson
david.henningsson at canonical.com
Tue Feb 16 14:17:11 UTC 2016
I probably would just have added a "bool global" parameter directly to
pa_mempool_new() instead of creating two extra functions, but that's
just a matter of taste. Looks good.
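Roughly what I have in mind (only a sketch of the alternative
signature, untested; not part of the patch):

    /* Single constructor; the caller states explicitly whether this
     * is the global (pa_core) pool or a per-client one. */
    pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool global);

    /* pa_core_new() would then call: */
    pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size, true);

    /* ...while per-client pools keep passing false. */

(A caller-side sketch of the resulting global vs. per-client memfd fd
handling is appended below the quoted patch.)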
Reviewed-by: David Henningsson <david.henningsson at canonical.com>
On 2016-02-12 01:18, Ahmed S. Darwish wrote:
> Color global mempools with a special mark. Almost all pools are
> now created on a per-client basis except the pa_core's mempool,
> which is shared between all clients.
>
> This special marking is needed for handling memfd-backed pools.
>
> To avoid fd leaks, memfd pools are registered with the connection
> pstream to create an ID<->memfd mapping on both PA endpoints.
> Such memory regions are then always referenced by their IDs and
> never by their fds, and so their fds can be safely closed later.
>
> Unfortunately this scheme cannot work with global pools, since the
> ID<->memfd registration needs to happen for each newly connected
> client; hence the need for special handling.
>
> Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
> ---
> src/pulsecore/core.c | 4 +--
> src/pulsecore/memblock.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++-
> src/pulsecore/memblock.h | 3 +++
> src/pulsecore/shm.h | 6 ++++-
> 4 files changed, 79 insertions(+), 4 deletions(-)
>
> diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
> index 4714932..aab82f3 100644
> --- a/src/pulsecore/core.c
> +++ b/src/pulsecore/core.c
> @@ -69,14 +69,14 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, size_t shm_size) {
> pa_assert(m);
>
> if (shared) {
> - if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size))) {
> + if (!(pool = pa_global_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size))) {
> pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal memory pool.");
> shared = false;
> }
> }
>
> if (!shared) {
> - if (!(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size))) {
> + if (!(pool = pa_global_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size))) {
> pa_log("pa_mempool_new() failed.");
> return NULL;
> }
> diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
> index af27ba5..9c31e53 100644
> --- a/src/pulsecore/memblock.c
> +++ b/src/pulsecore/memblock.c
> @@ -169,6 +169,29 @@ struct pa_mempool {
> } per_type;
> };
>
> + /* Color global mempools with a special mark.
> + *
> + * Mempools are now created on a per-client basis by default. The
> + * only exception is the pa_core's mempool, which is shared between
> + * all clients of the system.
> + *
> + * This special mark is needed for handling memfd pools.
> + *
> + * To avoid fd leaks, memfd pools are registered with the connection
> + * pstream to create an ID<->memfd mapping on both PA endpoints.
> + * Such memory regions are then always referenced by their IDs and
> + * never by their fds, and so their fds can be safely closed later.
> + *
> + * Unfortunately this scheme cannot work with global pools, since
> + * the ID<->memfd registration needs to happen for each newly
> + * connected client; hence the need for special handling.
> + *
> + * TODO: Transform the core mempool into a per-client one and
> + * remove support for global pools. They conflict with the upcoming
> + * xdg-app model and complicate the handling of memfd-based pools.
> + */
> + bool global;
> +
> size_t block_size;
> unsigned n_blocks;
> bool is_remote_writable;
> @@ -767,7 +790,7 @@ static void memblock_replace_import(pa_memblock *b) {
> pa_mutex_unlock(import->mutex);
> }
>
> -pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
> +static pa_mempool *mempool_new(pa_mem_type_t type, size_t size, bool global) {
> pa_mempool *p;
> char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
>
> @@ -802,6 +825,8 @@ pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
> pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
> (unsigned long) pa_mempool_block_size_max(p));
>
> + p->global = global;
> +
> pa_atomic_store(&p->n_init, 0);
>
> PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
> @@ -819,6 +844,15 @@ mem_create_fail:
> return NULL;
> }
>
> +pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
> + return mempool_new(type, size, false);
> +}
> +
> +/* Check comments on top of pa_mempool.global for details */
> +pa_mempool *pa_global_mempool_new(pa_mem_type_t type, size_t size) {
> + return mempool_new(type, size, true);
> +}
> +
> void pa_mempool_free(pa_mempool *p) {
> pa_assert(p);
>
> @@ -953,6 +987,18 @@ bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
> (p->per_type.shm.type == PA_MEM_TYPE_SHARED_MEMFD);
> }
>
> +/* No lock necessary */
> +bool pa_mempool_is_global(pa_mempool *p) {
> + pa_assert(p);
> +
> + return p->global;
> +}
> +
> +/* No lock necessary */
> +static bool pa_mempool_is_per_client(pa_mempool *p) {
> + return !pa_mempool_is_global(p);
> +}
> +
> /* Self-locked
> *
> * Check the comments over pa_shm->per_type.memfd.fd for context.
> @@ -964,6 +1010,8 @@ int pa_mempool_get_and_reset_shm_memfd_fd(pa_mempool *p) {
> pa_shm *memory;
> int memfd_fd;
>
> + pa_assert(p);
> + pa_assert(pa_mempool_is_per_client(p));
> pa_assert(pa_mempool_is_shared(p));
> pa_assert(pa_mempool_is_memfd_backed(p));
>
> @@ -979,6 +1027,26 @@ int pa_mempool_get_and_reset_shm_memfd_fd(pa_mempool *p) {
> return memfd_fd;
> }
>
> +/* No lock necessary
> + *
> + * Global mempools keep their memfd descriptor always open. DO NOT
> + * close the returned descriptor yourself!
> + *
> + * Check pa_mempool.global for further context. */
> +int pa_global_mempool_get_shm_memfd_fd(pa_mempool *p) {
> + int memfd_fd;
> +
> + pa_assert(p);
> + pa_assert(pa_mempool_is_shared(p));
> + pa_assert(pa_mempool_is_memfd_backed(p));
> + pa_assert(pa_mempool_is_global(p));
> +
> + memfd_fd = p->per_type.shm.per_type.memfd.fd;
> + pa_assert(memfd_fd != -1);
> +
> + return memfd_fd;
> +}
> +
> /* For receiving blocks from other nodes */
> pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
> pa_memimport *i;
> diff --git a/src/pulsecore/memblock.h b/src/pulsecore/memblock.h
> index 059538f..b2f5e31 100644
> --- a/src/pulsecore/memblock.h
> +++ b/src/pulsecore/memblock.h
> @@ -123,17 +123,20 @@ pa_memblock *pa_memblock_will_need(pa_memblock *b);
>
> /* The memory block manager */
> pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size);
> +pa_mempool *pa_global_mempool_new(pa_mem_type_t type, size_t size);
> void pa_mempool_free(pa_mempool *p);
> const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
> void pa_mempool_vacuum(pa_mempool *p);
> int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id);
> bool pa_mempool_is_shared(const pa_mempool *p);
> bool pa_mempool_is_memfd_backed(const pa_mempool *p);
> +bool pa_mempool_is_global(pa_mempool *p);
> bool pa_mempool_is_remote_writable(pa_mempool *p);
> void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable);
> size_t pa_mempool_block_size_max(pa_mempool *p);
>
> int pa_mempool_get_and_reset_shm_memfd_fd(pa_mempool *p);
> +int pa_global_mempool_get_shm_memfd_fd(pa_mempool *p);
>
> /* For receiving blocks from other nodes */
> pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata);
> diff --git a/src/pulsecore/shm.h b/src/pulsecore/shm.h
> index e83a36c..f6b9d89 100644
> --- a/src/pulsecore/shm.h
> +++ b/src/pulsecore/shm.h
> @@ -43,7 +43,11 @@ typedef struct pa_shm {
> *
> * When we don't have ownership for the memfd fd in question (e.g.
> * memfd segment attachment), or the file descriptor has now been
> - * closed, this is set to -1. */
> + * closed, this is set to -1.
> + *
> + * For the special case of a global mempool, we keep this fd
> + * always open. Check comments on top of pa_mempool.global for
> + * rationale. */
> int fd;
> } memfd;
> } per_type;
>
> Regards,
>
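For reference, here is roughly how I read the resulting caller-side
split between global and per-client pools. Only a sketch: the
send_memfd_registration() helper is made up for illustration, and the
actual pstream registration call may well look different.

    #include <pulsecore/core-util.h>  /* pa_close() */
    #include <pulsecore/macro.h>      /* pa_assert_se() */
    #include <pulsecore/memblock.h>
    #include <pulsecore/pstream.h>

    /* Hypothetical stand-in for whatever pstream call ends up
     * creating the ID<->memfd mapping on the other endpoint. */
    void send_memfd_registration(pa_pstream *ps, uint32_t shm_id, int memfd_fd);

    static void register_pool_memfd(pa_pstream *ps, pa_mempool *pool, uint32_t shm_id) {

        if (pa_mempool_is_global(pool)) {
            /* Global pool: the fd stays open inside the pool, has to
             * be re-sent to every newly connected client, and must
             * not be closed by the caller. */
            send_memfd_registration(ps, shm_id,
                                    pa_global_mempool_get_shm_memfd_fd(pool));
        } else {
            /* Per-client pool: fd ownership is transferred to the
             * caller, so the fd can be closed once the mapping has
             * been sent. */
            int fd = pa_mempool_get_and_reset_shm_memfd_fd(pool);
            send_memfd_registration(ps, shm_id, fd);
            pa_assert_se(pa_close(fd) >= 0);
        }
    }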
--
David Henningsson, Canonical Ltd.
https://launchpad.net/~diwic