[pulseaudio-commits] 10 commits - configure.ac src/Makefile.am src/modules src/pulse src/pulsecore src/tests

David Henningsson diwic at kemper.freedesktop.org
Sat Apr 2 04:25:44 UTC 2016


 configure.ac                      |   19 ++
 src/Makefile.am                   |    8 -
 src/modules/module-tunnel.c       |    2 
 src/pulse/context.c               |   61 +++++--
 src/pulsecore/core.c              |   17 --
 src/pulsecore/core.h              |   11 -
 src/pulsecore/creds.h             |    5 
 src/pulsecore/filter/lfe-filter.c |    5 
 src/pulsecore/iochannel.c         |    1 
 src/pulsecore/mem.h               |   51 ++++++
 src/pulsecore/memblock.c          |  277 +++++++++++++++++++++++++++++++++--
 src/pulsecore/memblock.h          |   22 ++
 src/pulsecore/memblockq.c         |    5 
 src/pulsecore/memchunk.c          |    5 
 src/pulsecore/memfd-wrappers.h    |   68 ++++++++
 src/pulsecore/native-common.c     |   78 ++++++++++
 src/pulsecore/native-common.h     |   11 +
 src/pulsecore/pdispatch.c         |   30 ++-
 src/pulsecore/pdispatch.h         |    5 
 src/pulsecore/protocol-native.c   |   46 +++++
 src/pulsecore/pstream-util.c      |  102 ++++++++++++-
 src/pulsecore/pstream-util.h      |    4 
 src/pulsecore/pstream.c           |  200 ++++++++++++++++++++++---
 src/pulsecore/pstream.h           |    8 -
 src/pulsecore/shm.c               |  293 +++++++++++++++++++++++---------------
 src/pulsecore/shm.h               |   23 ++
 src/tests/connect-stress.c        |    9 -
 src/tests/cpu-mix-test.c          |    4 
 src/tests/lfe-filter-test.c       |    4 
 src/tests/mcalign-test.c          |    4 
 src/tests/memblock-test.c         |   21 +-
 src/tests/memblockq-test.c        |    4 
 src/tests/mix-test.c              |    4 
 src/tests/remix-test.c            |    4 
 src/tests/resampler-test.c        |    4 
 src/tests/srbchannel-test.c       |    6 
 36 files changed, 1158 insertions(+), 263 deletions(-)

New commits:
commit d3845a0f8a339b5947fa8b5274f65b75744ad29d
Author: David Henningsson <diwic at ubuntu.com>
Date:   Sat Apr 2 06:24:18 2016 +0200

    memblock/pstream: Fix two compiler warnings
    
    Fix two compiler warnings recently introduced by the memfd patch set.
    
    Signed-off-by: David Henningsson <diwic at ubuntu.com>
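
    A rough caller-side sketch (assuming an existing pstream p and
    mempool pool; both names are illustrative): on builds without memfd
    support the call now reports a reason instead of asserting.

        const char *fail_reason = NULL;

        /* On non-memfd builds this fails gracefully, with fail_reason
         * set to "memfd support not compiled in". */
        if (pa_pstream_register_memfd_mempool(p, pool, &fail_reason) < 0)
            pa_log_debug("memfd mempool registration failed: %s", fail_reason);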

diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index 57b0645..17520ed 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -1135,7 +1135,7 @@ static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type,
 /* Should be called locked */
 static void segment_detach(pa_memimport_segment *seg) {
     pa_assert(seg);
-    pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1 : 0));
+    pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
 
     pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
     pa_shm_free(&seg->memory);
diff --git a/src/pulsecore/pstream-util.c b/src/pulsecore/pstream-util.c
index 449ea1f..505f0cf 100644
--- a/src/pulsecore/pstream-util.c
+++ b/src/pulsecore/pstream-util.c
@@ -191,6 +191,8 @@ finish:
     return ret;
 
 #else
-    pa_assert_not_reached();
+    pa_assert(fail_reason);
+    *fail_reason = "memfd support not compiled in";
+    return -1;
 #endif
 }

commit 27d0a3b388899a1799be20040d93cded9d74c813
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 01:12:18 2016 +0200

    pstream: Support memfd blocks transport
    
    Now that we have the necessary infrastructure to memexport and
    memimport a memfd memblock, extend that support higher up in the
    chain with pstreams.
    
    A PA endpoint can now _transparently_ send a memfd memblock to the
    other end by simply calling pa_pstream_send_memblock() – provided
    the block's memfd pool was earlier registered with the pstream.
    
    If the pipe does not support memfd transfers, we fall back to
    sending the block's full data instead of just its reference.
    
    ** Further details:
    
    A single pstream connection usually transfers blocks from multiple
    pools including the server's srbchannel mempool, the client's
    audio data mempool, and the server's global core mempool.
    
    If these mempools are memfd-backed, we now require registering
    them with the pstream before sending any blocks they cover. This
    is done to minimize fd passing overhead and avoid fd leaks.
    
    Moreover, to support all these pools without hard-coding their
    number or nature in the Pulse communication protocol itself, a new
    REGISTER_MEMFD_SHMID command is introduced. That command can be
    sent _anytime_ during the pstream's lifetime and is used to create
    SHM ID to memfd mappings on demand.
    
    Suggested-by: David Henningsson <david.henningsson at canonical.com>
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
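
    As an illustrative sketch only (assuming an already connected
    pstream p, a memfd-backed mempool pool and a memchunk chunk; the
    helper name below is made up), the sending side roughly becomes:

        static int send_block_by_reference(pa_pstream *p, pa_mempool *pool,
                                           const pa_memchunk *chunk) {
            const char *fail_reason = NULL;

            /* memfd support must have been negotiated on this pipe */
            if (!pa_pstream_get_memfd(p))
                return -1;

            /* One-time registration: sends REGISTER_MEMFD_SHMID with the
             * pool's memfd fd as ancil data, so later sends only carry
             * the pool's SHM ID. */
            if (pa_pstream_register_memfd_mempool(p, pool, &fail_reason) < 0) {
                pa_log("memfd registration failed: %s", fail_reason);
                return -1;
            }

            /* Blocks from this pool now travel by reference; on pipes
             * without memfd support the full data is copied instead. */
            pa_pstream_send_memblock(p, 0 /* channel */, 0 /* offset */,
                                     PA_SEEK_RELATIVE, chunk);
            return 0;
        }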

diff --git a/src/Makefile.am b/src/Makefile.am
index 63e59c5..f8efd9e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -683,7 +683,7 @@ libpulsecommon_ at PA_MAJORMINOR@_la_SOURCES = \
 		pulsecore/memblock.c pulsecore/memblock.h \
 		pulsecore/memblockq.c pulsecore/memblockq.h \
 		pulsecore/memchunk.c pulsecore/memchunk.h \
-		pulsecore/native-common.h \
+		pulsecore/native-common.c pulsecore/native-common.h \
 		pulsecore/once.c pulsecore/once.h \
 		pulsecore/packet.c pulsecore/packet.h \
 		pulsecore/parseaddr.c pulsecore/parseaddr.h \
diff --git a/src/modules/module-tunnel.c b/src/modules/module-tunnel.c
index 53c4402..5c8b84a 100644
--- a/src/modules/module-tunnel.c
+++ b/src/modules/module-tunnel.c
@@ -1778,7 +1778,7 @@ static void pstream_die_callback(pa_pstream *p, void *userdata) {
 }
 
 /* Called from main context */
-static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data, void *userdata) {
+static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data, void *userdata) {
     struct userdata *u = userdata;
 
     pa_assert(p);
diff --git a/src/pulse/context.c b/src/pulse/context.c
index b8f51a6..ef39416 100644
--- a/src/pulse/context.c
+++ b/src/pulse/context.c
@@ -69,6 +69,7 @@
 void pa_command_extension(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
 static void pa_command_enable_srbchannel(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
 static void pa_command_disable_srbchannel(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void pa_command_register_memfd_shmid(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
 
 static const pa_pdispatch_cb_t command_table[PA_COMMAND_MAX] = {
     [PA_COMMAND_REQUEST] = pa_command_request,
@@ -90,6 +91,7 @@ static const pa_pdispatch_cb_t command_table[PA_COMMAND_MAX] = {
     [PA_COMMAND_RECORD_BUFFER_ATTR_CHANGED] = pa_command_stream_buffer_attr,
     [PA_COMMAND_ENABLE_SRBCHANNEL] = pa_command_enable_srbchannel,
     [PA_COMMAND_DISABLE_SRBCHANNEL] = pa_command_disable_srbchannel,
+    [PA_COMMAND_REGISTER_MEMFD_SHMID] = pa_command_register_memfd_shmid,
 };
 static void context_free(pa_context *c);
 
@@ -330,7 +332,7 @@ static void pstream_die_callback(pa_pstream *p, void *userdata) {
     pa_context_fail(c, PA_ERR_CONNECTIONTERMINATED);
 }
 
-static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data, void *userdata) {
+static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data, void *userdata) {
     pa_context *c = userdata;
 
     pa_assert(p);
@@ -1432,8 +1434,7 @@ static void pa_command_enable_srbchannel(pa_pdispatch *pd, uint32_t command, uin
     pa_context *c = userdata;
 
 #ifdef HAVE_CREDS
-    const int *fds;
-    int nfd;
+    pa_cmsg_ancil_data *ancil = NULL;
 
     pa_assert(pd);
     pa_assert(command == PA_COMMAND_ENABLE_SRBCHANNEL);
@@ -1441,26 +1442,34 @@ static void pa_command_enable_srbchannel(pa_pdispatch *pd, uint32_t command, uin
     pa_assert(c);
     pa_assert(PA_REFCNT_VALUE(c) >= 1);
 
+    ancil = pa_pdispatch_take_ancil_data(pd);
+    if (!ancil)
+        goto fail;
+
     /* Currently only one srb channel is supported, might change in future versions */
-    if (c->srb_template.readfd != -1) {
-        pa_context_fail(c, PA_ERR_PROTOCOL);
-        return;
-    }
+    if (c->srb_template.readfd != -1)
+        goto fail;
 
-    fds = pa_pdispatch_fds(pd, &nfd);
-    if (nfd != 2 || !fds || fds[0] == -1 || fds[1] == -1) {
-        pa_context_fail(c, PA_ERR_PROTOCOL);
-        return;
-    }
+    if (ancil->nfd != 2 || ancil->fds[0] == -1 || ancil->fds[1] == -1)
+        goto fail;
 
     pa_context_ref(c);
 
-    c->srb_template.readfd = fds[0];
-    c->srb_template.writefd = fds[1];
+    c->srb_template.readfd = ancil->fds[0];
+    c->srb_template.writefd = ancil->fds[1];
     c->srb_setup_tag = tag;
 
     pa_context_unref(c);
 
+    ancil->close_fds_on_cleanup = false;
+    return;
+
+fail:
+    if (ancil)
+        pa_cmsg_ancil_data_close_fds(ancil);
+
+    pa_context_fail(c, PA_ERR_PROTOCOL);
+    return;
 #else
     pa_assert(c);
     pa_context_fail(c, PA_ERR_PROTOCOL);
@@ -1493,6 +1502,18 @@ static void pa_command_disable_srbchannel(pa_pdispatch *pd, uint32_t command, ui
     pa_pstream_send_tagstruct(c->pstream, t2);
 }
 
+static void pa_command_register_memfd_shmid(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata) {
+    pa_context *c = userdata;
+
+    pa_assert(pd);
+    pa_assert(command == PA_COMMAND_REGISTER_MEMFD_SHMID);
+    pa_assert(t);
+    pa_assert(c);
+    pa_assert(PA_REFCNT_VALUE(c) >= 1);
+
+    if (pa_common_command_register_memfd_shmid(c->pstream, pd, c->version, command, t))
+        pa_context_fail(c, PA_ERR_PROTOCOL);
+}
 
 void pa_command_client_event(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata) {
     pa_context *c = userdata;
diff --git a/src/pulsecore/creds.h b/src/pulsecore/creds.h
index 64d8387..9fdbb4f 100644
--- a/src/pulsecore/creds.h
+++ b/src/pulsecore/creds.h
@@ -49,9 +49,14 @@ struct pa_cmsg_ancil_data {
     pa_creds creds;
     bool creds_valid;
     int nfd;
+
+    /* Do not close these fds on your own. Check pa_cmsg_ancil_data_close_fds() */
     int fds[MAX_ANCIL_DATA_FDS];
+    bool close_fds_on_cleanup;
 };
 
+void pa_cmsg_ancil_data_close_fds(struct pa_cmsg_ancil_data *ancil);
+
 #else
 #undef HAVE_CREDS
 #endif
diff --git a/src/pulsecore/iochannel.c b/src/pulsecore/iochannel.c
index d1bb109..e62750b 100644
--- a/src/pulsecore/iochannel.c
+++ b/src/pulsecore/iochannel.c
@@ -450,6 +450,7 @@ ssize_t pa_iochannel_read_with_ancil_data(pa_iochannel*io, void*data, size_t l,
                 }
                 memcpy(ancil_data->fds, CMSG_DATA(cmh), nfd * sizeof(int));
                 ancil_data->nfd = nfd;
+                ancil_data->close_fds_on_cleanup = true;
             }
         }
 
diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index 8a7f5f3..57b0645 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -1107,7 +1107,8 @@ pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void
 
 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
 
-/* Should be called locked */
+/* Should be called locked
+ * Caller owns passed @memfd_fd and must close it down when appropriate. */
 static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
                                             int memfd_fd, bool writable) {
     pa_memimport_segment* seg;
@@ -1196,7 +1197,9 @@ void pa_memimport_free(pa_memimport *i) {
  * memory region) as its value.
  *
  * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
- * and 'pa_pstream_register_memfd_mempool()' for further details. */
+ * and 'pa_pstream_register_memfd_mempool()' for further details.
+ *
+ * Caller owns passed @memfd_fd and must close it down when appropriate. */
 int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
     pa_memimport_segment *seg;
     int ret = -1;
diff --git a/src/pulsecore/native-common.c b/src/pulsecore/native-common.c
new file mode 100644
index 0000000..282a4ed
--- /dev/null
+++ b/src/pulsecore/native-common.c
@@ -0,0 +1,78 @@
+/***
+  This file is part of PulseAudio.
+
+  Copyright 2016 Ahmed S. Darwish <darwish.07 at gmail.com>
+
+  PulseAudio is free software; you can redistribute it and/or modify
+  it under the terms of the GNU Lesser General Public License as
+  published by the Free Software Foundation; either version 2.1 of the
+  License, or (at your option) any later version.
+
+  PulseAudio is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public
+  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pulsecore/core-util.h>
+#include <pulsecore/creds.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/pdispatch.h>
+#include <pulsecore/pstream.h>
+#include <pulsecore/tagstruct.h>
+
+#include "native-common.h"
+
+/*
+ * Command handlers shared between client and server
+ */
+
+/* Check pa_pstream_register_memfd_mempool() for further details */
+int pa_common_command_register_memfd_shmid(pa_pstream *p, pa_pdispatch *pd, uint32_t version,
+                                           uint32_t command, pa_tagstruct *t) {
+#if defined(HAVE_CREDS) && defined(HAVE_MEMFD)
+    pa_cmsg_ancil_data *ancil = NULL;
+    unsigned shm_id;
+    int ret = -1;
+
+    pa_assert(pd);
+    pa_assert(command == PA_COMMAND_REGISTER_MEMFD_SHMID);
+    pa_assert(t);
+
+    ancil = pa_pdispatch_take_ancil_data(pd);
+    if (!ancil)
+        goto finish;
+
+    /* Upon fd leaks and reaching our open fd limit, recvmsg(2)
+     * just strips all passed fds from the ancillary data */
+    if (ancil->nfd == 0) {
+        pa_log("Expected 1 memfd fd to be received over pipe; got 0");
+        pa_log("Did we reach our open file descriptors limit?");
+        goto finish;
+    }
+
+    if (ancil->nfd != 1 || ancil->fds[0] == -1)
+        goto finish;
+
+    if (version < 31 || pa_tagstruct_getu32(t, &shm_id) < 0 || !pa_tagstruct_eof(t))
+        goto finish;
+
+    pa_pstream_attach_memfd_shmid(p, shm_id, ancil->fds[0]);
+
+    ret = 0;
+finish:
+    if (ancil)
+        pa_cmsg_ancil_data_close_fds(ancil);
+
+    return ret;
+#else
+    return -1;
+#endif
+}
diff --git a/src/pulsecore/native-common.h b/src/pulsecore/native-common.h
index dc62895..70338b9 100644
--- a/src/pulsecore/native-common.h
+++ b/src/pulsecore/native-common.h
@@ -24,6 +24,10 @@
 #include <pulse/cdecl.h>
 #include <pulse/def.h>
 
+#include <pulsecore/pdispatch.h>
+#include <pulsecore/pstream.h>
+#include <pulsecore/tagstruct.h>
+
 PA_C_DECL_BEGIN
 
 enum {
@@ -179,6 +183,10 @@ enum {
     PA_COMMAND_ENABLE_SRBCHANNEL,
     PA_COMMAND_DISABLE_SRBCHANNEL,
 
+    /* Supported since protocol v31 (9.0)
+     * BOTH DIRECTIONS */
+    PA_COMMAND_REGISTER_MEMFD_SHMID,
+
     PA_COMMAND_MAX
 };
 
@@ -193,6 +201,9 @@ enum {
 
 #define PA_NATIVE_DEFAULT_UNIX_SOCKET "native"
 
+int pa_common_command_register_memfd_shmid(pa_pstream *p, pa_pdispatch *pd, uint32_t version,
+                                           uint32_t command, pa_tagstruct *t);
+
 PA_C_DECL_END
 
 #endif
diff --git a/src/pulsecore/pdispatch.c b/src/pulsecore/pdispatch.c
index f136875..ab632a5 100644
--- a/src/pulsecore/pdispatch.c
+++ b/src/pulsecore/pdispatch.c
@@ -195,6 +195,10 @@ static const char *command_names[PA_COMMAND_MAX] = {
     /* BOTH DIRECTIONS */
     [PA_COMMAND_ENABLE_SRBCHANNEL] = "ENABLE_SRBCHANNEL",
     [PA_COMMAND_DISABLE_SRBCHANNEL] = "DISABLE_SRBCHANNEL",
+
+    /* Supported since protocol v31 (9.0) */
+    /* BOTH DIRECTIONS */
+    [PA_COMMAND_REGISTER_MEMFD_SHMID] = "REGISTER_MEMFD_SHMID",
 };
 
 #endif
@@ -219,7 +223,7 @@ struct pa_pdispatch {
     PA_LLIST_HEAD(struct reply_info, replies);
     pa_pdispatch_drain_cb_t drain_callback;
     void *drain_userdata;
-    const pa_cmsg_ancil_data *ancil_data;
+    pa_cmsg_ancil_data *ancil_data;
     bool use_rtclock;
 };
 
@@ -289,7 +293,7 @@ static void run_action(pa_pdispatch *pd, struct reply_info *r, uint32_t command,
     pa_pdispatch_unref(pd);
 }
 
-int pa_pdispatch_run(pa_pdispatch *pd, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data, void *userdata) {
+int pa_pdispatch_run(pa_pdispatch *pd, pa_packet *packet, pa_cmsg_ancil_data *ancil_data, void *userdata) {
     uint32_t tag, command;
     pa_tagstruct *ts = NULL;
     int ret = -1;
@@ -448,18 +452,24 @@ const pa_creds * pa_pdispatch_creds(pa_pdispatch *pd) {
     return NULL;
 }
 
-const int * pa_pdispatch_fds(pa_pdispatch *pd, int *nfd) {
+/* Should be called only once during the dispatcher lifetime
+ *
+ * If the returned ancillary data contains any fds, caller maintains sole
+ * responsibility of closing them down using pa_cmsg_ancil_data_close_fds() */
+pa_cmsg_ancil_data *pa_pdispatch_take_ancil_data(pa_pdispatch *pd) {
+    pa_cmsg_ancil_data *ancil;
+
     pa_assert(pd);
     pa_assert(PA_REFCNT_VALUE(pd) >= 1);
-    pa_assert(nfd);
 
-    if (pd->ancil_data) {
-         *nfd = pd->ancil_data->nfd;
-         return pd->ancil_data->fds;
-    }
+    ancil = pd->ancil_data;
 
-    *nfd = 0;
-    return NULL;
+    /* iochannel guarantees us that nfd will always be capped */
+    if (ancil)
+        pa_assert(ancil->nfd <= MAX_ANCIL_DATA_FDS);
+
+    pd->ancil_data = NULL;
+    return ancil;
 }
 
 #endif
diff --git a/src/pulsecore/pdispatch.h b/src/pulsecore/pdispatch.h
index 9cb3419..af76981 100644
--- a/src/pulsecore/pdispatch.h
+++ b/src/pulsecore/pdispatch.h
@@ -39,7 +39,7 @@ pa_pdispatch* pa_pdispatch_new(pa_mainloop_api *m, bool use_rtclock, const pa_pd
 void pa_pdispatch_unref(pa_pdispatch *pd);
 pa_pdispatch* pa_pdispatch_ref(pa_pdispatch *pd);
 
-int pa_pdispatch_run(pa_pdispatch *pd, pa_packet *p, const pa_cmsg_ancil_data *ancil_data, void *userdata);
+int pa_pdispatch_run(pa_pdispatch *pd, pa_packet *p, pa_cmsg_ancil_data *ancil_data, void *userdata);
 
 void pa_pdispatch_register_reply(pa_pdispatch *pd, uint32_t tag, int timeout, pa_pdispatch_cb_t callback, void *userdata, pa_free_cb_t free_cb);
 
@@ -51,7 +51,6 @@ void pa_pdispatch_set_drain_callback(pa_pdispatch *pd, pa_pdispatch_drain_cb_t c
 void pa_pdispatch_unregister_reply(pa_pdispatch *pd, void *userdata);
 
 const pa_creds * pa_pdispatch_creds(pa_pdispatch *pd);
-
-const int * pa_pdispatch_fds(pa_pdispatch *pd, int *nfd);
+pa_cmsg_ancil_data *pa_pdispatch_take_ancil_data(pa_pdispatch *pd);
 
 #endif
diff --git a/src/pulsecore/protocol-native.c b/src/pulsecore/protocol-native.c
index c9182d0..ffa5c4d 100644
--- a/src/pulsecore/protocol-native.c
+++ b/src/pulsecore/protocol-native.c
@@ -301,6 +301,7 @@ static void command_set_card_profile(pa_pdispatch *pd, uint32_t command, uint32_
 static void command_set_sink_or_source_port(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
 static void command_set_port_latency_offset(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
 static void command_enable_srbchannel(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void command_register_memfd_shmid(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
 
 static const pa_pdispatch_cb_t command_table[PA_COMMAND_MAX] = {
     [PA_COMMAND_ERROR] = NULL,
@@ -406,6 +407,8 @@ static const pa_pdispatch_cb_t command_table[PA_COMMAND_MAX] = {
 
     [PA_COMMAND_ENABLE_SRBCHANNEL] = command_enable_srbchannel,
 
+    [PA_COMMAND_REGISTER_MEMFD_SHMID] = command_register_memfd_shmid,
+
     [PA_COMMAND_EXTENSION] = command_extension
 };
 
@@ -2646,7 +2649,7 @@ static void setup_srbchannel(pa_native_connection *c) {
     pa_tagstruct_putu32(t, (size_t) srb); /* tag */
     fdlist[0] = srbt.readfd;
     fdlist[1] = srbt.writefd;
-    pa_pstream_send_tagstruct_with_fds(c->pstream, t, 2, fdlist);
+    pa_pstream_send_tagstruct_with_fds(c->pstream, t, 2, fdlist, false);
 
     /* Send ringbuffer memblock to client */
     mc.memblock = srbt.memblock;
@@ -2805,6 +2808,16 @@ static void command_auth(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_ta
     setup_srbchannel(c);
 }
 
+static void command_register_memfd_shmid(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata) {
+    pa_native_connection *c = PA_NATIVE_CONNECTION(userdata);
+
+    pa_native_connection_assert_ref(c);
+    pa_assert(t);
+
+    if (pa_common_command_register_memfd_shmid(c->pstream, pd, c->version, command, t))
+        protocol_error(c);
+}
+
 static void command_set_client_name(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata) {
     pa_native_connection *c = PA_NATIVE_CONNECTION(userdata);
     const char *name = NULL;
@@ -4910,7 +4923,7 @@ static void command_set_port_latency_offset(pa_pdispatch *pd, uint32_t command,
 
 /*** pstream callbacks ***/
 
-static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data, void *userdata) {
+static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data, void *userdata) {
     pa_native_connection *c = PA_NATIVE_CONNECTION(userdata);
 
     pa_assert(p);
diff --git a/src/pulsecore/pstream-util.c b/src/pulsecore/pstream-util.c
index e874503..449ea1f 100644
--- a/src/pulsecore/pstream-util.c
+++ b/src/pulsecore/pstream-util.c
@@ -21,13 +21,16 @@
 #include <config.h>
 #endif
 
-#include <pulsecore/native-common.h>
+#include <pulsecore/core-util.h>
 #include <pulsecore/macro.h>
+#include <pulsecore/native-common.h>
+#include <pulsecore/pstream.h>
+#include <pulsecore/refcnt.h>
 #include <pulse/xmalloc.h>
 
 #include "pstream-util.h"
 
-static void pa_pstream_send_tagstruct_with_ancil_data(pa_pstream *p, pa_tagstruct *t, const pa_cmsg_ancil_data *ancil_data) {
+static void pa_pstream_send_tagstruct_with_ancil_data(pa_pstream *p, pa_tagstruct *t, pa_cmsg_ancil_data *ancil_data) {
     size_t length;
     const uint8_t *data;
     pa_packet *packet;
@@ -58,12 +61,21 @@ void pa_pstream_send_tagstruct_with_creds(pa_pstream *p, pa_tagstruct *t, const
         pa_pstream_send_tagstruct_with_ancil_data(p, t, NULL);
 }
 
-void pa_pstream_send_tagstruct_with_fds(pa_pstream *p, pa_tagstruct *t, int nfd, const int *fds) {
+/* @close_fds: If set then the pstreams code, after invoking a sendmsg(),
+ * will close all passed fds.
+ *
+ * Such fds cannot be closed here as this might lead to freeing them
+ * before they're actually passed to the other end. The internally-used
+ * pa_pstream_send_packet() does not do any actual writes and just
+ * defers write events over the pstream. */
+void pa_pstream_send_tagstruct_with_fds(pa_pstream *p, pa_tagstruct *t, int nfd, const int *fds,
+                                        bool close_fds) {
     if (nfd > 0) {
         pa_cmsg_ancil_data a;
 
         a.nfd = nfd;
         a.creds_valid = false;
+        a.close_fds_on_cleanup = close_fds;
         pa_assert(nfd <= MAX_ANCIL_DATA_FDS);
         memcpy(a.fds, fds, sizeof(int) * nfd);
         pa_pstream_send_tagstruct_with_ancil_data(p, t, &a);
@@ -78,7 +90,8 @@ void pa_pstream_send_tagstruct_with_creds(pa_pstream *p, pa_tagstruct *t, const
     pa_pstream_send_tagstruct_with_ancil_data(p, t, NULL);
 }
 
-void pa_pstream_send_tagstruct_with_fds(pa_pstream *p, pa_tagstruct *t, int nfd, const int *fds) {
+void pa_pstream_send_tagstruct_with_fds(pa_pstream *p, pa_tagstruct *t, int nfd, const int *fds,
+                                        bool close_fds) {
     pa_assert_not_reached();
 }
 
@@ -102,3 +115,82 @@ void pa_pstream_send_simple_ack(pa_pstream *p, uint32_t tag) {
     pa_tagstruct_putu32(t, tag);
     pa_pstream_send_tagstruct(p, t);
 }
+
+/* Before sending blocks from a memfd-backed pool over the pipe, we
+ * must call this method first.
+ *
+ * This is needed to transfer memfd blocks without passing their fd
+ * every time, thus minimizing overhead and avoiding fd leaks.
+ *
+ * On registration a packet is sent with the memfd fd as ancil data;
+ * such packet has an ID that uniquely identifies the pool's memfd
+ * region. Upon arrival the other end creates a permanent mapping
+ * between that ID and the passed memfd memory area.
+ *
+ * By doing so, we won't need to reference the pool's memfd fd any
+ * further - just its ID. Both endpoints can then close their fds. */
+int pa_pstream_register_memfd_mempool(pa_pstream *p, pa_mempool *pool, const char **fail_reason) {
+#if defined(HAVE_CREDS) && defined(HAVE_MEMFD)
+    unsigned shm_id;
+    int memfd_fd, ret = -1;
+    pa_tagstruct *t;
+    bool per_client_mempool;
+
+    pa_assert(p);
+    pa_assert(fail_reason);
+
+    *fail_reason = NULL;
+    per_client_mempool = pa_mempool_is_per_client(pool);
+
+    pa_pstream_ref(p);
+
+    if (!pa_mempool_is_shared(pool)) {
+        *fail_reason = "mempool is not shared";
+        goto finish;
+    }
+
+    if (!pa_mempool_is_memfd_backed(pool)) {
+        *fail_reason = "mempool is not memfd-backed";
+        goto finish;
+    }
+
+    if (pa_mempool_get_shm_id(pool, &shm_id)) {
+        *fail_reason = "could not extract pool SHM ID";
+        goto finish;
+    }
+
+    if (!pa_pstream_get_memfd(p)) {
+        *fail_reason = "pipe does not support memfd transport";
+        goto finish;
+    }
+
+    memfd_fd = (per_client_mempool) ? pa_mempool_take_memfd_fd(pool) :
+                                      pa_mempool_get_memfd_fd(pool);
+
+    /* Note! For per-client mempools we've taken ownership of the memfd
+     * fd, and we're thus the sole code path responsible for closing it.
+     * In case of any failure, it MUST be closed. */
+
+    if (pa_pstream_attach_memfd_shmid(p, shm_id, memfd_fd)) {
+        *fail_reason = "could not attach memfd SHM ID to pipe";
+
+        if (per_client_mempool)
+            pa_assert_se(pa_close(memfd_fd) == 0);
+        goto finish;
+    }
+
+    t = pa_tagstruct_new();
+    pa_tagstruct_putu32(t, PA_COMMAND_REGISTER_MEMFD_SHMID);
+    pa_tagstruct_putu32(t, (uint32_t) -1); /* tag */
+    pa_tagstruct_putu32(t, shm_id);
+    pa_pstream_send_tagstruct_with_fds(p, t, 1, &memfd_fd, per_client_mempool);
+
+    ret = 0;
+finish:
+    pa_pstream_unref(p);
+    return ret;
+
+#else
+    pa_assert_not_reached();
+#endif
+}
diff --git a/src/pulsecore/pstream-util.h b/src/pulsecore/pstream-util.h
index 1366314..1191d48 100644
--- a/src/pulsecore/pstream-util.h
+++ b/src/pulsecore/pstream-util.h
@@ -27,11 +27,13 @@
 
 /* The tagstruct is freed!*/
 void pa_pstream_send_tagstruct_with_creds(pa_pstream *p, pa_tagstruct *t, const pa_creds *creds);
-void pa_pstream_send_tagstruct_with_fds(pa_pstream *p, pa_tagstruct *t, int nfd, const int *fds);
+void pa_pstream_send_tagstruct_with_fds(pa_pstream *p, pa_tagstruct *t, int nfd, const int *fds, bool close_fds);
 
 #define pa_pstream_send_tagstruct(p, t) pa_pstream_send_tagstruct_with_creds((p), (t), NULL)
 
 void pa_pstream_send_error(pa_pstream *p, uint32_t tag, uint32_t error);
 void pa_pstream_send_simple_ack(pa_pstream *p, uint32_t tag);
 
+int pa_pstream_register_memfd_mempool(pa_pstream *p, pa_mempool *pool, const char **fail_reason);
+
 #endif
diff --git a/src/pulsecore/pstream.c b/src/pulsecore/pstream.c
index 0fb37a0..1ea3c5b 100644
--- a/src/pulsecore/pstream.c
+++ b/src/pulsecore/pstream.c
@@ -32,6 +32,7 @@
 
 #include <pulse/xmalloc.h>
 
+#include <pulsecore/idxset.h>
 #include <pulsecore/socket.h>
 #include <pulsecore/queue.h>
 #include <pulsecore/log.h>
@@ -44,6 +45,7 @@
 
 /* We piggyback information if audio data blocks are stored in SHM on the seek mode */
 #define PA_FLAG_SHMDATA     0x80000000LU
+#define PA_FLAG_SHMDATA_MEMFD_BLOCK         0x20000000LU
 #define PA_FLAG_SHMRELEASE  0x40000000LU
 #define PA_FLAG_SHMREVOKE   0xC0000000LU
 #define PA_FLAG_SHMMASK     0xFF000000LU
@@ -143,7 +145,17 @@ struct pa_pstream {
 
     struct pstream_read readio, readsrb;
 
-    bool use_shm;
+    /* @use_shm: besides copying the full audio data to the other
+     * PA end, this pipe supports sending just references to the
+     * same audio data blocks if they reside in a SHM pool.
+     *
+     * @use_memfd: pipe supports sending SHM memfd block references
+     *
+     * @registered_memfd_ids: registered memfd pools SHM IDs. Check
+     * pa_pstream_register_memfd_mempool() for more information. */
+    bool use_shm, use_memfd;
+    pa_idxset *registered_memfd_ids;
+
     pa_memimport *import;
     pa_memexport *export;
 
@@ -168,11 +180,33 @@ struct pa_pstream {
     pa_mempool *mempool;
 
 #ifdef HAVE_CREDS
-    pa_cmsg_ancil_data read_ancil_data, write_ancil_data;
+    pa_cmsg_ancil_data read_ancil_data, *write_ancil_data;
     bool send_ancil_data_now;
 #endif
 };
 
+#ifdef HAVE_CREDS
+/* Don't close the ancillary fds by your own! Always call this method;
+ * it guarantees necessary cleanups after fds close.. This method is
+ * also multiple-invocations safe. */
+void pa_cmsg_ancil_data_close_fds(struct pa_cmsg_ancil_data *ancil) {
+    if (ancil && ancil->close_fds_on_cleanup) {
+        int i;
+
+        pa_assert(ancil->nfd <= MAX_ANCIL_DATA_FDS);
+
+        for (i = 0; i < ancil->nfd; i++)
+            if (ancil->fds[i] != -1) {
+                pa_assert_se(pa_close(ancil->fds[i]) == 0);
+                ancil->fds[i] = -1;
+            }
+
+        ancil->nfd = 0;
+        ancil->close_fds_on_cleanup = false;
+    }
+}
+#endif
+
 static int do_write(pa_pstream *p);
 static int do_read(pa_pstream *p, struct pstream_read *re);
 
@@ -287,6 +321,35 @@ pa_pstream *pa_pstream_new(pa_mainloop_api *m, pa_iochannel *io, pa_mempool *poo
     return p;
 }
 
+/* Attach memfd<->SHM_ID mapping to given pstream and its memimport.
+ * Check pa_pstream_register_memfd_mempool() for further info.
+ *
+ * Caller owns the passed @memfd_fd and must close it down when appropriate. */
+int pa_pstream_attach_memfd_shmid(pa_pstream *p, unsigned shm_id, int memfd_fd) {
+    int err = -1;
+
+    pa_assert(memfd_fd != -1);
+
+    if (!p->use_memfd) {
+        pa_log_warn("Received memfd ID registration request over a pipe "
+                    "that does not support memfds");
+        return err;
+    }
+
+    if (pa_idxset_get_by_data(p->registered_memfd_ids, PA_UINT32_TO_PTR(shm_id), NULL)) {
+        pa_log_warn("previously registered memfd SHM ID = %u", shm_id);
+        return err;
+    }
+
+    if (pa_memimport_attach_memfd(p->import, shm_id, memfd_fd, true)) {
+        pa_log("Failed to create permanent mapping for memfd region with ID = %u", shm_id);
+        return err;
+    }
+
+    pa_assert_se(pa_idxset_put(p->registered_memfd_ids, PA_UINT32_TO_PTR(shm_id), NULL) == 0);
+    return 0;
+}
+
 static void item_free(void *item) {
     struct item_info *i = item;
     pa_assert(i);
@@ -299,6 +362,15 @@ static void item_free(void *item) {
         pa_packet_unref(i->packet);
     }
 
+#ifdef HAVE_CREDS
+    /* On error recovery paths, there might be lingering items
+     * on the pstream send queue and they are usually freed with
+     * a call to 'pa_queue_free(p->send_queue, item_free)'. Make
+     * sure we do not leak any fds in that case! */
+    if (i->with_ancil_data)
+        pa_cmsg_ancil_data_close_fds(&i->ancil_data);
+#endif
+
     if (pa_flist_push(PA_STATIC_FLIST_GET(items), i) < 0)
         pa_xfree(i);
 }
@@ -328,18 +400,25 @@ static void pstream_free(pa_pstream *p) {
     if (p->readio.packet)
         pa_packet_unref(p->readio.packet);
 
+    if (p->registered_memfd_ids)
+        pa_idxset_free(p->registered_memfd_ids, NULL);
+
     pa_xfree(p);
 }
 
-void pa_pstream_send_packet(pa_pstream*p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data) {
+void pa_pstream_send_packet(pa_pstream*p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data) {
     struct item_info *i;
 
     pa_assert(p);
     pa_assert(PA_REFCNT_VALUE(p) > 0);
     pa_assert(packet);
 
-    if (p->dead)
+    if (p->dead) {
+#ifdef HAVE_CREDS
+        pa_cmsg_ancil_data_close_fds(ancil_data);
+#endif
         return;
+    }
 
     if (!(i = pa_flist_pop(PA_STATIC_FLIST_GET(items))))
         i = pa_xnew(struct item_info, 1);
@@ -556,22 +635,40 @@ static void prepare_next_write_item(pa_pstream *p) {
                                  &shm_id,
                                  &offset,
                                  &length) >= 0) {
-                pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
 
-                flags |= PA_FLAG_SHMDATA;
-                if (pa_mempool_is_remote_writable(current_pool))
-                    flags |= PA_FLAG_SHMWRITABLE;
-                send_payload = false;
+                if (type == PA_MEM_TYPE_SHARED_POSIX)
+                    send_payload = false;
+
+                if (type == PA_MEM_TYPE_SHARED_MEMFD && p->use_memfd) {
+                    if (pa_idxset_get_by_data(p->registered_memfd_ids, PA_UINT32_TO_PTR(shm_id), NULL)) {
+                        flags |= PA_FLAG_SHMDATA_MEMFD_BLOCK;
+                        send_payload = false;
+                    } else {
+                        if (pa_log_ratelimit(PA_LOG_ERROR)) {
+                            pa_log("Cannot send block reference with non-registered memfd ID = %u", shm_id);
+                            pa_log("Falling back to copying full block data over socket");
+                        }
+                    }
+                }
+
+                if (send_payload) {
+                    pa_assert_se(pa_memexport_process_release(current_export, block_id) == 0);
+                } else {
+                    flags |= PA_FLAG_SHMDATA;
+                    if (pa_mempool_is_remote_writable(current_pool))
+                        flags |= PA_FLAG_SHMWRITABLE;
 
-                shm_info[PA_PSTREAM_SHM_BLOCKID] = htonl(block_id);
-                shm_info[PA_PSTREAM_SHM_SHMID] = htonl(shm_id);
-                shm_info[PA_PSTREAM_SHM_INDEX] = htonl((uint32_t) (offset + p->write.current->chunk.index));
-                shm_info[PA_PSTREAM_SHM_LENGTH] = htonl((uint32_t) p->write.current->chunk.length);
+                    shm_info[PA_PSTREAM_SHM_BLOCKID] = htonl(block_id);
+                    shm_info[PA_PSTREAM_SHM_SHMID] = htonl(shm_id);
+                    shm_info[PA_PSTREAM_SHM_INDEX] = htonl((uint32_t) (offset + p->write.current->chunk.index));
+                    shm_info[PA_PSTREAM_SHM_LENGTH] = htonl((uint32_t) p->write.current->chunk.length);
 
-                p->write.descriptor[PA_PSTREAM_DESCRIPTOR_LENGTH] = htonl(shm_size);
-                p->write.minibuf_validsize = PA_PSTREAM_DESCRIPTOR_SIZE + shm_size;
+                    p->write.descriptor[PA_PSTREAM_DESCRIPTOR_LENGTH] = htonl(shm_size);
+                    p->write.minibuf_validsize = PA_PSTREAM_DESCRIPTOR_SIZE + shm_size;
+                }
             }
 /*             else */
+/*                 FIXME: Avoid memexport slot leaks. Call pa_memexport_process_release() */
 /*                 pa_log_warn("Failed to export memory block."); */
 
             if (current_export != p->export)
@@ -590,7 +687,7 @@ static void prepare_next_write_item(pa_pstream *p) {
 
 #ifdef HAVE_CREDS
     if ((p->send_ancil_data_now = p->write.current->with_ancil_data))
-        p->write_ancil_data = p->write.current->ancil_data;
+        p->write_ancil_data = &p->write.current->ancil_data;
 #endif
 }
 
@@ -650,14 +747,16 @@ static int do_write(pa_pstream *p) {
 
 #ifdef HAVE_CREDS
     if (p->send_ancil_data_now) {
-        if (p->write_ancil_data.creds_valid) {
-            pa_assert(p->write_ancil_data.nfd == 0);
-            if ((r = pa_iochannel_write_with_creds(p->io, d, l, &p->write_ancil_data.creds)) < 0)
+        if (p->write_ancil_data->creds_valid) {
+            pa_assert(p->write_ancil_data->nfd == 0);
+            if ((r = pa_iochannel_write_with_creds(p->io, d, l, &p->write_ancil_data->creds)) < 0)
                 goto fail;
         }
         else
-            if ((r = pa_iochannel_write_with_fds(p->io, d, l, p->write_ancil_data.nfd, p->write_ancil_data.fds)) < 0)
+            if ((r = pa_iochannel_write_with_fds(p->io, d, l, p->write_ancil_data->nfd, p->write_ancil_data->fds)) < 0)
                 goto fail;
+
+        pa_cmsg_ancil_data_close_fds(p->write_ancil_data);
         p->send_ancil_data_now = false;
     } else
 #endif
@@ -688,6 +787,10 @@ static int do_write(pa_pstream *p) {
     return (size_t) r == l ? 1 : 0;
 
 fail:
+#ifdef HAVE_CREDS
+    if (p->send_ancil_data_now)
+        pa_cmsg_ancil_data_close_fds(p->write_ancil_data);
+#endif
 
     if (release_memblock)
         pa_memblock_release(release_memblock);
@@ -768,6 +871,7 @@ static int do_read(pa_pstream *p, struct pstream_read *re) {
             pa_assert(b.nfd <= MAX_ANCIL_DATA_FDS);
             p->read_ancil_data.nfd = b.nfd;
             memcpy(p->read_ancil_data.fds, b.fds, sizeof(int) * b.nfd);
+            p->read_ancil_data.close_fds_on_cleanup = b.close_fds_on_cleanup;
         }
     }
 #else
@@ -844,7 +948,7 @@ static int do_read(pa_pstream *p, struct pstream_read *re) {
                 return -1;
             }
 
-            if ((flags & PA_FLAG_SHMMASK) == PA_FLAG_SHMDATA) {
+            if (((flags & PA_FLAG_SHMMASK) & PA_FLAG_SHMDATA) != 0) {
 
                 if (length != sizeof(re->shm_info)) {
                     pa_log_warn("Received SHM memblock frame with invalid frame length.");
@@ -887,19 +991,28 @@ static int do_read(pa_pstream *p, struct pstream_read *re) {
 
             pa_packet_unref(re->packet);
         } else {
-            pa_memblock *b;
+            pa_memblock *b = NULL;
             uint32_t flags = ntohl(re->descriptor[PA_PSTREAM_DESCRIPTOR_FLAGS]);
-            pa_assert((flags & PA_FLAG_SHMMASK) == PA_FLAG_SHMDATA);
+            uint32_t shm_id = ntohl(re->shm_info[PA_PSTREAM_SHM_SHMID]);
+            pa_mem_type_t type = (flags & PA_FLAG_SHMDATA_MEMFD_BLOCK) ?
+                                 PA_MEM_TYPE_SHARED_MEMFD : PA_MEM_TYPE_SHARED_POSIX;
 
+            pa_assert(((flags & PA_FLAG_SHMMASK) & PA_FLAG_SHMDATA) != 0);
             pa_assert(p->import);
 
-            if (!(b = pa_memimport_get(p->import,
-                                       PA_MEM_TYPE_SHARED_POSIX,
-                                       ntohl(re->shm_info[PA_PSTREAM_SHM_BLOCKID]),
-                                       ntohl(re->shm_info[PA_PSTREAM_SHM_SHMID]),
-                                       ntohl(re->shm_info[PA_PSTREAM_SHM_INDEX]),
-                                       ntohl(re->shm_info[PA_PSTREAM_SHM_LENGTH]),
-                                       !!(flags & PA_FLAG_SHMWRITABLE)))) {
+            if (type == PA_MEM_TYPE_SHARED_MEMFD && p->use_memfd &&
+                !pa_idxset_get_by_data(p->registered_memfd_ids, PA_UINT32_TO_PTR(shm_id), NULL)) {
+
+                if (pa_log_ratelimit(PA_LOG_ERROR))
+                    pa_log("Ignoring received block reference with non-registered memfd ID = %u", shm_id);
+
+            } else if (!(b = pa_memimport_get(p->import,
+                                              type,
+                                              ntohl(re->shm_info[PA_PSTREAM_SHM_BLOCKID]),
+                                              shm_id,
+                                              ntohl(re->shm_info[PA_PSTREAM_SHM_INDEX]),
+                                              ntohl(re->shm_info[PA_PSTREAM_SHM_LENGTH]),
+                                              !!(flags & PA_FLAG_SHMWRITABLE)))) {
 
                 if (pa_log_ratelimit(PA_LOG_DEBUG))
                     pa_log_debug("Failed to import memory block.");
@@ -942,6 +1055,13 @@ frame_done:
     re->data = NULL;
 
 #ifdef HAVE_CREDS
+    /* FIXME: Close received ancillary data fds if the pstream's
+     * receive_packet_callback did not do so.
+     *
+     * Malicious clients can attach fds to unknown commands, or attach them
+     * to commands that do not expect fds. By doing so, the server will reach
+     * its open fd limit and future clients' SHM transfers will always fail.
+     */
     p->read_ancil_data.creds_valid = false;
     p->read_ancil_data.nfd = 0;
 #endif
@@ -1090,6 +1210,18 @@ void pa_pstream_enable_shm(pa_pstream *p, bool enable) {
     }
 }
 
+void pa_pstream_enable_memfd(pa_pstream *p) {
+    pa_assert(p);
+    pa_assert(PA_REFCNT_VALUE(p) > 0);
+    pa_assert(p->use_shm);
+
+    p->use_memfd = true;
+
+    if (!p->registered_memfd_ids) {
+        p->registered_memfd_ids = pa_idxset_new(NULL, NULL);
+    }
+}
+
 bool pa_pstream_get_shm(pa_pstream *p) {
     pa_assert(p);
     pa_assert(PA_REFCNT_VALUE(p) > 0);
@@ -1097,6 +1229,13 @@ bool pa_pstream_get_shm(pa_pstream *p) {
     return p->use_shm;
 }
 
+bool pa_pstream_get_memfd(pa_pstream *p) {
+    pa_assert(p);
+    pa_assert(PA_REFCNT_VALUE(p) > 0);
+
+    return p->use_memfd;
+}
+
 void pa_pstream_set_srbchannel(pa_pstream *p, pa_srbchannel *srb) {
     pa_assert(p);
     pa_assert(PA_REFCNT_VALUE(p) > 0 || srb == NULL);
diff --git a/src/pulsecore/pstream.h b/src/pulsecore/pstream.h
index f4e1462..2bff270 100644
--- a/src/pulsecore/pstream.h
+++ b/src/pulsecore/pstream.h
@@ -36,7 +36,7 @@
 
 typedef struct pa_pstream pa_pstream;
 
-typedef void (*pa_pstream_packet_cb_t)(pa_pstream *p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data, void *userdata);
+typedef void (*pa_pstream_packet_cb_t)(pa_pstream *p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data, void *userdata);
 typedef void (*pa_pstream_memblock_cb_t)(pa_pstream *p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk, void *userdata);
 typedef void (*pa_pstream_notify_cb_t)(pa_pstream *p, void *userdata);
 typedef void (*pa_pstream_block_id_cb_t)(pa_pstream *p, uint32_t block_id, void *userdata);
@@ -48,7 +48,9 @@ void pa_pstream_unref(pa_pstream*p);
 
 void pa_pstream_unlink(pa_pstream *p);
 
-void pa_pstream_send_packet(pa_pstream*p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data);
+int pa_pstream_attach_memfd_shmid(pa_pstream *p, unsigned shm_id, int memfd_fd);
+
+void pa_pstream_send_packet(pa_pstream*p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data);
 void pa_pstream_send_memblock(pa_pstream*p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk);
 void pa_pstream_send_release(pa_pstream *p, uint32_t block_id);
 void pa_pstream_send_revoke(pa_pstream *p, uint32_t block_id);
@@ -63,7 +65,9 @@ void pa_pstream_set_revoke_callback(pa_pstream *p, pa_pstream_block_id_cb_t cb,
 bool pa_pstream_is_pending(pa_pstream *p);
 
 void pa_pstream_enable_shm(pa_pstream *p, bool enable);
+void pa_pstream_enable_memfd(pa_pstream *p);
 bool pa_pstream_get_shm(pa_pstream *p);
+bool pa_pstream_get_memfd(pa_pstream *p);
 
 /* Enables shared ringbuffer channel. Note that the srbchannel is now owned by the pstream.
    Setting srb to NULL will free any existing srbchannel. */
diff --git a/src/tests/connect-stress.c b/src/tests/connect-stress.c
index 055ef13..a243df9 100644
--- a/src/tests/connect-stress.c
+++ b/src/tests/connect-stress.c
@@ -63,7 +63,8 @@ static const pa_sample_spec sample_spec = {
 
 static void context_state_callback(pa_context *c, void *userdata);
 
-static void connect(const char *name, int *try) {
+/* Note: don't conflict with connect(2) declaration */
+static void _connect(const char *name, int *try) {
     int ret;
     pa_mainloop_api *api;
 
@@ -87,7 +88,7 @@ static void connect(const char *name, int *try) {
     fail_unless(ret == 0);
 }
 
-static void disconnect(void) {
+static void _disconnect(void) {
     int i;
 
     fail_unless(mainloop != NULL);
@@ -201,9 +202,9 @@ START_TEST (connect_stress_test) {
         streams[i] = NULL;
 
     for (i = 0; i < NTESTS; i++) {
-        connect(bname, &i);
+        _connect(bname, &i);
         usleep(rand() % 500000);
-        disconnect();
+        _disconnect();
         usleep(rand() % 500000);
     }
 
diff --git a/src/tests/srbchannel-test.c b/src/tests/srbchannel-test.c
index 9fc5d45..0e7b0ce 100644
--- a/src/tests/srbchannel-test.c
+++ b/src/tests/srbchannel-test.c
@@ -34,7 +34,7 @@ static unsigned packets_received;
 static unsigned packets_checksum;
 static size_t packets_length;
 
-static void packet_received(pa_pstream *p, pa_packet *packet, const pa_cmsg_ancil_data *ancil_data, void *userdata) {
+static void packet_received(pa_pstream *p, pa_packet *packet, pa_cmsg_ancil_data *ancil_data, void *userdata) {
     const uint8_t *pdata;
     size_t plen;
     unsigned i;

commit ee2db622778ae8038c9e178675978ad23f1300a2
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 01:09:39 2016 +0200

    pulsecore: Specially mark global mempools
    
    Color global mempools with a special mark. This special marking
    is needed for handling memfd-backed pools.
    
    To avoid fd leaks, memfd pools are registered with the connection
    pstream to create an ID<->memfd mapping on both PA endpoints.
    Such memory regions are then always referenced by their IDs and
    never by their fds, and so their fds can be safely closed later.
    
    Unfortunately this scheme cannot work with global pools since the
    registration ID<->memfd mechanism needs to happen for each newly
    connected client, and thus the need for more special handling.
    That is, the pool's fd must always remain open :-(
    
    Almost all mempools are now created on a per-client basis. The
    only exception is the pa_core's mempool which is still shared
    between all clients of the system.
    
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
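
    A minimal allocation sketch under the new signature (per_client is
    the added third argument; the values here mirror the test changes
    below):

        /* Per-client pool: not marked global, so ownership of its memfd
         * fd can later be handed to the pstream and the fd closed. */
        pa_mempool *pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX,
                                          0 /* size, as in the tests */,
                                          true /* per_client */);

        if (pool && pa_mempool_is_per_client(pool))
            pa_log_debug("created a per-client mempool");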

diff --git a/src/pulse/context.c b/src/pulse/context.c
index e4716fb..b8f51a6 100644
--- a/src/pulse/context.c
+++ b/src/pulse/context.c
@@ -172,11 +172,11 @@ pa_context *pa_context_new_with_proplist(pa_mainloop_api *mainloop, const char *
     c->srb_template.writefd = -1;
 
     type = !c->conf->disable_shm ? PA_MEM_TYPE_SHARED_POSIX : PA_MEM_TYPE_PRIVATE;
-    if (!(c->mempool = pa_mempool_new(type, c->conf->shm_size))) {
+    if (!(c->mempool = pa_mempool_new(type, c->conf->shm_size, true))) {
 
         if (!c->conf->disable_shm) {
             pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal private one.");
-            c->mempool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, c->conf->shm_size);
+            c->mempool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, c->conf->shm_size, true);
         }
 
         if (!c->mempool) {
diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
index 199a26b..56cbe9d 100644
--- a/src/pulsecore/core.c
+++ b/src/pulsecore/core.c
@@ -69,14 +69,14 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, size_t shm_size) {
     pa_assert(m);
 
     if (shared) {
-        if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size))) {
+        if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size, false))) {
             pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal memory pool.");
             shared = false;
         }
     }
 
     if (!shared) {
-        if (!(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size))) {
+        if (!(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size, false))) {
             pa_log("pa_mempool_new() failed.");
             return NULL;
         }
diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index cdbc75a..8a7f5f3 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -185,6 +185,9 @@ struct pa_mempool {
     pa_mutex *mutex;
 
     pa_shm memory;
+
+    bool global;
+
     size_t block_size;
     unsigned n_blocks;
     bool is_remote_writable;
@@ -795,7 +798,34 @@ static void memblock_replace_import(pa_memblock *b) {
     pa_mutex_unlock(import->mutex);
 }
 
-pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
+/*@per_client: This is a security measure. By default this should
+ * be set to true where the created mempool is never shared with more
+ * than one client in the system. Set this to false if a global
+ * mempool, shared with all existing and future clients, is required.
+ *
+ * NOTE-1: Do not create any further global mempools! They allow data
+ * leaks between clients and thus conflict with the xdg-app containers
+ * model. They also complicate the handling of memfd-based pools.
+ *
+ * NOTE-2: Almost all mempools are now created on a per client basis.
+ * The only exception is the pa_core's mempool which is still shared
+ * between all clients of the system.
+ *
+ * Beside security issues, special marking for global mempools is
+ * required for memfd communication. To avoid fd leaks, memfd pools
+ * are registered with the connection pstream to create an ID<->memfd
+ * mapping on both PA endpoints. Such memory regions are then always
+ * referenced by their IDs and never by their fds and thus their fds
+ * can be quickly closed later.
+ *
+ * Unfortunately this scheme cannot work with global pools since the
+ * ID registration mechanism needs to happen for each newly connected
+ * client, and thus the need for more special handling. That is,
+ * the pool's fd must always remain open :-(
+ *
+ * TODO-1: Transform the global core mempool to a per-client one
+ * TODO-2: Remove global mempools support */
+pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
     pa_mempool *p;
     char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
 
@@ -827,6 +857,8 @@ pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
                  pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                  (unsigned long) pa_mempool_block_size_max(p));
 
+    p->global = !per_client;
+
     pa_atomic_store(&p->n_init, 0);
 
     PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
@@ -986,6 +1018,70 @@ void pa_mempool_unref(pa_mempool *p) {
         mempool_free(p);
 }
 
+/* No lock necessary
+ * Check pa_mempool_new() for per-client vs. global mempools */
+bool pa_mempool_is_global(pa_mempool *p) {
+    pa_assert(p);
+
+    return p->global;
+}
+
+/* No lock necessary
+ * Check pa_mempool_new() for per-client vs. global mempools */
+bool pa_mempool_is_per_client(pa_mempool *p) {
+    return !pa_mempool_is_global(p);
+}
+
+/* Self-locked
+ *
+ * This is only for per-client mempools!
+ *
+ * After this method's return, the caller owns the file descriptor
+ * and is responsible for closing it at the appropriate time. This
+ * should only be called once during a mempool's lifetime.
+ *
+ * Check pa_shm->fd and pa_mempool_new() for further context. */
+int pa_mempool_take_memfd_fd(pa_mempool *p) {
+    int memfd_fd;
+
+    pa_assert(p);
+    pa_assert(pa_mempool_is_shared(p));
+    pa_assert(pa_mempool_is_memfd_backed(p));
+    pa_assert(pa_mempool_is_per_client(p));
+
+    pa_mutex_lock(p->mutex);
+
+    memfd_fd = p->memory.fd;
+    p->memory.fd = -1;
+
+    pa_mutex_unlock(p->mutex);
+
+    pa_assert(memfd_fd != -1);
+    return memfd_fd;
+}
+
+/* No lock necessary
+ *
+ * This is only for global mempools!
+ *
+ * Global mempools have their memfd descriptor always open. DO NOT
+ * close the returned descriptor on your own.
+ *
+ * Check pa_mempool_new() for further context. */
+int pa_mempool_get_memfd_fd(pa_mempool *p) {
+    int memfd_fd;
+
+    pa_assert(p);
+    pa_assert(pa_mempool_is_shared(p));
+    pa_assert(pa_mempool_is_memfd_backed(p));
+    pa_assert(pa_mempool_is_global(p));
+
+    memfd_fd = p->memory.fd;
+    pa_assert(memfd_fd != -1);
+
+    return memfd_fd;
+}
+
 /* For receiving blocks from other nodes */
 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
     pa_memimport *i;
diff --git a/src/pulsecore/memblock.h b/src/pulsecore/memblock.h
index de93e3d..57ae4b2 100644
--- a/src/pulsecore/memblock.h
+++ b/src/pulsecore/memblock.h
@@ -124,7 +124,7 @@ pa_mempool * pa_memblock_get_pool(pa_memblock *b);
 pa_memblock *pa_memblock_will_need(pa_memblock *b);
 
 /* The memory block manager */
-pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size);
+pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client);
 void pa_mempool_unref(pa_mempool *p);
 pa_mempool* pa_mempool_ref(pa_mempool *p);
 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
@@ -132,10 +132,15 @@ void pa_mempool_vacuum(pa_mempool *p);
 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id);
 bool pa_mempool_is_shared(pa_mempool *p);
 bool pa_mempool_is_memfd_backed(const pa_mempool *p);
+bool pa_mempool_is_global(pa_mempool *p);
+bool pa_mempool_is_per_client(pa_mempool *p);
 bool pa_mempool_is_remote_writable(pa_mempool *p);
 void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable);
 size_t pa_mempool_block_size_max(pa_mempool *p);
 
+int pa_mempool_take_memfd_fd(pa_mempool *p);
+int pa_mempool_get_memfd_fd(pa_mempool *p);
+
 /* For receiving blocks from other nodes */
 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata);
 void pa_memimport_free(pa_memimport *i);
diff --git a/src/pulsecore/protocol-native.c b/src/pulsecore/protocol-native.c
index afb4850..c9182d0 100644
--- a/src/pulsecore/protocol-native.c
+++ b/src/pulsecore/protocol-native.c
@@ -2622,7 +2622,7 @@ static void setup_srbchannel(pa_native_connection *c) {
         return;
     }
 
-    if (!(c->rw_mempool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, c->protocol->core->shm_size))) {
+    if (!(c->rw_mempool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, c->protocol->core->shm_size, true))) {
         pa_log_warn("Disabling srbchannel, reason: Failed to allocate shared "
                     "writable memory pool.");
         return;
diff --git a/src/pulsecore/shm.h b/src/pulsecore/shm.h
index e8bfa56..67a2114 100644
--- a/src/pulsecore/shm.h
+++ b/src/pulsecore/shm.h
@@ -41,7 +41,11 @@ typedef struct pa_shm {
      *
      * When we don't have ownership for the memfd fd in question (e.g.
      * pa_shm_attach()), or the file descriptor has now been closed,
-     * this is set to -1. */
+     * this is set to -1.
+     *
+     * For the special case of a global mempool, we keep this fd
+     * always open. Check comments on top of pa_mempool_new() for
+     * rationale. */
     int fd;
 } pa_shm;
 
diff --git a/src/tests/cpu-mix-test.c b/src/tests/cpu-mix-test.c
index e487a20..a3189b7 100644
--- a/src/tests/cpu-mix-test.c
+++ b/src/tests/cpu-mix-test.c
@@ -76,7 +76,7 @@ static void run_mix_test(
     samples_ref = out_ref + (8 - align);
     nsamples = channels * (SAMPLES - (8 - align));
 
-    fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0)) != NULL, NULL);
+    fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true)) != NULL, NULL);
 
     pa_random(samples0, nsamples * sizeof(int16_t));
     c0.memblock = pa_memblock_new_fixed(pool, samples0, nsamples * sizeof(int16_t), false);
diff --git a/src/tests/lfe-filter-test.c b/src/tests/lfe-filter-test.c
index 8d61cc6..ba63895 100644
--- a/src/tests/lfe-filter-test.c
+++ b/src/tests/lfe-filter-test.c
@@ -136,7 +136,7 @@ START_TEST (lfe_filter_test) {
     a.format = PA_SAMPLE_S16NE;
 
     lft.ss = &a;
-    pa_assert_se(lft.pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
+    pa_assert_se(lft.pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true));
 
     /* We prepare pseudo-random input audio samples for lfe-filter rewind testing*/
     ori_sample_ptr = pa_xmalloc(pa_frame_size(lft.ss) * TOTAL_SAMPLES);
diff --git a/src/tests/mcalign-test.c b/src/tests/mcalign-test.c
index f121972..3127ccf 100644
--- a/src/tests/mcalign-test.c
+++ b/src/tests/mcalign-test.c
@@ -36,7 +36,7 @@ int main(int argc, char *argv[]) {
     pa_mcalign *a;
     pa_memchunk c;
 
-    p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0);
+    p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true);
 
     a = pa_mcalign_new(11);
 
diff --git a/src/tests/memblock-test.c b/src/tests/memblock-test.c
index 04eb377..d48349f 100644
--- a/src/tests/memblock-test.c
+++ b/src/tests/memblock-test.c
@@ -81,11 +81,11 @@ START_TEST (memblock_test) {
 
     const char txt[] = "This is a test!";
 
-    pool_a = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
+    pool_a = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
     fail_unless(pool_a != NULL);
-    pool_b = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
+    pool_b = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
     fail_unless(pool_b != NULL);
-    pool_c = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
+    pool_c = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
     fail_unless(pool_c != NULL);
 
     pa_mempool_get_shm_id(pool_a, &id_a);
diff --git a/src/tests/memblockq-test.c b/src/tests/memblockq-test.c
index 37cdd77..2404ee2 100644
--- a/src/tests/memblockq-test.c
+++ b/src/tests/memblockq-test.c
@@ -108,7 +108,7 @@ START_TEST (memblockq_test) {
 
     pa_log_set_level(PA_LOG_DEBUG);
 
-    p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0);
+    p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true);
 
     silence.memblock = pa_memblock_new_fixed(p, (char*) "__", 2, 1);
     fail_unless(silence.memblock != NULL);
diff --git a/src/tests/mix-test.c b/src/tests/mix-test.c
index ce6686a..972f5d7 100644
--- a/src/tests/mix-test.c
+++ b/src/tests/mix-test.c
@@ -286,7 +286,7 @@ START_TEST (mix_test) {
     if (!getenv("MAKE_CHECK"))
         pa_log_set_level(PA_LOG_DEBUG);
 
-    fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0)) != NULL, NULL);
+    fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true)) != NULL, NULL);
 
     a.channels = 1;
     a.rate = 44100;
diff --git a/src/tests/remix-test.c b/src/tests/remix-test.c
index 6ee5f6d..e21c109 100644
--- a/src/tests/remix-test.c
+++ b/src/tests/remix-test.c
@@ -51,7 +51,7 @@ int main(int argc, char *argv[]) {
 
     pa_log_set_level(PA_LOG_DEBUG);
 
-    pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
+    pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true));
 
     for (i = 0; maps[i].channels > 0; i++)
         for (j = 0; maps[j].channels > 0; j++) {
diff --git a/src/tests/resampler-test.c b/src/tests/resampler-test.c
index 28f03d6..40000d5 100644
--- a/src/tests/resampler-test.c
+++ b/src/tests/resampler-test.c
@@ -404,7 +404,7 @@ int main(int argc, char *argv[]) {
     }
 
     ret = 0;
-    pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
+    pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0, true));
 
     if (!all_formats) {
 
diff --git a/src/tests/srbchannel-test.c b/src/tests/srbchannel-test.c
index 253abcf..9fc5d45 100644
--- a/src/tests/srbchannel-test.c
+++ b/src/tests/srbchannel-test.c
@@ -85,7 +85,7 @@ START_TEST (srbchannel_test) {
     int pipefd[4];
 
     pa_mainloop *ml = pa_mainloop_new();
-    pa_mempool *mp = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
+    pa_mempool *mp = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0, true);
     pa_iochannel *io1, *io2;
     pa_pstream *p1, *p2;
     pa_srbchannel *sr1, *sr2;

commit f8714af56bfcfeccd15a091c47492d3dc3008f61
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 01:07:27 2016 +0200

    memimport: Support memfd blocks
    
    To transfer memfd-backed blocks without passing their fd every time,
    and thus minimize overhead and avoid fd leaks, a command is sent
    with the memfd fd as ancillary data very early on.
    
    This command has an ID that uniquely identifies the memfd region.
    Further memfd block references are then exclusively done using this
    ID.
    
    This commit implements the details of such 'permanent' mappings on
    the receiving end, using memimport segments.
    
    Suggested-by: David Henningsson <david.henningsson at canonical.com>
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
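
    As a rough sketch of how a receiving endpoint is expected to use the
    two calls added by this commit (not part of the patch: the helper name
    and its parameters are placeholders, error handling is trimmed, and in
    real code the two steps happen at different times):

        #include <pulsecore/memblock.h>

        static pa_memblock* sketch_recv(pa_memimport *imp, int memfd_fd, uint32_t shm_id,
                                        uint32_t block_id, size_t offset, size_t size) {
            /* Registration: done once per memfd region, when the command
             * carrying the fd as ancillary data arrives. */
            if (pa_memimport_attach_memfd(imp, shm_id, memfd_fd, false) < 0)
                return NULL;

            /* Every later block reference carries IDs only (no fd passing);
             * the cached 'permanent' segment supplies the mapping. */
            return pa_memimport_get(imp, PA_MEM_TYPE_SHARED_MEMFD,
                                    block_id, shm_id, offset, size, false);
        }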

diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index 49665ee..cdbc75a 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -100,6 +100,28 @@ struct pa_memimport_segment {
     bool writable;
 };
 
+/*
+ * If true, this segment's lifetime will not be limited by the
+ * number of active blocks (seg->n_blocks) using its shared memory.
+ * Rather, it will exist for the full lifetime of the memimport it
+ * is attached to.
+ *
+ * This is done to support memfd blocks transport.
+ *
+ * To transfer memfd-backed blocks without passing their fd every
+ * time, thus minimizing overhead and avoiding fd leaks, a command
+ * is sent with the memfd fd as ancil data very early on.
+ *
+ * This command has an ID that identifies the memfd region. Further
+ * block references are then exclusively done using this ID. On the
+ * receiving end, such logic is enabled by the memimport's segment
+ * hash and 'permanent' segments below.
+ */
+static bool segment_is_permanent(pa_memimport_segment *seg) {
+    pa_assert(seg);
+    return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
+}
+
 /* A collection of multiple segments */
 struct pa_memimport {
     pa_mutex *mutex;
@@ -930,6 +952,13 @@ bool pa_mempool_is_shared(pa_mempool *p) {
 }
 
 /* No lock necessary */
+bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
+    pa_assert(p);
+
+    return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
+}
+
+/* No lock necessary */
 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
     pa_assert(p);
 
@@ -983,15 +1012,17 @@ pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void
 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
 
 /* Should be called locked */
-static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id, bool writable) {
+static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
+                                            int memfd_fd, bool writable) {
     pa_memimport_segment* seg;
+    pa_assert(pa_mem_type_is_shared(type));
 
     if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
         return NULL;
 
     seg = pa_xnew0(pa_memimport_segment, 1);
 
-    if (pa_shm_attach(&seg->memory, shm_id, writable) < 0) {
+    if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
         pa_xfree(seg);
         return NULL;
     }
@@ -1007,6 +1038,7 @@ static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id, bo
 /* Should be called locked */
 static void segment_detach(pa_memimport_segment *seg) {
     pa_assert(seg);
+    pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1 : 0));
 
     pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
     pa_shm_free(&seg->memory);
@@ -1021,6 +1053,8 @@ static void segment_detach(pa_memimport_segment *seg) {
 void pa_memimport_free(pa_memimport *i) {
     pa_memexport *e;
     pa_memblock *b;
+    pa_memimport_segment *seg;
+    void *state = NULL;
 
     pa_assert(i);
 
@@ -1029,6 +1063,15 @@ void pa_memimport_free(pa_memimport *i) {
     while ((b = pa_hashmap_first(i->blocks)))
         memblock_replace_import(b);
 
+    /* Permanent segments exist for the lifetime of the memimport. Now
+     * that we're freeing the memimport itself, clear them all up.
+     *
+     * Careful! segment_detach() internally removes itself from the
+     * memimport's hash; the same hash we're now using for iteration. */
+    PA_HASHMAP_FOREACH(seg, i->segments, state) {
+        if (segment_is_permanent(seg))
+            segment_detach(seg);
+    }
     pa_assert(pa_hashmap_size(i->segments) == 0);
 
     pa_mutex_unlock(i->mutex);
@@ -1052,13 +1095,45 @@ void pa_memimport_free(pa_memimport *i) {
     pa_xfree(i);
 }
 
+/* Create a new memimport's memfd segment entry, with passed SHM ID
+ * as key and the newly-created segment (with its mmap()-ed memfd
+ * memory region) as its value.
+ *
+ * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
+ * and 'pa_pstream_register_memfd_mempool()' for further details. */
+int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
+    pa_memimport_segment *seg;
+    int ret = -1;
+
+    pa_assert(i);
+    pa_assert(memfd_fd != -1);
+
+    pa_mutex_lock(i->mutex);
+
+    if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
+        goto finish;
+
+    /* n_blocks acts as a segment reference count. To avoid the segment
+     * being deleted when receiving silent memchunks, etc., mark our
+     * permanent presence by incrementing that refcount. */
+    seg->n_blocks++;
+
+    pa_assert(segment_is_permanent(seg));
+    ret = 0;
+
+finish:
+    pa_mutex_unlock(i->mutex);
+    return ret;
+}
+
 /* Self-locked */
-pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id,
+pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
                               size_t offset, size_t size, bool writable) {
     pa_memblock *b = NULL;
     pa_memimport_segment *seg;
 
     pa_assert(i);
+    pa_assert(pa_mem_type_is_shared(type));
 
     pa_mutex_lock(i->mutex);
 
@@ -1070,12 +1145,20 @@ pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_i
     if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
         goto finish;
 
-    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
-        if (!(seg = segment_attach(i, shm_id, writable)))
+    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
+        if (type == PA_MEM_TYPE_SHARED_MEMFD) {
+            pa_log("Bailing out! No cached memimport segment for memfd ID %u", shm_id);
+            pa_log("Did the other PA endpoint forget registering its memfd pool?");
             goto finish;
+        }
+
+        pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
+        if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
+            goto finish;
+    }
 
-    if (writable != seg->writable) {
-        pa_log("Cannot open segment - writable status changed!");
+    if (writable && !seg->writable) {
+        pa_log("Cannot import cached segment in write mode - previously mapped as read-only");
         goto finish;
     }
 
@@ -1268,13 +1351,15 @@ static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
 }
 
 /* Self-locked */
-int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
-    pa_shm *memory;
+int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
+                     uint32_t *shm_id, size_t *offset, size_t * size) {
+    pa_shm  *memory;
     struct memexport_slot *slot;
     void *data;
 
     pa_assert(e);
     pa_assert(b);
+    pa_assert(type);
     pa_assert(block_id);
     pa_assert(shm_id);
     pa_assert(offset);
@@ -1312,12 +1397,14 @@ int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32
     } else {
         pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
         pa_assert(b->pool);
+        pa_assert(pa_mempool_is_shared(b->pool));
         memory = &b->pool->memory;
     }
 
     pa_assert(data >= memory->ptr);
     pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
 
+    *type = memory->type;
     *shm_id = memory->id;
     *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
     *size = b->length;
diff --git a/src/pulsecore/memblock.h b/src/pulsecore/memblock.h
index 718235f..de93e3d 100644
--- a/src/pulsecore/memblock.h
+++ b/src/pulsecore/memblock.h
@@ -131,6 +131,7 @@ const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
 void pa_mempool_vacuum(pa_mempool *p);
 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id);
 bool pa_mempool_is_shared(pa_mempool *p);
+bool pa_mempool_is_memfd_backed(const pa_mempool *p);
 bool pa_mempool_is_remote_writable(pa_mempool *p);
 void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable);
 size_t pa_mempool_block_size_max(pa_mempool *p);
@@ -138,14 +139,16 @@ size_t pa_mempool_block_size_max(pa_mempool *p);
 /* For receiving blocks from other nodes */
 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata);
 void pa_memimport_free(pa_memimport *i);
-pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id,
-                              size_t offset, size_t size, bool writable);
+int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable);
+pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id,
+                              uint32_t shm_id, size_t offset, size_t size, bool writable);
 int pa_memimport_process_revoke(pa_memimport *i, uint32_t block_id);
 
 /* For sending blocks to other nodes */
 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata);
 void pa_memexport_free(pa_memexport *e);
-int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size);
+int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
+                     uint32_t *shm_id, size_t *offset, size_t * size);
 int pa_memexport_process_release(pa_memexport *e, uint32_t id);
 
 #endif
diff --git a/src/pulsecore/pstream.c b/src/pulsecore/pstream.c
index fb43a1b..0fb37a0 100644
--- a/src/pulsecore/pstream.c
+++ b/src/pulsecore/pstream.c
@@ -536,6 +536,7 @@ static void prepare_next_write_item(pa_pstream *p) {
         flags = (uint32_t) (p->write.current->seek_mode & PA_FLAG_SEEKMASK);
 
         if (p->use_shm) {
+            pa_mem_type_t type;
             uint32_t block_id, shm_id;
             size_t offset, length;
             uint32_t *shm_info = (uint32_t *) &p->write.minibuf[PA_PSTREAM_DESCRIPTOR_SIZE];
@@ -550,10 +551,12 @@ static void prepare_next_write_item(pa_pstream *p) {
 
             if (pa_memexport_put(current_export,
                                  p->write.current->chunk.memblock,
+                                 &type,
                                  &block_id,
                                  &shm_id,
                                  &offset,
                                  &length) >= 0) {
+                pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
 
                 flags |= PA_FLAG_SHMDATA;
                 if (pa_mempool_is_remote_writable(current_pool))
@@ -891,6 +894,7 @@ static int do_read(pa_pstream *p, struct pstream_read *re) {
             pa_assert(p->import);
 
             if (!(b = pa_memimport_get(p->import,
+                                       PA_MEM_TYPE_SHARED_POSIX,
                                        ntohl(re->shm_info[PA_PSTREAM_SHM_BLOCKID]),
                                        ntohl(re->shm_info[PA_PSTREAM_SHM_SHMID]),
                                        ntohl(re->shm_info[PA_PSTREAM_SHM_INDEX]),
diff --git a/src/pulsecore/shm.c b/src/pulsecore/shm.c
index c2cf8ff..bcf7182 100644
--- a/src/pulsecore/shm.c
+++ b/src/pulsecore/shm.c
@@ -417,15 +417,10 @@ fail:
 }
 
 /* Caller owns passed @memfd_fd and must close it down when appropriate. */
-static int NEW_API_pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable) {
+int pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable) {
     return shm_attach(m, type, id, memfd_fd, writable, false);
 }
 
-/* Compatibility version until the new API is used in external sources */
-int pa_shm_attach(pa_shm *m, unsigned id, bool writable) {
-    return NEW_API_pa_shm_attach(m, PA_MEM_TYPE_SHARED_POSIX, id, -1, writable);
-}
-
 int pa_shm_cleanup(void) {
 
 #ifdef HAVE_SHM_OPEN
diff --git a/src/pulsecore/shm.h b/src/pulsecore/shm.h
index 10df9c1..e8bfa56 100644
--- a/src/pulsecore/shm.h
+++ b/src/pulsecore/shm.h
@@ -46,7 +46,7 @@ typedef struct pa_shm {
 } pa_shm;
 
 int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode);
-int pa_shm_attach(pa_shm *m, unsigned id, bool writable);
+int pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable);
 
 void pa_shm_punch(pa_shm *m, size_t offset, size_t size);
 
diff --git a/src/tests/memblock-test.c b/src/tests/memblock-test.c
index 089648f..04eb377 100644
--- a/src/tests/memblock-test.c
+++ b/src/tests/memblock-test.c
@@ -74,6 +74,7 @@ START_TEST (memblock_test) {
     pa_memblock *mb_a, *mb_b, *mb_c;
     int r, i;
     pa_memblock* blocks[5];
+    pa_mem_type_t mem_type;
     uint32_t id, shm_id;
     size_t offset, size;
     char *x;
@@ -122,22 +123,22 @@ START_TEST (memblock_test) {
         import_c = pa_memimport_new(pool_c, release_cb, (void*) "C");
         fail_unless(import_b != NULL);
 
-        r = pa_memexport_put(export_a, mb_a, &id, &shm_id, &offset, &size);
+        r = pa_memexport_put(export_a, mb_a, &mem_type, &id, &shm_id, &offset, &size);
         fail_unless(r >= 0);
         fail_unless(shm_id == id_a);
 
         pa_log("A: Memory block exported as %u", id);
 
-        mb_b = pa_memimport_get(import_b, id, shm_id, offset, size, false);
+        mb_b = pa_memimport_get(import_b, PA_MEM_TYPE_SHARED_POSIX, id, shm_id, offset, size, false);
         fail_unless(mb_b != NULL);
-        r = pa_memexport_put(export_b, mb_b, &id, &shm_id, &offset, &size);
+        r = pa_memexport_put(export_b, mb_b, &mem_type, &id, &shm_id, &offset, &size);
         fail_unless(r >= 0);
         fail_unless(shm_id == id_a || shm_id == id_b);
         pa_memblock_unref(mb_b);
 
         pa_log("B: Memory block exported as %u", id);
 
-        mb_c = pa_memimport_get(import_c, id, shm_id, offset, size, false);
+        mb_c = pa_memimport_get(import_c, PA_MEM_TYPE_SHARED_POSIX, id, shm_id, offset, size, false);
         fail_unless(mb_c != NULL);
         x = pa_memblock_acquire(mb_c);
         pa_log_debug("1 data=%s", x);

commit 73e86b1cb164b1c37b27238b529879a4a2d9f24c
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 01:04:18 2016 +0200

    pulsecore: Introduce memfd support
    
    Memfd is a simple memory-sharing mechanism, added by the systemd/kdbus
    developers, for sharing pages between processes anonymously: no global
    registry is needed, no mount point is required, and it is relatively
    secure.
    
    This patch introduces the necessary building blocks for using memfd
    shared memory transfers in PulseAudio.
    
    Memfd support shall also help us lay the necessary (but not yet
    sufficient) groundwork for application sandboxing, protecting PA
    from its clients, and protecting clients' data from each other.
    
    Going forward, we plan to use memfds exclusively instead of POSIX SHM.
    
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
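
    A rough usage sketch for the new wrappers (not part of the patch; the
    helper name is hypothetical and fd cleanup on error is omitted).
    Sealing is only what MFD_ALLOW_SEALING makes possible later; this
    series itself just creates the region:

        #include <sys/mman.h>
        #include <unistd.h>
        #include <pulsecore/memfd-wrappers.h>

        static void *memfd_region_new(size_t size, int *fd_out) {
            void *p;
            int fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING);

            if (fd < 0 || ftruncate(fd, (off_t) size) < 0)
                return NULL;                 /* fd cleanup skipped in this sketch */

            /* Growing/shrinking could later be forbidden with e.g.
             *   fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW); */
            p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                return NULL;

            *fd_out = fd;                    /* kept open to pass over the socket */
            return p;
        }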

diff --git a/configure.ac b/configure.ac
index 8454e4c..ee64988 100644
--- a/configure.ac
+++ b/configure.ac
@@ -612,6 +612,23 @@ AC_DEFINE(HAVE_DLADDR, [1], [Have dladdr?])
 
 AM_ICONV
 
+#### Linux memfd_create(2) SHM support ####
+
+AC_ARG_ENABLE([memfd],
+    AS_HELP_STRING([--disable-memfd], [Disable Linux memfd shared memory]))
+
+AS_IF([test "x$enable_memfd" != "xno"],
+    AC_CHECK_DECL(SYS_memfd_create, [HAVE_MEMFD=1], [HAVE_MEMFD=0], [#include <sys/syscall.h>]),
+    [HAVE_MEMFD=0])
+
+AS_IF([test "x$enable_memfd" = "xyes" && test "x$HAVE_MEMFD" = "x0"],
+    [AC_MSG_ERROR([*** Your Linux kernel does not support memfd shared memory.
+                  *** Use linux v3.17 or higher for such a feature.])])
+
+AC_SUBST(HAVE_MEMFD)
+AM_CONDITIONAL([HAVE_MEMFD], [test "x$HAVE_MEMFD" = x1])
+AS_IF([test "x$HAVE_MEMFD" = "x1"], AC_DEFINE([HAVE_MEMFD], 1, [Have memfd shared memory.]))
+
 #### X11 (optional) ####
 
 AC_ARG_ENABLE([x11],
@@ -1549,6 +1566,7 @@ AC_OUTPUT
 
 # ==========================================================================
 
+AS_IF([test "x$HAVE_MEMFD" = "x1"], ENABLE_MEMFD=yes, ENABLE_MEMFD=no)
 AS_IF([test "x$HAVE_X11" = "x1"], ENABLE_X11=yes, ENABLE_X11=no)
 AS_IF([test "x$HAVE_OSS_OUTPUT" = "x1"], ENABLE_OSS_OUTPUT=yes, ENABLE_OSS_OUTPUT=no)
 AS_IF([test "x$HAVE_OSS_WRAPPER" = "x1"], ENABLE_OSS_WRAPPER=yes, ENABLE_OSS_WRAPPER=no)
@@ -1610,6 +1628,7 @@ echo "
     CPPFLAGS:                      ${CPPFLAGS}
     LIBS:                          ${LIBS}
 
+    Enable memfd shared memory:    ${ENABLE_MEMFD}
     Enable X11:                    ${ENABLE_X11}
     Enable OSS Output:             ${ENABLE_OSS_OUTPUT}
     Enable OSS Wrapper:            ${ENABLE_OSS_WRAPPER}
diff --git a/src/Makefile.am b/src/Makefile.am
index 62cac9c..63e59c5 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -729,6 +729,11 @@ libpulsecommon_ at PA_MAJORMINOR@_la_CFLAGS = $(AM_CFLAGS) $(LIBJSON_CFLAGS) $(LIBS
 libpulsecommon_ at PA_MAJORMINOR@_la_LDFLAGS = $(AM_LDFLAGS) $(AM_LIBLDFLAGS) -avoid-version
 libpulsecommon_ at PA_MAJORMINOR@_la_LIBADD = $(AM_LIBADD) $(LIBJSON_LIBS)  $(LIBWRAP_LIBS) $(WINSOCK_LIBS) $(LTLIBICONV) $(LIBSNDFILE_LIBS)
 
+if HAVE_MEMFD
+libpulsecommon_ at PA_MAJORMINOR@_la_SOURCES += \
+		pulsecore/memfd-wrappers.h
+endif
+
 if HAVE_X11
 libpulsecommon_ at PA_MAJORMINOR@_la_SOURCES += \
 		pulse/client-conf-x11.c pulse/client-conf-x11.h \
diff --git a/src/pulsecore/memfd-wrappers.h b/src/pulsecore/memfd-wrappers.h
new file mode 100644
index 0000000..3bed9b2
--- /dev/null
+++ b/src/pulsecore/memfd-wrappers.h
@@ -0,0 +1,68 @@
+#ifndef foopulsememfdwrappershfoo
+#define foopulsememfdwrappershfoo
+
+/***
+  This file is part of PulseAudio.
+
+  Copyright 2016 Ahmed S. Darwish <darwish.07 at gmail.com>
+
+  PulseAudio is free software; you can redistribute it and/or modify
+  it under the terms of the GNU Lesser General Public License as
+  published by the Free Software Foundation; either version 2.1 of the
+  License, or (at your option) any later version.
+
+  PulseAudio is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public
+  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
+***/
+
+#ifdef HAVE_MEMFD
+
+#include <sys/syscall.h>
+#include <fcntl.h>
+
+/*
+ * No glibc wrappers exist for memfd_create(2), so provide our own.
+ *
+ * Also define memfd fcntl sealing macros. While they are already
+ * defined in the kernel header file <linux/fcntl.h>, that file as
+ * a whole conflicts with the original glibc header <fcntl.h>.
+ */
+
+static inline int memfd_create(const char *name, unsigned int flags) {
+    return syscall(SYS_memfd_create, name, flags);
+}
+
+/* memfd_create(2) flags */
+
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC       0x0001U
+#endif
+
+#ifndef MFD_ALLOW_SEALING
+#define MFD_ALLOW_SEALING 0x0002U
+#endif
+
+/* fcntl() seals-related flags */
+
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+
+#define F_SEAL_SEAL     0x0001  /* prevent further seals from being set */
+#define F_SEAL_SHRINK   0x0002  /* prevent file from shrinking */
+#define F_SEAL_GROW     0x0004  /* prevent file from growing */
+#define F_SEAL_WRITE    0x0008  /* prevent writes */
+#endif
+
+#endif /* HAVE_MEMFD */
+
+#endif
diff --git a/src/pulsecore/shm.c b/src/pulsecore/shm.c
index c3306b7..c2cf8ff 100644
--- a/src/pulsecore/shm.c
+++ b/src/pulsecore/shm.c
@@ -45,6 +45,7 @@
 #include <pulse/xmalloc.h>
 #include <pulse/gccmacro.h>
 
+#include <pulsecore/memfd-wrappers.h>
 #include <pulsecore/core-error.h>
 #include <pulsecore/log.h>
 #include <pulsecore/random.h>
@@ -92,7 +93,12 @@ struct shm_marker {
     uint64_t _reserved4;
 } PA_GCC_PACKED;
 
-#define SHM_MARKER_SIZE PA_ALIGN(sizeof(struct shm_marker))
+static inline size_t shm_marker_size(pa_shm *m) {
+    if (m->type == PA_MEM_TYPE_SHARED_POSIX)
+        return PA_ALIGN(sizeof(struct shm_marker));
+
+    return 0;
+}
 
 #ifdef HAVE_SHM_OPEN
 static char *segment_name(char *fn, size_t l, unsigned id) {
@@ -105,9 +111,11 @@ static int privatemem_create(pa_shm *m, size_t size) {
     pa_assert(m);
     pa_assert(size > 0);
 
+    m->type = PA_MEM_TYPE_PRIVATE;
     m->id = 0;
     m->size = size;
     m->do_unlink = false;
+    m->fd = -1;
 
 #ifdef MAP_ANONYMOUS
     if ((m->ptr = mmap(NULL, m->size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, (off_t) 0)) == MAP_FAILED) {
@@ -130,41 +138,44 @@ static int privatemem_create(pa_shm *m, size_t size) {
     return 0;
 }
 
-int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
-#ifdef HAVE_SHM_OPEN
+static int sharedmem_create(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
+#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
     char fn[32];
     int fd = -1;
     struct shm_marker *marker;
-#endif
-
-    pa_assert(m);
-    pa_assert(size > 0);
-    pa_assert(size <= MAX_SHM_SIZE);
-    pa_assert(!(mode & ~0777));
-    pa_assert(mode >= 0600);
+    bool do_unlink = false;
 
     /* Each time we create a new SHM area, let's first drop all stale
      * ones */
     pa_shm_cleanup();
 
-    /* Round up to make it page aligned */
-    size = PA_PAGE_ALIGN(size);
-
-    m->type = type;
-
-    if (type == PA_MEM_TYPE_PRIVATE)
-        return privatemem_create(m, size);
+    pa_random(&m->id, sizeof(m->id));
 
+    switch (type) {
 #ifdef HAVE_SHM_OPEN
-    pa_random(&m->id, sizeof(m->id));
-    segment_name(fn, sizeof(fn), m->id);
+    case PA_MEM_TYPE_SHARED_POSIX:
+        segment_name(fn, sizeof(fn), m->id);
+        fd = shm_open(fn, O_RDWR|O_CREAT|O_EXCL, mode);
+        do_unlink = true;
+        break;
+#endif
+#ifdef HAVE_MEMFD
+    case PA_MEM_TYPE_SHARED_MEMFD:
+        fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING);
+        break;
+#endif
+    default:
+        goto fail;
+    }
 
-    if ((fd = shm_open(fn, O_RDWR|O_CREAT|O_EXCL, mode)) < 0) {
-        pa_log("shm_open() failed: %s", pa_cstrerror(errno));
+    if (fd < 0) {
+        pa_log("%s open() failed: %s", pa_mem_type_to_string(type), pa_cstrerror(errno));
         goto fail;
     }
 
-    m->size = size + SHM_MARKER_SIZE;
+    m->type = type;
+    m->size = size + shm_marker_size(m);
+    m->do_unlink = do_unlink;
 
     if (ftruncate(fd, (off_t) m->size) < 0) {
         pa_log("ftruncate() failed: %s", pa_cstrerror(errno));
@@ -180,32 +191,54 @@ int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
         goto fail;
     }
 
-    /* We store our PID at the end of the shm block, so that we
-     * can check for dead shm segments later */
-    marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - SHM_MARKER_SIZE);
-    pa_atomic_store(&marker->pid, (int) getpid());
-    pa_atomic_store(&marker->marker, SHM_MARKER);
+    if (type == PA_MEM_TYPE_SHARED_POSIX) {
+        /* We store our PID at the end of the shm block, so that we
+         * can check for dead shm segments later */
+        marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - shm_marker_size(m));
+        pa_atomic_store(&marker->pid, (int) getpid());
+        pa_atomic_store(&marker->marker, SHM_MARKER);
+    }
 
-    pa_assert_se(pa_close(fd) == 0);
-    m->do_unlink = true;
-#else
-    goto fail;
-#endif
+    /* For memfds, we keep the fd open until we pass it
+     * to the other PA endpoint over unix domain socket. */
+    if (type == PA_MEM_TYPE_SHARED_MEMFD)
+        m->fd = fd;
+    else {
+        pa_assert_se(pa_close(fd) == 0);
+        m->fd = -1;
+    }
 
     return 0;
 
 fail:
-
-#ifdef HAVE_SHM_OPEN
     if (fd >= 0) {
-        shm_unlink(fn);
+#ifdef HAVE_SHM_OPEN
+        if (type == PA_MEM_TYPE_SHARED_POSIX)
+            shm_unlink(fn);
+#endif
         pa_close(fd);
     }
-#endif
+#endif /* defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD) */
 
     return -1;
 }
 
+int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
+    pa_assert(m);
+    pa_assert(size > 0);
+    pa_assert(size <= MAX_SHM_SIZE);
+    pa_assert(!(mode & ~0777));
+    pa_assert(mode >= 0600);
+
+    /* Round up to make it page aligned */
+    size = PA_PAGE_ALIGN(size);
+
+    if (type == PA_MEM_TYPE_PRIVATE)
+        return privatemem_create(m, size);
+
+    return sharedmem_create(m, type, size, mode);
+}
+
 static void privatemem_free(pa_shm *m) {
     pa_assert(m);
     pa_assert(m->ptr);
@@ -235,19 +268,26 @@ void pa_shm_free(pa_shm *m) {
         goto finish;
     }
 
-#ifdef HAVE_SHM_OPEN
+#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
     if (munmap(m->ptr, PA_PAGE_ALIGN(m->size)) < 0)
         pa_log("munmap() failed: %s", pa_cstrerror(errno));
 
-    if (m->do_unlink) {
+#ifdef HAVE_SHM_OPEN
+    if (m->type == PA_MEM_TYPE_SHARED_POSIX && m->do_unlink) {
         char fn[32];
 
         segment_name(fn, sizeof(fn), m->id);
         if (shm_unlink(fn) < 0)
             pa_log(" shm_unlink(%s) failed: %s", fn, pa_cstrerror(errno));
     }
+#endif
+#ifdef HAVE_MEMFD
+    if (m->type == PA_MEM_TYPE_SHARED_MEMFD && m->fd != -1)
+        pa_assert_se(pa_close(m->fd) == 0);
+#endif
+
 #else
-    /* We shouldn't be here without shm support */
+    /* We shouldn't be here without shm or memfd support */
     pa_assert_not_reached();
 #endif
 
@@ -301,9 +341,8 @@ void pa_shm_punch(pa_shm *m, size_t offset, size_t size) {
 #endif
 }
 
-#ifdef HAVE_SHM_OPEN
-
-static int shm_attach(pa_shm *m, unsigned id, bool writable, bool for_cleanup) {
+static int shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable, bool for_cleanup) {
+#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
     char fn[32];
     int fd = -1;
     int prot;
@@ -311,11 +350,25 @@ static int shm_attach(pa_shm *m, unsigned id, bool writable, bool for_cleanup) {
 
     pa_assert(m);
 
-    segment_name(fn, sizeof(fn), m->id = id);
-
-    if ((fd = shm_open(fn, writable ? O_RDWR : O_RDONLY, 0)) < 0) {
-        if ((errno != EACCES && errno != ENOENT) || !for_cleanup)
-            pa_log("shm_open() failed: %s", pa_cstrerror(errno));
+    switch (type) {
+#ifdef HAVE_SHM_OPEN
+    case PA_MEM_TYPE_SHARED_POSIX:
+        pa_assert(memfd_fd == -1);
+        segment_name(fn, sizeof(fn), id);
+        if ((fd = shm_open(fn, writable ? O_RDWR : O_RDONLY, 0)) < 0) {
+            if ((errno != EACCES && errno != ENOENT) || !for_cleanup)
+                pa_log("shm_open() failed: %s", pa_cstrerror(errno));
+            goto fail;
+        }
+        break;
+#endif
+#ifdef HAVE_MEMFD
+    case PA_MEM_TYPE_SHARED_MEMFD:
+        pa_assert(memfd_fd != -1);
+        fd = memfd_fd;
+        break;
+#endif
+    default:
         goto fail;
     }
 
@@ -325,46 +378,54 @@ static int shm_attach(pa_shm *m, unsigned id, bool writable, bool for_cleanup) {
     }
 
     if (st.st_size <= 0 ||
-        st.st_size > (off_t) (MAX_SHM_SIZE+SHM_MARKER_SIZE) ||
+        st.st_size > (off_t) MAX_SHM_SIZE + (off_t) shm_marker_size(m) ||
         PA_ALIGN((size_t) st.st_size) != (size_t) st.st_size) {
         pa_log("Invalid shared memory segment size");
         goto fail;
     }
 
-    m->size = (size_t) st.st_size;
-
     prot = writable ? PROT_READ | PROT_WRITE : PROT_READ;
-    if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(m->size), prot, MAP_SHARED, fd, (off_t) 0)) == MAP_FAILED) {
+    if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(st.st_size), prot, MAP_SHARED, fd, (off_t) 0)) == MAP_FAILED) {
         pa_log("mmap() failed: %s", pa_cstrerror(errno));
         goto fail;
     }
 
-    m->do_unlink = false;
-    m->type = PA_MEM_TYPE_SHARED_POSIX;
+    /* In case of attaching to memfd areas, _the caller_ maintains
+     * ownership of the passed fd and has the sole responsibility
+     * of closing it down. For other types, we're the code path
+     * which created the fd in the first place and we're thus the
+     * ones responsible for closing it down */
+    if (type != PA_MEM_TYPE_SHARED_MEMFD)
+        pa_assert_se(pa_close(fd) == 0);
 
-    pa_assert_se(pa_close(fd) == 0);
+    m->type = type;
+    m->id = id;
+    m->size = (size_t) st.st_size;
+    m->do_unlink = false;
+    m->fd = -1;
 
     return 0;
 
 fail:
-    if (fd >= 0)
+    /* In case of memfds, caller maintains fd ownership */
+    if (fd >= 0 && type != PA_MEM_TYPE_SHARED_MEMFD)
         pa_close(fd);
 
+#endif /* defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD) */
+
     return -1;
 }
 
-int pa_shm_attach(pa_shm *m, unsigned id, bool writable) {
-    return shm_attach(m, id, writable, false);
+/* Caller owns passed @memfd_fd and must close it down when appropriate. */
+static int NEW_API_pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable) {
+    return shm_attach(m, type, id, memfd_fd, writable, false);
 }
 
-#else /* HAVE_SHM_OPEN */
-
+/* Compatibility version until the new API is used in external sources */
 int pa_shm_attach(pa_shm *m, unsigned id, bool writable) {
-    return -1;
+    return NEW_API_pa_shm_attach(m, PA_MEM_TYPE_SHARED_POSIX, id, -1, writable);
 }
 
-#endif /* HAVE_SHM_OPEN */
-
 int pa_shm_cleanup(void) {
 
 #ifdef HAVE_SHM_OPEN
@@ -394,15 +455,15 @@ int pa_shm_cleanup(void) {
         if (pa_atou(de->d_name + SHM_ID_LEN, &id) < 0)
             continue;
 
-        if (shm_attach(&seg, id, false, true) < 0)
+        if (shm_attach(&seg, PA_MEM_TYPE_SHARED_POSIX, id, -1, false, true) < 0)
             continue;
 
-        if (seg.size < SHM_MARKER_SIZE) {
+        if (seg.size < shm_marker_size(&seg)) {
             pa_shm_free(&seg);
             continue;
         }
 
-        m = (struct shm_marker*) ((uint8_t*) seg.ptr + seg.size - SHM_MARKER_SIZE);
+        m = (struct shm_marker*) ((uint8_t*) seg.ptr + seg.size - shm_marker_size(&seg));
 
         if (pa_atomic_load(&m->marker) != SHM_MARKER) {
             pa_shm_free(&seg);
diff --git a/src/pulsecore/shm.h b/src/pulsecore/shm.h
index f0fda91..10df9c1 100644
--- a/src/pulsecore/shm.h
+++ b/src/pulsecore/shm.h
@@ -30,7 +30,19 @@ typedef struct pa_shm {
     unsigned id;
     void *ptr;
     size_t size;
+
+    /* Only for type = PA_MEM_TYPE_SHARED_POSIX */
     bool do_unlink:1;
+
+    /* Only for type = PA_MEM_TYPE_SHARED_MEMFD
+     *
+     * To avoid fd leaks, we keep this fd open only until we pass it
+     * to the other PA endpoint over unix domain socket.
+     *
+     * When we don't have ownership for the memfd fd in question (e.g.
+     * pa_shm_attach()), or the file descriptor has now been closed,
+     * this is set to -1. */
+    int fd;
 } pa_shm;
 
 int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode);

commit 1c3a2bcaf17014ad81e0d6bc61ca42c477de1140
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 01:01:38 2016 +0200

    SHM: Refactor private allocations
    
    pa_shm_create_rw() is responsible for creating two types of memory:
    POSIX shared memory and regular malloc()-ed private memory.
    
    A third memory type, memfd, will be added later. To add this extra
    shared memory type in a sane manner, refactor the private memory
    allocations into their own static functions.
    
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
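
    A hypothetical round-trip illustrating that callers see no change from
    the refactor (sketch only; the helper name is made up, and the private
    path is simply routed through the new static functions internally):

        #include <pulsecore/shm.h>

        static int private_pool_roundtrip(void) {
            pa_shm m;

            /* Private memory takes the new privatemem_create() early return. */
            if (pa_shm_create_rw(&m, PA_MEM_TYPE_PRIVATE, 1 << 20, 0700) < 0)
                return -1;

            pa_shm_free(&m);    /* dispatches to privatemem_free() */
            return 0;
        }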

diff --git a/src/pulsecore/shm.c b/src/pulsecore/shm.c
index 758dece..c3306b7 100644
--- a/src/pulsecore/shm.c
+++ b/src/pulsecore/shm.c
@@ -101,10 +101,40 @@ static char *segment_name(char *fn, size_t l, unsigned id) {
 }
 #endif
 
+static int privatemem_create(pa_shm *m, size_t size) {
+    pa_assert(m);
+    pa_assert(size > 0);
+
+    m->id = 0;
+    m->size = size;
+    m->do_unlink = false;
+
+#ifdef MAP_ANONYMOUS
+    if ((m->ptr = mmap(NULL, m->size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, (off_t) 0)) == MAP_FAILED) {
+        pa_log("mmap() failed: %s", pa_cstrerror(errno));
+        return -1;
+    }
+#elif defined(HAVE_POSIX_MEMALIGN)
+    {
+        int r;
+
+        if ((r = posix_memalign(&m->ptr, PA_PAGE_SIZE, size)) < 0) {
+            pa_log("posix_memalign() failed: %s", pa_cstrerror(r));
+            return r;
+        }
+    }
+#else
+    m->ptr = pa_xmalloc(m->size);
+#endif
+
+    return 0;
+}
+
 int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
 #ifdef HAVE_SHM_OPEN
     char fn[32];
     int fd = -1;
+    struct shm_marker *marker;
 #endif
 
     pa_assert(m);
@@ -120,72 +150,47 @@ int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
     /* Round up to make it page aligned */
     size = PA_PAGE_ALIGN(size);
 
-    if (!pa_mem_type_is_shared(type)) {
-        m->id = 0;
-        m->size = size;
-
-#ifdef MAP_ANONYMOUS
-        if ((m->ptr = mmap(NULL, m->size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, (off_t) 0)) == MAP_FAILED) {
-            pa_log("mmap() failed: %s", pa_cstrerror(errno));
-            goto fail;
-        }
-#elif defined(HAVE_POSIX_MEMALIGN)
-        {
-            int r;
-
-            if ((r = posix_memalign(&m->ptr, PA_PAGE_SIZE, size)) < 0) {
-                pa_log("posix_memalign() failed: %s", pa_cstrerror(r));
-                goto fail;
-            }
-        }
-#else
-        m->ptr = pa_xmalloc(m->size);
-#endif
+    m->type = type;
 
-        m->do_unlink = false;
+    if (type == PA_MEM_TYPE_PRIVATE)
+        return privatemem_create(m, size);
 
-    } else {
 #ifdef HAVE_SHM_OPEN
-        struct shm_marker *marker;
-
-        pa_random(&m->id, sizeof(m->id));
-        segment_name(fn, sizeof(fn), m->id);
+    pa_random(&m->id, sizeof(m->id));
+    segment_name(fn, sizeof(fn), m->id);
 
-        if ((fd = shm_open(fn, O_RDWR|O_CREAT|O_EXCL, mode)) < 0) {
-            pa_log("shm_open() failed: %s", pa_cstrerror(errno));
-            goto fail;
-        }
+    if ((fd = shm_open(fn, O_RDWR|O_CREAT|O_EXCL, mode)) < 0) {
+        pa_log("shm_open() failed: %s", pa_cstrerror(errno));
+        goto fail;
+    }
 
-        m->size = size + SHM_MARKER_SIZE;
+    m->size = size + SHM_MARKER_SIZE;
 
-        if (ftruncate(fd, (off_t) m->size) < 0) {
-            pa_log("ftruncate() failed: %s", pa_cstrerror(errno));
-            goto fail;
-        }
+    if (ftruncate(fd, (off_t) m->size) < 0) {
+        pa_log("ftruncate() failed: %s", pa_cstrerror(errno));
+        goto fail;
+    }
 
 #ifndef MAP_NORESERVE
 #define MAP_NORESERVE 0
 #endif
 
-        if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(m->size), PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NORESERVE, fd, (off_t) 0)) == MAP_FAILED) {
-            pa_log("mmap() failed: %s", pa_cstrerror(errno));
-            goto fail;
-        }
+    if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(m->size), PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NORESERVE, fd, (off_t) 0)) == MAP_FAILED) {
+        pa_log("mmap() failed: %s", pa_cstrerror(errno));
+        goto fail;
+    }
 
-        /* We store our PID at the end of the shm block, so that we
-         * can check for dead shm segments later */
-        marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - SHM_MARKER_SIZE);
-        pa_atomic_store(&marker->pid, (int) getpid());
-        pa_atomic_store(&marker->marker, SHM_MARKER);
+    /* We store our PID at the end of the shm block, so that we
+     * can check for dead shm segments later */
+    marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - SHM_MARKER_SIZE);
+    pa_atomic_store(&marker->pid, (int) getpid());
+    pa_atomic_store(&marker->marker, SHM_MARKER);
 
-        pa_assert_se(pa_close(fd) == 0);
-        m->do_unlink = true;
+    pa_assert_se(pa_close(fd) == 0);
+    m->do_unlink = true;
 #else
-        goto fail;
+    goto fail;
 #endif
-    }
-
-    m->type = type;
 
     return 0;
 
@@ -201,6 +206,21 @@ fail:
     return -1;
 }
 
+static void privatemem_free(pa_shm *m) {
+    pa_assert(m);
+    pa_assert(m->ptr);
+    pa_assert(m->size > 0);
+
+#ifdef MAP_ANONYMOUS
+    if (munmap(m->ptr, m->size) < 0)
+        pa_log("munmap() failed: %s", pa_cstrerror(errno));
+#elif defined(HAVE_POSIX_MEMALIGN)
+    free(m->ptr);
+#else
+    pa_xfree(m->ptr);
+#endif
+}
+
 void pa_shm_free(pa_shm *m) {
     pa_assert(m);
     pa_assert(m->ptr);
@@ -210,34 +230,28 @@ void pa_shm_free(pa_shm *m) {
     pa_assert(m->ptr != MAP_FAILED);
 #endif
 
-    if (!pa_mem_type_is_shared(m->type)) {
-#ifdef MAP_ANONYMOUS
-        if (munmap(m->ptr, m->size) < 0)
-            pa_log("munmap() failed: %s", pa_cstrerror(errno));
-#elif defined(HAVE_POSIX_MEMALIGN)
-        free(m->ptr);
-#else
-        pa_xfree(m->ptr);
-#endif
-    } else {
-#ifdef HAVE_SHM_OPEN
-        if (munmap(m->ptr, PA_PAGE_ALIGN(m->size)) < 0)
-            pa_log("munmap() failed: %s", pa_cstrerror(errno));
+    if (m->type == PA_MEM_TYPE_PRIVATE) {
+        privatemem_free(m);
+        goto finish;
+    }
 
-        if (m->do_unlink) {
-            char fn[32];
+#ifdef HAVE_SHM_OPEN
+    if (munmap(m->ptr, PA_PAGE_ALIGN(m->size)) < 0)
+        pa_log("munmap() failed: %s", pa_cstrerror(errno));
 
-            segment_name(fn, sizeof(fn), m->id);
+    if (m->do_unlink) {
+        char fn[32];
 
-            if (shm_unlink(fn) < 0)
-                pa_log(" shm_unlink(%s) failed: %s", fn, pa_cstrerror(errno));
-        }
+        segment_name(fn, sizeof(fn), m->id);
+        if (shm_unlink(fn) < 0)
+            pa_log(" shm_unlink(%s) failed: %s", fn, pa_cstrerror(errno));
+    }
 #else
-        /* We shouldn't be here without shm support */
-        pa_assert_not_reached();
+    /* We shouldn't be here without shm support */
+    pa_assert_not_reached();
 #endif
-    }
 
+finish:
     pa_zero(*m);
 }
 

commit b88acd0266384a754157b515d4b51256afad8266
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 00:57:06 2016 +0200

    pulsecore: Transform pa_mempool_new() into a factory method
    
    Soon we're going to have three types of memory pools: POSIX shm_open()
    pools, memfd_create() ones, and privately malloc()-ed pools.
    
    Thus introduce an enumeration of the supported memory types, and change
    pa_mempool_new() into a factory method that takes the required memory
    type as a parameter.
    
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
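
    Caller-side sketch of the factory-style API (mirrors the context.c and
    core.c hunks below; the helper name is hypothetical and error handling
    is trimmed):

        #include <pulsecore/memblock.h>

        /* Try a POSIX-SHM backed pool first, then fall back to a private,
         * malloc()-style pool if shared memory is unavailable. */
        static pa_mempool *pool_new_with_fallback(size_t shm_size) {
            pa_mempool *pool;

            if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size)))
                pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size);

            return pool;    /* may still be NULL if both attempts fail */
        }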

diff --git a/src/Makefile.am b/src/Makefile.am
index 433eaf6..62cac9c 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -700,6 +700,7 @@ libpulsecommon_ at PA_MAJORMINOR@_la_SOURCES = \
 		pulsecore/refcnt.h \
 		pulsecore/srbchannel.c pulsecore/srbchannel.h \
 		pulsecore/sample-util.c pulsecore/sample-util.h \
+		pulsecore/mem.h \
 		pulsecore/shm.c pulsecore/shm.h \
 		pulsecore/bitset.c pulsecore/bitset.h \
 		pulsecore/socket-client.c pulsecore/socket-client.h \
diff --git a/src/pulse/context.c b/src/pulse/context.c
index 927d020..e4716fb 100644
--- a/src/pulse/context.c
+++ b/src/pulse/context.c
@@ -125,6 +125,7 @@ static void reset_callbacks(pa_context *c) {
 
 pa_context *pa_context_new_with_proplist(pa_mainloop_api *mainloop, const char *name, pa_proplist *p) {
     pa_context *c;
+    pa_mem_type_t type;
 
     pa_assert(mainloop);
 
@@ -170,10 +171,13 @@ pa_context *pa_context_new_with_proplist(pa_mainloop_api *mainloop, const char *
     c->srb_template.readfd = -1;
     c->srb_template.writefd = -1;
 
-    if (!(c->mempool = pa_mempool_new(!c->conf->disable_shm, c->conf->shm_size))) {
+    type = !c->conf->disable_shm ? PA_MEM_TYPE_SHARED_POSIX : PA_MEM_TYPE_PRIVATE;
+    if (!(c->mempool = pa_mempool_new(type, c->conf->shm_size))) {
 
-        if (!c->conf->disable_shm)
-            c->mempool = pa_mempool_new(false, c->conf->shm_size);
+        if (!c->conf->disable_shm) {
+            pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal private one.");
+            c->mempool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, c->conf->shm_size);
+        }
 
         if (!c->mempool) {
             context_free(c);
diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
index fe67109..199a26b 100644
--- a/src/pulsecore/core.c
+++ b/src/pulsecore/core.c
@@ -69,14 +69,14 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, size_t shm_size) {
     pa_assert(m);
 
     if (shared) {
-        if (!(pool = pa_mempool_new(shared, shm_size))) {
+        if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, shm_size))) {
             pa_log_warn("Failed to allocate shared memory pool. Falling back to a normal memory pool.");
             shared = false;
         }
     }
 
     if (!shared) {
-        if (!(pool = pa_mempool_new(shared, shm_size))) {
+        if (!(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, shm_size))) {
             pa_log("pa_mempool_new() failed.");
             return NULL;
         }
diff --git a/src/pulsecore/mem.h b/src/pulsecore/mem.h
new file mode 100644
index 0000000..11a8086
--- /dev/null
+++ b/src/pulsecore/mem.h
@@ -0,0 +1,51 @@
+#ifndef foopulsememhfoo
+#define foopulsememhfoo
+
+/***
+  This file is part of PulseAudio.
+
+  Copyright 2016 Ahmed S. Darwish <darwish.07 at gmail.com>
+
+  PulseAudio is free software; you can redistribute it and/or modify
+  it under the terms of the GNU Lesser General Public License as
+  published by the Free Software Foundation; either version 2.1 of the
+  License, or (at your option) any later version.
+
+  PulseAudio is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public
+  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <stdbool.h>
+
+#include <pulsecore/macro.h>
+
+typedef enum pa_mem_type {
+    PA_MEM_TYPE_SHARED_POSIX,         /* Data is shared and created using POSIX shm_open() */
+    PA_MEM_TYPE_SHARED_MEMFD,         /* Data is shared and created using Linux memfd_create() */
+    PA_MEM_TYPE_PRIVATE,              /* Data is private and created using classic memory allocation
+                                         (posix_memalign(), malloc() or anonymous mmap()) */
+} pa_mem_type_t;
+
+static inline const char *pa_mem_type_to_string(pa_mem_type_t type) {
+    switch (type) {
+    case PA_MEM_TYPE_SHARED_POSIX:
+        return "shared posix-shm";
+    case PA_MEM_TYPE_SHARED_MEMFD:
+        return "shared memfd";
+    case PA_MEM_TYPE_PRIVATE:
+        return "private";
+    }
+
+    pa_assert_not_reached();
+}
+
+static inline bool pa_mem_type_is_shared(pa_mem_type_t t) {
+    return (t == PA_MEM_TYPE_SHARED_POSIX) || (t == PA_MEM_TYPE_SHARED_MEMFD);
+}
+
+#endif
diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index ceea813..49665ee 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -773,7 +773,7 @@ static void memblock_replace_import(pa_memblock *b) {
     pa_mutex_unlock(import->mutex);
 }
 
-pa_mempool* pa_mempool_new(bool shared, size_t size) {
+pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size) {
     pa_mempool *p;
     char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
 
@@ -793,13 +793,13 @@ pa_mempool* pa_mempool_new(bool shared, size_t size) {
             p->n_blocks = 2;
     }
 
-    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
+    if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
         pa_xfree(p);
         return NULL;
     }
 
     pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
-                 p->memory.shared ? "shared" : "private",
+                 pa_mem_type_to_string(type),
                  p->n_blocks,
                  pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                  pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
@@ -923,10 +923,17 @@ void pa_mempool_vacuum(pa_mempool *p) {
 }
 
 /* No lock necessary */
+bool pa_mempool_is_shared(pa_mempool *p) {
+    pa_assert(p);
+
+    return pa_mem_type_is_shared(p->memory.type);
+}
+
+/* No lock necessary */
 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
     pa_assert(p);
 
-    if (!p->memory.shared)
+    if (!pa_mempool_is_shared(p))
         return -1;
 
     *id = p->memory.id;
@@ -934,13 +941,6 @@ int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
     return 0;
 }
 
-/* No lock necessary */
-bool pa_mempool_is_shared(pa_mempool *p) {
-    pa_assert(p);
-
-    return p->memory.shared;
-}
-
 pa_mempool* pa_mempool_ref(pa_mempool *p) {
     pa_assert(p);
     pa_assert(PA_REFCNT_VALUE(p) > 0);
@@ -1139,7 +1139,7 @@ pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void
     pa_assert(p);
     pa_assert(cb);
 
-    if (!p->memory.shared)
+    if (!pa_mempool_is_shared(p))
         return NULL;
 
     e = pa_xnew(pa_memexport, 1);
diff --git a/src/pulsecore/memblock.h b/src/pulsecore/memblock.h
index 960ef25..718235f 100644
--- a/src/pulsecore/memblock.h
+++ b/src/pulsecore/memblock.h
@@ -30,6 +30,7 @@ typedef struct pa_memblock pa_memblock;
 #include <pulse/xmalloc.h>
 #include <pulsecore/atomic.h>
 #include <pulsecore/memchunk.h>
+#include <pulsecore/mem.h>
 
 /* A pa_memblock is a reference counted memory block. PulseAudio
  * passes references to pa_memblocks around instead of copying
@@ -123,7 +124,7 @@ pa_mempool * pa_memblock_get_pool(pa_memblock *b);
 pa_memblock *pa_memblock_will_need(pa_memblock *b);
 
 /* The memory block manager */
-pa_mempool* pa_mempool_new(bool shared, size_t size);
+pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size);
 void pa_mempool_unref(pa_mempool *p);
 pa_mempool* pa_mempool_ref(pa_mempool *p);
 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
diff --git a/src/pulsecore/protocol-native.c b/src/pulsecore/protocol-native.c
index 82ea267..afb4850 100644
--- a/src/pulsecore/protocol-native.c
+++ b/src/pulsecore/protocol-native.c
@@ -55,6 +55,7 @@
 #include <pulsecore/core-util.h>
 #include <pulsecore/ipacl.h>
 #include <pulsecore/thread-mq.h>
+#include <pulsecore/mem.h>
 
 #include "protocol-native.h"
 
@@ -2621,7 +2622,7 @@ static void setup_srbchannel(pa_native_connection *c) {
         return;
     }
 
-    if (!(c->rw_mempool = pa_mempool_new(true, c->protocol->core->shm_size))) {
+    if (!(c->rw_mempool = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, c->protocol->core->shm_size))) {
         pa_log_warn("Disabling srbchannel, reason: Failed to allocate shared "
                     "writable memory pool.");
         return;
diff --git a/src/pulsecore/shm.c b/src/pulsecore/shm.c
index d613168..758dece 100644
--- a/src/pulsecore/shm.c
+++ b/src/pulsecore/shm.c
@@ -51,6 +51,7 @@
 #include <pulsecore/core-util.h>
 #include <pulsecore/macro.h>
 #include <pulsecore/atomic.h>
+#include <pulsecore/mem.h>
 
 #include "shm.h"
 
@@ -100,7 +101,7 @@ static char *segment_name(char *fn, size_t l, unsigned id) {
 }
 #endif
 
-int pa_shm_create_rw(pa_shm *m, size_t size, bool shared, mode_t mode) {
+int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
 #ifdef HAVE_SHM_OPEN
     char fn[32];
     int fd = -1;
@@ -119,7 +120,7 @@ int pa_shm_create_rw(pa_shm *m, size_t size, bool shared, mode_t mode) {
     /* Round up to make it page aligned */
     size = PA_PAGE_ALIGN(size);
 
-    if (!shared) {
+    if (!pa_mem_type_is_shared(type)) {
         m->id = 0;
         m->size = size;
 
@@ -184,7 +185,7 @@ int pa_shm_create_rw(pa_shm *m, size_t size, bool shared, mode_t mode) {
 #endif
     }
 
-    m->shared = shared;
+    m->type = type;
 
     return 0;
 
@@ -209,7 +210,7 @@ void pa_shm_free(pa_shm *m) {
     pa_assert(m->ptr != MAP_FAILED);
 #endif
 
-    if (!m->shared) {
+    if (!pa_mem_type_is_shared(m->type)) {
 #ifdef MAP_ANONYMOUS
         if (munmap(m->ptr, m->size) < 0)
             pa_log("munmap() failed: %s", pa_cstrerror(errno));
@@ -325,7 +326,7 @@ static int shm_attach(pa_shm *m, unsigned id, bool writable, bool for_cleanup) {
     }
 
     m->do_unlink = false;
-    m->shared = true;
+    m->type = PA_MEM_TYPE_SHARED_POSIX;
 
     pa_assert_se(pa_close(fd) == 0);
 
diff --git a/src/pulsecore/shm.h b/src/pulsecore/shm.h
index d438961..f0fda91 100644
--- a/src/pulsecore/shm.h
+++ b/src/pulsecore/shm.h
@@ -23,16 +23,17 @@
 #include <sys/types.h>
 
 #include <pulsecore/macro.h>
+#include <pulsecore/mem.h>
 
 typedef struct pa_shm {
+    pa_mem_type_t type;
     unsigned id;
     void *ptr;
     size_t size;
     bool do_unlink:1;
-    bool shared:1;
 } pa_shm;
 
-int pa_shm_create_rw(pa_shm *m, size_t size, bool shared, mode_t mode);
+int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode);
 int pa_shm_attach(pa_shm *m, unsigned id, bool writable);
 
 void pa_shm_punch(pa_shm *m, size_t offset, size_t size);
diff --git a/src/tests/cpu-mix-test.c b/src/tests/cpu-mix-test.c
index 2513d14..e487a20 100644
--- a/src/tests/cpu-mix-test.c
+++ b/src/tests/cpu-mix-test.c
@@ -76,7 +76,7 @@ static void run_mix_test(
     samples_ref = out_ref + (8 - align);
     nsamples = channels * (SAMPLES - (8 - align));
 
-    fail_unless((pool = pa_mempool_new(false, 0)) != NULL, NULL);
+    fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0)) != NULL, NULL);
 
     pa_random(samples0, nsamples * sizeof(int16_t));
     c0.memblock = pa_memblock_new_fixed(pool, samples0, nsamples * sizeof(int16_t), false);
diff --git a/src/tests/lfe-filter-test.c b/src/tests/lfe-filter-test.c
index e64288d..8d61cc6 100644
--- a/src/tests/lfe-filter-test.c
+++ b/src/tests/lfe-filter-test.c
@@ -136,7 +136,7 @@ START_TEST (lfe_filter_test) {
     a.format = PA_SAMPLE_S16NE;
 
     lft.ss = &a;
-    pa_assert_se(lft.pool = pa_mempool_new(false, 0));
+    pa_assert_se(lft.pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
 
     /* We prepare pseudo-random input audio samples for lfe-filter rewind testing*/
     ori_sample_ptr = pa_xmalloc(pa_frame_size(lft.ss) * TOTAL_SAMPLES);
diff --git a/src/tests/mcalign-test.c b/src/tests/mcalign-test.c
index ad9a760..f121972 100644
--- a/src/tests/mcalign-test.c
+++ b/src/tests/mcalign-test.c
@@ -36,7 +36,7 @@ int main(int argc, char *argv[]) {
     pa_mcalign *a;
     pa_memchunk c;
 
-    p = pa_mempool_new(false, 0);
+    p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0);
 
     a = pa_mcalign_new(11);
 
diff --git a/src/tests/memblock-test.c b/src/tests/memblock-test.c
index 78a43cd..089648f 100644
--- a/src/tests/memblock-test.c
+++ b/src/tests/memblock-test.c
@@ -80,11 +80,11 @@ START_TEST (memblock_test) {
 
     const char txt[] = "This is a test!";
 
-    pool_a = pa_mempool_new(true, 0);
+    pool_a = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
     fail_unless(pool_a != NULL);
-    pool_b = pa_mempool_new(true, 0);
+    pool_b = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
     fail_unless(pool_b != NULL);
-    pool_c = pa_mempool_new(true, 0);
+    pool_c = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
     fail_unless(pool_c != NULL);
 
     pa_mempool_get_shm_id(pool_a, &id_a);
diff --git a/src/tests/memblockq-test.c b/src/tests/memblockq-test.c
index ed33b2c..37cdd77 100644
--- a/src/tests/memblockq-test.c
+++ b/src/tests/memblockq-test.c
@@ -108,7 +108,7 @@ START_TEST (memblockq_test) {
 
     pa_log_set_level(PA_LOG_DEBUG);
 
-    p = pa_mempool_new(false, 0);
+    p = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0);
 
     silence.memblock = pa_memblock_new_fixed(p, (char*) "__", 2, 1);
     fail_unless(silence.memblock != NULL);
diff --git a/src/tests/mix-test.c b/src/tests/mix-test.c
index abf5fc7..ce6686a 100644
--- a/src/tests/mix-test.c
+++ b/src/tests/mix-test.c
@@ -286,7 +286,7 @@ START_TEST (mix_test) {
     if (!getenv("MAKE_CHECK"))
         pa_log_set_level(PA_LOG_DEBUG);
 
-    fail_unless((pool = pa_mempool_new(false, 0)) != NULL, NULL);
+    fail_unless((pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0)) != NULL, NULL);
 
     a.channels = 1;
     a.rate = 44100;
diff --git a/src/tests/remix-test.c b/src/tests/remix-test.c
index 578a30c..6ee5f6d 100644
--- a/src/tests/remix-test.c
+++ b/src/tests/remix-test.c
@@ -51,7 +51,7 @@ int main(int argc, char *argv[]) {
 
     pa_log_set_level(PA_LOG_DEBUG);
 
-    pa_assert_se(pool = pa_mempool_new(false, 0));
+    pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
 
     for (i = 0; maps[i].channels > 0; i++)
         for (j = 0; maps[j].channels > 0; j++) {
diff --git a/src/tests/resampler-test.c b/src/tests/resampler-test.c
index 2833d7e..28f03d6 100644
--- a/src/tests/resampler-test.c
+++ b/src/tests/resampler-test.c
@@ -404,7 +404,7 @@ int main(int argc, char *argv[]) {
     }
 
     ret = 0;
-    pa_assert_se(pool = pa_mempool_new(false, 0));
+    pa_assert_se(pool = pa_mempool_new(PA_MEM_TYPE_PRIVATE, 0));
 
     if (!all_formats) {
 
diff --git a/src/tests/srbchannel-test.c b/src/tests/srbchannel-test.c
index 3dbcc2b..253abcf 100644
--- a/src/tests/srbchannel-test.c
+++ b/src/tests/srbchannel-test.c
@@ -85,7 +85,7 @@ START_TEST (srbchannel_test) {
     int pipefd[4];
 
     pa_mainloop *ml = pa_mainloop_new();
-    pa_mempool *mp = pa_mempool_new(true, 0);
+    pa_mempool *mp = pa_mempool_new(PA_MEM_TYPE_SHARED_POSIX, 0);
     pa_iochannel *io1, *io2;
     pa_pstream *p1, *p2;
     pa_srbchannel *sr1, *sr2;

commit 211a520543b97c2c769d85bdc4d4c8d3a9023199
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 00:53:34 2016 +0200

    srbchannel: Introduce per-client SHM files
    
    The PA daemon currently uses a single SHM file for all clients
    sending and receiving commands over the low-latency srbchannel
    mechanism.
    
    To avoid data leaks between clients in that case, and to lay the
    necessary groundwork for later sandboxing and memfd support, create
    the srbchannel SHM files on a per-client basis.
    
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
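
For orientation, a condensed sketch of the per-connection lifecycle that the
protocol-native.c hunks below implement; error logging and the pstream hookup
are abbreviated, and the teardown side is shown only in a comment. This is a
reading aid, not the full patch.

    static void setup_srbchannel(pa_native_connection *c) {
        pa_srbchannel *srb;

        /* COMMAND_AUTH received more than once: the pool already exists */
        if (c->rw_mempool)
            return;

        /* One writable SHM pool per client connection */
        if (!(c->rw_mempool = pa_mempool_new(true, c->protocol->core->shm_size)))
            return;
        pa_mempool_set_is_remote_writable(c->rw_mempool, true);

        if (!(srb = pa_srbchannel_new(c->protocol->core->mainloop, c->rw_mempool))) {
            pa_mempool_unref(c->rw_mempool);
            c->rw_mempool = NULL;
            return;
        }

        /* ... hand the srbchannel over to the pstream as before ... */
    }

    /* native_connection_free() then drops the pool together with the client:
     *     if (c->rw_mempool)
     *         pa_mempool_unref(c->rw_mempool);
     */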

diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
index 30dbde9..fe67109 100644
--- a/src/pulsecore/core.c
+++ b/src/pulsecore/core.c
@@ -126,11 +126,6 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, size_t shm_size) {
     c->shm_size = shm_size;
     pa_silence_cache_init(&c->silence_cache);
 
-    if (shared && !(c->rw_mempool = pa_mempool_new(shared, shm_size)))
-        pa_log_warn("Failed to allocate shared writable memory pool.");
-    if (c->rw_mempool)
-        pa_mempool_set_is_remote_writable(c->rw_mempool, true);
-
     c->exit_event = NULL;
     c->scache_auto_unload_event = NULL;
 
@@ -217,8 +212,6 @@ static void core_free(pa_object *o) {
     pa_assert(!c->default_sink);
 
     pa_silence_cache_done(&c->silence_cache);
-    if (c->rw_mempool)
-        pa_mempool_unref(c->rw_mempool);
     pa_mempool_unref(c->mempool);
 
     for (j = 0; j < PA_CORE_HOOK_MAX; j++)
@@ -284,9 +277,6 @@ void pa_core_maybe_vacuum(pa_core *c) {
     }
 
     pa_mempool_vacuum(c->mempool);
-
-    if (c->rw_mempool)
-        pa_mempool_vacuum(c->rw_mempool);
 }
 
 pa_time_event* pa_core_rttime_new(pa_core *c, pa_usec_t usec, pa_time_event_cb_t cb, void *userdata) {
diff --git a/src/pulsecore/core.h b/src/pulsecore/core.h
index 428689c..9f5c445 100644
--- a/src/pulsecore/core.h
+++ b/src/pulsecore/core.h
@@ -177,10 +177,8 @@ struct pa_core {
     PA_LLIST_HEAD(pa_subscription_event, subscription_event_queue);
     pa_subscription_event *subscription_event_last;
 
-    /* The mempool is used for data we write to, it's readonly for the client.
-       The rw_mempool is used for data writable by both server and client (and
-       can be NULL in some cases). */
-    pa_mempool *mempool, *rw_mempool;
+    /* The mempool is used for data we write to, it's readonly for the client. */
+    pa_mempool *mempool;
 
     /* Shared memory size, as specified either by daemon configuration
      * or PA daemon defaults (~ 64 MiB). */
diff --git a/src/pulsecore/protocol-native.c b/src/pulsecore/protocol-native.c
index 145db04..82ea267 100644
--- a/src/pulsecore/protocol-native.c
+++ b/src/pulsecore/protocol-native.c
@@ -173,6 +173,12 @@ struct pa_native_connection {
     bool is_local:1;
     uint32_t version;
     pa_client *client;
+    /* R/W mempool, one per client connection, for srbchannel transport.
+     * Both server and client can write to this shm area.
+     *
+     * Note: This will be NULL if our connection with the client does
+     * not support srbchannels */
+    pa_mempool *rw_mempool;
     pa_pstream *pstream;
     pa_pdispatch *pdispatch;
     pa_idxset *record_streams, *output_streams;
@@ -1371,6 +1377,9 @@ static void native_connection_free(pa_object *o) {
 
     pa_pdispatch_unref(c->pdispatch);
     pa_pstream_unref(c->pstream);
+    if (c->rw_mempool)
+        pa_mempool_unref(c->rw_mempool);
+
     pa_client_free(c->client);
 
     pa_xfree(c);
@@ -2606,14 +2615,25 @@ static void setup_srbchannel(pa_native_connection *c) {
         return;
     }
 
-    if (!c->protocol->core->rw_mempool) {
-        pa_log_debug("Disabling srbchannel, reason: No rw memory pool");
+    if (c->rw_mempool) {
+        pa_log_debug("Ignoring srbchannel setup, reason: received COMMAND_AUTH "
+                     "more than once");
+        return;
+    }
+
+    if (!(c->rw_mempool = pa_mempool_new(true, c->protocol->core->shm_size))) {
+        pa_log_warn("Disabling srbchannel, reason: Failed to allocate shared "
+                    "writable memory pool.");
         return;
     }
+    pa_mempool_set_is_remote_writable(c->rw_mempool, true);
 
-    srb = pa_srbchannel_new(c->protocol->core->mainloop, c->protocol->core->rw_mempool);
+    srb = pa_srbchannel_new(c->protocol->core->mainloop, c->rw_mempool);
     if (!srb) {
         pa_log_debug("Failed to create srbchannel");
+
+        pa_mempool_unref(c->rw_mempool);
+        c->rw_mempool = NULL;
         return;
     }
     pa_log_debug("Enabling srbchannel...");
@@ -5125,6 +5145,8 @@ void pa_native_protocol_connect(pa_native_protocol *p, pa_iochannel *io, pa_nati
     c->client->send_event = client_send_event_cb;
     c->client->userdata = c;
 
+    c->rw_mempool = NULL;
+
     c->pstream = pa_pstream_new(p->core->mainloop, io, p->core->mempool);
     pa_pstream_set_receive_packet_callback(c->pstream, pstream_packet_callback, c);
     pa_pstream_set_receive_memblock_callback(c->pstream, pstream_memblock_callback, c);

commit 9bda6e344a67a9df245bd679706c9a0291b30d08
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 00:51:12 2016 +0200

    pulsecore: Reference count mempools
    
    In future commits, server-wide SHMs will be replaced with per-client
    ones that are dynamically created and freed as client connections
    open and close.
    
    Meanwhile, the current PA design does not guarantee that a per-client
    mempool's blocks are referenced only by client-specific objects.
    
    Thus, reference count the pools and let each memblock, whether
    allocated from the pool itself or merely attached to it
    (PA_MEMBLOCK_IMPORTED), increment the pool's refcount upon
    allocation. This way, a per-client mempool is only freed once no
    component in the system holds any reference to its blocks.
    
    DiscussionLink: https://goo.gl/qesVMV
    Suggested-by: Tanu Kaskinen <tanuk at iki.fi>
    Suggested-by: David Henningsson <david.henningsson at canonical.com>
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
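
A minimal sketch of the new caller convention (the helper name and its
pa_memchunk parameter are illustrative only, not part of the patch):
pa_memblock_get_pool() now takes a reference on the returned pool, so every
caller has to drop that reference itself, exactly as the memblockq.c,
memchunk.c and lfe-filter.c hunks below do.

    static pa_memblock *copy_into_same_pool(const pa_memchunk *chunk) {
        pa_mempool *pool;
        pa_memblock *copy;

        pool = pa_memblock_get_pool(chunk->memblock);  /* +1 on the pool */
        copy = pa_memblock_new(pool, chunk->length);   /* block keeps its own pool ref */
        pa_mempool_unref(pool), pool = NULL;           /* drop the get_pool() ref */

        return copy;  /* unreffing the block later releases its pool ref;
                         the pool itself is freed once the last ref is gone */
    }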

diff --git a/src/pulse/context.c b/src/pulse/context.c
index 4f084e8..927d020 100644
--- a/src/pulse/context.c
+++ b/src/pulse/context.c
@@ -249,7 +249,7 @@ static void context_free(pa_context *c) {
         pa_hashmap_free(c->playback_streams);
 
     if (c->mempool)
-        pa_mempool_free(c->mempool);
+        pa_mempool_unref(c->mempool);
 
     if (c->conf)
         pa_client_conf_free(c->conf);
diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
index b2df7de..30dbde9 100644
--- a/src/pulsecore/core.c
+++ b/src/pulsecore/core.c
@@ -218,8 +218,8 @@ static void core_free(pa_object *o) {
 
     pa_silence_cache_done(&c->silence_cache);
     if (c->rw_mempool)
-        pa_mempool_free(c->rw_mempool);
-    pa_mempool_free(c->mempool);
+        pa_mempool_unref(c->rw_mempool);
+    pa_mempool_unref(c->mempool);
 
     for (j = 0; j < PA_CORE_HOOK_MAX; j++)
         pa_hook_done(&c->hooks[j]);
diff --git a/src/pulsecore/filter/lfe-filter.c b/src/pulsecore/filter/lfe-filter.c
index 5f5ace2..c0b1eb0 100644
--- a/src/pulsecore/filter/lfe-filter.c
+++ b/src/pulsecore/filter/lfe-filter.c
@@ -110,6 +110,7 @@ static void process_block(pa_lfe_filter_t *f, pa_memchunk *buf, bool store_resul
 }
 
 pa_memchunk * pa_lfe_filter_process(pa_lfe_filter_t *f, pa_memchunk *buf) {
+    pa_mempool *pool;
     struct saved_state *s, *s2;
     void *data;
 
@@ -129,10 +130,12 @@ pa_memchunk * pa_lfe_filter_process(pa_lfe_filter_t *f, pa_memchunk *buf) {
     /* TODO: This actually memcpys the entire chunk into a new allocation, because we need to retain the original
        in case of rewinding. Investigate whether this can be avoided. */
     data = pa_memblock_acquire_chunk(buf);
-    s->chunk.memblock = pa_memblock_new_malloced(pa_memblock_get_pool(buf->memblock), pa_xmemdup(data, buf->length), buf->length);
+    pool = pa_memblock_get_pool(buf->memblock);
+    s->chunk.memblock = pa_memblock_new_malloced(pool, pa_xmemdup(data, buf->length), buf->length);
     s->chunk.length = buf->length;
     s->chunk.index = 0;
     pa_memblock_release(buf->memblock);
+    pa_mempool_unref(pool), pool = NULL;
 
     s->index = f->index;
     memcpy(s->lr4, f->lr4, sizeof(struct lr4) * f->cm.channels);
diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index 9b6810d..ceea813 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -142,6 +142,23 @@ struct pa_memexport {
 };
 
 struct pa_mempool {
+    /* Reference count the mempool
+     *
+     * Any block allocation from the pool itself, or even just imported from
+     * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
+     * shall increase the refcount.
+     *
+     * This is done for per-client mempools: global references to blocks in
+     * the pool, or just to attached ones, can still be lingering around when
+     * the client connection dies and all per-client objects are to be freed.
+     * That is, current PulseAudio design does not guarantee that the client
+     * mempool blocks are referenced only by client-specific objects.
+     *
+     * For further details, please check:
+     * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
+     */
+    PA_REFCNT_DECLARE;
+
     pa_semaphore *semaphore;
     pa_mutex *mutex;
 
@@ -237,6 +254,7 @@ static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
     b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
     PA_REFCNT_INIT(b);
     b->pool = p;
+    pa_mempool_ref(b->pool);
     b->type = PA_MEMBLOCK_APPENDED;
     b->read_only = b->is_silence = false;
     pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
@@ -367,6 +385,7 @@ pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
 
     PA_REFCNT_INIT(b);
     b->pool = p;
+    pa_mempool_ref(b->pool);
     b->read_only = b->is_silence = false;
     b->length = length;
     pa_atomic_store(&b->n_acquired, 0);
@@ -390,6 +409,7 @@ pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool r
 
     PA_REFCNT_INIT(b);
     b->pool = p;
+    pa_mempool_ref(b->pool);
     b->type = PA_MEMBLOCK_FIXED;
     b->read_only = read_only;
     b->is_silence = false;
@@ -423,6 +443,7 @@ pa_memblock *pa_memblock_new_user(
 
     PA_REFCNT_INIT(b);
     b->pool = p;
+    pa_mempool_ref(b->pool);
     b->type = PA_MEMBLOCK_USER;
     b->read_only = read_only;
     b->is_silence = false;
@@ -518,10 +539,13 @@ size_t pa_memblock_get_length(pa_memblock *b) {
     return b->length;
 }
 
+/* Note! Always unref the returned pool after use */
 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
     pa_assert(b);
     pa_assert(PA_REFCNT_VALUE(b) > 0);
+    pa_assert(b->pool);
 
+    pa_mempool_ref(b->pool);
     return b->pool;
 }
 
@@ -535,10 +559,13 @@ pa_memblock* pa_memblock_ref(pa_memblock*b) {
 }
 
 static void memblock_free(pa_memblock *b) {
-    pa_assert(b);
+    pa_mempool *pool;
 
+    pa_assert(b);
+    pa_assert(b->pool);
     pa_assert(pa_atomic_load(&b->n_acquired) == 0);
 
+    pool = b->pool;
     stat_remove(b);
 
     switch (b->type) {
@@ -620,6 +647,8 @@ static void memblock_free(pa_memblock *b) {
         default:
             pa_assert_not_reached();
     }
+
+    pa_mempool_unref(pool);
 }
 
 /* No lock necessary */
@@ -749,6 +778,7 @@ pa_mempool* pa_mempool_new(bool shared, size_t size) {
     char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
 
     p = pa_xnew0(pa_mempool, 1);
+    PA_REFCNT_INIT(p);
 
     p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
     if (p->block_size < PA_PAGE_SIZE)
@@ -788,7 +818,7 @@ pa_mempool* pa_mempool_new(bool shared, size_t size) {
     return p;
 }
 
-void pa_mempool_free(pa_mempool *p) {
+static void mempool_free(pa_mempool *p) {
     pa_assert(p);
 
     pa_mutex_lock(p->mutex);
@@ -911,6 +941,22 @@ bool pa_mempool_is_shared(pa_mempool *p) {
     return p->memory.shared;
 }
 
+pa_mempool* pa_mempool_ref(pa_mempool *p) {
+    pa_assert(p);
+    pa_assert(PA_REFCNT_VALUE(p) > 0);
+
+    PA_REFCNT_INC(p);
+    return p;
+}
+
+void pa_mempool_unref(pa_mempool *p) {
+    pa_assert(p);
+    pa_assert(PA_REFCNT_VALUE(p) > 0);
+
+    if (PA_REFCNT_DEC(p) <= 0)
+        mempool_free(p);
+}
+
 /* For receiving blocks from other nodes */
 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
     pa_memimport *i;
@@ -921,6 +967,7 @@ pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void
     i = pa_xnew(pa_memimport, 1);
     i->mutex = pa_mutex_new(true, true);
     i->pool = p;
+    pa_mempool_ref(i->pool);
     i->segments = pa_hashmap_new(NULL, NULL);
     i->blocks = pa_hashmap_new(NULL, NULL);
     i->release_cb = cb;
@@ -996,6 +1043,7 @@ void pa_memimport_free(pa_memimport *i) {
 
     pa_mutex_unlock(i->pool->mutex);
 
+    pa_mempool_unref(i->pool);
     pa_hashmap_free(i->blocks);
     pa_hashmap_free(i->segments);
 
@@ -1039,6 +1087,7 @@ pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_i
 
     PA_REFCNT_INIT(b);
     b->pool = i->pool;
+    pa_mempool_ref(b->pool);
     b->type = PA_MEMBLOCK_IMPORTED;
     b->read_only = !writable;
     b->is_silence = false;
@@ -1096,6 +1145,7 @@ pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void
     e = pa_xnew(pa_memexport, 1);
     e->mutex = pa_mutex_new(true, true);
     e->pool = p;
+    pa_mempool_ref(e->pool);
     PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
     PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
     e->n_init = 0;
@@ -1123,6 +1173,7 @@ void pa_memexport_free(pa_memexport *e) {
     PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
     pa_mutex_unlock(e->pool->mutex);
 
+    pa_mempool_unref(e->pool);
     pa_mutex_free(e->mutex);
     pa_xfree(e);
 }
diff --git a/src/pulsecore/memblock.h b/src/pulsecore/memblock.h
index 4faef75..960ef25 100644
--- a/src/pulsecore/memblock.h
+++ b/src/pulsecore/memblock.h
@@ -116,13 +116,16 @@ void *pa_memblock_acquire_chunk(const pa_memchunk *c);
 void pa_memblock_release(pa_memblock *b);
 
 size_t pa_memblock_get_length(pa_memblock *b);
+
+/* Note! Always unref the returned pool after use */
 pa_mempool * pa_memblock_get_pool(pa_memblock *b);
 
 pa_memblock *pa_memblock_will_need(pa_memblock *b);
 
 /* The memory block manager */
 pa_mempool* pa_mempool_new(bool shared, size_t size);
-void pa_mempool_free(pa_mempool *p);
+void pa_mempool_unref(pa_mempool *p);
+pa_mempool* pa_mempool_ref(pa_mempool *p);
 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p);
 void pa_mempool_vacuum(pa_mempool *p);
 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id);
diff --git a/src/pulsecore/memblockq.c b/src/pulsecore/memblockq.c
index d314d4e..d283ed2 100644
--- a/src/pulsecore/memblockq.c
+++ b/src/pulsecore/memblockq.c
@@ -530,6 +530,7 @@ int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
 }
 
 int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
+    pa_mempool *pool;
     pa_memchunk tchunk, rchunk;
     int64_t ri;
     struct list_item *item;
@@ -548,9 +549,11 @@ int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchun
         return 0;
     }
 
-    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
+    pool = pa_memblock_get_pool(tchunk.memblock);
+    rchunk.memblock = pa_memblock_new(pool, block_size);
     rchunk.index = 0;
     rchunk.length = tchunk.length;
+    pa_mempool_unref(pool), pool = NULL;
 
     pa_memchunk_memcpy(&rchunk, &tchunk);
     pa_memblock_unref(tchunk.memblock);
diff --git a/src/pulsecore/memchunk.c b/src/pulsecore/memchunk.c
index eb5917c..8822134 100644
--- a/src/pulsecore/memchunk.c
+++ b/src/pulsecore/memchunk.c
@@ -32,6 +32,7 @@
 #include "memchunk.h"
 
 pa_memchunk* pa_memchunk_make_writable(pa_memchunk *c, size_t min) {
+    pa_mempool *pool;
     pa_memblock *n;
     size_t l;
     void *tdata, *sdata;
@@ -46,7 +47,9 @@ pa_memchunk* pa_memchunk_make_writable(pa_memchunk *c, size_t min) {
 
     l = PA_MAX(c->length, min);
 
-    n = pa_memblock_new(pa_memblock_get_pool(c->memblock), l);
+    pool = pa_memblock_get_pool(c->memblock);
+    n = pa_memblock_new(pool, l);
+    pa_mempool_unref(pool), pool = NULL;
 
     sdata = pa_memblock_acquire(c->memblock);
     tdata = pa_memblock_acquire(n);
diff --git a/src/pulsecore/pstream.c b/src/pulsecore/pstream.c
index 98a8382..fb43a1b 100644
--- a/src/pulsecore/pstream.c
+++ b/src/pulsecore/pstream.c
@@ -573,6 +573,7 @@ static void prepare_next_write_item(pa_pstream *p) {
 
             if (current_export != p->export)
                 pa_memexport_free(current_export);
+            pa_mempool_unref(current_pool);
         }
 
         if (send_payload) {
diff --git a/src/tests/cpu-mix-test.c b/src/tests/cpu-mix-test.c
index f42530d..2513d14 100644
--- a/src/tests/cpu-mix-test.c
+++ b/src/tests/cpu-mix-test.c
@@ -142,7 +142,7 @@ static void run_mix_test(
     pa_memblock_unref(c0.memblock);
     pa_memblock_unref(c1.memblock);
 
-    pa_mempool_free(pool);
+    pa_mempool_unref(pool);
 }
 
 START_TEST (mix_special_test) {
diff --git a/src/tests/lfe-filter-test.c b/src/tests/lfe-filter-test.c
index 389a2b9..e64288d 100644
--- a/src/tests/lfe-filter-test.c
+++ b/src/tests/lfe-filter-test.c
@@ -163,7 +163,7 @@ START_TEST (lfe_filter_test) {
 
     pa_lfe_filter_free(lft.lf);
 
-    pa_mempool_free(lft.pool);
+    pa_mempool_unref(lft.pool);
 
     if (!ret)
         pa_log_debug("lfe-filter-test: tests for both rewind to block boundary and rewind to middle position of a block passed!");
diff --git a/src/tests/mcalign-test.c b/src/tests/mcalign-test.c
index 0d27dfd..ad9a760 100644
--- a/src/tests/mcalign-test.c
+++ b/src/tests/mcalign-test.c
@@ -100,7 +100,7 @@ int main(int argc, char *argv[]) {
     if (c.memblock)
         pa_memblock_unref(c.memblock);
 
-    pa_mempool_free(p);
+    pa_mempool_unref(p);
 
     return 0;
 }
diff --git a/src/tests/memblock-test.c b/src/tests/memblock-test.c
index 2b51108..78a43cd 100644
--- a/src/tests/memblock-test.c
+++ b/src/tests/memblock-test.c
@@ -169,9 +169,9 @@ START_TEST (memblock_test) {
 
     pa_log("vacuuming done...");
 
-    pa_mempool_free(pool_a);
-    pa_mempool_free(pool_b);
-    pa_mempool_free(pool_c);
+    pa_mempool_unref(pool_a);
+    pa_mempool_unref(pool_b);
+    pa_mempool_unref(pool_c);
 }
 END_TEST
 
diff --git a/src/tests/memblockq-test.c b/src/tests/memblockq-test.c
index eea6cfa..ed33b2c 100644
--- a/src/tests/memblockq-test.c
+++ b/src/tests/memblockq-test.c
@@ -208,7 +208,7 @@ START_TEST (memblockq_test) {
     pa_memblock_unref(chunk3.memblock);
     pa_memblock_unref(chunk4.memblock);
 
-    pa_mempool_free(p);
+    pa_mempool_unref(p);
 }
 END_TEST
 
diff --git a/src/tests/mix-test.c b/src/tests/mix-test.c
index c8af600..abf5fc7 100644
--- a/src/tests/mix-test.c
+++ b/src/tests/mix-test.c
@@ -338,7 +338,7 @@ START_TEST (mix_test) {
         pa_memblock_unref(k.memblock);
     }
 
-    pa_mempool_free(pool);
+    pa_mempool_unref(pool);
 }
 END_TEST
 
diff --git a/src/tests/remix-test.c b/src/tests/remix-test.c
index 6feb8dc..578a30c 100644
--- a/src/tests/remix-test.c
+++ b/src/tests/remix-test.c
@@ -75,7 +75,7 @@ int main(int argc, char *argv[]) {
             pa_resampler_free(r);
         }
 
-    pa_mempool_free(pool);
+    pa_mempool_unref(pool);
 
     return 0;
 }
diff --git a/src/tests/resampler-test.c b/src/tests/resampler-test.c
index 8569ac7..2833d7e 100644
--- a/src/tests/resampler-test.c
+++ b/src/tests/resampler-test.c
@@ -473,7 +473,7 @@ int main(int argc, char *argv[]) {
 
  quit:
     if (pool)
-        pa_mempool_free(pool);
+        pa_mempool_unref(pool);
 
     return ret;
 }
diff --git a/src/tests/srbchannel-test.c b/src/tests/srbchannel-test.c
index cd4d397..3dbcc2b 100644
--- a/src/tests/srbchannel-test.c
+++ b/src/tests/srbchannel-test.c
@@ -116,7 +116,7 @@ START_TEST (srbchannel_test) {
 
     pa_pstream_unref(p1);
     pa_pstream_unref(p2);
-    pa_mempool_free(mp);
+    pa_mempool_unref(mp);
     pa_mainloop_free(ml);
 }
 END_TEST

commit 1f5e72264ea3ccc23aa34460874c8581909d0e89
Author: Ahmed S. Darwish <darwish.07 at gmail.com>
Date:   Sun Mar 13 00:48:15 2016 +0200

    pulsecore: Cache daemon shm size inside pa_core
    
    The daemon `shm-size-bytes' configuration value was read, and then
    directly used, for creating the initial server-wide SHM files.
    
    This is fine for now, but soon such server-wide SHMs will be replaced
    with per-client SHM files that are dynamically created and deleted as
    client connections open and close. Thus, cache this configuration
    value for later use.
    
    Signed-off-by: Ahmed S. Darwish <darwish.07 at gmail.com>
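
A tiny sketch of how the cached value is consumed later (the helper name is
illustrative only; the call itself mirrors what the per-client srbchannel
commit above does with c->protocol->core->shm_size):

    static pa_mempool* new_per_client_pool(pa_core *core) {
        /* core->shm_size is the value cached by this patch; a shared,
         * per-client pool is created from it instead of re-reading the
         * daemon configuration. */
        return pa_mempool_new(true /* shared */, core->shm_size);
    }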

diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
index 43fd30e..b2df7de 100644
--- a/src/pulsecore/core.c
+++ b/src/pulsecore/core.c
@@ -123,6 +123,7 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, size_t shm_size) {
     c->subscription_event_last = NULL;
 
     c->mempool = pool;
+    c->shm_size = shm_size;
     pa_silence_cache_init(&c->silence_cache);
 
     if (shared && !(c->rw_mempool = pa_mempool_new(shared, shm_size)))
diff --git a/src/pulsecore/core.h b/src/pulsecore/core.h
index aefc1eb..428689c 100644
--- a/src/pulsecore/core.h
+++ b/src/pulsecore/core.h
@@ -181,6 +181,11 @@ struct pa_core {
        The rw_mempool is used for data writable by both server and client (and
        can be NULL in some cases). */
     pa_mempool *mempool, *rw_mempool;
+
+    /* Shared memory size, as specified either by daemon configuration
+     * or PA daemon defaults (~ 64 MiB). */
+    size_t shm_size;
+
     pa_silence_cache silence_cache;
 
     pa_time_event *exit_event;


