[pulseaudio-commits] 2 commits - src/modules

Arun Raghavan arun at kemper.freedesktop.org
Thu Mar 9 16:49:13 UTC 2017


 src/modules/alsa/alsa-sink.c                 |   20 +++++++++++++++-
 src/modules/echo-cancel/module-echo-cancel.c |   32 ++++++++++++++++++++-------
 2 files changed, 42 insertions(+), 10 deletions(-)

New commits:
commit 1992c4cce12ccd5004c68b85e3fd29680be96ec7
Author: Arun Raghavan <arun at arunraghavan.net>
Date:   Thu Mar 9 11:02:09 2017 +0530

    echo-cancel: Limit the maximum sink/source latency
    
    On systems with constrained CPUs, we might run into a situation where
    the master source/sink is configured to have too high a latency.
    
    On the source side, this would cause us to wake up with a large chunk of
    data to process, which might cause us to exhaust our RT limit and thus be
    killed.
    
    So it makes sense to limit the overall latency that we request from the
    source (and correspondingly, the sink, so we don't starve for playback
    data on the source side).
    
    The 10-block maximum is somewhat arbitrary (I'm assuming the system has
    enough headroom to process 10 chunks through the canceller without
    getting close to the RT limit). It might make sense to make this tunable
    in the future.
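
For illustration, here is a minimal standalone sketch of the capping
arithmetic the patch performs with PA_MIN(). The 10 ms block, the 48 kHz
2-channel S16 frame size and the 500 ms request are made-up example
values, and bytes_to_usec() is a simplified stand-in for
pa_bytes_to_usec(), not the real PulseAudio API:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LATENCY_BLOCKS 10

    /* Simplified stand-in for pa_bytes_to_usec(): convert a byte count
     * into a duration for a fixed rate and frame size. */
    static uint64_t bytes_to_usec(uint64_t nbytes, unsigned rate, unsigned frame_size) {
        return nbytes / frame_size * 1000000ULL / rate;
    }

    int main(void) {
        unsigned rate = 48000, frame_size = 4;  /* 48 kHz, 2ch S16 (assumed) */
        uint64_t blocksize = 1920;              /* one 10 ms canceller block */
        uint64_t requested = 500000;            /* master requested 500 ms */

        /* Cap at MAX_LATENCY_BLOCKS block durations, as the
         * update_requested_latency callbacks now do. */
        uint64_t cap = bytes_to_usec(blocksize, rate, frame_size) * MAX_LATENCY_BLOCKS;
        uint64_t latency = requested < cap ? requested : cap;

        /* Prints: cap = 100000 us, effective latency = 100000 us */
        printf("cap = %llu us, effective latency = %llu us\n",
               (unsigned long long) cap, (unsigned long long) latency);
        return 0;
    }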

diff --git a/src/modules/echo-cancel/module-echo-cancel.c b/src/modules/echo-cancel/module-echo-cancel.c
index dfd05b6..3641106 100644
--- a/src/modules/echo-cancel/module-echo-cancel.c
+++ b/src/modules/echo-cancel/module-echo-cancel.c
@@ -145,6 +145,8 @@ static const pa_echo_canceller ec_table[] = {
 
 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
 
+#define MAX_LATENCY_BLOCKS 10
+
 /* Can only be used in main context */
 #define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
                       (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
@@ -515,6 +517,7 @@ static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
 /* Called from source I/O thread context */
 static void source_update_requested_latency_cb(pa_source *s) {
     struct userdata *u;
+    pa_usec_t latency;
 
     pa_source_assert_ref(s);
     pa_assert_se(u = s->userdata);
@@ -525,15 +528,17 @@ static void source_update_requested_latency_cb(pa_source *s) {
 
     pa_log_debug("Source update requested latency");
 
-    /* Just hand this one over to the master source */
-    pa_source_output_set_requested_latency_within_thread(
-            u->source_output,
-            pa_source_get_requested_latency_within_thread(s));
+    /* Cap the maximum latency so we don't have to process too large chunks */
+    latency = PA_MIN(pa_source_get_requested_latency_within_thread(s),
+                     pa_bytes_to_usec(u->source_blocksize, &s->sample_spec) * MAX_LATENCY_BLOCKS);
+
+    pa_source_output_set_requested_latency_within_thread(u->source_output, latency);
 }
 
 /* Called from sink I/O thread context */
 static void sink_update_requested_latency_cb(pa_sink *s) {
     struct userdata *u;
+    pa_usec_t latency;
 
     pa_sink_assert_ref(s);
     pa_assert_se(u = s->userdata);
@@ -544,10 +549,11 @@ static void sink_update_requested_latency_cb(pa_sink *s) {
 
     pa_log_debug("Sink update requested latency");
 
-    /* Just hand this one over to the master sink */
-    pa_sink_input_set_requested_latency_within_thread(
-            u->sink_input,
-            pa_sink_get_requested_latency_within_thread(s));
+    /* Cap the maximum latency so we don't have to process too large chunks */
+    latency = PA_MIN(pa_sink_get_requested_latency_within_thread(s),
+                     pa_bytes_to_usec(u->sink_blocksize, &s->sample_spec) * MAX_LATENCY_BLOCKS);
+
+    pa_sink_input_set_requested_latency_within_thread(u->sink_input, latency);
 }
 
 /* Called from sink I/O thread context */
@@ -1653,6 +1659,7 @@ int pa__init(pa_module*m) {
     uint32_t temp;
     uint32_t nframes = 0;
     bool use_master_format;
+    pa_usec_t blocksize_usec;
 
     pa_assert(m);
 
@@ -2015,6 +2022,15 @@ int pa__init(pa_module*m) {
 
     u->thread_info.current_volume = u->source->reference_volume;
 
+    /* We don't want to deal with too many chunks at a time */
+    blocksize_usec = pa_bytes_to_usec(u->source_blocksize, &u->source->sample_spec);
+    pa_source_set_latency_range(u->source, blocksize_usec, blocksize_usec * MAX_LATENCY_BLOCKS);
+    pa_source_output_set_requested_latency(u->source_output, blocksize_usec * MAX_LATENCY_BLOCKS);
+
+    blocksize_usec = pa_bytes_to_usec(u->sink_blocksize, &u->sink->sample_spec);
+    pa_sink_set_latency_range(u->sink, blocksize_usec, blocksize_usec * MAX_LATENCY_BLOCKS);
+    pa_sink_input_set_requested_latency(u->sink_input, blocksize_usec * MAX_LATENCY_BLOCKS);
+
     pa_sink_put(u->sink);
     pa_source_put(u->source);
 

commit c82e4913e8666a47f11f91a313075ecdd7163d5a
Author: Arun Raghavan <arun at arunraghavan.net>
Date:   Tue Feb 28 10:45:25 2017 +0530

    alsa: Avoid creating tiny memchunks on write iterations
    
    If the ALSA device supports granular pointer reporting, we end up in a
    situation where we write out a bunch of data, iterate, and then find a
    small amount of buffer space newly available (freed up while we were
    writing data into the previously available space). We do this up to 10
    times before quitting the write loop.
    
    This is inefficient in itself, but can also have wider consequences. For
    example, with module-combine-sink, this will end up pushing the same
    small chunks to all other devices too.
    
    Given both of these, it makes sense not to try to write out data unless
    a minimum threshold of free space is available. This threshold could
    potentially be a fragment, but it's likely most robust to just work
    with a fraction of the total available buffer size.
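
A standalone sketch of the new early-exit check follows; the 64 KiB
buffer and the byte counts are hypothetical examples, not values from
alsa-sink.c:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03

    /* Mirrors the loop-exit logic added to mmap_write()/unix_write():
     * n_bytes is the writable space found on this iteration, j is the
     * iteration count. */
    static bool stop_filling(int j, size_t n_bytes, size_t hwbuf_size, size_t hwbuf_unused) {
        if (j > 10)
            return true;  /* hard iteration cap, unchanged from before */

        /* From the second iteration on, stop when less than 3% of the
         * usable buffer is writable. */
        return j >= 2 && n_bytes < DEFAULT_WRITE_ITERATION_THRESHOLD * (hwbuf_size - hwbuf_unused);
    }

    int main(void) {
        size_t hwbuf_size = 65536, hwbuf_unused = 0;  /* hypothetical 64 KiB buffer */

        /* 3% of 65536 is ~1966 bytes, so a 1024-byte sliver stops the
         * loop while 8192 bytes does not. */
        printf("%d\n", stop_filling(2, 1024, hwbuf_size, hwbuf_unused));  /* 1 */
        printf("%d\n", stop_filling(2, 8192, hwbuf_size, hwbuf_unused));  /* 0 */
        return 0;
    }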

diff --git a/src/modules/alsa/alsa-sink.c b/src/modules/alsa/alsa-sink.c
index 886c735..073413f 100644
--- a/src/modules/alsa/alsa-sink.c
+++ b/src/modules/alsa/alsa-sink.c
@@ -88,6 +88,8 @@
 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
 
+#define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03 /* don't iterate write if < 3% of the buffer is available */
+
 struct userdata {
     pa_core *core;
     pa_module *module;
@@ -580,12 +582,19 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bo
             break;
         }
 
-        if (++j > 10) {
+        j++;
+
+        if (j > 10) {
 #ifdef DEBUG_TIMING
             pa_log_debug("Not filling up, because already too many iterations.");
 #endif
 
             break;
+        } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
+#ifdef DEBUG_TIMING
+            pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
+#endif
+            break;
         }
 
         n_bytes -= u->hwbuf_unused;
@@ -754,12 +763,19 @@ static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bo
             break;
         }
 
-        if (++j > 10) {
+        j++;
+
+        if (j > 10) {
 #ifdef DEBUG_TIMING
             pa_log_debug("Not filling up, because already too many iterations.");
 #endif
 
             break;
+        } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
+#ifdef DEBUG_TIMING
+            pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
+#endif
+            break;
         }
 
         n_bytes -= u->hwbuf_unused;


