[Spice-commits] 4 commits - server/red-channel.c server/red-worker.c server/tests

Frediano Ziglio fziglio at kemper.freedesktop.org
Wed Feb 3 16:05:13 UTC 2016


 server/red-channel.c  |   39 ++++++++++++++++++---------------------
 server/red-worker.c   |   23 ++++++++++++-----------
 server/tests/replay.c |    6 ------
 3 files changed, 30 insertions(+), 38 deletions(-)

New commits:
commit 56182be52204a052fbeb7aa828b1aadd61786816
Author: Frediano Ziglio <fziglio at redhat.com>
Date:   Thu Jan 28 23:58:24 2016 +0000

    worker: don't do too much polling
    
    Now that processing is correctly restored there is no need to keep
    polling to avoid main loop hangs. Reduce the polling count to 1
    (just try once).
    This reduces CPU usage when guests are mostly idle.
    Consider 100 guests waiting at a login screen with a blinking
    cursor, with polling done up to 200 times every 10ms: the cursor
    blinking alone caused 10100 loop iterations per second (100 blink
    wakeups plus 100 guests each polling 100 times a second), while now
    only 200 are needed (one blink wakeup plus one poll retry per guest
    per second, with the cursor blinking once a second).
    
    Signed-off-by: Frediano Ziglio <fziglio at redhat.com>
    Acked-by: Jonathon Jongsma <jjongsma at redhat.com>

diff --git a/server/red-worker.c b/server/red-worker.c
index fbf5fa6..6a48434 100644
--- a/server/red-worker.c
+++ b/server/red-worker.c
@@ -53,7 +53,7 @@
 #include "tree.h"
 
 #define CMD_RING_POLL_TIMEOUT 10 //milli
-#define CMD_RING_POLL_RETRIES 200
+#define CMD_RING_POLL_RETRIES 1
 
 #define INF_EVENT_WAIT ~0
 
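For context, the retry counter and timeout interact roughly as below; a
minimal sketch, where the helper name is an assumption based on the
constants above, not the literal code in red-worker.c:

    /* Called when the command ring turned out to be empty: decide
     * whether to schedule one more short poll or block until an
     * explicit wakeup arrives. */
    static gboolean worker_should_poll_again(RedWorker *worker,
                                             uint32_t *poll_tries)
    {
        if (*poll_tries < CMD_RING_POLL_RETRIES) {
            (*poll_tries)++;
            /* re-check the ring after CMD_RING_POLL_TIMEOUT ms */
            worker->event_timeout = MIN(worker->event_timeout,
                                        CMD_RING_POLL_TIMEOUT);
            return TRUE;
        }
        return FALSE;  /* retries exhausted: wait for a wakeup */
    }

With CMD_RING_POLL_RETRIES at 1, an idle guest costs one extra 10ms
check per wakeup instead of two seconds of back-to-back polling.
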
commit 916e48e331012d4f7d18cdd68bad1bf7551c08ee
Author: Frediano Ziglio <fziglio at redhat.com>
Date:   Thu Jan 28 23:51:17 2016 +0000

    worker: avoid blocking loop
    
    Make sure we process commands again once we can send data to the
    client.
    If, during processing, we detected that there was too much data in
    the client queues, command processing simply stopped until the next
    iteration.
    However, if all the data was pushed out in a single iteration of the
    loop and the commands had already been processed, there was the
    (remote) possibility that the loop would hang until a separate event
    (like a screen resize on the client window) arrived.
    I managed to reproduce this; after half an hour no events had
    arrived.
    This patch detects that processing was stuck and forces a new
    iteration once we can process commands again.
    
    Signed-off-by: Frediano Ziglio <fziglio at redhat.com>
    Acked-by: Jonathon Jongsma <jjongsma at redhat.com>

diff --git a/server/red-worker.c b/server/red-worker.c
index 0d91eac..fbf5fa6 100644
--- a/server/red-worker.c
+++ b/server/red-worker.c
@@ -69,6 +69,7 @@ struct RedWorker {
 
     DisplayChannel *display_channel;
     uint32_t display_poll_tries;
+    gboolean was_blocked;
 
     CursorChannel *cursor_channel;
     uint32_t cursor_poll_tries;
@@ -196,6 +197,7 @@ static int red_process_cursor(RedWorker *worker, int *ring_is_empty)
         }
         n++;
     }
+    worker->was_blocked = TRUE;
     return n;
 }
 
@@ -318,9 +320,16 @@ static int red_process_display(RedWorker *worker, int *ring_is_empty)
             return n;
         }
     }
+    worker->was_blocked = TRUE;
     return n;
 }
 
+static bool red_process_is_blocked(RedWorker *worker)
+{
+    return red_channel_max_pipe_size(RED_CHANNEL(worker->cursor_channel)) > MAX_PIPE_SIZE ||
+           red_channel_max_pipe_size(RED_CHANNEL(worker->display_channel)) > MAX_PIPE_SIZE;
+}
+
 static void red_disconnect_display(RedWorker *worker)
 {
     spice_warning("update timeout");
@@ -1416,6 +1425,10 @@ static gboolean worker_source_prepare(GSource *source, gint *p_timeout)
     if (*p_timeout == 0)
         return TRUE;
 
+    if (worker->was_blocked && !red_process_is_blocked(worker)) {
+        return TRUE;
+    }
+
     return FALSE;
 }
 
@@ -1445,6 +1458,7 @@ static gboolean worker_source_dispatch(GSource *source, GSourceFunc callback,
     stream_timeout(display);
 
     worker->event_timeout = INF_EVENT_WAIT;
+    worker->was_blocked = FALSE;
     red_process_cursor(worker, &ring_is_empty);
     red_process_display(worker, &ring_is_empty);
 
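For reference, worker_source_prepare and worker_source_dispatch are the
hooks of a custom GLib main loop source. A minimal wiring sketch, where
the RedWorkerSource wrapper and the attach helper are illustrative
assumptions, not code from this diff:

    /* GLib calls prepare() before poll(); returning TRUE triggers an
     * immediate dispatch(), which is how the was_blocked flag forces
     * another iteration without waiting for an external event. */
    static GSourceFuncs worker_source_funcs = {
        .prepare  = worker_source_prepare,
        .dispatch = worker_source_dispatch,
    };

    typedef struct RedWorkerSource {
        GSource base;        /* must be the first member */
        RedWorker *worker;
    } RedWorkerSource;

    static void worker_source_attach(RedWorker *worker,
                                     GMainContext *context)
    {
        GSource *source = g_source_new(&worker_source_funcs,
                                       sizeof(RedWorkerSource));
        ((RedWorkerSource *) source)->worker = worker;
        g_source_attach(source, context);
    }

Returning TRUE from prepare() skips the poll wait entirely, so a worker
that bailed out while blocked gets a fresh dispatch as soon as the
clients can accept data again.
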
commit 77adbb7574005f9479e55bffb7f08256d4f6b594
Author: Frediano Ziglio <fziglio at redhat.com>
Date:   Thu Jan 28 23:58:41 2016 +0000

    replay: remove a message that could be caused by a race condition
    
    The req_cmd_notification callback is called by spice-server when it
    has processed all commands and wants to be notified (by a wakeup) that
    new commands have been appended to the command queue.
    The replay utility tries to refill the command queue when it detects
    that spice-server is trying to read commands but none are left.
    However, new commands are appended from a separate thread, so if the
    main red worker loop in spice-server is tight enough, this request
    can arrive while the queue is momentarily empty.
    Avoid printing any message in this condition; the commands will be
    appended and the loop woken up as soon as the replay code gets the
    chance.
    
    Signed-off-by: Frediano Ziglio <fziglio at redhat.com>
    Acked-by: Jonathon Jongsma <jjongsma at redhat.com>

diff --git a/server/tests/replay.c b/server/tests/replay.c
index c7f3f00..f3b670f 100644
--- a/server/tests/replay.c
+++ b/server/tests/replay.c
@@ -183,12 +183,6 @@ static int get_command(QXLInstance *qin, QXLCommandExt *ext)
 
 static int req_cmd_notification(QXLInstance *qin)
 {
-    if (!started)
-        return TRUE;
-
-    g_printerr("id: %d, queue length: %d",
-                   g_source_get_id(fill_source), g_async_queue_length(aqueue));
-
     return TRUE;
 }
 
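The producer side assumed by this reasoning looks roughly like the
following; read_next_command() is a hypothetical stand-in for the replay
file parser, while g_async_queue_push() and spice_qxl_wakeup() are the
real GLib and spice-server calls:

    /* Runs on the fill thread: append one command, then wake the
     * worker loop so spice-server calls get_command() again. */
    static gboolean fill_queue_once(QXLInstance *qxl)
    {
        QXLCommandExt *cmd = read_next_command();  /* hypothetical */

        if (cmd == NULL)
            return FALSE;                 /* replay file exhausted */
        g_async_queue_push(aqueue, cmd);  /* visible to get_command() */
        spice_qxl_wakeup(qxl);            /* wake the worker loop */
        return TRUE;
    }

Because the push and the wakeup happen on another thread, an empty queue
seen by req_cmd_notification is transient rather than an error, which is
why the diagnostic is dropped.
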
commit 5c460de1a3972b7cf2b9b2944d0b500c3affc363
Author: Frediano Ziglio <fziglio at redhat.com>
Date:   Thu Jan 21 00:42:30 2016 +0000

    worker: push data when clients can receive it
    
    During every iteration of the main worker loop, outgoing data was pushed to
    the client. However, there was no guarantee that the loop would be woken up
    in every situation, so under some conditions the loop would stop
    iterating until a new event arrived.
    
    Currently, the events that can wake up the main worker loop include:
     - data from dispatcher (including wakeups from the guest)
     - data from clients
     - timeouts on a stream
     - other timeouts
     - polling
    
    This patch adds a new wakeup event: when we have items that are queued to
    be sent to a client, we set up a watch event for writing data to the
    client. If no items are waiting to be sent, this watch will be disabled.
    This allows us to remove the explicit push from the main worker loop.
    
    This has some advantages:
     - it could lower latency, as we don't have to wait for a polling timeout.
       In my experiments using a tight loop (so not really the ideal
       condition to see the improvement), the total time was reduced by 2-3%.
     - helps reduce the possibility of hanging loops
     - avoids having to scan all clients to detect which one can accept data.
    
    Signed-off-by: Frediano Ziglio <fziglio at redhat.com>
    Acked-by: Jonathon Jongsma <jjongsma at redhat.com>

diff --git a/server/red-channel.c b/server/red-channel.c
index 33d4158..fdd85b9 100644
--- a/server/red-channel.c
+++ b/server/red-channel.c
@@ -1359,6 +1359,11 @@ void red_channel_client_push(RedChannelClient *rcc)
     while ((pipe_item = red_channel_client_pipe_item_get(rcc))) {
         red_channel_client_send_item(rcc, pipe_item);
     }
+    if (red_channel_client_no_item_being_sent(rcc) && ring_is_empty(&rcc->pipe)
+        && rcc->stream->watch) {
+        rcc->channel->core->watch_update_mask(rcc->stream->watch,
+                                              SPICE_WATCH_EVENT_READ);
+    }
     rcc->during_send = FALSE;
     red_channel_client_unref(rcc);
 }
@@ -1659,7 +1664,7 @@ void red_channel_pipe_item_init(RedChannel *channel, PipeItem *item, int type)
     item->type = type;
 }
 
-static inline int validate_pipe_add(RedChannelClient *rcc, PipeItem *item)
+static inline gboolean client_pipe_add(RedChannelClient *rcc, PipeItem *item, RingItem *pos)
 {
     spice_assert(rcc && item);
     if (SPICE_UNLIKELY(!red_channel_client_is_connected(rcc))) {
@@ -1667,17 +1672,20 @@ static inline int validate_pipe_add(RedChannelClient *rcc, PipeItem *item)
         red_channel_client_release_item(rcc, item, FALSE);
         return FALSE;
     }
+    if (ring_is_empty(&rcc->pipe) && rcc->stream->watch) {
+        rcc->channel->core->watch_update_mask(rcc->stream->watch,
+                                         SPICE_WATCH_EVENT_READ |
+                                         SPICE_WATCH_EVENT_WRITE);
+    }
+    rcc->pipe_size++;
+    ring_add(pos, &item->link);
     return TRUE;
 }
 
 void red_channel_client_pipe_add(RedChannelClient *rcc, PipeItem *item)
 {
 
-    if (!validate_pipe_add(rcc, item)) {
-        return;
-    }
-    rcc->pipe_size++;
-    ring_add(&rcc->pipe, &item->link);
+    client_pipe_add(rcc, item, &rcc->pipe);
 }
 
 void red_channel_client_pipe_add_push(RedChannelClient *rcc, PipeItem *item)
@@ -1690,11 +1698,7 @@ void red_channel_client_pipe_add_after(RedChannelClient *rcc,
                                        PipeItem *item, PipeItem *pos)
 {
     spice_assert(pos);
-    if (!validate_pipe_add(rcc, item)) {
-        return;
-    }
-    rcc->pipe_size++;
-    ring_add_after(&item->link, &pos->link);
+    client_pipe_add(rcc, item, &pos->link);
 }
 
 int red_channel_client_pipe_item_is_linked(RedChannelClient *rcc,
@@ -1706,21 +1710,14 @@ int red_channel_client_pipe_item_is_linked(RedChannelClient *rcc,
 void red_channel_client_pipe_add_tail_no_push(RedChannelClient *rcc,
                                               PipeItem *item)
 {
-    if (!validate_pipe_add(rcc, item)) {
-        return;
-    }
-    rcc->pipe_size++;
-    ring_add_before(&item->link, &rcc->pipe);
+    client_pipe_add(rcc, item, rcc->pipe.prev);
 }
 
 void red_channel_client_pipe_add_tail(RedChannelClient *rcc, PipeItem *item)
 {
-    if (!validate_pipe_add(rcc, item)) {
-        return;
+    if (client_pipe_add(rcc, item, rcc->pipe.prev)) {
+        red_channel_client_push(rcc);
     }
-    rcc->pipe_size++;
-    ring_add_before(&item->link, &rcc->pipe);
-    red_channel_client_push(rcc);
 }
 
 void red_channel_client_pipe_add_type(RedChannelClient *rcc, int pipe_item_type)
diff --git a/server/red-worker.c b/server/red-worker.c
index f874939..0d91eac 100644
--- a/server/red-worker.c
+++ b/server/red-worker.c
@@ -321,16 +321,6 @@ static int red_process_display(RedWorker *worker, int *ring_is_empty)
     return n;
 }
 
-static void red_push(RedWorker *worker)
-{
-    if (worker->cursor_channel) {
-        red_channel_push(RED_CHANNEL(worker->cursor_channel));
-    }
-    if (worker->display_channel) {
-        red_channel_push(RED_CHANNEL(worker->display_channel));
-    }
-}
-
 static void red_disconnect_display(RedWorker *worker)
 {
     spice_warning("update timeout");
@@ -1458,9 +1448,6 @@ static gboolean worker_source_dispatch(GSource *source, GSourceFunc callback,
     red_process_cursor(worker, &ring_is_empty);
     red_process_display(worker, &ring_is_empty);
 
-    /* TODO: remove me? that should be handled by watch out condition */
-    red_push(worker);
-
     return TRUE;
 }
 

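The watch half of this mechanism behaves roughly as sketched below. The
handler name and its wiring are assumptions for illustration; only the
watch_update_mask calls in the diff above are the actual change:

    /* Core-loop callback fired when the client socket becomes ready.
     * On WRITE readiness the queued pipe items are pushed; once the
     * pipe drains, red_channel_client_push() removes
     * SPICE_WATCH_EVENT_WRITE from the mask again, so the watch
     * effectively self-disables. */
    static void channel_client_event(int fd, int event, void *data)
    {
        RedChannelClient *rcc = data;

        if (event & SPICE_WATCH_EVENT_READ) {
            red_channel_client_receive(rcc);
        }
        if (event & SPICE_WATCH_EVENT_WRITE) {
            red_channel_client_push(rcc);
        }
    }

Adding the first item to an empty pipe turns WRITE notification on (the
client_pipe_add hunk), and draining the pipe turns it off (the push
hunk), so the loop only wakes when there is both data to send and room
to send it.
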
