[Spice-devel] [PATCH spice-gtk 2/3] Don't print error message on successful file transfer
Jonathon Jongsma
jjongsma at redhat.com
Wed Oct 21 13:52:50 PDT 2015
In certain circumstances we were printing an error message even though
the file transfer had completed successfully. It didn't cause any
problems, but it pointed out an issue in the handling of outgoing agent
messages.
The described behavior was generally only encountered when multiple files were
being transferred at once, and most often when one or two of them were
significantly smaller than another file being transferred (for example, two
20kB files alongside a 3MB file). The failure mechanism is basically as follows:
For each file transfer, we read a chunk of the file, queue a series of messages
to the guest, and wait for them to be flushed before reading the next chunk of
the file. (Since the maximum agent message size is 2kB, each 64kB chunk being
read can generate up to 32 messages at a time.) The
file transfer task remains in "pending" state until the flush operation is
complete. Since all agent messages go into a single channel-wide queue, waiting
for the messages to be flushed meant waiting for *all* outgoing messages to be
flushed, including those that were queued after we called flush_async().
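To make that concrete, here is a simplified sketch of the per-chunk queueing
(the helper name queue_file_chunk() and the exact constants are made up for
illustration; the real code lives in channel-main.c):

    #include <glib.h>

    #define AGENT_MSG_MAX (2 * 1024)  /* approximate maximum agent message payload */

    /* Split one 64kB file chunk into agent-sized messages and append them
     * all to the single channel-wide queue (up to 32 messages per chunk). */
    static void queue_file_chunk(GQueue *agent_msg_queue,
                                 const guint8 *chunk, gsize len)
    {
        gsize offset;

        for (offset = 0; offset < len; offset += AGENT_MSG_MAX) {
            gsize n = MIN(len - offset, AGENT_MSG_MAX);
            g_queue_push_tail(agent_msg_queue, g_memdup(chunk + offset, n));
        }
        /* the task now stays "pending" until its flush completes; before
         * this patch that meant waiting for the *entire* queue to drain */
    }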
Since agent messages can only be sent if the client has enough agent tokens, it
can take a little while for all queued messages to be sent. This means that
even if a file transfer task has sent out all of its data to the guest, it can
be kept in "pending" state for some time if other file transfer tasks or other
agent messages are added to the queue. In this situation, the guest might
notice that it has received all of the data for a file transfer task and send an
XFER_STATUS_SUCCESS message while we're still in "pending" state.
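The sending side, again as a rough sketch (simplified names; the real
token-gated loop is agent_send_msg_queue(), shown in the diff below), only
makes progress while the guest has granted tokens:

    #include <glib.h>

    /* Hypothetical stand-in for spice_msg_out_send_internal(). */
    static void send_to_guest(gpointer msg)
    {
        g_free(msg);
    }

    /* Each message sent costs one agent token, so a task whose data is
     * already fully queued can still sit behind other tasks' messages
     * until the guest grants more tokens. */
    static void drain_agent_queue(GQueue *agent_msg_queue, guint *agent_tokens)
    {
        while (*agent_tokens > 0 && !g_queue_is_empty(agent_msg_queue)) {
            (*agent_tokens)--;
            send_to_guest(g_queue_pop_head(agent_msg_queue));
        }
    }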
This patch changes the code so that flush_async() only waits for the
currently-queued messages to be sent, so it is not affected by any new messages
that get added to the outgoing queue after flush_async() has been called. As a
result, the task exits the "pending" state immediately after its last data
packet has been sent out.
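Conceptually the new flush works like this (a stripped-down sketch of the
mechanism only; the actual implementation below stores GSimpleAsyncResults in
the channel's new "flushing" hash table):

    #include <glib.h>

    /* A flush request is keyed on whatever message is currently at the
     * tail of the queue, so it completes as soon as that message has been
     * sent, regardless of anything queued afterwards. */
    static void flush_request(GHashTable *flushing, GQueue *agent_msg_queue,
                              gpointer flush_result)
    {
        g_hash_table_insert(flushing, g_queue_peek_tail(agent_msg_queue),
                            flush_result);
    }

    static void message_was_sent(GHashTable *flushing, gpointer msg)
    {
        if (g_hash_table_lookup(flushing, msg) != NULL) {
            /* complete the waiting flush here (the real code calls
             * g_simple_async_result_complete_in_idle()) */
            g_hash_table_remove(flushing, msg);
        }
    }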
Fixes: rhbz#1265562
---
src/channel-main.c | 49 +++++++++++++++++++++++++++++++++++--------------
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/src/channel-main.c b/src/channel-main.c
index 0a73e92..01a3f89 100644
--- a/src/channel-main.c
+++ b/src/channel-main.c
@@ -152,7 +152,7 @@ struct _SpiceMainChannelPrivate {
gint timer_id;
GQueue *agent_msg_queue;
GHashTable *file_xfer_tasks;
- GSList *flushing;
+ GHashTable *flushing;
guint switch_host_delayed_id;
guint migrate_delayed_id;
@@ -289,6 +289,8 @@ static void spice_main_channel_init(SpiceMainChannel *channel)
c->agent_msg_queue = g_queue_new();
c->file_xfer_tasks = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL, g_object_unref);
+ c->flushing = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
+ g_object_unref);
c->cancellable_volume_info = g_cancellable_new();
spice_main_channel_reset_capabilties(SPICE_CHANNEL(channel));
@@ -410,6 +412,11 @@ static void spice_main_channel_dispose(GObject *obj)
c->file_xfer_tasks = NULL;
}
+ if (c->flushing) {
+ g_hash_table_unref(c->flushing);
+ c->flushing = NULL;
+ }
+
g_cancellable_cancel(c->cancellable_volume_info);
g_clear_object(&c->cancellable_volume_info);
@@ -920,20 +927,22 @@ static void agent_free_msg_queue(SpiceMainChannel *channel)
c->agent_msg_queue = NULL;
}
-/* Here, flushing algorithm is stolen from spice-channel.c */
-static void file_xfer_flushed(SpiceMainChannel *channel, gboolean success)
+static gboolean flush_foreach_remove(gpointer key G_GNUC_UNUSED,
+ gpointer value, gpointer user_data)
{
- SpiceMainChannelPrivate *c = channel->priv;
- GSList *l;
+ gboolean success = GPOINTER_TO_UINT(user_data);
+ GSimpleAsyncResult *result = G_SIMPLE_ASYNC_RESULT(value);
- for (l = c->flushing; l != NULL; l = l->next) {
- GSimpleAsyncResult *result = G_SIMPLE_ASYNC_RESULT(l->data);
- g_simple_async_result_set_op_res_gboolean(result, success);
- g_simple_async_result_complete_in_idle(result);
- }
+ g_simple_async_result_set_op_res_gboolean(result, success);
+ g_simple_async_result_complete_in_idle(result);
+ return TRUE;
+}
- g_slist_free_full(c->flushing, g_object_unref);
- c->flushing = NULL;
+static void file_xfer_flushed(SpiceMainChannel *channel, gboolean success)
+{
+ SpiceMainChannelPrivate *c = channel->priv;
+ g_hash_table_foreach_remove(c->flushing, flush_foreach_remove,
+ GUINT_TO_POINTER(success));
}
static void file_xfer_flush_async(SpiceMainChannel *channel, GCancellable *cancellable,
@@ -954,7 +963,8 @@ static void file_xfer_flush_async(SpiceMainChannel *channel, GCancellable *cance
return;
}
- c->flushing = g_slist_append(c->flushing, simple);
+ /* wait until the last message currently in the queue has been sent */
+ g_hash_table_insert(c->flushing, g_queue_peek_tail(c->agent_msg_queue), simple);
}
static gboolean file_xfer_flush_finish(SpiceMainChannel *channel, GAsyncResult *result,
@@ -981,11 +991,22 @@ static void agent_send_msg_queue(SpiceMainChannel *channel)
while (c->agent_tokens > 0 &&
!g_queue_is_empty(c->agent_msg_queue)) {
+ GSimpleAsyncResult *simple;
c->agent_tokens--;
out = g_queue_pop_head(c->agent_msg_queue);
spice_msg_out_send_internal(out);
+
+ simple = g_hash_table_lookup(c->flushing, out);
+ if (simple) {
+ /* if there's a flush task waiting for this message, finish it */
+ g_simple_async_result_set_op_res_gboolean(simple, TRUE);
+ g_simple_async_result_complete_in_idle(simple);
+ g_hash_table_remove(c->flushing, out);
+ }
}
- if (g_queue_is_empty(c->agent_msg_queue) && c->flushing != NULL) {
+ if (g_queue_is_empty(c->agent_msg_queue) &&
+ g_hash_table_size(c->flushing) != 0) {
+ g_warning("unexpected flush task in list, clearing");
file_xfer_flushed(channel, TRUE);
}
}
--
2.4.3