[Spice-devel] [PATCH 01/14] Add red_client_seamless_migration_done_for_channel()

Pavel Grunt pgrunt at redhat.com
Wed May 4 07:23:42 UTC 2016


On Wed, 2016-05-04 at 09:12 +0200, Pavel Grunt wrote:
> Hey,
> 
> On Tue, 2016-05-03 at 15:00 -0500, Jonathon Jongsma wrote:
> > This adds a public RedClient API so that the client updates its own state,
> > rather than having the RedChannelClient poke around at RedClient's internal
> > structure in red_channel_client_seamless_migration_done().
> 
> It looks good. I would drop the rcc parameter - it is not needed. Also,
> later it can become static in red-channel-client.
> 
I see that in your refactoring branch it is moved to red-client, so that is ok.
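
For concreteness, with rcc dropped the helper would reduce to something like
this (just a sketch derived from the hunk below, not the code on the branch):

static gboolean red_client_seamless_migration_done_for_channel(RedClient *client)
{
    gboolean ret = FALSE;

    pthread_mutex_lock(&client->lock);
    client->num_migrated_channels--;
    /* we assume at least one channel always has migration data to transfer,
     * otherwise this flag would never be set back to FALSE */
    if (!client->num_migrated_channels) {
        client->during_target_migrate = FALSE;
        client->seamless_migrate = FALSE;
        /* completion may be triggered from a thread other than the main one */
        main_dispatcher_seamless_migrate_dst_complete(reds_get_main_dispatcher(client->reds),
                                                      client);
        ret = TRUE;
    }
    pthread_mutex_unlock(&client->lock);

    return ret;
}

Nothing in the body touches rcc, which is why the parameter is not needed.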

Pavel
> 
> > ---
> >  server/red-channel.c | 35 +++++++++++++++++++++++------------
> >  1 file changed, 23 insertions(+), 12 deletions(-)
> > 
> > diff --git a/server/red-channel.c b/server/red-channel.c
> > index cdd1cf0..c1e3dad 100644
> > --- a/server/red-channel.c
> > +++ b/server/red-channel.c
> > @@ -939,23 +939,35 @@ error:
> >      return NULL;
> >  }
> >  
> > -static void red_channel_client_seamless_migration_done(RedChannelClient *rcc)
> > +/* returns TRUE if all channels are finished migrating, FALSE otherwise */
> > +static gboolean red_client_seamless_migration_done_for_channel(RedClient *client,
> > +                                                               RedChannelClient *rcc)
> >  {
> > -    RedsState *reds = red_channel_get_server(rcc->channel);
> > -    rcc->wait_migrate_data = FALSE;
> > -
> > -    pthread_mutex_lock(&rcc->client->lock);
> > -    rcc->client->num_migrated_channels--;
> > +    gboolean ret = FALSE;
> >  
> > +    pthread_mutex_lock(&client->lock);
> > +    client->num_migrated_channels--;
> >      /* we assume we always have at least one channel who has migration data transfer,
> >       * otherwise, this flag will never be set back to FALSE*/
> > -    if (!rcc->client->num_migrated_channels) {
> > -        rcc->client->during_target_migrate = FALSE;
> > -        rcc->client->seamless_migrate = FALSE;
> > +    if (!client->num_migrated_channels) {
> > +        client->during_target_migrate = FALSE;
> > +        client->seamless_migrate = FALSE;
> >          /* migration completion might have been triggered from a different thread
> >           * than the main thread */
> > -        main_dispatcher_seamless_migrate_dst_complete(reds_get_main_dispatcher(reds),
> > -                                                      rcc->client);
> > +        main_dispatcher_seamless_migrate_dst_complete(reds_get_main_dispatcher(client->reds),
> > +                                                      client);
> > +        ret = TRUE;
> > +    }
> > +    pthread_mutex_unlock(&client->lock);
> > +
> > +    return ret;
> > +}
> > +
> > +static void red_channel_client_seamless_migration_done(RedChannelClient *rcc)
> > +{
> > +    rcc->wait_migrate_data = FALSE;
> > +
> > +    if (red_client_seamless_migration_done_for_channel(rcc->client, rcc)) {
> >          if (rcc->latency_monitor.timer) {
> >              red_channel_client_start_ping_timer(rcc, PING_TEST_IDLE_NET_TIMEOUT_MS);
> >          }
> > @@ -964,7 +976,6 @@ static void red_channel_client_seamless_migration_done(RedChannelClient *rcc)
> >                                              rcc->connectivity_monitor.timeout);
> >          }
> >      }
> > -    pthread_mutex_unlock(&rcc->client->lock);
> >  }
> >  
> >  int red_channel_client_is_waiting_for_migrate_data(RedChannelClient *rcc)
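
To see the net effect without the diff noise, the post-patch caller reads
roughly as follows (reconstructed from the hunks above; the
if (rcc->connectivity_monitor.timer) line and the
red_channel_client_start_connectivity_monitoring call fall in the gap between
the two hunks and are filled in from the surrounding red-channel.c context, so
treat those two lines as an assumption):

static void red_channel_client_seamless_migration_done(RedChannelClient *rcc)
{
    rcc->wait_migrate_data = FALSE;

    /* the helper returns TRUE only for the last channel to finish migrating,
     * so the timers below are restarted exactly once - and, unlike before
     * the patch, outside of client->lock */
    if (red_client_seamless_migration_done_for_channel(rcc->client, rcc)) {
        if (rcc->latency_monitor.timer) {
            red_channel_client_start_ping_timer(rcc, PING_TEST_IDLE_NET_TIMEOUT_MS);
        }
        if (rcc->connectivity_monitor.timer) {
            red_channel_client_start_connectivity_monitoring(rcc,
                                            rcc->connectivity_monitor.timeout);
        }
    }
}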

