[Spice-devel] [RFCv5 34/47] server/red_worker: copy and free new surfaces after first client
Alon Levy
alevy at redhat.com
Sun May 8 06:11:30 PDT 2011
---
server/red_worker.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++++---
1 files changed, 137 insertions(+), 7 deletions(-)
diff --git a/server/red_worker.c b/server/red_worker.c
index c8770d5..25738a3 100644
--- a/server/red_worker.c
+++ b/server/red_worker.c
@@ -1503,15 +1503,16 @@ static SurfaceDestroyItem *get_surface_destroy_item(RedChannel *channel,
return destroy;
}
-static inline void red_destroy_surface_item(RedWorker *worker,
- DisplayChannelClient *dcc, uint32_t surface_id)
+static inline void red_destroy_surface_item(DisplayChannelClient *dcc, uint32_t surface_id)
{
SurfaceDestroyItem *destroy;
RedChannel *channel;
+ RedWorker *worker;
if (!dcc || !dcc->surface_client_created[surface_id]) {
return;
}
+ worker = DCC_TO_WORKER(dcc);
dcc->surface_client_created[surface_id] = FALSE;
channel = &worker->display_channel->common.base;
destroy = get_surface_destroy_item(channel, surface_id);
@@ -1556,7 +1557,7 @@ static inline void red_destroy_surface(RedRender *render, uint32_t surface_id)
region_destroy(&surface->draw_dirty_region);
surface->context.canvas = NULL;
- red_destroy_surface_item(worker, render->dcc, surface_id);
+ red_destroy_surface_item(render->dcc, surface_id);
PANIC_ON(!ring_is_empty(&surface->depend_on_me));
}
@@ -8646,6 +8647,102 @@ static void init_render(RedRender *render, RedWorker *worker)
render->worker = worker;
}
+static void free_render(RedWorker *worker, RedRender *render)
+{
+ RingItem *link;
+ RingItem *next;
+ int count = 0;
+ int i;
+
+ // TODO: this prevents remove_drawable from sending anything. Is that really
+ // required?
+ render->dcc = NULL;
+
+ RING_FOREACH_SAFE(link, next, &render->current_list) {
+ Drawable *drawable = SPICE_CONTAINEROF(link, Drawable, list_link);
+ remove_drawable(render, drawable);
+ count++;
+ }
+ red_printf("released %d drawables", count);
+ // TODO - we should ensure the reference counts on the surfaces
+ // have zeroed, and that there are no contexts
+ for (i = 0 ; i < render->n_surfaces ; ++i) {
+ RedSurface *s = &render->surfaces[i];
+ if (s->refs == 0) {
+ continue;
+ }
+ ASSERT(s->context.canvas);
+ red_destroy_surface(render, i);
+ }
+ free(render);
+}
+
+static RedRender *copy_render(DisplayChannelClient *dcc, RedRender *orig)
+{
+ int i;
+ RedRender *render = spice_malloc0(sizeof(RedRender));
+
+ init_render(render, orig->worker);
+ dcc->common.render = render;
+ render->dcc = dcc;
+ render->n_surfaces = orig->n_surfaces;
+ render->image_surfaces = orig->image_surfaces; // just ops table
+
+ // must init streams before rest of copy, since red_add_current tries
+ // to attach to stream.
+ red_init_streams(dcc->common.render); // TODO - move surfaces to dcc
+ red_display_client_init_streams(dcc);
+
+ for (i = 0 ; i < orig->n_surfaces ; ++i) {
+ RedSurface *s = &orig->surfaces[i];
+        if (s->refs == 0) { // stop when we have reached the last surface,
+                            // TODO - any better way than refs?
+ continue;
+ }
+ ASSERT(s->context.canvas);
+ red_create_surface(render, i,
+ s->context.width, s->context.height, s->context.stride,
+ s->context.format, s->context.line_0,
+ TRUE /* surface is valid */, s->release_info);
+ }
+#ifdef PIPE_DEBUG
+ render->last_id = 0; // orig->last_id; // TODO: does this make any sense?
+#endif
+ // clone the current_list, adding references to everything
+ //current_list - we update all current_list's (both the main
+ //and the per surface one) in this loop:
+ //TODO: we definitely need to copy all the surfaces, but how do we copy surface trees?
+ // A. replay all current operations (which are by definition ordered from oldest to
+ // newest). This is what we currently do.
+ // B. flatten each tree to a surface image, and send that. That's what we actually
+ // send to the client. Any reason to have the new client's tree equal to the
+ // worker tree?
+ RingItem *link;
+ int count = 0;
+ RING_FOREACH(link, &orig->current_list) {
+ Drawable *orig_drawable = SPICE_CONTAINEROF(link, Drawable, list_link);
+ uint32_t group_id = orig_drawable->group_id;
+ RedDrawable *red_drawable = orig_drawable->red_drawable;
+ red_process_drawable_surfaces(render, red_drawable,
+ group_id);
+ count++;
+ }
+ ASSERT(count == orig->drawable_count);
+ ASSERT(render->current_size <= orig->current_size);
+ ASSERT(render->drawable_count <= orig->drawable_count);
+ ASSERT(render->transparent_count <= orig->transparent_count);
+ ASSERT(render->shadows_count <= orig->shadows_count);
+ ASSERT(render->containers_count <= orig->containers_count);
+ red_printf("current/drawable/transparent/shadows/containers: "
+ "(%d,%d,%d,%d,%d)->(%d,%d,%d,%d,%d)",
+ orig->current_size, orig->drawable_count, orig->transparent_count,
+ orig->shadows_count, orig->containers_count,
+ render->current_size, render->drawable_count,
+ render->transparent_count, render->shadows_count,
+ render->containers_count);
+ return render;
+}
+
static void display_channel_client_disconnect(RedChannelClient *rcc)
{
// TODO: MC: right now we assume single channel
@@ -8677,7 +8774,36 @@ static void display_channel_client_disconnect(RedChannelClient *rcc)
free(dcc->send_data.free_list.res);
red_display_destroy_streams(dcc);
red_channel_client_pipe_clear(rcc); // do this before deleting surfaces
- worker->render.dcc = NULL;
+ if (dcc->common.render != &worker->render) {
+ free_render(worker, dcc->common.render);
+ } else {
+ // if this isn't the last client, free one client's surfaces
+ // and let it have the worker's surfaces (this makes it the "primary",
+        // and we should (TODO) let it know that new channels like sound
+        // are now available. I wonder what the client will do :).
+ if (display_channel->common.base.clients_num > 1) {
+            // since this may have happened already, the client holding the
+            // worker surfaces can be anybody; the clients ring is doubly
+            // linked, so either ring_next or ring_prev of us is the other.
+            // NOTE(review): SPICE_CONTAINEROF(NULL,..) != NULL - see check below.
+ red_printf("swapping surfaces");
+ RingItem *dcc_link = &dcc->common.base.channel_link;
+ Ring *clients = &display_channel->common.base.clients;
+ DisplayChannelClient *other = SPICE_CONTAINEROF(
+ ring_next(clients, dcc_link), DisplayChannelClient,
+ common.base.channel_link);
+ if (other == NULL) {
+ other = SPICE_CONTAINEROF(ring_prev(clients, dcc_link),
+ DisplayChannelClient, common.base.channel_link);
+ }
+ ASSERT(other != dcc && other != NULL);
+ free_render(worker, other->common.render);
+ worker->render.dcc = other;
+ other->common.render = &worker->render;
+ } else {
+ worker->render.dcc = NULL;
+ }
+ }
red_channel_client_disconnect(rcc);
}
@@ -9768,9 +9894,13 @@ static void handle_new_display_channel(RedWorker *worker, RedClient *client, Red
if (!listen_to_new_client_channel(&display_channel->common, &dcc->common, stream)) {
goto error;
}
- dcc->common.render = &worker->render;
- dcc->common.render->dcc = dcc;
- red_display_client_init_streams(dcc);
+ if (display_channel->common.base.clients_num == 1) {
+ dcc->common.render = &worker->render;
+ dcc->common.render->dcc = dcc;
+ red_display_client_init_streams(dcc);
+ } else {
+ copy_render(dcc, &worker->render);
+ }
on_new_display_channel_client(dcc);
return;
--
1.7.5.1
More information about the Spice-devel
mailing list