[Spice-devel] [PATCH x11spice] Add cache for SHM segments
Frediano Ziglio
fziglio at redhat.com
Mon Jul 15 09:40:41 UTC 2019
>
> Add a cache to allow the reuse of SHM segments.
> Shared memory segments are added to the cache instead of being
> deallocated, and the cache is searched instead of/before allocating a
> new segment.
>
> Both the SHM segments and their attachment with the X server are cached.
>
> The cache currently has a fixed size of 10 entries; this provided a
> good cache hit rate while keeping memory usage under control.
> In my testing, over 90% of requests for SHM segments were being
> satisfied from the cache.
>
"In my testing", could you detail these testing? Like the kind of system
and application you were using? How did you compute this 90%?
> Signed-off-by: Brendan Shanks <bshanks at codeweavers.com>
> ---
> src/display.c | 172 +++++++++++++++++++++++++++++++++++++++++++-------
> src/display.h | 12 ++++
> 2 files changed, 162 insertions(+), 22 deletions(-)
>
> diff --git a/src/display.c b/src/display.c
> index 01e0e85..346d310 100644
> --- a/src/display.c
> +++ b/src/display.c
> @@ -213,11 +213,125 @@ static int register_for_events(display_t *d)
> return 0;
> }
>
> +static void shm_cache_entry_destroy(display_t *d, shm_cache_entry_t *entry)
> +{
> + if (entry->shmid == -1)
> + return;
Style: always use braces. Here and on many other lines.
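That is, something like:

    if (entry->shmid == -1) {
        return;
    }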
> +
> + xcb_shm_detach(d->c, entry->shmseg);
> + shmdt(entry->shmaddr);
I would add an "entry->shmaddr = NULL" (or "entry->shmaddr = (void *) -1") here
to avoid leaving a dangling pointer.
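Roughly (untested):

    xcb_shm_detach(d->c, entry->shmseg);
    shmdt(entry->shmaddr);
    entry->shmaddr = (void *) -1;   /* don't leave a dangling pointer behind */
    shmctl(entry->shmid, IPC_RMID, NULL);
    entry->shmid = -1;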
> + shmctl(entry->shmid, IPC_RMID, NULL);
> + entry->shmid = -1;
> +}
> +
> +static int shm_cache_get(display_t *d, int size, shm_image_t *shmi)
> +{
> + int i;
> +
> + g_mutex_lock(&d->shm_cache_mutex);
> +
> + /* First check for a cache entry of the exact size being requested */
> + for (i = 0; i < sizeof(d->shm_cache) / sizeof(d->shm_cache[0]); i++) {
Use G_N_ELEMENTS?
> + shm_cache_entry_t *entry = &d->shm_cache[i];
> +
> + if (entry->shmid == -1 || size != entry->size)
> + continue;
Why not also compute the first bigger entry in this same loop, so that if the
exact-size search fails we don't need to scan the list again?
I would add a shm_cache_entry_t pointer to store the found item, too.
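Something along these lines, in a single pass (untested sketch, also using
G_N_ELEMENTS as suggested above):

    static int shm_cache_get(display_t *d, int size, shm_image_t *shmi)
    {
        int i;
        shm_cache_entry_t *found = NULL;

        g_mutex_lock(&d->shm_cache_mutex);

        for (i = 0; i < G_N_ELEMENTS(d->shm_cache); i++) {
            shm_cache_entry_t *entry = &d->shm_cache[i];

            /* Skip free entries and entries that are too small */
            if (entry->shmid == -1 || entry->size < size) {
                continue;
            }
            if (entry->size == size) {
                found = entry;          /* exact fit, stop searching */
                break;
            }
            if (!found) {
                found = entry;          /* remember the first big-enough entry */
            }
        }

        if (!found) {
            g_mutex_unlock(&d->shm_cache_mutex);
            return 0;
        }

        shmi->shmid = found->shmid;
        shmi->shmseg = found->shmseg;
        shmi->shmaddr = found->shmaddr;
        shmi->shmsize = found->size;
        found->shmid = -1;              /* mark the cache entry as free */

        g_mutex_unlock(&d->shm_cache_mutex);
        return 1;
    }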
> +
> + shmi->shmid = entry->shmid;
> + shmi->shmseg = entry->shmseg;
> + shmi->shmaddr = entry->shmaddr;
> + shmi->shmsize = entry->size;
Maybe these fields could go in a common structure, so we could copy that
structure instead?
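For instance (just a sketch, the names are only suggestions):

    typedef struct {
        int shmid;
        int shmsize;
        xcb_shm_seg_t shmseg;
        void *shmaddr;
    } shm_segment_t;

Both shm_image_t and shm_cache_entry_t could embed one of these, and the copy
here would become a single assignment, e.g. shmi->seg = entry->seg;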
> + entry->shmid = -1;
> +
> + g_mutex_unlock(&d->shm_cache_mutex);
> + return 1;
> + }
> +
> + /* An exact-size entry wasn't found, look for the first entry that's big enough */
> + for (i = 0; i < sizeof(d->shm_cache) / sizeof(d->shm_cache[0]); i++) {
> + shm_cache_entry_t *entry = &d->shm_cache[i];
> +
> + if (entry->shmid == -1 || size > entry->size)
> + continue;
> +
> + shmi->shmid = entry->shmid;
> + shmi->shmseg = entry->shmseg;
> + shmi->shmaddr = entry->shmaddr;
> + shmi->shmsize = entry->size;
> + entry->shmid = -1;
> +
> + g_mutex_unlock(&d->shm_cache_mutex);
> + return 1;
> + }
> +
> + /* No usable entry found in the cache */
> + g_mutex_unlock(&d->shm_cache_mutex);
> + return 0;
> +}
> +
> +static int shm_cache_add(display_t *d, shm_image_t *shmi)
> +{
> + int i;
> +
> + g_mutex_lock(&d->shm_cache_mutex);
> +
> + /* Search cache and use the first empty entry */
> + for (i = 0; i < sizeof(d->shm_cache) / sizeof(d->shm_cache[0]); i++) {
> + shm_cache_entry_t *entry = &d->shm_cache[i];
> +
> + if (entry->shmid != -1)
> + continue;
> +
> + entry->shmid = shmi->shmid;
> + entry->shmseg = shmi->shmseg;
> + entry->shmaddr = shmi->shmaddr;
> + entry->size = shmi->shmsize;
> +
Similar considerations apply here, both for this function and for shm_cache_get.
> + g_mutex_unlock(&d->shm_cache_mutex);
> + return 1;
> + }
> +
> + /* Cache is full. Search again, but this time evict the first entry smaller than the one being added */
> + /* TODO: a possible optimization: remove the smallest entry in the cache, rather than just the first smaller one */
> + for (i = 0; i < sizeof(d->shm_cache) / sizeof(d->shm_cache[0]); i++) {
> + shm_cache_entry_t *entry = &d->shm_cache[i];
> +
> + if (entry->shmid != -1 && entry->size < shmi->shmsize) {
> + shm_cache_entry_destroy(d, entry);
> +
> + entry->shmid = shmi->shmid;
> + entry->shmseg = shmi->shmseg;
> + entry->shmaddr = shmi->shmaddr;
> + entry->size = shmi->shmsize;
> +
> + g_mutex_unlock(&d->shm_cache_mutex);
> + return 1;
> + }
> + }
> +
> + /* Cache is full, and contained no entries smaller than the one being added */
> + g_mutex_unlock(&d->shm_cache_mutex);
> + return 0;
> +}
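About the TODO above, evicting the smallest entry instead could look roughly
like this (untested, replacing the second loop):

    shm_cache_entry_t *smallest = NULL;

    /* Find the smallest cached entry that is smaller than the new one */
    for (i = 0; i < G_N_ELEMENTS(d->shm_cache); i++) {
        shm_cache_entry_t *entry = &d->shm_cache[i];

        if (entry->shmid != -1 && entry->size < shmi->shmsize &&
            (!smallest || entry->size < smallest->size)) {
            smallest = entry;
        }
    }

    if (smallest) {
        shm_cache_entry_destroy(d, smallest);

        smallest->shmid = shmi->shmid;
        smallest->shmseg = shmi->shmseg;
        smallest->shmaddr = shmi->shmaddr;
        smallest->size = shmi->shmsize;

        g_mutex_unlock(&d->shm_cache_mutex);
        return 1;
    }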
> +
> +static void shm_cache_destroy(display_t *d)
> +{
> + int i;
> +
> + g_mutex_lock(&d->shm_cache_mutex);
> + for (i = 0; i < sizeof(d->shm_cache) / sizeof(d->shm_cache[0]); i++) {
> + shm_cache_entry_t *entry = &d->shm_cache[i];
> +
> + shm_cache_entry_destroy(d, entry);
> + }
> + g_mutex_unlock(&d->shm_cache_mutex);
> +}
>
> int display_open(display_t *d, session_t *session)
> {
> int scr;
> int rc;
> + int i;
> xcb_damage_query_version_cookie_t dcookie;
> xcb_damage_query_version_reply_t *damage_version;
> xcb_xkb_use_extension_cookie_t use_cookie;
> @@ -314,6 +428,11 @@ int display_open(display_t *d, session_t *session)
> if (rc)
> return rc;
>
> + g_mutex_init(&d->shm_cache_mutex);
> + for (i = 0; i < sizeof(d->shm_cache) / sizeof(d->shm_cache[0]); i++) {
> + d->shm_cache[i].shmid = -1;
> + }
> +
> rc = display_create_screen_images(d);
>
> g_message("Display %s opened", session->options.display ?
> session->options.display : "");
> @@ -349,25 +468,29 @@ shm_image_t *create_shm_image(display_t *d, int w, int h)
> shmi->bytes_per_line = (bits_per_pixel(d) / 8) * shmi->w;
> imgsize = shmi->bytes_per_line * shmi->h;
>
> - shmi->shmid = shmget(IPC_PRIVATE, imgsize, IPC_CREAT | 0700);
> - if (shmi->shmid != -1)
> - shmi->shmaddr = shmat(shmi->shmid, 0, 0);
> - if (shmi->shmid == -1 || shmi->shmaddr == (void *) -1) {
> - g_warning("Cannot get shared memory of size %d; errno %d", imgsize,
> errno);
> - free(shmi);
> - return NULL;
> - }
> - /* We tell shmctl to detach now; that prevents us from holding this
> - shared memory segment forever in case of abnormal process exit. */
> - shmctl(shmi->shmid, IPC_RMID, NULL);
> -
> - shmi->shmseg = xcb_generate_id(d->c);
> - cookie = xcb_shm_attach_checked(d->c, shmi->shmseg, shmi->shmid, 0);
> - error = xcb_request_check(d->c, cookie);
> - if (error) {
> - g_warning("Could not attach; type %d; code %d; major %d; minor
> %d\n",
> - error->response_type, error->error_code, error->major_code,
> error->minor_code);
> - return NULL;
> + if (!shm_cache_get(d, imgsize, shmi)) {
> + /* No usable shared memory segment found in cache, allocate a new one */
> + shmi->shmid = shmget(IPC_PRIVATE, imgsize, IPC_CREAT | 0700);
> + if (shmi->shmid != -1)
> + shmi->shmaddr = shmat(shmi->shmid, 0, 0);
> + if (shmi->shmid == -1 || shmi->shmaddr == (void *) -1) {
> + g_warning("Cannot get shared memory of size %d; errno %d",
> imgsize, errno);
> + free(shmi);
> + return NULL;
> + }
> + /* We tell shmctl to detach now; that prevents us from holding this
> + shared memory segment forever in case of abnormal process exit. */
> + shmctl(shmi->shmid, IPC_RMID, NULL);
> + shmi->shmsize = imgsize;
> +
> + shmi->shmseg = xcb_generate_id(d->c);
> + cookie = xcb_shm_attach_checked(d->c, shmi->shmseg, shmi->shmid, 0);
> + error = xcb_request_check(d->c, cookie);
> + if (error) {
> + g_warning("Could not attach; type %d; code %d; major %d; minor
> %d\n",
> + error->response_type, error->error_code,
> error->major_code, error->minor_code);
> + return NULL;
> + }
> }
>
> return shmi;
> @@ -451,9 +574,12 @@ void display_copy_image_into_fullscreen(display_t *d, shm_image_t *shmi, int x,
>
> void destroy_shm_image(display_t *d, shm_image_t *shmi)
> {
> - xcb_shm_detach(d->c, shmi->shmseg);
> - shmdt(shmi->shmaddr);
> - shmctl(shmi->shmid, IPC_RMID, NULL);
> + if (!shm_cache_add(d, shmi)) {
> + /* Could not add to cache, destroy this segment */
> + xcb_shm_detach(d->c, shmi->shmseg);
> + shmdt(shmi->shmaddr);
> + shmctl(shmi->shmid, IPC_RMID, NULL);
> + }
> if (shmi->drawable_ptr)
> free(shmi->drawable_ptr);
> free(shmi);
> @@ -503,6 +629,8 @@ void display_stop_event_thread(display_t *d)
>
> void display_close(display_t *d)
> {
> + shm_cache_destroy(d);
> + g_mutex_clear(&d->shm_cache_mutex);
> xcb_damage_destroy(d->c, d->damage);
> display_destroy_screen_images(d);
> xcb_disconnect(d->c);
> diff --git a/src/display.h b/src/display.h
> index dc4254b..175512e 100644
> --- a/src/display.h
> +++ b/src/display.h
> @@ -21,6 +21,7 @@
> #ifndef DISPLAY_H_
> #define DISPLAY_H_
>
> +#include <glib.h>
> #include <xcb/xcb.h>
> #include <xcb/damage.h>
> #include <xcb/shm.h>
> @@ -39,8 +40,16 @@ typedef struct {
> xcb_shm_seg_t shmseg;
> void *shmaddr;
> void *drawable_ptr;
> + int shmsize;
> } shm_image_t;
>
> +typedef struct {
> + int shmid;
I would document that if this field is -1 the entry is "free".
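For example:

    int shmid;      /* -1 means this cache entry is free/unused */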
> + int size;
Can the size be negative?
> + xcb_shm_seg_t shmseg;
> + void *shmaddr;
> +} shm_cache_entry_t;
> +
> typedef struct {
> xcb_connection_t *c;
> xcb_window_t root;
> @@ -58,6 +67,9 @@ typedef struct {
> shm_image_t *fullscreen;
> shm_image_t *scanline;
>
> + shm_cache_entry_t shm_cache[10];
> + GMutex shm_cache_mutex;
> +
> pthread_t event_thread;
> struct session_struct *session;
> } display_t;
Frediano