[cairo] glyph caching bug
TOKUNAGA Hiroyuki
tkng at xem.jp
Thu Dec 30 02:01:31 PST 2004
On Wed, 29 Dec 2004 15:05:24 -0800
Keith Packard <keithp at keithp.com> wrote:
> Around 7 o'clock on Dec 30, TOKUNAGA Hiroyuki wrote:
>
> > _random_live_entry is called when we want to destroy a cache entry to
> > keep the cache within its size limit. So if this function fails, we
> > wouldn't be able to keep the cache size down.
>
> Right, it will only fail when all of the cache entries are locked, in
> which case the cache will (temporarily) need to grow larger than the
> limit. There's not much we can do about that at the cache level; the
> correct fix would be to change the upper levels to not lock down so
> many cache entries.
I see. The attached patch locks each cache entry and makes
_random_live_entry fail when live_entries == locked_entries.
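For reference, here is a standalone toy sketch of the bookkeeping (the toy_*
names are illustrative only; the real types and functions are in the attached
diff): each entry carries a lock_count, the cache counts how many live
entries are locked, and the random-eviction helper gives up when every live
entry is locked.

/* Standalone toy model of the lock-count scheme; illustrative only. */
#include <assert.h>
#include <stdlib.h>

typedef struct {
    unsigned long memory;          /* size charged to the cache */
    unsigned int  lock_count;      /* >0 means "do not evict" */
} toy_entry_t;

typedef struct {
    toy_entry_t  **entries;
    unsigned long  size;           /* number of hash slots */
    unsigned long  live_entries;
    unsigned long  locked_entries; /* live entries with lock_count > 0 */
    unsigned long  max_memory;
    unsigned long  used_memory;
} toy_cache_t;

static void
toy_lock_entry (toy_cache_t *cache, toy_entry_t *e)
{
    if (e->lock_count == 0)
        cache->locked_entries++;
    e->lock_count++;
}

static void
toy_unlock_entry (toy_cache_t *cache, toy_entry_t *e)
{
    assert (e->lock_count > 0);
    e->lock_count--;
    if (e->lock_count == 0)
        cache->locked_entries--;
}

/* Pick a random live, unlocked slot to evict; return -1 ("fail") when
 * every live entry is locked, so the caller has to skip eviction. */
static long
toy_random_live_unlocked_entry (toy_cache_t *cache)
{
    unsigned long idx;

    if (cache->live_entries == 0 ||
        cache->live_entries == cache->locked_entries)
        return -1;

    do {
        idx = rand () % cache->size;
    } while (cache->entries[idx] == NULL ||
             cache->entries[idx]->lock_count > 0);

    return (long) idx;
}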
> > A little while ago I wondered: what happens if we use more glyphs
> > than the maximum cache size allows?
>
> Then we temporarily expand the cache to cover the demand. We might
> consider doing some working set analysis to find an optimum cache size
> at run time, but I suggest we wait on that and just get the simple
> code working first.
I implemented a simple lock patch. It should work, but it doesn't consider
such a situation at all.
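Continuing the toy sketch above (again illustrative only, not the real cairo
code), the insertion path under memory pressure would look something like
this: evict random unlocked entries until the new entry fits, and when every
live entry is locked, stop evicting and let used_memory temporarily exceed
max_memory, as you describe.

/* Continuation of the toy model: insertion under memory pressure. When
 * every live entry is locked nothing can be evicted, so the cache grows
 * past max_memory for a while; the overshoot goes away once callers
 * unlock their entries again. */
static void
toy_make_room_for (toy_cache_t *cache, unsigned long new_memory)
{
    while (cache->used_memory + new_memory > cache->max_memory) {
        long idx = toy_random_live_unlocked_entry (cache);
        if (idx < 0)
            break;                      /* all live entries are locked */
        cache->used_memory -= cache->entries[idx]->memory;
        free (cache->entries[idx]);
        cache->entries[idx] = NULL;
        cache->live_entries--;
    }
    /* used_memory + new_memory may still exceed max_memory here. */
}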
> > > This doesn't appear to handle the case of multiple threads looking
> > > up the same element in the cache simultaneously -- they'll both
> > > miss, and then they'll both create the element and register it.
> >
> > Yes, such a thing will happen. One will be registered, another will
> > not be registered.
>
> Oh, that would work, and would let us leave the cache unlocked for
> more of the time. We could also do this entirely within the nominally
> 'atomic' cache architecture we have now. I suggest we leave those
> details hidden behind the existing API and see how things look when
> it is working.
OK, later I'll implement my proposal within the current architecture. That
will also resolve the cache expansion problem above.
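Roughly, the proposal in standalone toy form (all names here are
hypothetical, nothing like this exists in cairo yet): after a miss each
thread may build its own entry, but registration re-checks the table under a
mutex, keeps whichever copy arrived first and discards the duplicate.

/* Toy sketch of "one will be registered, another will not": a freshly
 * created item is stored only after re-checking the table under the
 * mutex; a duplicate built by a racing thread is simply thrown away.
 * Collision handling is ignored in this toy. */
#include <pthread.h>
#include <stdlib.h>

#define TOY_TABLE_SIZE 64

typedef struct {
    unsigned long key;
    /* ... glyph image, metrics, ... */
} toy_item_t;

static toy_item_t      *toy_table[TOY_TABLE_SIZE];
static pthread_mutex_t  toy_mutex = PTHREAD_MUTEX_INITIALIZER;

static toy_item_t *
toy_register (toy_item_t *created)
{
    unsigned long slot = created->key % TOY_TABLE_SIZE;
    toy_item_t *existing;

    pthread_mutex_lock (&toy_mutex);
    existing = toy_table[slot];
    if (existing != NULL && existing->key == created->key) {
        /* Lost the race: keep the entry that is already registered
         * and discard our duplicate. */
        pthread_mutex_unlock (&toy_mutex);
        free (created);
        return existing;
    }
    toy_table[slot] = created;          /* won: our copy is registered */
    pthread_mutex_unlock (&toy_mutex);
    return created;
}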
Regards,
--
TOKUNAGA Hiroyuki
tkng at xem.jp
http://kodou.net/
-------------- next part --------------
Index: cairo_cache.c
===================================================================
RCS file: /cvs/cairo/cairo/src/cairo_cache.c,v
retrieving revision 1.5
diff -u -r1.5 cairo_cache.c
--- cairo_cache.c 20 Dec 2004 17:43:59 -0000 1.5
+++ cairo_cache.c 30 Dec 2004 09:32:30 -0000
@@ -112,6 +112,8 @@
#define DEAD_ENTRY_P(cache, i) ((cache)->entries[i] == DEAD_ENTRY)
#define LIVE_ENTRY_P(cache, i) \
(!((NULL_ENTRY_P((cache),(i))) || (DEAD_ENTRY_P((cache),(i)))))
+#define LOCKED_ENTRY_P(cache, i) \
+ (((cairo_cache_entry_base_t *) ((cache)->entries[i]))->lock_count > 0)
#ifdef CAIRO_DO_SANITY_CHECKING
static void
@@ -121,7 +123,8 @@
assert (cache->entries != NULL);
assert (cache->backend != NULL);
assert (cache->arrangement != NULL);
- assert (cache->used_memory <= cache->max_memory);
+ if (cache->locked_entries == 0)
+ assert (cache->used_memory <= cache->max_memory);
assert (cache->live_entries <= cache->arrangement->size);
}
#else
@@ -282,13 +285,14 @@
#endif
static unsigned long
-_random_live_entry (cairo_cache_t *cache)
+_random_live_unlocked_entry (cairo_cache_t *cache)
{
unsigned long idx;
assert(cache != NULL);
do {
idx = rand () % cache->arrangement->size;
- } while (! LIVE_ENTRY_P(cache, idx));
+ } while ((!LIVE_ENTRY_P(cache, idx)) ||
+ (LOCKED_ENTRY_P(cache, idx)));
return idx;
}
@@ -308,6 +312,7 @@
cache->max_memory = max_memory;
cache->used_memory = 0;
cache->live_entries = 0;
+ cache->locked_entries = 0;
#ifdef CAIRO_MEASURE_CACHE_PERFORMANCE
cache->hits = 0;
@@ -410,15 +415,17 @@
new_entry->hashcode = cache->backend->hash (cache, key);
/* Make some entries die if we're under memory pressure. */
- while (cache->live_entries > 0 &&
- ((cache->max_memory - cache->used_memory) < new_entry->memory)) {
- idx = _random_live_entry (cache);
- assert (idx < cache->arrangement->size);
- _entry_destroy (cache, idx);
+ if (cache->locked_entries != cache->live_entries &&
+ cache->live_entries > 0) {
+ while ((cache->max_memory - cache->used_memory) < new_entry->memory) {
+ idx = _random_live_unlocked_entry (cache);
+ assert (idx < cache->arrangement->size);
+ _entry_destroy (cache, idx);
+ }
+ assert(cache->max_memory >= (cache->used_memory + new_entry->memory));
}
-
- assert(cache->max_memory >= (cache->used_memory + new_entry->memory));
-
+
+
/* Make room in the table for a new slot. */
status = _resize_cache (cache, cache->live_entries + 1);
if (status != CAIRO_STATUS_SUCCESS) {
@@ -432,6 +439,7 @@
/* Store entry in slot and increment statistics. */
*slot = new_entry;
+ new_entry->lock_count = 0;
cache->live_entries++;
cache->used_memory += new_entry->memory;
@@ -440,6 +448,34 @@
return status;
}
+/*
+ * _cairo_cache_lookup may destroy existing entries to stay under the
+ * memory limit. That is a problem when several entries must be used at
+ * the same time: an entry just looked up may be destroyed before it is
+ * used. Locking an entry with this function prevents _cairo_cache_lookup
+ * from destroying it. Lock each entry you need before making further
+ * calls to _cairo_cache_lookup, and unlock it again when you are done.
+ */
+void
+_cairo_cache_lock_entry (cairo_cache_t *cache, void *entry)
+{
+ cairo_cache_entry_base_t *e = (cairo_cache_entry_base_t *) entry;
+ if (e->lock_count == 0) {
+ cache->locked_entries++;
+ }
+ e->lock_count++;
+}
+
+void
+_cairo_cache_unlock_entry (cairo_cache_t *cache, void *entry)
+{
+ cairo_cache_entry_base_t *e = (cairo_cache_entry_base_t *) entry;
+ e->lock_count--;
+ if(e->lock_count == 0) {
+ cache->locked_entries--;
+ }
+}
+
unsigned long
_cairo_hash_string (const char *c)
{
Index: cairo_xlib_surface.c
===================================================================
RCS file: /cvs/cairo/cairo/src/cairo_xlib_surface.c,v
retrieving revision 1.31
diff -u -r1.31 cairo_xlib_surface.c
--- cairo_xlib_surface.c 21 Dec 2004 21:14:46 -0000 1.31
+++ cairo_xlib_surface.c 30 Dec 2004 09:32:30 -0000
@@ -959,13 +959,11 @@
static void
_lock_xlib_glyphset_caches (void)
{
- /* FIXME: implement locking */
}
static void
_unlock_xlib_glyphset_caches (void)
{
- /* FIXME: implement locking */
}
static glyphset_cache_t *
@@ -1291,7 +1289,7 @@
_lock_xlib_glyphset_caches ();
g = _get_glyphset_cache (self->dpy);
if (g == NULL)
- goto UNLOCK;
+ goto FREE_SRC;
/* Work out the index size to use. */
elt_size = 8;
@@ -1303,6 +1301,7 @@
status = _cairo_cache_lookup (&g->base, &key, (void **) (&entries[i]));
if (status != CAIRO_STATUS_SUCCESS || entries[i] == NULL)
goto UNLOCK;
+ _cairo_cache_lock_entry (&g->base, entries[i]);
if (elt_size == 8 && entries[i]->glyph > 0xff)
elt_size = 16;
@@ -1334,13 +1333,22 @@
cairo_surface_destroy (&(tmp->base));
}
+ for (i = 0; i < num_glyphs; ++i) {
+ _cairo_cache_unlock_entry (&g->base, entries[i]);
+ }
+
if (num_glyphs >= N_STACK_BUF)
free (entries);
return status;
UNLOCK:
- _unlock_xlib_glyphset_caches ();
+ for (i = 0; i < num_glyphs; ++i) {
+ if (entries[i] == NULL) {
+ break;
+ }
+ _cairo_cache_unlock_entry (&g->base, entries[i]);
+ }
FREE_SRC:
cairo_surface_destroy (&(src->base));
Index: cairoint.h
===================================================================
RCS file: /cvs/cairo/cairo/src/cairoint.h,v
retrieving revision 1.78
diff -u -r1.78 cairoint.h
--- cairoint.h 21 Dec 2004 21:22:44 -0000 1.78
+++ cairoint.h 30 Dec 2004 09:32:31 -0000
@@ -298,6 +298,7 @@
typedef struct {
unsigned long memory;
unsigned long hashcode;
+ unsigned int lock_count;
} cairo_cache_entry_base_t;
typedef struct {
@@ -317,6 +318,7 @@
unsigned long max_memory;
unsigned long used_memory;
unsigned long live_entries;
+ unsigned long locked_entries;
#ifdef CAIRO_MEASURE_CACHE_PERFORMANCE
unsigned long hits;
@@ -341,6 +343,12 @@
void *key,
void **entry_return);
+cairo_private void
+_cairo_cache_lock_entry (cairo_cache_t *cache, void *entry);
+
+cairo_private void
+_cairo_cache_unlock_entry (cairo_cache_t *cache, void *entry);
+
cairo_private unsigned long
_cairo_hash_string (const char *c);