[cairo-commit] 4 commits - src/cairo-bentley-ottmann.c src/cairo-compiler-private.h src/cairo-path-stroke.c src/cairo-wideint.c src/cairo-wideint-private.h test/Makefile.am
Chris Wilson
ickle at kemper.freedesktop.org
Mon Oct 6 04:53:28 PDT 2008
src/cairo-bentley-ottmann.c | 19 +-----------
src/cairo-compiler-private.h | 6 +++
src/cairo-path-stroke.c | 65 ++++++++++++++++++-------------------------
src/cairo-wideint-private.h | 12 ++++++-
src/cairo-wideint.c | 48 +++++++++++++++++++++++++++++++
test/Makefile.am | 13 ++++++++
6 files changed, 108 insertions(+), 55 deletions(-)
New commits:
commit c98c1cb5325bbc05c88ea350ff99d6eaf56fd339
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Oct 5 10:39:10 2008 +0100
[wideint] Mark functions as pure
Take advantage of the gcc function attribute 'pure', which tells gcc that
the function's result depends only on its arguments and that it has no side
effects (e.g. it does not clobber memory). This gives gcc greater
opportunity to rearrange and optimize the wideint arithmetic.
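(Illustration, not part of the commit: a minimal sketch of the attribute
pattern the patch adds, using the hypothetical names my_pure, clamp_byte and
twice_clamped.)

    #if defined(__GNUC__) && __GNUC__ >= 3
    # define my_pure __attribute__((pure))
    #else
    # define my_pure
    #endif

    /* The declaration carries the attribute, mirroring how the patch tags
     * the wideint prototypes via "#define I cairo_private cairo_pure". */
    static int my_pure clamp_byte (int v);

    static int
    clamp_byte (int v)
    {
        /* Result depends only on the argument; nothing is written. */
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    int
    twice_clamped (int v)
    {
        /* With 'pure', gcc may evaluate clamp_byte (v) once and reuse it. */
        return clamp_byte (v) + clamp_byte (v);
    }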
diff --git a/src/cairo-compiler-private.h b/src/cairo-compiler-private.h
index fc41645..d772f77 100644
--- a/src/cairo-compiler-private.h
+++ b/src/cairo-compiler-private.h
@@ -111,6 +111,12 @@
# define CAIRO_FUNCTION_ALIAS(old, new)
#endif
+#if __GNUC__ >= 3
+#define cairo_pure __attribute__((pure))
+#else
+#define cairo_pure
+#endif
+
#ifndef __GNUC__
#undef __attribute__
#define __attribute__(x)
diff --git a/src/cairo-wideint-private.h b/src/cairo-wideint-private.h
index 5f34e2b..60c560f 100644
--- a/src/cairo-wideint-private.h
+++ b/src/cairo-wideint-private.h
@@ -47,7 +47,7 @@
* as a pair of 32-bit ints
*/
-#define I cairo_private
+#define I cairo_private cairo_pure
#if !HAVE_UINT64_T
commit 76dd4603d01068b1b377312ced6b44fe5419794f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Oct 5 10:15:49 2008 +0100
[tessellator] Replace open-coding of _cairo_int64_cmp().
We often use the construct:
    if (_cairo_int64_lt (A, B))
        return -1;
    if (_cairo_int64_gt (A, B))
        return 1;
    return 0;
to compare two large integers (int64 or int128), which does twice the
required work on CPUs without large integer support. So replace each such
block with a single wideint function, _cairo_int64_cmp(), giving us the
opportunity both to shrink the code size and to write a more efficient
comparison. (The primary motivation, though, is simply to replace each
block with a single, more expressive line.)
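(Illustration, not part of the commit: a hypothetical caller, assuming the
wideint declarations from cairo-wideint-private.h are in scope.)

    static int
    compare_areas (int32_t a_width, int32_t a_height,
                   int32_t b_width, int32_t b_height)
    {
        cairo_int64_t a_area = _cairo_int32x32_64_mul (a_width, a_height);
        cairo_int64_t b_area = _cairo_int32x32_64_mul (b_width, b_height);

        /* One call in place of the _cairo_int64_lt()/_cairo_int64_gt()
         * pair; returns -1, 0 or +1. */
        return _cairo_int64_cmp (a_area, b_area);
    }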
diff --git a/src/cairo-bentley-ottmann.c b/src/cairo-bentley-ottmann.c
index 28952dd..47ee833 100644
--- a/src/cairo-bentley-ottmann.c
+++ b/src/cairo-bentley-ottmann.c
@@ -201,21 +201,13 @@ _slope_compare (cairo_bo_edge_t *a,
* with respect to x. */
if ((adx ^ bdx) < 0) {
return adx < 0 ? -1 : +1;
- }
- else {
+ } else {
int32_t ady = a->bottom.y - a->top.y;
int32_t bdy = b->bottom.y - b->top.y;
int64_t adx_bdy = _cairo_int32x32_64_mul (adx, bdy);
int64_t bdx_ady = _cairo_int32x32_64_mul (bdx, ady);
- /* if (adx * bdy > bdx * ady) */
- if (_cairo_int64_gt (adx_bdy, bdx_ady))
- return 1;
-
- /* if (adx * bdy < bdx * ady) */
- if (_cairo_int64_lt (adx_bdy, bdx_ady))
- return -1;
- return 0;
+ return _cairo_int64_cmp (adx_bdy, bdx_ady);
}
}
@@ -320,12 +312,7 @@ edge_compare_for_y_against_x (const cairo_bo_edge_t *a,
L = _cairo_int32x32_64_mul (dy, adx);
R = _cairo_int32x32_64_mul (dx, ady);
- /* return _cairo_int64_cmp (L, R); */
- if (_cairo_int64_lt (L, R))
- return -1;
- if (_cairo_int64_gt (L, R))
- return 1;
- return 0;
+ return _cairo_int64_cmp (L, R);
}
static int
diff --git a/src/cairo-wideint-private.h b/src/cairo-wideint-private.h
index 0738042..5f34e2b 100644
--- a/src/cairo-wideint-private.h
+++ b/src/cairo-wideint-private.h
@@ -61,6 +61,7 @@ cairo_uint64_t I _cairo_uint64_lsl (cairo_uint64_t a, int shift);
cairo_uint64_t I _cairo_uint64_rsl (cairo_uint64_t a, int shift);
cairo_uint64_t I _cairo_uint64_rsa (cairo_uint64_t a, int shift);
int I _cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b);
+int I _cairo_uint64_cmp (cairo_uint64_t a, cairo_uint64_t b);
int I _cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b);
cairo_uint64_t I _cairo_uint64_negate (cairo_uint64_t a);
#define _cairo_uint64_negative(a) (((int32_t) ((a).hi)) < 0)
@@ -75,7 +76,8 @@ cairo_int64_t I _cairo_int32_to_int64(int32_t i);
#define _cairo_int64_sub(a,b) _cairo_uint64_sub (a,b)
#define _cairo_int64_mul(a,b) _cairo_uint64_mul (a,b)
cairo_int64_t I _cairo_int32x32_64_mul (int32_t a, int32_t b);
-int I _cairo_int64_lt (cairo_uint64_t a, cairo_uint64_t b);
+int I _cairo_int64_lt (cairo_int64_t a, cairo_int64_t b);
+int I _cairo_int64_cmp (cairo_int64_t a, cairo_int64_t b);
#define _cairo_int64_eq(a,b) _cairo_uint64_eq (a,b)
#define _cairo_int64_lsl(a,b) _cairo_uint64_lsl (a,b)
#define _cairo_int64_rsl(a,b) _cairo_uint64_rsl (a,b)
@@ -96,6 +98,7 @@ int I _cairo_int64_lt (cairo_uint64_t a, cairo_uint64_t b);
#define _cairo_uint64_rsl(a,b) ((uint64_t) (a) >> (b))
#define _cairo_uint64_rsa(a,b) ((uint64_t) ((int64_t) (a) >> (b)))
#define _cairo_uint64_lt(a,b) ((a) < (b))
+#define _cairo_uint64_cmp(a,b) ((a) == (b) ? 0 : (a) < (b) ? -1 : 1)
#define _cairo_uint64_eq(a,b) ((a) == (b))
#define _cairo_uint64_negate(a) ((uint64_t) -((int64_t) (a)))
#define _cairo_uint64_negative(a) ((int64_t) (a) < 0)
@@ -111,6 +114,7 @@ int I _cairo_int64_lt (cairo_uint64_t a, cairo_uint64_t b);
#define _cairo_int64_mul(a,b) ((a) * (b))
#define _cairo_int32x32_64_mul(a,b) ((int64_t) (a) * (b))
#define _cairo_int64_lt(a,b) ((a) < (b))
+#define _cairo_int64_cmp(a,b) ((a) == (b) ? 0 : (a) < (b) ? -1 : 1)
#define _cairo_int64_eq(a,b) ((a) == (b))
#define _cairo_int64_lsl(a,b) ((a) << (b))
#define _cairo_int64_rsl(a,b) ((int64_t) ((uint64_t) (a) >> (b)))
@@ -165,6 +169,7 @@ cairo_uint128_t I _cairo_uint128_lsl (cairo_uint128_t a, int shift);
cairo_uint128_t I _cairo_uint128_rsl (cairo_uint128_t a, int shift);
cairo_uint128_t I _cairo_uint128_rsa (cairo_uint128_t a, int shift);
int I _cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b);
+int I _cairo_uint128_cmp (cairo_uint128_t a, cairo_uint128_t b);
int I _cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b);
cairo_uint128_t I _cairo_uint128_negate (cairo_uint128_t a);
#define _cairo_uint128_negative(a) (_cairo_uint64_negative(a.hi))
@@ -186,6 +191,7 @@ cairo_int128_t I _cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b);
#define _cairo_int128_rsl(a,b) _cairo_uint128_rsl(a,b)
#define _cairo_int128_rsa(a,b) _cairo_uint128_rsa(a,b)
int I _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b);
+int I _cairo_int128_cmp (cairo_int128_t a, cairo_int128_t b);
#define _cairo_int128_eq(a,b) _cairo_uint128_eq (a,b)
#define _cairo_int128_negate(a) _cairo_uint128_negate(a)
#define _cairo_int128_negative(a) (_cairo_uint128_negative(a))
@@ -205,6 +211,7 @@ int I _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b);
#define _cairo_uint128_rsl(a,b) ((uint128_t) (a) >> (b))
#define _cairo_uint128_rsa(a,b) ((uint128_t) ((int128_t) (a) >> (b)))
#define _cairo_uint128_lt(a,b) ((a) < (b))
+#define _cairo_uint128_cmp(a,b) ((a) == (b) ? 0 : (a) < (b) ? -1 : 1)
#define _cairo_uint128_eq(a,b) ((a) == (b))
#define _cairo_uint128_negate(a) ((uint128_t) -((int128_t) (a)))
#define _cairo_uint128_negative(a) ((int128_t) (a) < 0)
@@ -222,6 +229,7 @@ int I _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b);
#define _cairo_int128_mul(a,b) ((a) * (b))
#define _cairo_int64x64_128_mul(a,b) ((int128_t) (a) * (b))
#define _cairo_int128_lt(a,b) ((a) < (b))
+#define _cairo_int128_cmp(a,b) ((a) == (b) ? 0 : (a) < (b) ? -1 : 1)
#define _cairo_int128_eq(a,b) ((a) == (b))
#define _cairo_int128_lsl(a,b) ((a) << (b))
#define _cairo_int128_rsl(a,b) ((int128_t) ((uint128_t) (a) >> (b)))
diff --git a/src/cairo-wideint.c b/src/cairo-wideint.c
index e8d3ab6..4565593 100644
--- a/src/cairo-wideint.c
+++ b/src/cairo-wideint.c
@@ -235,6 +235,32 @@ _cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
return _cairo_uint64_lt (a, b);
}
+int
+_cairo_uint64_cmp (cairo_uint64_t a, cairo_uint64_t b)
+{
+ if (a.hi < b.hi)
+ return -1;
+ else if (a.hi > b.hi)
+ return 1;
+ else if (a.lo < b.lo)
+ return -1;
+ else if (a.lo > b.lo)
+ return 1;
+ else
+ return 0;
+}
+
+int
+_cairo_int64_cmp (cairo_int64_t a, cairo_int64_t b)
+{
+ if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
+ return -1;
+ if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
+ return 1;
+
+ return _cairo_uint64_cmp (a, b);
+}
+
cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
@@ -570,6 +596,28 @@ _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
}
int
+_cairo_uint128_cmp (cairo_uint128_t a, cairo_uint128_t b)
+{
+ int cmp;
+
+ cmp = _cairo_uint64_cmp (a.hi, b.hi);
+ if (cmp)
+ return cmp;
+ return _cairo_uint64_cmp (a.lo, b.lo);
+}
+
+int
+_cairo_int128_cmp (cairo_int128_t a, cairo_int128_t b)
+{
+ if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
+ return -1;
+ if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
+ return 1;
+
+ return _cairo_uint128_cmp (a, b);
+}
+
+int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
return (_cairo_uint64_eq (a.hi, b.hi) &&
commit 6eead4a5f746e182eabfcda9959cd9cc53d95a89
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sat Oct 4 12:39:21 2008 +0100
[stroke] Simplify dash-off logic.
Remove a redundant level of if-else branching (and a duplicated code
block) by testing dash_on first.
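(Illustration, not part of the commit: the generic shape of the change, with
the hypothetical helpers emit_segment() and cap_previous() standing in for
the stroker calls.)

    static void emit_segment (void) { /* add the dashed sub-edge */ }
    static void cap_previous (void) { /* cap the face left from the previous segment */ }

    /* Before: the capping branch appears twice. */
    static void
    line_to_dashed_before (int in_bounds, int dash_on)
    {
        if (in_bounds) {
            if (dash_on)
                emit_segment ();
            else
                cap_previous ();    /* same work as below... */
        } else {
            cap_previous ();        /* ...duplicated here */
        }
    }

    /* After: testing dash_on first leaves a single else branch. */
    static void
    line_to_dashed_after (int in_bounds, int dash_on)
    {
        if (dash_on && in_bounds)
            emit_segment ();
        else
            cap_previous ();
    }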
diff --git a/src/cairo-path-stroke.c b/src/cairo-path-stroke.c
index 419412a..627c71e 100644
--- a/src/cairo-path-stroke.c
+++ b/src/cairo-path-stroke.c
@@ -872,48 +872,39 @@ _cairo_stroker_line_to_dashed (void *closure, cairo_point_t *point)
segment.p2.x = _cairo_fixed_from_double (dx2) + p1->x;
segment.p2.y = _cairo_fixed_from_double (dy2) + p1->y;
- if (fully_in_bounds ||
- _cairo_box_intersects_line_segment (&stroker->bounds, &segment))
+ if (stroker->dash_on &&
+ (fully_in_bounds ||
+ _cairo_box_intersects_line_segment (&stroker->bounds, &segment)))
{
- if (stroker->dash_on) {
- status = _cairo_stroker_add_sub_edge (stroker, &segment.p1, &segment.p2, &dev_slope, slope_dx, slope_dy, &sub_start, &sub_end);
+ status = _cairo_stroker_add_sub_edge (stroker, &segment.p1, &segment.p2, &dev_slope, slope_dx, slope_dy, &sub_start, &sub_end);
+ if (status)
+ return status;
+
+ if (stroker->has_current_face) {
+ /* Join with final face from previous segment */
+ status = _cairo_stroker_join (stroker, &stroker->current_face, &sub_start);
+ stroker->has_current_face = FALSE;
if (status)
return status;
+ } else if (!stroker->has_first_face && stroker->dash_starts_on) {
+ /* Save sub path's first face in case needed for closing join */
+ stroker->first_face = sub_start;
+ stroker->has_first_face = TRUE;
+ } else {
+ /* Cap dash start if not connecting to a previous segment */
+ status = _cairo_stroker_add_leading_cap (stroker, &sub_start);
+ if (status)
+ return status;
+ }
- if (stroker->has_current_face) {
- /* Join with final face from previous segment */
- status = _cairo_stroker_join (stroker, &stroker->current_face, &sub_start);
- stroker->has_current_face = FALSE;
- if (status)
- return status;
- } else if (!stroker->has_first_face && stroker->dash_starts_on) {
- /* Save sub path's first face in case needed for closing join */
- stroker->first_face = sub_start;
- stroker->has_first_face = TRUE;
- } else {
- /* Cap dash start if not connecting to a previous segment */
- status = _cairo_stroker_add_leading_cap (stroker, &sub_start);
- if (status)
- return status;
- }
-
- if (remain) {
- /* Cap dash end if not at end of segment */
- status = _cairo_stroker_add_trailing_cap (stroker, &sub_end);
- if (status)
- return status;
- } else {
- stroker->current_face = sub_end;
- stroker->has_current_face = TRUE;
- }
+ if (remain) {
+ /* Cap dash end if not at end of segment */
+ status = _cairo_stroker_add_trailing_cap (stroker, &sub_end);
+ if (status)
+ return status;
} else {
- if (stroker->has_current_face) {
- /* Cap final face from previous segment */
- status = _cairo_stroker_add_trailing_cap (stroker, &stroker->current_face);
- if (status)
- return status;
- stroker->has_current_face = FALSE;
- }
+ stroker->current_face = sub_end;
+ stroker->has_current_face = TRUE;
}
} else {
if (stroker->has_current_face) {
commit 4aa0991a11f03a47735dcfafe43d9505b939554e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sat Oct 4 20:22:18 2008 +0100
[test] Mark fallback-resolution as XFAIL.
There appears to be no simple solution here, as it seems to be a
fundamental flaw in the design of the meta-surface with respect to replaying into
a fallback image. (I may be wrong, but if Carl found no easy solution then
I feel no shame for my own failure ;-)
diff --git a/test/Makefile.am b/test/Makefile.am
index d5c0f43..df41a3d 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -925,6 +925,18 @@ $(REFERENCE_IMAGES)
# manipulation for SVG
# extend-pad - lacks implementation in pixman and consequently used
# as an excuse for lack of support in other backends
+# fallback-resolution - The essential problem here is that the meta-surface
+# has recorded a sequence of operations with one device
+# transformation, and we attempt to replay it with
+# another (basically a scale-factor for the fallback
+# resolution). Carl began to look at this with his
+# chain-of-bugs, but the can of worms is much bigger.
+# It appears to be a design flaw in the meta-surface
+# that may spread further...
+# My solution would be to lock Behdad and Adrian in a
+# room, with Carl as a moderator and not let them out
+# until they have come up with an interface and
+# semantics that actually work. :-)
# long-lines - range overflow of fixed-point
# self-copy-overlap - vector surfaces take snapshot of patterns in contrast
# to the raster backends which don't. One solution
@@ -947,6 +959,7 @@ big-trap$(EXEEXT) \
degenerate-path$(EXEEXT) \
device-offset-scale$(EXEEXT) \
extend-pad$(EXEEXT) \
+fallback-resolution$(EXEEXT) \
long-lines$(EXEEXT) \
self-copy-overlap$(EXEEXT) \
self-intersecting$(EXEEXT) \