[PATCH 16/27] Input: Convert acceleration code to using ValuatorMask
Simon Thum
simon.thum at gmx.de
Sun Jun 5 08:04:59 PDT 2011
On 06/03/2011 04:59 PM, Daniel Stone wrote:
> Instead of passing a set of int* to the acceleration code, pass it a
> mask instead, which avoids an unfortunate loss of precision.
>
> Signed-off-by: Daniel Stone <daniel at fooishbar.org>
> ---
> dix/getevents.c | 41 +++++++++++++++++-----------
> dix/ptrveloc.c | 82 ++++++++++++++++---------------------------------------
> 2 files changed, 49 insertions(+), 74 deletions(-)
>
> diff --git a/dix/getevents.c b/dix/getevents.c
> index b796000..2f8e137 100644
> --- a/dix/getevents.c
> +++ b/dix/getevents.c
> @@ -727,17 +727,19 @@ moveAbsolute(DeviceIntPtr dev, int *x_out, int *y_out, ValuatorMask *mask)
>
> for (i = 0; i < valuator_mask_size(mask); i++)
> {
> + double val;
> if (valuator_mask_isset(mask, i))
> - {
> - double val = valuator_mask_get_double(mask, i);
> - clipAxis(dev, i, &val);
> - dev->last.valuators[i] = val;
> - valuator_mask_set_double(mask, i, val);
> - }
> + val = valuator_mask_get_double(mask, i);
> + else
> + val = dev->last.valuators[i] + dev->last.remainder[i];
> + clipAxis(dev, i, &val);
> + dev->last.valuators[i] = floor(val);
> + dev->last.remainder[i] = val - floor(val);
> + valuator_mask_set_double(mask, i, val);
> }
>
> - *x_out = round_towards_zero(x);
> - *y_out = round_towards_zero(y);
> + *x_out = dev->last.valuators[0];
> + *y_out = dev->last.valuators[1];
> }
The above seems to belong to another patch.
Otherwise, consider this
Reviewed-by: Simon Thum <simon.thum at gmx.de>
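To illustrate the precision point from the commit message: when each event's delta is truncated to int before it reaches (or leaves) the acceleration code, sub-pixel motion is silently dropped unless a remainder is carried by hand; keeping doubles in the mask avoids that. A standalone sketch of the generic truncation argument (plain C, invented per-event delta, no server API involved):

#include <stdio.h>

#define EVENTS 10      /* invented: number of motion events   */
#define DELTA  0.4     /* invented: sub-pixel delta per event */

int main(void)
{
    int    as_int    = 0;    /* int-only path: truncate every event     */
    double as_double = 0.0;  /* mask path: keep the double, round later */
    int i;

    for (i = 0; i < EVENTS; i++) {
        as_int    += (int)DELTA;   /* 0.4 truncates to 0 each time */
        as_double += DELTA;
    }

    printf("per-event int truncation: %d px\n", as_int);        /* 0 px    */
    printf("double kept in the mask:  %.1f px\n", as_double);   /* ~4.0 px */
    return 0;
}

The old code papered over this with dev->last.remainder; with doubles kept in the mask, that bookkeeping can live in one place.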
>
> @@ -1185,16 +1188,22 @@ GetPointerEvents(InternalEvent *events, DeviceIntPtr pDev, int type, int buttons
> /* x and y are only set, but not used, by moveAbsolute */
> moveAbsolute(pDev, &x, &y, &mask);
> } else {
> - if (flags & POINTER_ACCELERATE) {
> + if (flags & POINTER_ACCELERATE)
> accelPointer(pDev, &mask, ms);
> - /* The pointer acceleration code modifies the fractional part
> - * in-place, so we need to extract this information first */
> - x_frac = pDev->last.remainder[0];
> - y_frac = pDev->last.remainder[1];
> - }
> moveRelative(pDev, &x, &y, &mask);
> }
>
> + if (valuator_mask_isset(&mask, 0))
> + {
> + x_frac = valuator_mask_get_double(&mask, 0);
> + x_frac -= round_towards_zero(x_frac);
> + }
> + if (valuator_mask_isset(&mask, 1))
> + {
> + y_frac = valuator_mask_get_double(&mask, 1);
> + y_frac -= round_towards_zero(y_frac);
> + }
> +
> set_raw_valuators(raw, &mask, raw->valuators.data,
> raw->valuators.data_frac);
>
> diff --git a/dix/ptrveloc.c b/dix/ptrveloc.c
> index 92e75b6..c4b916e 100644
> --- a/dix/ptrveloc.c
> +++ b/dix/ptrveloc.c
> @@ -1118,11 +1118,10 @@ acceleratePointerPredictable(
> CARD32 evtime)
> {
> double dx = 0, dy = 0;
> - int tmpi;
> DeviceVelocityPtr velocitydata = GetDevicePredictableAccelData(dev);
> Bool soften = TRUE;
>
> - if (!velocitydata)
> + if (valuator_mask_num_valuators(val) == 0 || !velocitydata)
> return;
>
> if (velocitydata->statistics.profile_number == AccelProfileNone &&
> @@ -1131,11 +1130,11 @@ acceleratePointerPredictable(
> }
>
> if (valuator_mask_isset(val, 0)) {
> - dx = valuator_mask_get(val, 0);
> + dx = valuator_mask_get_double(val, 0);
> }
>
> if (valuator_mask_isset(val, 1)) {
> - dy = valuator_mask_get(val, 1);
> + dy = valuator_mask_get_double(val, 1);
> }
>
> if (dx || dy){
> @@ -1158,29 +1157,12 @@ acceleratePointerPredictable(
> ApplySoftening(velocitydata, &dx, &dy);
> ApplyConstantDeceleration(velocitydata, &dx, &dy);
>
> - /* Calculate the new delta (with accel) and drop it back
> - * into the valuator masks */
> - if (dx) {
> - double tmp;
> - tmp = mult * dx + dev->last.remainder[0];
> - /* Since it may not be apparent: lrintf() does not offer
> - * strong statements about rounding; however because we
> - * process each axis conditionally, there's no danger
> - * of a toggling remainder. Its lack of guarantees likely
> - * makes it faster on the average target. */
> - tmpi = lrintf(tmp);
> - valuator_mask_set(val, 0, tmpi);
> - dev->last.remainder[0] = tmp - (double)tmpi;
> - }
> - if (dy) {
> - double tmp;
> - tmp = mult * dy + dev->last.remainder[1];
> - tmpi = lrintf(tmp);
> - valuator_mask_set(val, 1, tmpi);
> - dev->last.remainder[1] = tmp - (double)tmpi;
> - }
> - DebugAccelF("pos (%i | %i) remainders x: %.3f y: %.3f delta x:%.3f y:%.3f\n",
> - *px, *py, dev->last.remainder[0], dev->last.remainder[1], dx, dy);
> + if (dx)
> + valuator_mask_set_double(val, 0, mult * dx);
> + if (dy)
> + valuator_mask_set_double(val, 1, mult * dy);
> + DebugAccelF("pos (%i | %i) delta x:%.3f y:%.3f\n", mult * dx,
> + mult * dy);
> }
> }
> }
> @@ -1203,7 +1185,6 @@ acceleratePointerLightweight(
> {
> double mult = 0.0, tmpf;
> double dx = 0.0, dy = 0.0;
> - int tmpi;
>
> if (valuator_mask_isset(val, 0)) {
> dx = valuator_mask_get(val, 0);
> @@ -1213,53 +1194,38 @@ acceleratePointerLightweight(
> dy = valuator_mask_get(val, 1);
> }
>
> + if (valuator_mask_num_valuators(val) == 0)
> + return;
> +
> if (!dx && !dy)
> return;
>
> if (dev->ptrfeed && dev->ptrfeed->ctrl.num) {
> /* modeled from xf86Events.c */
> if (dev->ptrfeed->ctrl.threshold) {
> - if ((abs(dx) + abs(dy)) >= dev->ptrfeed->ctrl.threshold) {
> - tmpf = ((double)dx *
> - (double)(dev->ptrfeed->ctrl.num)) /
> - (double)(dev->ptrfeed->ctrl.den) +
> - dev->last.remainder[0];
> + if ((fabs(dx) + fabs(dy)) >= dev->ptrfeed->ctrl.threshold) {
> if (dx) {
> - tmpi = (int) tmpf;
> - valuator_mask_set(val, 0, tmpi);
> - dev->last.remainder[0] = tmpf - (double)tmpi;
> + tmpf = (dx * (double)(dev->ptrfeed->ctrl.num)) /
> + (double)(dev->ptrfeed->ctrl.den);
> + valuator_mask_set_double(val, 0, tmpf);
> }
>
> - tmpf = ((double)dy *
> - (double)(dev->ptrfeed->ctrl.num)) /
> - (double)(dev->ptrfeed->ctrl.den) +
> - dev->last.remainder[1];
> if (dy) {
> - tmpi = (int) tmpf;
> - valuator_mask_set(val, 1, tmpi);
> - dev->last.remainder[1] = tmpf - (double)tmpi;
> + tmpf = (dy * (double)(dev->ptrfeed->ctrl.num)) /
> + (double)(dev->ptrfeed->ctrl.den);
> + valuator_mask_set_double(val, 1, tmpf);
> }
> }
> }
> else {
> - mult = pow((double)dx * (double)dx + (double)dy * (double)dy,
> + mult = pow(dx * dx + dy * dy,
> ((double)(dev->ptrfeed->ctrl.num) /
> (double)(dev->ptrfeed->ctrl.den) - 1.0) /
> 2.0) / 2.0;
> - if (dx) {
> - tmpf = mult * (double)dx +
> - dev->last.remainder[0];
> - tmpi = (int) tmpf;
> - valuator_mask_set(val, 0, tmpi);
> - dev->last.remainder[0] = tmpf - (double)tmpi;
> - }
> - if (dy) {
> - tmpf = mult * (double)dy +
> - dev->last.remainder[1];
> - tmpi = (int)tmpf;
> - valuator_mask_set(val, 1, tmpi);
> - dev->last.remainder[1] = tmpf - (double)tmpi;
> - }
> + if (dx)
> + valuator_mask_set_double(val, 0, mult * dx);
> + if (dy)
> + valuator_mask_set_double(val, 1, mult * dy);
> }
> }
> }
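As a side note on the lightweight path: the multiplier above works out to |delta|^(num/den - 1) / 2, so the accelerated delta scales as |delta|^(num/den) / 2. A small worked example (standalone C; the num/den and delta values are made up):

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* invented values: ctrl.num = 2, ctrl.den = 1, delta = (3, 4) */
    double num = 2.0, den = 1.0;
    double dx = 3.0, dy = 4.0;

    /* same expression as in acceleratePointerLightweight() */
    double mult = pow(dx * dx + dy * dy,
                      (num / den - 1.0) / 2.0) / 2.0;

    /* speed = 5, mult = 2.5, accelerated delta = (7.5, 10.0) */
    printf("mult = %.2f, accelerated = (%.1f, %.1f)\n",
           mult, mult * dx, mult * dy);
    return 0;
}

The 0.5 left over in the x delta is exactly what GetPointerEvents now pulls out of the mask as x_frac, instead of the acceleration code updating dev->last.remainder itself.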