Possible mouse mapping architecture quad-mesh -> quad-mesh transformations?

Felix Bellaby felix at bellaby.plus.com
Fri Jul 7 05:40:20 PDT 2006


Keith Packard wrote:
> > Have you considered glRenderMode (GL_SELECT) in this discussion ? 
> > 
> > When the transformed desktop is drawn by texture mapping GL
polygons
> > within a compositor then it should be fairly simple to identify
which
> > polygon is drawn at the mouse location. After you have that info
then
> > working out which point on the texture underlies the mouse is just a
> > matter of linear interpolation. The point on the texture can be
passed
> > to the application owning the relevant window as the location of the
> > mouse. 
> 
> I think this is essentially what I was proposing; treat the screen as
a
> polygonal mesh and map each polygon back to a polygon within a
specific
> window. Use the implicit projective transformation given by those two
> polygons to interpolate the position within the window.
> 
> I'm hoping to get a chance to prototype this after I'm done updating
> RandR; it sure seems like it should work for our current 3D windowing
> environments.

The beauty of doing it via glRenderMode is that there is very little
coding involved and the GL hardware should be able to take care of the
work. Since there is no pixel drawing involved the cost of redrawing
every quad in every window is minimal. In pseudo code:

int
foo (int tx[4], int x[4], int offset) {
   int i = (x[1] != x[0]) ? 1 : 2;
   return tx[0] + (tx[i] - tx[0]) * (offset - x[0]) / (x[i] - x[0]);
}

struct {
  uint two, minz, maxz, window, quad;
} select_buffer [(number of client windows)];
double n[16] = {
   1,  0,  0,    -pointer_screen_x,
  -1,  0,  0, 1 + pointer_screen_x,
   0,  1,  0,    -pointer_screen_y,
   0, -1,  0, 1 + pointer_screen_y,
};

glSelectBuffer (sizeof (select_buffer), select_buffer);
glInitNames ();
glRenderMode (GL_SELECT);

for (i = 0; i < 4; i++) {
  glClipPlane (GL_CLIP_PLANE0 + i, &n[i*4]);
  glEnable (GL_CLIP_PLANE0 + i);
};
glBegin (GL_QUADS);

for (window in client_windows) {
  glPushName ((uint)window);
  glTranslatef (window->x, window->y, window->z); // z reflects stacking
  for (quad in window_input_shape) {
    glPushName ((uint)quad);
    for (i = 0; i < 4; i++) {
       glTexCoord2i (quad->tx[i], quad->ty[i]);
       glVertex2i   (quad->x[i], quad->y[i]);
    }
    glPopName ();
  }
  glPopName ();
}

glEnd ();
for (i = 0; i < 4; i++) {
  glDisable (GL_CLIP_PLANE0 + i);
};

assert (glRenderMode (GL_RENDER) != -1);

glGetPointerv (GL_SELECTION_BUFFER_POINTER, &&n);

for (minz = 100000, i = 0; i < n / 5; i++)
  if (select_buffer[i].minz < minz) {
     pointer_window = (window)select_buffer[i].window;
     pointer_quad = (quad)select_buffer[i].quad; 
     minz = select_buffer[i].minz;
  }

pointer_window_x = foo (pointer_quad->tx, pointer_quad->x, 
                        (pointer_screen_x - pointer_window->x));
pointer_window_y = foo (pointer_quad->ty, pointer_quad->y, 
                        (pointer_screen_y - pointer_window->y));

protocol_send (pointer_window, pointer_window_x, pointer_window_y);

The main problem is that the window input shape need not match the
output shape. Compositors are currently able to ignore the input shape,
but would now need to track it and deform it along with the rest of the
window. The pointer location would need to be passed to the compositor
overlay window and then back to the app windows through the server via
some new protocol.

Felix





More information about the xorg mailing list