[Intel-gfx] [Patch 1/4] multiple ring buffer support, introduce intel_ring_buffer struct

Zou, Nanhai nanhai.zou at intel.com
Fri Apr 23 10:47:35 CEST 2010


This patch introduces an intel_ring_buffer structure as the first step toward supporting multiple rings.

The sequence number, IRQ logic and hardware status page are moved into the intel_ring_buffer structure, so that each ring can manage its own state.
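
To illustrate how the new abstraction is meant to be driven (this sketch is
illustrative only, not part of the patch; emit_breadcrumb() is a hypothetical
caller, while the helpers and MI_* opcodes are the ones added below -- the
sequence mirrors render_ring_add_request() in intel_ringbuffer.c):

    /* Write the next seqno to the hardware status page, then raise a
     * user interrupt so waiters can be woken. */
    static u32 emit_breadcrumb(struct drm_device *dev,
                    struct intel_ring_buffer *ring)
    {
            u32 seqno = intel_ring_get_seqno(dev, ring);

            intel_ring_begin(dev, ring, 4);    /* reserve four dwords */
            intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
            intel_ring_emit(dev, ring,
                            I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
            intel_ring_emit(dev, ring, seqno);
            intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
            intel_ring_advance(dev, ring);     /* flush the new tail to hw */

            return seqno;
    }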

Signed-off-by: Xiang Haihao <haihao.xiang at intel.com>
Signed-off-by: Zou Nan hai <nanhai.zou at intel.com>

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84..bf8d097 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_fb.o \
          intel_tv.o \
          intel_dvo.o \
+         intel_ringbuffer.o \
          intel_overlay.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 790fef3..32953ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@

 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>

 /* General customization:
@@ -838,6 +839,12 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+               u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+               u32 mask);
+

 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7701cbd..301702a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -73,7 +73,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }

-static inline void
+void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -114,7 +114,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }

-static inline void
+void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->irq_mask_reg & mask) != mask) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..65c540b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zou Nan hai <nanhai.zou at intel.com>
+ *    Xiang Haihao <haihao.xiang at intel.com>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+#define I915_GEM_GPU_DOMAINS   (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+static void
+render_ring_flush(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               u32     invalidate_domains,
+               u32     flush_domains)
+{
+       u32 cmd;
+
+#if WATCH_EXEC
+       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+                 invalidate_domains, flush_domains);
+#endif
+       trace_i915_gem_request_flush(dev, ring->next_seqno,
+                                    invalidate_domains, flush_domains);
+
+       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+               /*
+                * read/write caches:
+                *
+                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+                * also flushed at 2d versus 3d pipeline switches.
+                *
+                * read-only caches:
+                *
+                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+                * MI_READ_FLUSH is set, and is always flushed on 965.
+                *
+                * I915_GEM_DOMAIN_COMMAND may not exist?
+                *
+                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+                * invalidated when MI_EXE_FLUSH is set.
+                *
+                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+                * invalidated with every MI_FLUSH.
+                *
+                * TLBs:
+                *
+                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+                * are flushed at any MI_FLUSH.
+                */
+
+               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+               if ((invalidate_domains|flush_domains) &
+                               I915_GEM_DOMAIN_RENDER)
+                       cmd &= ~MI_NO_WRITE_FLUSH;
+               if (!IS_I965G(dev)) {
+                       /*
+                        * On the 965, the sampler cache always gets flushed
+                        * and this bit is reserved.
+                        */
+                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                               cmd |= MI_READ_FLUSH;
+               }
+               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+                       cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+               intel_ring_begin(dev, ring, 2);
+               intel_ring_emit(dev, ring, cmd);
+               intel_ring_emit(dev, ring, MI_NOOP);
+               intel_ring_advance(dev, ring);
+       }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+       return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
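+/* Program PRB0 from scratch: stop the ring, point it at the pinned ring
+ * object in the GTT, then re-enable it and verify that HEAD really reset. */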
+static int init_render_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 head;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       obj_priv = ring->gem_object->driver_private;
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(PRB0_CTL, 0);
+       I915_WRITE(PRB0_HEAD, 0);
+       I915_WRITE(PRB0_TAIL, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+       head = ring->get_head(dev, ring);
+
+       /* G45 ring initialization fails to reset head to zero */
+       if (head != 0) {
+               DRM_ERROR("%s head not reset to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+               I915_WRITE(PRB0_HEAD, 0);
+
+               DRM_ERROR("%s head forced to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+       }
+
+       I915_WRITE(PRB0_CTL,
+                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_NO_REPORT | RING_VALID);
+
+       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+       /* If the head is still not zero, the ring is dead */
+       if (head != 0) {
+               DRM_ERROR("%s initialization failed "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static u32
+render_ring_add_request(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_file *file_priv,
+               u32 flush_domains)
+{
+       u32 seqno;
+       seqno = intel_ring_get_seqno(dev, ring);
+
+       intel_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(dev, ring,
+                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(dev, ring, seqno);
+       intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+       intel_ring_advance(dev, ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+       return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_enable_graphics_irq(dev_priv,
+                                       GT_USER_INTERRUPT);
+               else
+                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_disable_graphics_irq(dev_priv,
+                                       GT_USER_INTERRUPT);
+               else
+                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+       struct  intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+       (void)I915_READ(HWS_PGA);
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_i915_gem_execbuffer2 *exec,
+               struct drm_clip_rect *cliprects,
+               uint64_t exec_offset)
+{
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t exec_start, exec_len;
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       exec_len = (uint32_t) exec->batch_len;
+
+       count = nbox ? nbox : 1;
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       int ret = i915_emit_box(dev, cliprects, i,
+                                       exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               if (IS_I830(dev) || IS_845G(dev)) {
+                       intel_ring_begin(dev, ring, 4);
+                       intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+                       intel_ring_emit(dev, ring, exec_start | MI_BATCH_NON_SECURE);
+                       intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+                       intel_ring_emit(dev, ring, 0);
+               } else {
+                       intel_ring_begin(dev, ring, 4);
+                       if (IS_I965G(dev)) {
+                               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+                                               (2 << 6) | MI_BATCH_NON_SECURE_I965);
+                               intel_ring_emit(dev, ring, exec_start);
+                       } else {
+                               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+                                               (2 << 6));
+                               intel_ring_emit(dev, ring, exec_start | MI_BATCH_NON_SECURE);
+                       }
+               }
+               intel_ring_advance(dev, ring);
+       }
+
+       /* XXX breadcrumb */
+       return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = ring->status_page.obj;
+       if (obj == NULL)
+               return;
+       obj_priv = obj->driver_private;
+
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       ring->status_page.obj = NULL;
+
+       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       /* The status page occupies a single page. */
+       obj = drm_gem_object_alloc(dev, PAGE_SIZE);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       obj_priv = obj->driver_private;
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, PAGE_SIZE);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       ring->status_page.gfx_addr = obj_priv->gtt_offset;
+
+       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       if (ring->status_page.page_addr == NULL) {
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       ring->status_page.obj = obj;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       ring->setup_status_page(dev, ring);
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+                       ring->name, ring->status_page.gfx_addr);
+       return 0;
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       int ret;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       ring->dev = dev;
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(dev, ring);
+               if (ret)
+                       return ret;
+       }
+
+       obj = drm_gem_object_alloc(dev, ring->size);
+       if (obj == NULL) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       ring->gem_object = obj;
+
+       ret = i915_gem_object_pin(obj, ring->alignment);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               goto cleanup;
+       }
+
+       obj_priv = obj->driver_private;
+       ring->map.size = ring->size;
+       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
+
+       drm_core_ioremap_wc(&ring->map, dev);
+
+       if (ring->map.handle == NULL) {
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ring->virtual_start = ring->map.handle;
+       ret = ring->init(dev, ring);
+       if (ret != 0) {
+               intel_cleanup_ring_buffer(dev, ring);
+               return ret;
+       }
+
+       ring->head = ring->get_head(dev, ring);
+       ring->tail = ring->get_tail(dev, ring);
+       ring->space = ring->head - (ring->tail + 8);
+       if (ring->space < 0)
+               ring->space += ring->size;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       return ret;
+cleanup:
+       cleanup_status_page(dev, ring);
+       return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       if (ring->gem_object == NULL)
+               return;
+
+       drm_core_ioremapfree(&ring->map, dev);
+
+       i915_gem_object_unpin(ring->gem_object);
+       drm_gem_object_unreference(ring->gem_object);
+       ring->gem_object = NULL;
+       cleanup_status_page(dev, ring);
+}
+
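+/* Pad the rest of the ring with MI_NOOPs and wrap the tail back to the
+ * start, first waiting for the head to move past the padding if needed. */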
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       unsigned int *virt;
+       int rem;
+       rem = ring->size - ring->tail;
+
+       if (ring->space < rem) {
+               int ret = intel_wait_ring_buffer(dev, ring, rem);
+               if (ret)
+                       return ret;
+       }
+
+       virt = (unsigned int *)(ring->virtual_start + ring->tail);
+       rem /= 4;
+       while (rem--)
+               *virt++ = MI_NOOP;
+
+       ring->tail = 0;
+
+       return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
+       unsigned long end;
+
+       trace_i915_ring_wait_begin(dev);
+       end = jiffies + 3 * HZ;
+       do {
+               ring->head = ring->get_head(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n) {
+                       trace_i915_ring_wait_end(dev);
+                       return 0;
+               }
+               yield();
+       } while (!time_after(jiffies, end));
+       trace_i915_ring_wait_end(dev);
+
+       return -EBUSY;
+}
+
+void intel_ring_begin(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int num_dwords)
+{
+       /* Reserve space in dwords, to match what the callers pass in. */
+       int n = 4 * num_dwords;
+
+       if (unlikely(ring->tail + n > ring->size))
+               intel_wrap_ring_buffer(dev, ring);
+       if (unlikely(ring->space < n))
+               intel_wait_ring_buffer(dev, ring, n);
+}
+
+void intel_ring_emit(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data)
+{
+       u32 *virt = ring->virtual_start + ring->tail;
+       *virt = data;
+       ring->tail += 4;
+       ring->tail &= ring->size - 1;
+       ring->space -= 4;
+}
+
+void intel_ring_advance(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       ring->advance_ring(dev, ring);
+}
+
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len)
+{
+       unsigned int *virt;
+
+       /* Only whole dwords may be written into the ring. */
+       BUG_ON((len & (4 - 1)) != 0);
+       intel_ring_begin(dev, ring, len / 4);
+       /* Take the write pointer only after intel_ring_begin(), which may
+        * wrap the tail back to the start of the ring. */
+       virt = (unsigned int *)(ring->virtual_start + ring->tail);
+       memcpy(virt, data, len);
+       ring->tail += len;
+       ring->tail &= ring->size - 1;
+       ring->space -= len;
+       intel_ring_advance(dev, ring);
+}
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 seqno;
+       seqno = ring->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++ring->next_seqno == 0)
+               ring->next_seqno = 1;
+       return seqno;
+}
+
+struct intel_ring_buffer render_ring = {
+       .name                   = "render ring",
+       .ring_flag              = I915_EXEC_RENDER,
+       .size                   = 32 * PAGE_SIZE,
+       .alignment              = PAGE_SIZE,
+       .virtual_start          = NULL,
+       .dev                    = NULL,
+       .gem_object             = NULL,
+       .head                   = 0,
+       .tail                   = 0,
+       .space                  = 0,
+       .next_seqno             = 1,
+       .user_irq_refcount      = 0,
+       .irq_gem_seqno          = 0,
+       .waiting_gem_seqno      = 0,
+       .setup_status_page      = render_setup_status_page,
+       .init                   = init_render_ring,
+       .get_head               = render_ring_get_head,
+       .get_tail               = render_ring_get_tail,
+       .get_active_head        = render_ring_get_active_head,
+       .advance_ring           = render_ring_advance_ring,
+       .flush                  = render_ring_flush,
+       .add_request            = render_ring_add_request,
+       .get_gem_seqno          = render_ring_get_gem_seqno,
+       .user_irq_get           = render_ring_get_user_irq,
+       .user_irq_put           = render_ring_put_user_irq,
+       .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+       .status_page            = {NULL, 0, NULL},
+       .map                    = {0,}
+};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..e3b40d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,119 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct  intel_hw_status_page {
+       void            *page_addr;
+       unsigned int    gfx_addr;
+       struct          drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct  intel_ring_buffer {
+       const char      *name;
+       unsigned int    ring_flag;
+       unsigned long   size;
+       unsigned int    alignment;
+       void            *virtual_start;
+       struct          drm_device *dev;
+       struct          drm_gem_object *gem_object;
+
+       unsigned int    head;
+       unsigned int    tail;
+       int             space;
+       u32             next_seqno;
+       struct intel_hw_status_page status_page;
+
+       u32             irq_gem_seqno;          /* last seqno seen at irq time */
+       u32             waiting_gem_seqno;
+       int             user_irq_refcount;
+       void            (*user_irq_get)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*user_irq_put)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*setup_status_page)(struct drm_device *dev,
+                       struct  intel_ring_buffer *ring);
+
+       int             (*init)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+
+       unsigned int    (*get_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_tail)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_active_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*advance_ring)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*flush)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       u32     invalidate_domains,
+                       u32     flush_domains);
+       u32             (*add_request)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_file *file_priv,
+                       u32 flush_domains);
+       u32             (*get_gem_seqno)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       int             (*dispatch_gem_execbuffer)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_i915_gem_execbuffer2 *exec,
+                       struct drm_clip_rect *cliprects,
+                       uint64_t exec_offset);
+
+       /**
+        * List of objects currently involved in rendering from the
+        * ringbuffer.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * List of breadcrumbs associated with GPU requests currently
+        * outstanding.
+        */
+       struct list_head request_list;
+
+       wait_queue_head_t irq_queue;
+       drm_local_map_t map;
+};
+
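+/* Read a dword from the CPU mapping of the hardware status page. */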
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+               int reg)
+{
+       u32 *regs = ring->status_page.page_addr;
+       return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int num_dwords);
+void intel_ring_emit(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7..e916870 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
-       __u64 flags; /* currently unused */
+#define I915_EXEC_RENDER                 (1<<0)
+#define I915_EXEC_BSD                    (1<<1)
+       __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
 };