[Intel-gfx] [Patch 4/5] multiple ring buffer support, adapt the intel_ring_buffer with gem

Zou, Nanhai nanhai.zou at intel.com
Wed Apr 7 08:28:36 CEST 2010


Adapt intel_ring_buffer into gem.
Which ring to run on is decided by the flags field of execbuffer2.
With other legacy ioctls, gem objects and requests are considered to be on
the render ring by default.

Signed-off-by: Xiang Haihao <haihao.xiang at intel.com>
Signed-off-by: Zou Nan hai <nanhai.zou at intel.com>

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ea81ec1..a3202cd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -76,7 +76,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                lock = &dev_priv->mm.active_list_lock;
-               head = &dev_priv->mm.active_list;
+               head = &dev_priv->render_ring.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
@@ -129,7 +129,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
        struct drm_i915_gem_request *gem_request;

        seq_printf(m, "Request:\n");
-       list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+       list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, list) {
                seq_printf(m, "    %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
@@ -145,13 +145,13 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)

        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
-                          i915_get_gem_seqno(dev));
+                          i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:  %d\n",
-                       dev_priv->mm.waiting_gem_seqno);
-       seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+                       dev_priv->render_ring.waiting_gem_seqno);
+       seq_printf(m, "IRQ sequence:     %d\n", dev_priv->render_ring.irq_gem_seqno);
        return 0;
 }

@@ -197,14 +197,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
-                          i915_get_gem_seqno(dev));
+                          i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:     %d\n",
-                  dev_priv->mm.waiting_gem_seqno);
+                  dev_priv->render_ring.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
-                  dev_priv->mm.irq_gem_seqno);
+                  dev_priv->render_ring.irq_gem_seqno);
        return 0;
 }

@@ -287,7 +287,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)

        spin_lock(&dev_priv->mm.active_list_lock);

-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) {
                obj = obj_priv->obj;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                    ret = i915_gem_object_get_pages(obj, 0);
@@ -682,7 +682,7 @@ i915_wedged_write(struct file *filp,

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ead4a29..3fb1162 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -39,6 +39,7 @@
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>

+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -61,7 +62,7 @@ static int i915_init_phys_hws(struct drm_device *dev)

        if (IS_I965G(dev))
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
-                       0xf0;
+                                            0xf0;

        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
@@ -80,8 +81,8 @@ static void i915_free_hws(struct drm_device *dev)
                dev_priv->status_page_dmah = NULL;
        }

-       if (dev_priv->status_gfx_addr) {
-               dev_priv->status_gfx_addr = 0;
+       if (dev_priv->render_ring.status_page.gfx_addr) {
+               dev_priv->render_ring.status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

@@ -116,6 +117,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }

+
 static int i915_dma_cleanup(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -127,8 +129,6 @@ static int i915_dma_cleanup(struct drm_device * dev)
                drm_irq_uninstall(dev);

        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
-               intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
@@ -200,13 +200,11 @@ static int i915_dma_resume(struct drm_device * dev)
        DRM_DEBUG_DRIVER("%s\n", __func__);

        ring = &dev_priv->render_ring;
-
        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
-
        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
@@ -214,11 +212,11 @@ static int i915_dma_resume(struct drm_device * dev)
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                                ring->status_page.page_addr);
+
        if (ring->status_page.gfx_addr != 0)
                ring->setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
@@ -428,7 +426,6 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
        }

        i915_kernel_lost_context(dev);
-
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
@@ -455,13 +452,12 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
        int nbox = batch->num_cliprects;
        int i = 0, count;

+       i915_kernel_lost_context(dev);
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

-       i915_kernel_lost_context(dev);
-
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
@@ -510,9 +506,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
                          __func__,
                         dev_priv->current_page,
                         master_priv->sarea_priv->pf_current_page);
-
        i915_kernel_lost_context(dev);
-
        BEGIN_LP_RING(2);
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);
@@ -552,8 +546,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 static int i915_quiescent(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-
        i915_kernel_lost_context(dev);
+
        return intel_wait_ring_buffer(dev, &dev_priv->render_ring, dev_priv->render_ring.size - 8);
 }

@@ -835,9 +829,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-                       ring->status_page.gfx_addr);
+                               ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
-                       ring->status_page.page_addr);
+                               ring->status_page.page_addr);
        return 0;
 }

@@ -1612,9 +1606,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);

-       spin_lock_init(&dev_priv->user_irq_lock);
        spin_lock_init(&dev_priv->error_lock);
-       dev_priv->user_irq_refcount = 0;
        dev_priv->trace_irq_seqno = 0;

        ret = drm_vblank_init(dev, I915_NUM_PIPE);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 77cd198..07a31b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -310,7 +310,9 @@ int i965_reset(struct drm_device *dev, u8 flags)
        /*
         * Clear request list
         */
-       i915_gem_retire_requests(dev);
+
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

        if (need_display)
                i915_save_display(dev);
@@ -359,6 +361,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
            !dev_priv->mm.suspended) {
+
                struct intel_ring_buffer *ring = &dev_priv->render_ring;
                dev_priv->mm.suspended = 0;
                ring->init(dev, ring);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 368d3eb..71130c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -83,6 +83,7 @@ enum plane {
 #define I915_GEM_PHYS_OVERLAY_REGS 3
 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

+
 struct drm_i915_gem_phys_object {
        int id;
        struct page **page_list;
@@ -232,9 +233,10 @@ typedef struct drm_i915_private {
        void __iomem *regs;

        struct pci_dev *bridge_dev;
+
        struct intel_ring_buffer render_ring;
-       struct intel_ring_buffer bsd_ring;

+       struct intel_ring_buffer bsd_ring;

        drm_dma_handle_t *status_page_dmah;
        dma_addr_t dma_status_page;
@@ -252,12 +254,7 @@ typedef struct drm_i915_private {
        int current_page;
        int page_flipping;

-       wait_queue_head_t irq_queue;
        atomic_t irq_received;
-       /** Protects user_irq_refcount and irq_mask_reg */
-       spinlock_t user_irq_lock;
-       /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-       int user_irq_refcount;
        u32 trace_irq_seqno;
        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 irq_mask_reg;
@@ -500,18 +497,7 @@ typedef struct drm_i915_private {
                 */
                struct list_head shrink_list;

-               /**
-                * List of objects currently involved in rendering from the
-                * ringbuffer.
-                *
-                * Includes buffers having the contents of their GPU caches
-                * flushed, not necessarily primitives.  last_rendering_seqno
-                * represents when the rendering involved will be completed.
-                *
-                * A reference is held on the buffer while on this list.
-                */
                spinlock_t active_list_lock;
-               struct list_head active_list;

                /**
                 * List of objects which are not in the ringbuffer but which
@@ -549,12 +535,6 @@ typedef struct drm_i915_private {
                struct list_head fence_list;

                /**
-                * List of breadcrumbs associated with GPU requests currently
-                * outstanding.
-                */
-               struct list_head request_list;
-
-               /**
                 * We leave the user IRQ off as much as possible,
                 * but this means that requests will finish and never
                 * be retired once the system goes idle. Set a timer to
@@ -563,18 +543,6 @@ typedef struct drm_i915_private {
                 */
                struct delayed_work retire_work;

-               uint32_t next_gem_seqno;
-
-               /**
-                * Waiting sequence number, if any
-                */
-               uint32_t waiting_gem_seqno;
-
-               /**
-                * Last seq seen at irq time
-                */
-               uint32_t irq_gem_seqno;
-
                /**
                 * Flag if the X Server, and thus DRM, is not currently in
                 * control of the device.
@@ -725,6 +693,9 @@ struct drm_i915_gem_object {
         */
        int madv;

+       /* Which ring this object is associated with */
+       struct intel_ring_buffer *ring;
+
        /**
         * Number of crtcs where this object is currently the fb, but
         * will be page flipped away on the next vblank.  When it
@@ -746,6 +717,9 @@ struct drm_i915_gem_object {
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+       /** On which ring this request was generated */
+       struct intel_ring_buffer *ring;
+
        /** GEM sequence number associated with this request. */
        uint32_t seqno;

@@ -810,9 +784,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);

 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -834,7 +806,6 @@ extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
 extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
 extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);

-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

@@ -905,11 +876,11 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev, struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev, struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -920,9 +891,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                         uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+               int interruptible,
+               struct intel_ring_buffer *ring);
+uint32_t i915_add_request(struct drm_device *dev,
+               struct drm_file *file_priv,
+               uint32_t flush_domains,
+               struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                      int write);
@@ -1051,7 +1026,7 @@ extern void g4x_disable_fbc(struct drm_device *dev);
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->render_ring.status_page.page_addr))[reg])
+#define READ_HWSP(dev_priv, reg)  intel_read_status_page(&dev_priv->render_ring, reg)
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX             0x20
 #define I915_BREADCRUMB_INDEX          0x21
@@ -1104,7 +1079,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
                         (dev)->pci_device == 0x2A42 ||         \
                         (dev)->pci_device == 0x2E42)

-#define HAS_BSD(dev)           (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev)           (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d4b5e38..13ea0ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,8 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);

+
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);

@@ -1481,11 +1483,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 }

 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+                              struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       BUG_ON(ring == NULL);
+       obj_priv->ring = ring;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
@@ -1494,8 +1499,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
        }
        /* Move from whatever list we were on to the tail of execution. */
        spin_lock(&dev_priv->mm.active_list_lock);
-       list_move_tail(&obj_priv->list,
-                      &dev_priv->mm.active_list);
+       list_move_tail(&obj_priv->list, &ring->active_list);
        spin_unlock(&dev_priv->mm.active_list_lock);
        obj_priv->last_rendering_seqno = seqno;
 }
@@ -1548,6 +1552,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        BUG_ON(!list_empty(&obj_priv->gpu_write_list));

        obj_priv->last_rendering_seqno = 0;
+       obj_priv->ring = NULL;
        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
@@ -1557,7 +1562,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)

 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains, uint32_t seqno)
+                              uint32_t flush_domains, uint32_t seqno,
+                              struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv, *next;
@@ -1568,12 +1574,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                struct drm_gem_object *obj = obj_priv->obj;

                if ((obj->write_domain & flush_domains) ==
-                   obj->write_domain) {
+                   obj->write_domain && obj_priv->ring->ring_flag == ring->ring_flag) {
                        uint32_t old_write_domain = obj->write_domain;

                        obj->write_domain = 0;
                        list_del_init(&obj_priv->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, seqno);
+                       i915_gem_object_move_to_active(obj, seqno, ring);

                        /* update the fence lru list */
                        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
@@ -1597,7 +1603,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
  */
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                uint32_t flush_domains)
+                uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_file_private *i915_file_priv = NULL;
@@ -1612,28 +1618,13 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (request == NULL)
                return 0;

-       /* Grab the seqno we're going to make this request be, and bump the
-        * next (skipping 0 so it can be the reserved no-seqno value).
-        */
-       seqno = dev_priv->mm.next_gem_seqno;
-       dev_priv->mm.next_gem_seqno++;
-       if (dev_priv->mm.next_gem_seqno == 0)
-               dev_priv->mm.next_gem_seqno++;
-
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(seqno);
-
-       OUT_RING(MI_USER_INTERRUPT);
-       ADVANCE_LP_RING();
-
-       DRM_DEBUG_DRIVER("%d\n", seqno);
+       seqno = ring->add_request(dev, ring, file_priv, flush_domains);

        request->seqno = seqno;
+       request->ring = ring;
        request->emitted_jiffies = jiffies;
-       was_empty = list_empty(&dev_priv->mm.request_list);
-       list_add_tail(&request->list, &dev_priv->mm.request_list);
+       was_empty = list_empty(&ring->request_list);
+       list_add_tail(&request->list, &ring->request_list);
        if (i915_file_priv) {
                list_add_tail(&request->client_list,
                              &i915_file_priv->mm.request_list);
@@ -1645,7 +1636,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
         * domain we're flushing with our flush.
         */
        if (flush_domains != 0)
-               i915_gem_process_flushing_list(dev, flush_domains, seqno);
+               i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);

        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1662,18 +1653,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-       uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-       BEGIN_LP_RING(2);
-       OUT_RING(cmd);
-       OUT_RING(0); /* noop */
-       ADVANCE_LP_RING();
+
+       ring->flush(dev, ring,
+                       I915_GEM_DOMAIN_COMMAND, flush_domains);
        return flush_domains;
 }

@@ -1693,11 +1682,11 @@ i915_gem_retire_request(struct drm_device *dev,
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        spin_lock(&dev_priv->mm.active_list_lock);
-       while (!list_empty(&dev_priv->mm.active_list)) {
+       while (!list_empty(&request->ring->active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

-               obj_priv = list_first_entry(&dev_priv->mm.active_list,
+               obj_priv = list_first_entry(&request->ring->active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = obj_priv->obj;
@@ -1706,6 +1695,7 @@ i915_gem_retire_request(struct drm_device *dev,
                 * list, then the oldest in the list must still be newer than
                 * this seqno.
                 */
+
                if (obj_priv->last_rendering_seqno != request->seqno)
                        goto out;

@@ -1744,38 +1734,36 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }

 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+       return ring->get_gem_seqno(dev, ring);
 }

 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

-       if (!dev_priv->render_ring.status_page.page_addr || list_empty(&dev_priv->mm.request_list))
+       if (!ring->status_page.page_addr || list_empty(&ring->request_list))
                return;

-       seqno = i915_get_gem_seqno(dev);
+       seqno = ring->get_gem_seqno(dev, ring);

-       while (!list_empty(&dev_priv->mm.request_list)) {
+       while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

-               request = list_first_entry(&dev_priv->mm.request_list,
+               request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   atomic_read(&dev_priv->mm.wedged)) {
+                               atomic_read(&dev_priv->mm.wedged)) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
@@ -1787,7 +1775,7 @@ i915_gem_retire_requests(struct drm_device *dev)

        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-               i915_user_irq_put(dev);
+               ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
 }
@@ -1803,15 +1791,21 @@ i915_gem_retire_work_handler(struct work_struct *work)
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
-       i915_gem_retire_requests(dev);
-       if (!dev_priv->mm.suspended &&
-           !list_empty(&dev_priv->mm.request_list))
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
+       if (!dev_priv->mm.suspended &&
+               (!list_empty(&dev_priv->render_ring.request_list) ||
+                       (HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list))))
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }

 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+               int interruptible, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
@@ -1822,7 +1816,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
        if (atomic_read(&dev_priv->mm.wedged))
                return -EIO;

-       if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1836,19 +1830,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)

                trace_i915_gem_request_wait_begin(dev, seqno);

-               dev_priv->mm.waiting_gem_seqno = seqno;
-               i915_user_irq_get(dev);
+               ring->waiting_gem_seqno = seqno;
+               ring->user_irq_get(dev, ring);
                if (interruptible)
-                       ret = wait_event_interruptible(dev_priv->irq_queue,
-                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                       ret = wait_event_interruptible(ring->irq_queue,
+                               i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno) ||
                                atomic_read(&dev_priv->mm.wedged));
                else
-                       wait_event(dev_priv->irq_queue,
-                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                       wait_event(ring->irq_queue,
+                               i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno) ||
                                atomic_read(&dev_priv->mm.wedged));

-               i915_user_irq_put(dev);
-               dev_priv->mm.waiting_gem_seqno = 0;
+               ring->user_irq_put(dev, ring);
+               ring->waiting_gem_seqno = 0;

                trace_i915_gem_request_wait_end(dev, seqno);
        }
@@ -1857,7 +1851,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-                         __func__, ret, seqno, i915_get_gem_seqno(dev));
+                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring));

        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
@@ -1865,7 +1859,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
         * a separate wait queue to handle that.
         */
        if (ret == 0)
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev, ring);

        return ret;
 }
@@ -1875,9 +1869,9 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno, struct intel_ring_buffer *ring)
 {
-       return i915_do_wait_request(dev, seqno, 1);
+       return i915_do_wait_request(dev, seqno, 1, ring);
 }

 static void
@@ -1889,14 +1883,27 @@ i915_gem_flush(struct drm_device *dev,
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

-       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
                        invalidate_domains,
                        flush_domains);

        if (HAS_BSD(dev))
-               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-                               invalidate_domains,
-                               flush_domains);
+               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+                                       invalidate_domains,
+                                       flush_domains);
+}
+
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains,
+              struct intel_ring_buffer *ring)
+{
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+       ring->flush(dev, ring,
+                       invalidate_domains,
+                       flush_domains);
 }

 /**
@@ -1923,7 +1930,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-               ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+               ret = i915_wait_request(dev, obj_priv->last_rendering_seqno, obj_priv->ring);
                if (ret != 0)
                        return ret;
        }
@@ -2039,11 +2046,13 @@ i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
-       uint32_t seqno;
+       uint32_t seqno1, seqno2;
+       int ret;

        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
@@ -2051,11 +2060,23 @@ i915_gpu_idle(struct drm_device *dev)

        /* Flush everything onto the inactive list. */
        i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-       if (seqno == 0)
+       seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, &dev_priv->render_ring);
+       if (seqno1 == 0)
                return -ENOMEM;
+       ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);

-       return i915_wait_request(dev, seqno);
+       if (HAS_BSD(dev)) {
+               seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, &dev_priv->bsd_ring);
+               if (seqno2 == 0)
+                       return -ENOMEM;
+
+               ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+               if (ret)
+                       return ret;
+       }
+
+
+       return ret;
 }

 static int
@@ -2068,7 +2089,8 @@ i915_gem_evict_everything(struct drm_device *dev)
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
@@ -2088,7 +2110,8 @@ i915_gem_evict_everything(struct drm_device *dev)
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
        BUG_ON(!lists_empty);

@@ -2103,7 +2126,10 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
        int ret;

        for (;;) {
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+               if (HAS_BSD(dev))
+                       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

                /* If there's an inactive buffer available now, grab it
                 * and be done.
@@ -2127,14 +2153,28 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                 * things, wait for the next to finish and hopefully leave us
                 * a buffer to evict.
                 */
-               if (!list_empty(&dev_priv->mm.request_list)) {
+               if (!list_empty(&dev_priv->render_ring.request_list)) {
+                       struct drm_i915_gem_request *request;
+
+                       request = list_first_entry(&dev_priv->render_ring.request_list,
+                                                  struct drm_i915_gem_request,
+                                                  list);
+
+                       ret = i915_wait_request(dev, request->seqno, request->ring);
+                       if (ret)
+                               return ret;
+
+                       continue;
+               }
+
+               if (HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)) {
                        struct drm_i915_gem_request *request;

-                       request = list_first_entry(&dev_priv->mm.request_list,
+                       request = list_first_entry(&dev_priv->bsd_ring.request_list,
                                                   struct drm_i915_gem_request,
                                                   list);

-                       ret = i915_wait_request(dev, request->seqno);
+                       ret = i915_wait_request(dev, request->seqno, request->ring);
                        if (ret)
                                return ret;

@@ -2161,10 +2201,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                        if (obj != NULL) {
                                uint32_t seqno;

-                               i915_gem_flush(dev,
+                               i915_gem_flush_ring(dev,
+                                              obj->write_domain,
                                               obj->write_domain,
-                                              obj->write_domain);
-                               seqno = i915_add_request(dev, NULL, obj->write_domain);
+                                              obj_priv->ring);
+                               seqno = i915_add_request(dev, NULL, obj->write_domain,
+                                               obj_priv->ring);
                                if (seqno == 0)
                                        return -ENOMEM;
                                continue;
@@ -2690,6 +2732,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;

        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return;
@@ -2697,7 +2740,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
-       (void) i915_add_request(dev, NULL, obj->write_domain);
+       (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
        BUG_ON(obj->write_domain);

        trace_i915_gem_object_change_domain(obj,
@@ -2837,7 +2880,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0, obj_priv->ring);
                if (ret != 0)
                        return ret;
        }
@@ -3428,61 +3471,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
        return 0;
 }

-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-                             struct drm_i915_gem_execbuffer2 *exec,
-                             struct drm_clip_rect *cliprects,
-                             uint64_t exec_offset)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int nbox = exec->num_cliprects;
-       int i = 0, count;
-       uint32_t exec_start, exec_len;
-
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-       exec_len = (uint32_t) exec->batch_len;
-
-       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-       count = nbox ? nbox : 1;
-
-       for (i = 0; i < count; i++) {
-               if (i < nbox) {
-                       int ret = i915_emit_box(dev, cliprects, i,
-                                               exec->DR1, exec->DR4);
-                       if (ret)
-                               return ret;
-               }
-
-               if (IS_I830(dev) || IS_845G(dev)) {
-                       BEGIN_LP_RING(4);
-                       OUT_RING(MI_BATCH_BUFFER);
-                       OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       OUT_RING(exec_start + exec_len - 4);
-                       OUT_RING(0);
-                       ADVANCE_LP_RING();
-               } else {
-                       BEGIN_LP_RING(2);
-                       if (IS_I965G(dev)) {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6) |
-                                        MI_BATCH_NON_SECURE_I965);
-                               OUT_RING(exec_start);
-                       } else {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6));
-                               OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       }
-                       ADVANCE_LP_RING();
-               }
-       }
-
-       /* XXX breadcrumb */
-       return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3511,7 +3499,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;

-               ret = i915_wait_request(dev, request->seqno);
+               ret = i915_wait_request(dev, request->seqno, request->ring);
                if (ret != 0)
                        break;
        }
@@ -3668,6 +3656,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        uint32_t seqno, flush_domains, reloc_index;
        int pin_tries, flips;

+       struct intel_ring_buffer *ring = NULL;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3725,6 +3715,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }

+       if (args->flags & I915_EXEC_BSD) {
+               BUG_ON(!HAS_BSD(dev));
+               ring = &dev_priv->bsd_ring;
+       } else {
+               ring = &dev_priv->render_ring;
+       }
+
        /* Look up object handles */
        flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
@@ -3858,9 +3855,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
-               if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+               if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
                        (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains);
+                                       dev->flush_domains, &dev_priv->render_ring);
+
+                       if (HAS_BSD(dev))
+                               (void)i915_add_request(dev, file_priv,
+                                               dev->flush_domains, &dev_priv->bsd_ring);
+               }
        }

        for (i = 0; i < args->buffer_count; i++) {
@@ -3897,7 +3899,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif

        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       ret = ring->dispatch_gem_execbuffer(dev, ring, args, cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -3907,7 +3909,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
-       flush_domains = i915_retire_commands(dev);
+       flush_domains = i915_retire_commands(dev, ring);

        i915_verify_inactive(dev, __FILE__, __LINE__);

@@ -3918,12 +3920,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * *some* interrupts representing completion of buffers that we can
         * wait on when trying to clear up gtt space).
         */
-       seqno = i915_add_request(dev, file_priv, flush_domains);
+       seqno = i915_add_request(dev, file_priv, flush_domains, ring);
        BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
+               obj_priv = obj->driver_private;

-               i915_gem_object_move_to_active(obj, seqno);
+               i915_gem_object_move_to_active(obj, seqno, ring);
 #if WATCH_LRU
                DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -4035,7 +4038,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
-       exec2.flags = 0;
+       exec2.flags = I915_EXEC_RENDER;

        ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
        if (!ret) {
@@ -4274,6 +4277,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       drm_i915_private_t *dev_priv = dev->dev_private;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4288,7 +4292,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         * actually unmasked, and our working set ends up being larger than
         * required.
         */
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

        obj_priv = to_intel_bo(obj);
        /* Don't count being on the flushing list against the object being
@@ -4449,7 +4456,9 @@ i915_gem_idle(struct drm_device *dev)

        mutex_lock(&dev->struct_mutex);

-       if (dev_priv->mm.suspended || dev_priv->render_ring.gem_object == NULL) {
+       if (dev_priv->mm.suspended ||
+               (dev_priv->render_ring.gem_object == NULL) ||
+               (HAS_BSD(dev) && dev_priv->bsd_ring.gem_object == NULL)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4499,10 +4508,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
                memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
        }
        ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
-       if (!ret && HAS_BSD(dev)) {
+       if (HAS_BSD(dev)) {
                dev_priv->bsd_ring = bsd_ring;
                ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
        }
+
        return ret;
 }

@@ -4510,10 +4520,10 @@ void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
+       if (HAS_BSD(dev))
                intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-
 }

 int
@@ -4541,12 +4551,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }

        spin_lock(&dev_priv->mm.active_list_lock);
-       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+       BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
        mutex_unlock(&dev->struct_mutex);

        drm_irq_install(dev);
@@ -4585,15 +4597,20 @@ i915_gem_load(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock_init(&dev_priv->mm.active_list_lock);
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-       INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+       INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+
+       if (HAS_BSD(dev)) {
+               INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+               INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+       }
+
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
-       dev_priv->mm.next_gem_seqno = 1;

        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
@@ -4856,8 +4873,10 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                        continue;

                spin_unlock(&shrink_list_lock);
+               i915_gem_retire_requests(dev, &dev_priv->render_ring);

-               i915_gem_retire_requests(dev);
+               if (HAS_BSD(dev))
+                       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf..830680b 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -106,12 +106,21 @@ i915_dump_lru(struct drm_device *dev, const char *where)

        DRM_INFO("active list %s {\n", where);
        spin_lock(&dev_priv->mm.active_list_lock);
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
                            list)
        {
                DRM_INFO("    %p: %08x\n", obj_priv,
                         obj_priv->last_rendering_seqno);
        }
+
+       if (HAS_BSD(dev)) {
+               list_for_each_entry(obj_priv, &dev_priv->bsd_ring.active_list,
+                               list)
+               {
+                       DRM_INFO("    %p: %08x\n", obj_priv,
+                               obj_priv->last_rendering_seqno);
+               }
+       }
        spin_unlock(&dev_priv->mm.active_list_lock);
        DRM_INFO("}\n");
        DRM_INFO("flushing list %s {\n", where);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 566b060..22c8f66 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -52,7 +52,7 @@
         I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

 /** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

 #define I915_PIPE_VBLANK_STATUS        (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                                 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -326,7 +326,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir;
        struct drm_i915_master_private *master_priv;
-
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -349,14 +349,17 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        }

        if (gt_iir & GT_USER_INTERRUPT) {
-               u32 seqno = i915_get_gem_seqno(dev);
-               dev_priv->mm.irq_gem_seqno = seqno;
+               u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+               render_ring->irq_gem_seqno = seqno;
                trace_i915_gem_request_complete(dev, seqno);
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
        }

+       if (gt_iir & GT_BSD_USER_INTERRUPT)
+               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
        if (de_iir & DE_GSE)
                ironlake_opregion_gse_intr(dev);

@@ -547,7 +550,6 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
        return bbaddr;
 }

-
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -579,7 +581,7 @@ static void i915_capture_error_state(struct drm_device *dev)
                return;
        }

-       error->seqno = i915_get_gem_seqno(dev);
+       error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->pipeastat = I915_READ(PIPEASTAT);
@@ -607,7 +609,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        batchbuffer[0] = NULL;
        batchbuffer[1] = NULL;
        count = 0;
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) {
                struct drm_gem_object *obj = obj_priv->obj;

                if (batchbuffer[0] == NULL &&
@@ -643,7 +645,7 @@ static void i915_capture_error_state(struct drm_device *dev)

        if (error->active_bo) {
                int i = 0;
-               list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        error->active_bo[i].size = obj->size;
@@ -821,7 +823,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
                /*
                 * Wakeup waiting processes so they don't hang
                 */
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
        }

        queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -840,6 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

        atomic_inc(&dev_priv->irq_received);

@@ -864,7 +867,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+               spin_lock_irqsave(&render_ring->user_irq_lock, irqflags);
                pipea_stats = I915_READ(PIPEASTAT);
                pipeb_stats = I915_READ(PIPEBSTAT);

@@ -887,7 +890,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                        I915_WRITE(PIPEBSTAT, pipeb_stats);
                        irq_received = 1;
                }
-               spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+               spin_unlock_irqrestore(&render_ring->user_irq_lock, irqflags);

                if (!irq_received)
                        break;
@@ -920,14 +923,17 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                }

                if (iir & I915_USER_INTERRUPT) {
-                       u32 seqno = i915_get_gem_seqno(dev);
-                       dev_priv->mm.irq_gem_seqno = seqno;
+                       u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+                       render_ring->irq_gem_seqno = seqno;
                        trace_i915_gem_request_complete(dev, seqno);
-                       DRM_WAKEUP(&dev_priv->irq_queue);
+                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                        dev_priv->hangcheck_count = 0;
                        mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
                }

+               if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
                if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
                        intel_prepare_page_flip(dev, 0);

@@ -975,9 +981,7 @@ static int i915_emit_irq(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
        i915_kernel_lost_context(dev);
-
        DRM_DEBUG_DRIVER("\n");

        dev_priv->counter++;
@@ -996,43 +1000,13 @@ static int i915_emit_irq(struct drm_device * dev)
        return dev_priv->counter;
 }

-void i915_user_irq_get(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
-               else
-                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-       }
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-       if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
-               else
-                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-       }
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

        if (dev_priv->trace_irq_seqno == 0)
-               i915_user_irq_get(dev);
+               render_ring->user_irq_get(dev, render_ring);

        dev_priv->trace_irq_seqno = seqno;
 }
@@ -1042,6 +1016,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
@@ -1055,10 +1030,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

-       i915_user_irq_get(dev);
-       DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+       render_ring->user_irq_get(dev, render_ring);
+       DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
-       i915_user_irq_put(dev);
+       render_ring->user_irq_put(dev, render_ring);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1126,7 +1101,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
        if (!(pipeconf & PIPEACONF_ENABLE))
                return -EINVAL;

-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1136,7 +1111,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);
        return 0;
 }

@@ -1148,7 +1123,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        if (HAS_PCH_SPLIT(dev))
                ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                                             DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1156,7 +1131,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
                i915_disable_pipestat(dev_priv, pipe,
                                      PIPE_VBLANK_INTERRUPT_ENABLE |
                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);
 }

 void i915_enable_interrupt (struct drm_device *dev)
@@ -1225,7 +1200,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,

 struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+       return list_entry(dev_priv->render_ring.request_list.prev, struct drm_i915_gem_request, list);
 }

 /**
@@ -1250,12 +1225,16 @@ void i915_hangcheck_elapsed(unsigned long data)
                acthd = I915_READ(ACTHD_I965);

        /* If all work is done then ACTHD clearly hasn't advanced. */
-       if (list_empty(&dev_priv->mm.request_list) ||
-                      i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+       if (list_empty(&dev_priv->render_ring.request_list) ||
+               i915_seqno_passed(i915_get_gem_seqno(dev, &dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) {
                dev_priv->hangcheck_count = 0;
                return;
        }

+       /* XXX: hangcheck only monitors the render ring's request list;
+        * extend this to also check the BSD ring before declaring idle.
+        */
+
        if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
                i915_handle_error(dev, true);
@@ -1304,7 +1283,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-       u32 render_mask = GT_USER_INTERRUPT;
+       u32 render_mask = GT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

@@ -1381,7 +1360,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
        u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
        u32 error_mask;

-       DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+       DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+       if (HAS_BSD(dev))
+               DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);

        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410..393e468 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -328,9 +328,9 @@ void opregion_enable_asle(struct drm_device *dev)
                if (IS_MOBILE(dev)) {
                        unsigned long irqflags;

-                       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+                       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
                        intel_enable_asle(dev);
-                       spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+                       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock,
                                               irqflags);
                }

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c378ba7..e450b7a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -323,8 +323,8 @@
 #define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT          (1<<4)
 #define   I915_DEBUG_INTERRUPT                         (1<<2)
 #define   I915_USER_INTERRUPT                          (1<<1)
+#define   I915_BSD_USER_INTERRUPT                      (1<<25)
 #define   I915_ASLE_INTERRUPT                          (1<<0)
-#define   I915_BSD_USER_INTERRUPT                      (1<<25)
 #define EIR            0x020b0
 #define EMR            0x020b4
 #define ESR            0x020b8
@@ -359,17 +359,17 @@
 #define BB_ADDR                0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL  0x02170 /* 915+ only */

+
 /*
  * BSD (bit stream decoder instruction and interrupt control register defines
  * (G4X and Ironlake only)
  */
-
-#define BSD_RING_TAIL          0x04030
-#define BSD_RING_HEAD          0x04034
-#define BSD_RING_START         0x04038
-#define BSD_RING_CTL           0x0403c
-#define BSD_RING_ACTHD         0x04074
-#define BSD_HWS_PGA            0x04080
+#define BSD_RING_TAIL          0x04030
+#define BSD_RING_HEAD          0x04034
+#define BSD_RING_START         0x04038
+#define BSD_RING_CTL           0x0403c
+#define BSD_RING_ACTHD         0x04074
+#define BSD_HWS_PGA            0x04080


 /*
@@ -2301,8 +2301,7 @@
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)

-#define GT_BSD_USER_INTERRUPT          (1 << 5)
-
+#define GT_BSD_USER_INTERRUPT          (1 << 5)

 #define GTISR   0x44010
 #define GTIMR   0x44014
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2e490ad..413125f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -212,6 +212,8 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_device *dev = overlay->dev;
        int ret;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
        BUG_ON(overlay->active);

        overlay->active = 1;
@@ -224,11 +226,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -261,7 +263,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
        OUT_RING(flip_addr);
         ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 }

 static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -272,7 +274,7 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
        u32 tmp;

        if (overlay->last_flip_req != 0) {
-               ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+               ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
                if (ret == 0) {
                        overlay->last_flip_req = 0;

@@ -291,11 +293,11 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -309,6 +311,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
        u32 flip_addr = overlay->flip_addr;
        struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        BUG_ON(!overlay->active);
@@ -329,11 +332,11 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -347,11 +350,11 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -384,6 +387,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 {
        struct drm_device *dev = overlay->dev;
        struct drm_gem_object *obj;
+	drm_i915_private_t *dev_priv = dev->dev_private;
        u32 flip_addr;
        int ret;

@@ -391,12 +395,12 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                return -EIO;

        if (overlay->last_flip_req == 0) {
-               overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+               overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
                if (overlay->last_flip_req == 0)
                        return -ENOMEM;
        }

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -420,12 +424,12 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();

-                       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+                       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
                        if (overlay->last_flip_req == 0)
                                return -ENOMEM;

                        ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                                       interruptible);
+                                       interruptible, &dev_priv->render_ring);
                        if (ret != 0)
                                return ret;

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b30b1a9..c6be229 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -276,11 +276,11 @@ static void render_setup_status_page(struct drm_device *dev,
        (void)I915_READ(HWS_PGA);
 }

-void
+void
 bsd_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
-               u32     invalidate_domains,
-               u32     flush_domains)
+               u32     invalidate_domains,
+               u32     flush_domains)
 {
        intel_ring_begin (dev, ring, 8);
        intel_ring_emit (dev, ring, MI_FLUSH);
@@ -313,7 +313,7 @@ static inline void bsd_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(BSD_RING_TAIL, ring->tail);
+       I915_WRITE(BSD_RING_TAIL, ring->tail);
 }

 static int init_bsd_ring(struct drm_device *dev,
@@ -322,7 +322,7 @@ static int init_bsd_ring(struct drm_device *dev,
        return init_ring_common(dev, ring);
 }

-static u32
+static u32
 bsd_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
@@ -343,7 +343,7 @@ bsd_ring_add_request(struct drm_device *dev,
 }

 static void bsd_setup_status_page(struct drm_device *dev,
-               struct  intel_ring_buffer *ring)
+       struct  intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
@@ -386,6 +386,42 @@ bsd_ring_put_user_irq(struct drm_device *dev,
        spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
 }

+static int init_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       obj = drm_gem_object_alloc(dev, PAGE_SIZE*10);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       obj_priv = obj->driver_private;
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, PAGE_SIZE);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       ring->status_page.gfx_addr = obj_priv->gtt_offset;
+
+       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       if (ring->status_page.page_addr == NULL) {
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       ring->status_page.obj = obj;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       ring->setup_status_page(dev, ring);
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr);
+       return 0;
+}
+
 static u32
 bsd_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
@@ -393,7 +429,7 @@ bsd_ring_get_gem_seqno(struct drm_device *dev,
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }

-static int
+static int
 bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
@@ -480,43 +516,6 @@ static void cleanup_status_page(struct drm_device *dev, struct intel_ring_buffer
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }

-static int init_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *obj;
-       int ret;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       obj = drm_gem_object_alloc(dev, PAGE_SIZE*10);
-       if (obj == NULL)
-               return -ENOMEM;
-
-       obj_priv = obj->driver_private;
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-       ret = i915_gem_object_pin(obj, PAGE_SIZE);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               return ret;
-       }
-
-       ring->status_page.gfx_addr = obj_priv->gtt_offset;
-
-       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
-       if (ring->status_page.page_addr == NULL) {
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               return -EINVAL;
-       }
-       ring->status_page.obj = obj;
-       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-       ring->setup_status_page(dev, ring);
-       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr);
-       return 0;
-}
-
-
 int intel_init_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
 {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dc83b7a..90561a8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1236,10 +1236,10 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
        tv_dac = I915_READ(TV_DAC);

        /* Disable TV interrupts around load detect or we'll recurse */
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
                              PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);

        /*
         * Detect TV by polling)
@@ -1290,10 +1290,10 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
        }

        /* Restore interrupt config */
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
                             PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);

        return type;
 }
-------------- next part --------------
A non-text attachment was scrubbed...
Name: intel_ring_buffer_gem.patch
Type: application/octet-stream
Size: 65909 bytes
Desc: intel_ring_buffer_gem.patch
URL: <http://lists.freedesktop.org/archives/intel-gfx/attachments/20100407/dac1dc29/attachment.obj>


More information about the Intel-gfx mailing list