Introduce the intel_ring_buffer structure and convert the render ring buffer
to use it.

Per-ring state that used to live directly in drm_i915_private (ring size,
head, tail, space, map, backing GEM object and hardware status page) moves
into dev_priv->render_ring, and the ring operations (init, flush, add_request,
user irq get/put, status page setup and execbuffer dispatch) become function
pointers on the structure, implemented in the new intel_ringbuffer.c.
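
The legacy BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros are kept but
now expand to the new per-ring helpers. As a rough sketch (illustrative only;
intel_ring_begin() takes a length in bytes, and the helpers wait or wrap the
ring as needed, as the old macros did), open-coded emission against the new
structure looks like this:

        struct intel_ring_buffer *ring = &dev_priv->render_ring;

        intel_ring_begin(dev, ring, 2 * sizeof(u32)); /* reserve 8 bytes */
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring); /* hand the new tail to the hardware */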

Signed-off-by: Zou Nan hai <nanhai....@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xi...@intel.com>
---
 drivers/gpu/drm/i915/Makefile           |    1 +
 drivers/gpu/drm/i915/i915_debugfs.c     |    8 +-
 drivers/gpu/drm/i915/i915_dma.c         |  153 ++------
 drivers/gpu/drm/i915/i915_drv.c         |   29 +--
 drivers/gpu/drm/i915/i915_drv.h         |   64 ++--
 drivers/gpu/drm/i915/i915_gem.c         |  298 +-------------
 drivers/gpu/drm/i915/i915_irq.c         |   21 +-
 drivers/gpu/drm/i915/intel_display.c    |    1 -
 drivers/gpu/drm/i915/intel_overlay.c    |    8 -
 drivers/gpu/drm/i915/intel_ringbuffer.c |  670 +++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h |  124 ++++++
 include/drm/i915_drm.h                  |    4 +-
 12 files changed, 898 insertions(+), 483 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_ringbuffer.c
 create mode 100644 drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9563901..da78f2c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_fb.o \
          intel_tv.o \
          intel_dvo.o \
+         intel_ringbuffer.o \
          intel_overlay.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c..4fddf09 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
        u8 *virt;
        uint32_t *ptr, off;
 
-       if (!dev_priv->ring.ring_obj) {
+       if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
                return 0;
        }
 
-       virt = dev_priv->ring.virtual_start;
+       virt = dev_priv->render_ring.virtual_start;
 
-       for (off = 0; off < dev_priv->ring.Size; off += 4) {
+       for (off = 0; off < dev_priv->render_ring.size; off += 4) {
                ptr = (uint32_t *)(virt + off);
                seq_printf(m, "%08x :  %08x\n", off, *ptr);
        }
@@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 
        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
-       seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
+       seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
        seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? 
ACTHD_I965 : ACTHD));
 
        return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 851a2f8..1e52693 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,84 +40,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-/* Really want an OS-independent resettable timer.  Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-       u32 last_acthd = I915_READ(acthd_reg);
-       u32 acthd;
-       u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       int i;
-
-       trace_i915_ring_wait_begin (dev);
-
-       for (i = 0; i < 100000; i++) {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               acthd = I915_READ(acthd_reg);
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->Size;
-               if (ring->space >= n) {
-                       trace_i915_ring_wait_end (dev);
-                       return 0;
-               }
-
-               if (dev->primary->master) {
-                       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-                       if (master_priv->sarea_priv)
-                               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-               }
-
-
-               if (ring->head != last_head)
-                       i = 0;
-               if (acthd != last_acthd)
-                       i = 0;
-
-               last_head = ring->head;
-               last_acthd = acthd;
-               msleep_interruptible(10);
-
-       }
-
-       trace_i915_ring_wait_end (dev);
-       return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       volatile unsigned int *virt;
-       int rem;
-
-       rem = dev_priv->ring.Size - dev_priv->ring.tail;
-       if (dev_priv->ring.space < rem) {
-               int ret = i915_wait_ring(dev, rem, __func__);
-               if (ret)
-                       return ret;
-       }
-       dev_priv->ring.space -= rem;
-
-       virt = (unsigned int *)
-               (dev_priv->ring.virtual_start + dev_priv->ring.tail);
-       rem /= 4;
-       while (rem--)
-               *virt++ = MI_NOOP;
-
-       dev_priv->ring.tail = 0;
-
-       return 0;
-}
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -172,7 +94,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+       struct intel_ring_buffer *ring = &(dev_priv->render_ring);
 
        /*
         * We should never lose context on the ring with modesetting
@@ -185,7 +107,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
-               ring->space += ring->Size;
+               ring->space += ring->size;
 
        if (!dev->primary->master)
                return;
@@ -205,12 +127,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
-       if (dev_priv->ring.virtual_start) {
-               drm_core_ioremapfree(&dev_priv->ring.map, dev);
-               dev_priv->ring.virtual_start = NULL;
-               dev_priv->ring.map.handle = NULL;
-               dev_priv->ring.map.size = 0;
-       }
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 
        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
@@ -233,32 +150,32 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        }
 
        if (init->ring_size != 0) {
-               if (dev_priv->ring.ring_obj != NULL) {
+               if (dev_priv->render_ring.gem_object != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
-                                 "GEM mode\n");
+                                       "GEM mode\n");
                        return -EINVAL;
                }
 
-               dev_priv->ring.Size = init->ring_size;
+               dev_priv->render_ring.size = init->ring_size;
 
-               dev_priv->ring.map.offset = init->ring_start;
-               dev_priv->ring.map.size = init->ring_size;
-               dev_priv->ring.map.type = 0;
-               dev_priv->ring.map.flags = 0;
-               dev_priv->ring.map.mtrr = 0;
+               dev_priv->render_ring.map.offset = init->ring_start;
+               dev_priv->render_ring.map.size = init->ring_size;
+               dev_priv->render_ring.map.type = 0;
+               dev_priv->render_ring.map.flags = 0;
+               dev_priv->render_ring.map.mtrr = 0;
 
-               drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+               drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
 
-               if (dev_priv->ring.map.handle == NULL) {
+               if (dev_priv->render_ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
-                                 " ring buffer\n");
+                                       " ring buffer\n");
                        return -ENOMEM;
                }
        }
 
-       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+       dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
 
        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
@@ -278,26 +195,29 @@ static int i915_dma_resume(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+       struct intel_ring_buffer *ring;
        DRM_DEBUG_DRIVER("%s\n", __func__);
 
-       if (dev_priv->ring.map.handle == NULL) {
+       ring = &dev_priv->render_ring;
+
+       if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
 
        /* Program Hardware Status Page */
-       if (!dev_priv->hw_status_page) {
+       if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
-                               dev_priv->hw_status_page);
-
-       if (dev_priv->status_gfx_addr != 0)
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+                               ring->status_page.page_addr);
+       if (ring->status_page.gfx_addr != 0)
+               ring->setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
        return 0;
@@ -407,9 +327,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
-       RING_LOCALS;
 
-       if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+       if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
                return -EINVAL;
 
        BEGIN_LP_RING((dwords+1)&~1);
@@ -442,13 +361,12 @@ i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box = boxes[i];
-       RING_LOCALS;
 
-       if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+       if (box.y2 <= box.y1 || box.x2 <= box.x1
+                       || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
-                         box.x1, box.y1, box.x2, box.y2);
+                               box.x1, box.y1, box.x2, box.y2);
                return -EINVAL;
        }
 
@@ -481,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       RING_LOCALS;
 
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -535,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i = 0, count;
-       RING_LOCALS;
 
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
@@ -552,7 +467,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
-                                               batch->DR1, batch->DR4);
+                                       batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }
@@ -587,15 +502,14 @@ static int i915_dispatch_flip(struct drm_device * dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
-       RING_LOCALS;
 
        if (!master_priv->sarea_priv)
                return -EINVAL;
 
        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
-                         __func__,
-                        dev_priv->current_page,
-                        master_priv->sarea_priv->pf_current_page);
+                       __func__,
+                       dev_priv->current_page,
+                       master_priv->sarea_priv->pf_current_page);
 
        i915_kernel_lost_context(dev);
 
@@ -640,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        i915_kernel_lost_context(dev);
-       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+       return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+                       dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c51e45..c57c54f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -388,33 +388,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-           !dev_priv->mm.suspended) {
-               drm_i915_ring_buffer_t *ring = &dev_priv->ring;
-               struct drm_gem_object *obj = ring->ring_obj;
-               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+                       !dev_priv->mm.suspended) {
+               struct intel_ring_buffer *ring = &dev_priv->render_ring;
                dev_priv->mm.suspended = 0;
-
-               /* Stop the ring if it's running. */
-               I915_WRITE(PRB0_CTL, 0);
-               I915_WRITE(PRB0_TAIL, 0);
-               I915_WRITE(PRB0_HEAD, 0);
-
-               /* Initialize the ring. */
-               I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-               I915_WRITE(PRB0_CTL,
-                          ((obj->size - 4096) & RING_NR_PAGES) |
-                          RING_NO_REPORT |
-                          RING_VALID);
-               if (!drm_core_check_feature(dev, DRIVER_MODESET))
-                       i915_kernel_lost_context(dev);
-               else {
-                       ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-                       ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-                       ring->space = ring->head - (ring->tail + 8);
-                       if (ring->space < 0)
-                               ring->space += ring->Size;
-               }
-
+               ring->init(dev, ring);
                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef..aa6c9b5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
 
 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
@@ -89,16 +90,6 @@ struct drm_i915_gem_phys_object {
        struct drm_gem_object *cur_obj;
 };
 
-typedef struct _drm_i915_ring_buffer {
-       unsigned long Size;
-       u8 *virtual_start;
-       int head;
-       int tail;
-       int space;
-       drm_local_map_t map;
-       struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
 struct mem_block {
        struct mem_block *next;
        struct mem_block *prev;
@@ -241,7 +232,7 @@ typedef struct drm_i915_private {
        void __iomem *regs;
 
        struct pci_dev *bridge_dev;
-       drm_i915_ring_buffer_t ring;
+       struct intel_ring_buffer render_ring;
 
        drm_dma_handle_t *status_page_dmah;
        void *hw_status_page;
@@ -849,6 +840,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+               u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+               u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1026,7 +1022,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {                        \
-       if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+       if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+                       == NULL)                                        \
                LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
 } while (0)
 
@@ -1042,32 +1039,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 #define I915_VERBOSE 0
 
-#define RING_LOCALS    volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do {                                          \
-       int bytes__ = 4*(n);                                            \
-       if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));        \
-       /* a wrap must occur between instructions so pad beforehand */  \
-       if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
-               i915_wrap_ring(dev);                                    \
-       if (unlikely (dev_priv->ring.space < bytes__))                  \
-               i915_wait_ring(dev, bytes__, __func__);                 \
-       ring_virt__ = (unsigned int *)                                  \
-               (dev_priv->ring.virtual_start + dev_priv->ring.tail);   \
-       dev_priv->ring.tail += bytes__;                                 \
-       dev_priv->ring.tail &= dev_priv->ring.Size - 1;                 \
-       dev_priv->ring.space -= bytes__;                                \
+#define BEGIN_LP_RING(n)  do { \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       if (I915_VERBOSE)                                               \
+               DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));           \
+       intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));           \
 } while (0)
 
-#define OUT_RING(n) do {                                               \
-       if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
-       *ring_virt__++ = (n);                                           \
+
+#define OUT_RING(x) do {                                               \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       if (I915_VERBOSE)                                               \
+               DRM_DEBUG("   OUT_RING %x\n", (int)(x));                \
+       intel_ring_emit(dev, &dev_priv->render_ring, x);                \
 } while (0)
 
 #define ADVANCE_LP_RING() do {                                         \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
        if (I915_VERBOSE)                                               \
-               DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
-       I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);                     \
+               DRM_DEBUG("ADVANCE_LP_RING %x\n",                       \
+                               dev_priv->render_ring.tail);            \
+       intel_ring_advance(dev, &dev_priv->render_ring);                \
 } while(0)
 
 /**
@@ -1085,14 +1077,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
+                       (dev_priv->render_ring.status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX             0x20
 #define I915_BREADCRUMB_INDEX          0x21
 
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
 #define INTEL_INFO(dev)        (((struct drm_i915_private *) (dev)->dev_private)->info)
 
 #define IS_I830(dev)           ((dev)->pci_device == 0x3577)
@@ -1170,4 +1160,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
+#define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f..e2b97fd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1617,7 +1617,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
-       RING_LOCALS;
 
        if (file_priv != NULL)
                i915_file_priv = file_priv->driver_priv;
@@ -1712,10 +1711,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 static uint32_t
 i915_retire_commands(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
-       RING_LOCALS;
 
        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
@@ -1933,77 +1930,18 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
        return i915_do_wait_request(dev, seqno, 1);
 }
 
+
 static void
 i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t cmd;
-       RING_LOCALS;
-
-#if WATCH_EXEC
-       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-                 invalidate_domains, flush_domains);
-#endif
-       trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
-                                    invalidate_domains, flush_domains);
-
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
-
-       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
-               /*
-                * read/write caches:
-                *
-                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
-                * also flushed at 2d versus 3d pipeline switches.
-                *
-                * read-only caches:
-                *
-                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-                * MI_READ_FLUSH is set, and is always flushed on 965.
-                *
-                * I915_GEM_DOMAIN_COMMAND may not exist?
-                *
-                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-                * invalidated when MI_EXE_FLUSH is set.
-                *
-                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-                * invalidated with every MI_FLUSH.
-                *
-                * TLBs:
-                *
-                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-                * are flushed at any MI_FLUSH.
-                */
-
-               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-               if ((invalidate_domains|flush_domains) &
-                   I915_GEM_DOMAIN_RENDER)
-                       cmd &= ~MI_NO_WRITE_FLUSH;
-               if (!IS_I965G(dev)) {
-                       /*
-                        * On the 965, the sampler cache always gets flushed
-                        * and this bit is reserved.
-                        */
-                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-                               cmd |= MI_READ_FLUSH;
-               }
-               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-                       cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
-               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
-               BEGIN_LP_RING(2);
-               OUT_RING(cmd);
-               OUT_RING(MI_NOOP);
-               ADVANCE_LP_RING();
-       }
+       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+                       invalidate_domains,
+                       flush_domains);
 }
 
 /**
@@ -3557,7 +3495,6 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;
-       RING_LOCALS;
 
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;
@@ -4573,7 +4510,8 @@ i915_gem_idle(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+       if (dev_priv->mm.suspended ||
+                       dev_priv->render_ring.gem_object == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4654,71 +4592,6 @@ err:
        return ret;
 }
 
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret;
-
-       /* If we need a physical address for the status page, it's already
-        * initialized at driver load time.
-        */
-       if (!I915_NEED_GFX_HWS(dev))
-               return 0;
-
-       obj = i915_gem_alloc_object(dev, 4096);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate status page\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-       obj_priv = to_intel_bo(obj);
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-       ret = i915_gem_object_pin(obj, 4096);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               goto err_unref;
-       }
-
-       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-       if (dev_priv->hw_status_page == NULL) {
-               DRM_ERROR("Failed to map status page.\n");
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               ret = -EINVAL;
-               goto err_unpin;
-       }
-
-       if (HAS_PIPE_CONTROL(dev)) {
-               ret = i915_gem_init_pipe_control(dev);
-               if (ret)
-                       goto err_unpin;
-       }
-
-       dev_priv->hws_obj = obj;
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       if (IS_GEN6(dev)) {
-               I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA_GEN6); /* posting read */
-       } else {
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA); /* posting read */
-       }
-       DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
-       return 0;
-
-err_unpin:
-       i915_gem_object_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(obj);
-err:
-       return 0;
-}
 
 static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4737,146 +4610,25 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
        dev_priv->seqno_page = NULL;
 }
 
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-
-       if (dev_priv->hws_obj == NULL)
-               return;
-
-       obj = dev_priv->hws_obj;
-       obj_priv = to_intel_bo(obj);
-
-       kunmap(obj_priv->pages[0]);
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       dev_priv->hws_obj = NULL;
-
-       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-       dev_priv->hw_status_page = NULL;
-
-       if (HAS_PIPE_CONTROL(dev))
-               i915_gem_cleanup_pipe_control(dev);
-
-       /* Write high address into HWS_PGA when disabling. */
-       I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       drm_i915_ring_buffer_t *ring = &dev_priv->ring;
        int ret;
-       u32 head;
-
-       ret = i915_gem_init_hws(dev);
-       if (ret != 0)
-               return ret;
-
-       obj = i915_gem_alloc_object(dev, 128 * 1024);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate ringbuffer\n");
-               i915_gem_cleanup_hws(dev);
-               return -ENOMEM;
+       dev_priv->render_ring = render_ring;
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->render_ring.status_page.page_addr
+                       = dev_priv->status_page_dmah->vaddr;
+               memset(dev_priv->render_ring.status_page.page_addr,
+                               0, PAGE_SIZE);
        }
-       obj_priv = to_intel_bo(obj);
-
-       ret = i915_gem_object_pin(obj, 4096);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return ret;
-       }
-
-       /* Set up the kernel mapping for the ring. */
-       ring->Size = obj->size;
-
-       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
-       ring->map.size = obj->size;
-       ring->map.type = 0;
-       ring->map.flags = 0;
-       ring->map.mtrr = 0;
-
-       drm_core_ioremap_wc(&ring->map, dev);
-       if (ring->map.handle == NULL) {
-               DRM_ERROR("Failed to map ringbuffer.\n");
-               memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return -EINVAL;
-       }
-       ring->ring_obj = obj;
-       ring->virtual_start = ring->map.handle;
-
-       /* Stop the ring if it's running. */
-       I915_WRITE(PRB0_CTL, 0);
-       I915_WRITE(PRB0_TAIL, 0);
-       I915_WRITE(PRB0_HEAD, 0);
-
-       /* Initialize the ring. */
-       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
-               DRM_ERROR("Ring head not reset to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               I915_WRITE(PRB0_HEAD, 0);
-
-               DRM_ERROR("Ring head forced to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-       }
-
-       I915_WRITE(PRB0_CTL,
-                  ((obj->size - 4096) & RING_NR_PAGES) |
-                  RING_NO_REPORT |
-                  RING_VALID);
-
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* If the head is still not zero, the ring is dead */
-       if (head != 0) {
-               DRM_ERROR("Ring initialization failed "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               return -EIO;
-       }
-
-       /* Update our cache of the ring state */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_kernel_lost_context(dev);
-       else {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->Size;
-       }
-
-       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                          (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       return ret;
        }
-
-       return 0;
+       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       return ret;
 }
 
 void
@@ -4884,17 +4636,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (dev_priv->ring.ring_obj == NULL)
-               return;
-
-       drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
-       i915_gem_object_unpin(dev_priv->ring.ring_obj);
-       drm_gem_object_unreference(dev_priv->ring.ring_obj);
-       dev_priv->ring.ring_obj = NULL;
-       memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
-       i915_gem_cleanup_hws(dev);
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a7e4b1f..9ff3202 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }
 
-static inline void
+inline void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }
 
-static inline void
+void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -533,17 +533,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
         */
        bbaddr = 0;
        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       ring = (u32 *)(dev_priv->ring.virtual_start + head);
+       ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
 
-       while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+       while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
                bbaddr = i915_get_bbaddr(dev, ring);
                if (bbaddr)
                        break;
        }
 
        if (bbaddr == 0) {
-               ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
-               while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+               ring = (u32 *)(dev_priv->render_ring.virtual_start
+                               + dev_priv->render_ring.size);
+               while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
                        bbaddr = i915_get_bbaddr(dev, ring);
                        if (bbaddr)
                                break;
@@ -636,7 +637,8 @@ static void i915_capture_error_state(struct drm_device *dev)
        error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
 
        /* Record the ringbuffer */
-       error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+       error->ringbuffer = i915_error_object_create(dev,
+                       dev_priv->render_ring.gem_object);
 
        /* Record buffers on the active list. */
        error->active_bo = NULL;
@@ -981,10 +983,7 @@ static int i915_emit_irq(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       RING_LOCALS;
-
        i915_kernel_lost_context(dev);
-
        DRM_DEBUG_DRIVER("\n");
 
        dev_priv->counter++;
@@ -1084,7 +1083,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
        drm_i915_irq_emit_t *emit = data;
        int result;
 
-       if (!dev_priv || !dev_priv->ring.virtual_start) {
+       if (!dev_priv || !dev_priv->render_ring.virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e775ce6..08930a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4629,7 +4629,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        unsigned long flags;
        int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
        int ret, pipesrc;
-       RING_LOCALS;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b0..93da837 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       RING_LOCALS;
 
        BUG_ON(overlay->active);
 
@@ -248,7 +246,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
         drm_i915_private_t *dev_priv = dev->dev_private;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
-       RING_LOCALS;
 
        BUG_ON(!overlay->active);
 
@@ -274,7 +271,6 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
         drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        u32 tmp;
-       RING_LOCALS;
 
        if (overlay->last_flip_req != 0) {
                ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
@@ -314,9 +310,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
        u32 flip_addr = overlay->flip_addr;
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       RING_LOCALS;
 
        BUG_ON(!overlay->active);
 
@@ -390,11 +384,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                                         int interruptible)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        u32 flip_addr;
        int ret;
-       RING_LOCALS;
 
        if (overlay->hw_wedged == HW_WEDGED)
                return -EIO;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..f9f66c4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zou Nan hai <nanhai....@intel.com>
+ *    Xiang Hai hao<haihao.xi...@intel.com>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+static void
+render_ring_flush(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               u32     invalidate_domains,
+               u32     flush_domains)
+{
+#if WATCH_EXEC
+       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+                 invalidate_domains, flush_domains);
+#endif
+       u32 cmd;
+       trace_i915_gem_request_flush(dev, ring->next_seqno,
+                                    invalidate_domains, flush_domains);
+
+       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+               /*
+                * read/write caches:
+                *
+                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+                * also flushed at 2d versus 3d pipeline switches.
+                *
+                * read-only caches:
+                *
+                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+                * MI_READ_FLUSH is set, and is always flushed on 965.
+                *
+                * I915_GEM_DOMAIN_COMMAND may not exist?
+                *
+                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+                * invalidated when MI_EXE_FLUSH is set.
+                *
+                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+                * invalidated with every MI_FLUSH.
+                *
+                * TLBs:
+                *
+                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+                * are flushed at any MI_FLUSH.
+                */
+
+               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+               if ((invalidate_domains|flush_domains) &
+                               I915_GEM_DOMAIN_RENDER)
+                       cmd &= ~MI_NO_WRITE_FLUSH;
+               if (!IS_I965G(dev)) {
+                       /*
+                        * On the 965, the sampler cache always gets flushed
+                        * and this bit is reserved.
+                        */
+                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                               cmd |= MI_READ_FLUSH;
+               }
+               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+                       cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+               intel_ring_begin(dev, ring, 8);
+               intel_ring_emit(dev, ring, cmd);
+               intel_ring_emit(dev, ring, MI_NOOP);
+               intel_ring_advance(dev, ring);
+       }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+       return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 head;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       obj_priv = to_intel_bo(ring->gem_object);
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(ring->regs.ctl, 0);
+       I915_WRITE(ring->regs.head, 0);
+       I915_WRITE(ring->regs.tail, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+       head = ring->get_head(dev, ring);
+
+       /* G45 ring initialization fails to reset head to zero */
+       if (head != 0) {
+               DRM_ERROR("%s head not reset to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(ring->regs.ctl),
+                               I915_READ(ring->regs.head),
+                               I915_READ(ring->regs.tail),
+                               I915_READ(ring->regs.start));
+
+               I915_WRITE(ring->regs.head, 0);
+
+               DRM_ERROR("%s head forced to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(ring->regs.ctl),
+                               I915_READ(ring->regs.head),
+                               I915_READ(ring->regs.tail),
+                               I915_READ(ring->regs.start));
+       }
+
+       I915_WRITE(ring->regs.ctl,
+                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_NO_REPORT | RING_VALID);
+
+       head = I915_READ(ring->regs.head) & HEAD_ADDR;
+       /* If the head is still not zero, the ring is dead */
+       if (head != 0) {
+               DRM_ERROR("%s initialization failed "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(ring->regs.ctl),
+                               I915_READ(ring->regs.head),
+                               I915_READ(ring->regs.tail),
+                               I915_READ(ring->regs.start));
+               return -EIO;
+       }
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_kernel_lost_context(dev);
+       else {
+               ring->head = ring->get_head(dev, ring);
+               ring->tail = ring->get_tail(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+       }
+       return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret = init_ring_common(dev, ring);
+       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+               I915_WRITE(MI_MODE,
+                               (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       }
+       return ret;
+}
+
+#define PIPE_CONTROL_FLUSH(addr)                                       \
+do {                                                                   \
+       OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
+       PIPE_CONTROL_DEPTH_STALL);                                      \
+       OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
+       OUT_RING(0);                                                    \
+       OUT_RING(0);                                                    \
+} while (0)
+
+
+
+static u32
+render_ring_add_request(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_file *file_priv,
+               u32 flush_domains)
+{
+       u32 seqno;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       seqno = intel_ring_get_seqno(dev, ring);
+       if (HAS_PIPE_CONTROL(dev)) {
+               u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+
+               /*
+                * Workaround qword write incoherence by flushing the
+                * PIPE_NOTIFY buffers out to memory before requesting
+                * an interrupt.
+                */
+               BEGIN_LP_RING(32);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                               PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128; /* write to separate cachelines */
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                               PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+                               PIPE_CONTROL_NOTIFY);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       } else {
+               BEGIN_LP_RING(4);
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(seqno);
+               OUT_RING(MI_USER_INTERRUPT);
+               ADVANCE_LP_RING();
+       }
+
+       return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       if (HAS_PIPE_CONTROL(dev))
+               return ((volatile u32 *)(dev_priv->seqno_page))[0];
+       else
+               return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_enable_graphics_irq(dev_priv,
+                                       GT_PIPE_NOTIFY);
+               else
+                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_disable_graphics_irq(dev_priv,
+                                       GT_PIPE_NOTIFY);
+               else
+                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+       struct  intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (IS_GEN6(dev)) {
+               I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+               I915_READ(HWS_PGA_GEN6); /* posting read */
+       } else {
+               I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+               I915_READ(HWS_PGA); /* posting read */
+       }
+
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_i915_gem_execbuffer2 *exec,
+               struct drm_clip_rect *cliprects,
+               uint64_t exec_offset)
+{
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t exec_start, exec_len;
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       exec_len = (uint32_t) exec->batch_len;
+
+       count = nbox ? nbox : 1;
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       int ret = i915_emit_box(dev, cliprects, i,
+                                       exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               if (IS_I830(dev) || IS_845G(dev)) {
+                       intel_ring_begin(dev, ring, 4);
+                       intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+                       intel_ring_emit(dev, ring,
+                                       exec_start | MI_BATCH_NON_SECURE);
+                       intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+                       intel_ring_emit(dev, ring, 0);
+               } else {
+                       intel_ring_begin(dev, ring, 4);
+                       if (IS_I965G(dev)) {
+                               intel_ring_emit(dev, ring,
+                                               MI_BATCH_BUFFER_START | (2 << 6)
+                                               | MI_BATCH_NON_SECURE_I965);
+                               intel_ring_emit(dev, ring, exec_start);
+                       } else {
+                               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+                                               | (2 << 6));
+                               intel_ring_emit(dev, ring, exec_start |
+                                               MI_BATCH_NON_SECURE);
+                       }
+               }
+               intel_ring_advance(dev, ring);
+       }
+
+       /* XXX breadcrumb */
+       return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = ring->status_page.obj;
+       if (obj == NULL)
+               return;
+       obj_priv = to_intel_bo(obj);
+
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       ring->status_page.obj = NULL;
+
+       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       obj = i915_gem_alloc_object(dev, PAGE_SIZE);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       obj_priv = to_intel_bo(obj);
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, PAGE_SIZE);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       ring->status_page.gfx_addr = obj_priv->gtt_offset;
+       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       if (ring->status_page.page_addr == NULL) {
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       ring->status_page.obj = obj;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+       ring->setup_status_page(dev, ring);
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+                       ring->name, ring->status_page.gfx_addr);
+       return 0;
+}
+
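+/*
+ * Common ring setup: allocate and pin the ring object, map it
+ * write-combined through the GTT aperture, run the engine-specific
+ * init() hook and derive the initial head/tail/space bookkeeping.
+ */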
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       int ret;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       ring->dev = dev;
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(dev, ring);
+               if (ret)
+                       return ret;
+       }
+
+       obj = i915_gem_alloc_object(dev, ring->size);
+       if (obj == NULL) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       ring->gem_object = obj;
+
+       ret = i915_gem_object_pin(obj, ring->alignment);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               goto cleanup;
+       }
+
+       obj_priv = to_intel_bo(obj);
+       ring->map.size = ring->size;
+       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
+
+       drm_core_ioremap_wc(&ring->map, dev);
+
+       if (ring->map.handle == NULL) {
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ring->virtual_start = ring->map.handle;
+       ret = ring->init(dev, ring);
+       if (ret != 0) {
+               intel_cleanup_ring_buffer(dev, ring);
+               return ret;
+       }
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_kernel_lost_context(dev);
+       else {
+               ring->head = ring->get_head(dev, ring);
+               ring->tail = ring->get_tail(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+       }
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       return ret;
+cleanup:
+       cleanup_status_page(dev, ring);
+       return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       if (ring->gem_object == NULL)
+               return;
+
+       drm_core_ioremapfree(&ring->map, dev);
+
+       i915_gem_object_unpin(ring->gem_object);
+       drm_gem_object_unreference(ring->gem_object);
+       ring->gem_object = NULL;
+       cleanup_status_page(dev, ring);
+}
+
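+/*
+ * Pad the rest of the ring with MI_NOOPs and wrap the tail back to the
+ * start, so that no command ever straddles the end of the buffer.
+ */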
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       unsigned int *virt;
+       int rem;
+       rem = ring->size - ring->tail;
+
+       if (ring->space < rem) {
+               int ret = intel_wait_ring_buffer(dev, ring, rem);
+               if (ret)
+                       return ret;
+       }
+
+       virt = (unsigned int *)(ring->virtual_start + ring->tail);
+       rem /= 4;
+       while (rem--)
+               *virt++ = MI_NOOP;
+
+       ring->tail = 0;
+
+       return 0;
+}
+
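+/*
+ * Poll the hardware head pointer for up to three seconds, yielding
+ * between reads, until at least n bytes of ring space are free.
+ * Returns -EBUSY if the ring stays full for the whole interval.
+ */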
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
+       unsigned long end;
+
+       trace_i915_ring_wait_begin(dev);
+       end = jiffies + 3 * HZ;
+       do {
+               ring->head = ring->get_head(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n) {
+                       trace_i915_ring_wait_end(dev);
+                       return 0;
+               }
+               if (dev->primary->master) {
+                       struct drm_i915_master_private *master_priv
+                               = dev->primary->master->driver_priv;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->perf_boxes
+                                       |= I915_BOX_WAIT;
+               }
+               yield();
+       } while (!time_after(jiffies, end));
+       trace_i915_ring_wait_end(dev);
+
+       return -EBUSY;
+}
+
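+/*
+ * intel_ring_begin() reserves ring space (wrapping and/or waiting as
+ * needed) for the dwords about to be emitted, intel_ring_emit() writes
+ * one dword at the current tail, and intel_ring_advance() pushes the
+ * new tail to the hardware through the ring's advance_ring() hook.
+ */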
+void intel_ring_begin(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int num_dwords)
+{
+       int n = 4 * num_dwords;
+
+       if (unlikely(ring->tail + n > ring->size))
+               intel_wrap_ring_buffer(dev, ring);
+       if (unlikely(ring->space < n))
+               intel_wait_ring_buffer(dev, ring, n);
+}
+
+void intel_ring_emit(struct drm_device *dev,
+               struct intel_ring_buffer *ring, unsigned int data)
+{
+       unsigned int *virt = ring->virtual_start + ring->tail;
+       *virt = data;
+       ring->tail += 4;
+       ring->tail &= ring->size - 1;
+       ring->space -= 4;
+}
+
+void intel_ring_advance(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       ring->advance_ring(dev, ring);
+}
+
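+/*
+ * Copy a dword-aligned block of data into the ring in one go instead
+ * of emitting it one dword at a time.
+ */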
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len)
+{
+       unsigned int *virt;
+
+       BUG_ON((len & (4 - 1)) != 0);
+       intel_ring_begin(dev, ring, len / 4);
+       /* begin may wrap the ring, so only compute virt afterwards */
+       virt = ring->virtual_start + ring->tail;
+       memcpy(virt, data, len);
+       ring->tail += len;
+       ring->tail &= ring->size - 1;
+       ring->space -= len;
+       intel_ring_advance(dev, ring);
+}
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 seqno;
+       seqno = ring->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++ring->next_seqno == 0)
+               ring->next_seqno = 1;
+       return seqno;
+}
+
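+/*
+ * The render (PRB0) ring, with its MMIO register block and the
+ * engine-specific hooks used by the common ring code above.
+ */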
+struct intel_ring_buffer render_ring = {
+       .name                   = "render ring",
+       .regs                   = {
+               .ctl = PRB0_CTL,
+               .head = PRB0_HEAD,
+               .tail = PRB0_TAIL,
+               .start = PRB0_START
+       },
+       .ring_flag              = I915_EXEC_RENDER,
+       .size                   = 32 * PAGE_SIZE,
+       .alignment              = PAGE_SIZE,
+       .virtual_start          = NULL,
+       .dev                    = NULL,
+       .gem_object             = NULL,
+       .head                   = 0,
+       .tail                   = 0,
+       .space                  = 0,
+       .next_seqno             = 1,
+       .user_irq_refcount      = 0,
+       .irq_gem_seqno          = 0,
+       .waiting_gem_seqno      = 0,
+       .setup_status_page      = render_setup_status_page,
+       .init                   = init_render_ring,
+       .get_head               = render_ring_get_head,
+       .get_tail               = render_ring_get_tail,
+       .get_active_head        = render_ring_get_active_head,
+       .advance_ring           = render_ring_advance_ring,
+       .flush                  = render_ring_flush,
+       .add_request            = render_ring_add_request,
+       .get_gem_seqno          = render_ring_get_gem_seqno,
+       .user_irq_get           = render_ring_get_user_irq,
+       .user_irq_put           = render_ring_put_user_irq,
+       .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+       .status_page            = {NULL, 0, NULL},
+       .map                    = {0,}
+};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..d5568d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct  intel_hw_status_page {
+       void            *page_addr;
+       unsigned int    gfx_addr;
+       struct          drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct  intel_ring_buffer {
+       const char      *name;
+       struct          ring_regs {
+                       u32 ctl;
+                       u32 head;
+                       u32 tail;
+                       u32 start;
+       } regs;
+       unsigned int    ring_flag;
+       unsigned long   size;
+       unsigned int    alignment;
+       void            *virtual_start;
+       struct          drm_device *dev;
+       struct          drm_gem_object *gem_object;
+
+       unsigned int    head;
+       unsigned int    tail;
+       unsigned int    space;
+       u32             next_seqno;
+       struct intel_hw_status_page status_page;
+
+       u32             irq_gem_seqno;          /* last seq seen at irq time */
+       u32             waiting_gem_seqno;
+       int             user_irq_refcount;
+       void            (*user_irq_get)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*user_irq_put)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*setup_status_page)(struct drm_device *dev,
+                       struct  intel_ring_buffer *ring);
+
+       int             (*init)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+
+       unsigned int    (*get_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_tail)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_active_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*advance_ring)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*flush)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       u32     invalidate_domains,
+                       u32     flush_domains);
+       u32             (*add_request)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_file *file_priv,
+                       u32 flush_domains);
+       u32             (*get_gem_seqno)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       int             (*dispatch_gem_execbuffer)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_i915_gem_execbuffer2 *exec,
+                       struct drm_clip_rect *cliprects,
+                       uint64_t exec_offset);
+
+       /**
+        * List of objects currently involved in rendering from the
+        * ringbuffer.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * List of breadcrumbs associated with GPU requests currently
+        * outstanding.
+        */
+       struct list_head request_list;
+
+       wait_queue_head_t irq_queue;
+       drm_local_map_t map;
+};
+
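+/*
+ * Read a dword from the CPU mapping of the ring's hardware status
+ * page; 'reg' is a dword index into the page.
+ */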
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+               int reg)
+{
+       u32 *regs = ring->status_page.page_addr;
+       return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+void intel_ring_emit(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7..e916870 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
-       __u64 flags; /* currently unused */
+#define I915_EXEC_RENDER                 (1<<0)
+#define I915_EXEC_BSD                    (1<<1)
+       __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
 };
-- 
1.7.1
