Author: dumbbell
Date: Sat Oct 31 15:09:31 2015
New Revision: 290228
URL: https://svnweb.freebsd.org/changeset/base/290228

Log:
  drm/i915: Reduce diff with Linux 3.8
  
  There is no functional change. The goal is to ease the future update to
  Linux 3.8's i915 driver.
  
  MFC after:    2 months

Modified:
  head/sys/dev/drm2/i915/i915_gem_execbuffer.c
  head/sys/dev/drm2/i915/intel_pm.c
  head/sys/dev/drm2/i915/intel_ringbuffer.h

Modified: head/sys/dev/drm2/i915/i915_gem_execbuffer.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem_execbuffer.c        Sat Oct 31 10:16:44 2015        (r290227)
+++ head/sys/dev/drm2/i915/i915_gem_execbuffer.c        Sat Oct 31 15:09:31 2015        (r290228)
@@ -221,9 +221,10 @@ eb_create(int size)
 {
        struct eb_objects *eb;
 
-       eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO);
+       eb = malloc(sizeof(*eb),
+                    DRM_I915_GEM, M_WAITOK | M_ZERO);
        eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
-       return (eb);
+       return eb;
 }
 
 static void
@@ -250,9 +251,10 @@ eb_get_object(struct eb_objects *eb, uns
 
        LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
                if (obj->exec_handle == handle)
-                       return (obj);
+                       return obj;
        }
-       return (NULL);
+
+       return NULL;
 }
 
 static void
@@ -374,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struc
 
        /* We can't wait for rendering with pagefaults disabled */
        if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
-               return (-EFAULT);
+               return -EFAULT;
 
        reloc->delta += target_offset;
        if (use_cpu_reloc(obj)) {
@@ -389,7 +391,7 @@ i915_gem_execbuffer_relocate_entry(struc
                sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
                    SFB_NOWAIT);
                if (sf == NULL)
-                       return (-ENOMEM);
+                       return -ENOMEM;
                vaddr = (void *)sf_buf_kva(sf);
                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
                sf_buf_free(sf);
@@ -509,14 +511,13 @@ i915_gem_execbuffer_relocate(struct drm_
        i915_gem_retire_requests(dev);
 
        ret = 0;
-       pflags = vm_fault_disable_pagefaults();
        /* This is the fast path and we cannot handle a pagefault whilst
         * holding the device lock lest the user pass in the relocations
         * contained within a mmaped bo. For in such a case we, the page
         * fault handler would call i915_gem_fault() and we would try to
         * acquire the device lock again. Obviously this is bad.
         */
-
+       pflags = vm_fault_disable_pagefaults();
        list_for_each_entry(obj, objects, exec_list) {
                ret = i915_gem_execbuffer_relocate_object(obj, eb);
                if (ret)
@@ -585,7 +586,8 @@ i915_gem_execbuffer_reserve(struct intel
        struct drm_i915_gem_object *obj;
        struct list_head ordered_objects;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-       int ret, retry;
+       int retry;
+       int ret;
 
        dev_priv = ring->dev->dev_private;
        INIT_LIST_HEAD(&ordered_objects);
@@ -957,11 +959,12 @@ validate_exec_list(struct drm_i915_gem_e
                        return -EINVAL;
 
                length = exec[i].relocation_count *
-                   sizeof(struct drm_i915_gem_relocation_entry);
+                       sizeof(struct drm_i915_gem_relocation_entry);
                if (length == 0) {
                        (*map)[i] = NULL;
                        continue;
                }
+
                /*
                 * Since both start and end of the relocation region
                 * may be not aligned on the page boundary, be
@@ -977,7 +980,7 @@ validate_exec_list(struct drm_i915_gem_e
                if ((*maplen)[i] == -1) {
                        free(ma, DRM_I915_GEM);
                        (*map)[i] = NULL;
-                       return (-EFAULT);
+                       return -EFAULT;
                }
        }
 
@@ -1058,7 +1061,7 @@ i915_gem_fix_mi_batchbuffer_end(struct d
        char *mkva;
        uint64_t po_r, po_w;
        uint32_t cmd;
-       
+
        po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
            batch_start_offset + batch_len;
        if (batch_len > 0)
@@ -1088,7 +1091,7 @@ DRM_DEBUG("batchbuffer does not end by M
 
 int i915_fix_mi_batchbuffer_end = 0;
 
- static int
+static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
 {
@@ -1125,13 +1128,13 @@ i915_gem_do_execbuffer(struct drm_device
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
-       vm_page_t **relocs_ma;
-       int *relocs_len;
        u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start, exec_len;
        u32 seqno;
        u32 mask;
        int ret, mode, i;
+       vm_page_t **relocs_ma;
+       int *relocs_len;
 
        if (!i915_gem_check_execbuffer(args)) {
                DRM_DEBUG("execbuf with invalid offset/length\n");
@@ -1141,10 +1144,10 @@ i915_gem_do_execbuffer(struct drm_device
        if (args->batch_len == 0)
                return (0);
 
-       ret = validate_exec_list(exec, args->buffer_count, &relocs_ma,
-           &relocs_len);
-       if (ret != 0)
-               goto pre_struct_lock_err;
+       ret = validate_exec_list(exec, args->buffer_count,
+           &relocs_ma, &relocs_len);
+       if (ret)
+               goto pre_mutex_err;
 
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
@@ -1157,7 +1160,7 @@ i915_gem_do_execbuffer(struct drm_device
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        ret = -EPERM;
-                       goto pre_struct_lock_err;
+                       goto pre_mutex_err;
                }
                break;
        case I915_EXEC_BLT:
@@ -1166,20 +1169,20 @@ i915_gem_do_execbuffer(struct drm_device
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        ret = -EPERM;
-                       goto pre_struct_lock_err;
+                       goto pre_mutex_err;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                ret = -EINVAL;
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
        }
        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                ret = -EINVAL;
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
        }
 
        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1192,13 +1195,13 @@ i915_gem_do_execbuffer(struct drm_device
                    mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4) {
                                ret = -EINVAL;
-                               goto pre_struct_lock_err;
+                               goto pre_mutex_err;
                        }
 
                        if (INTEL_INFO(dev)->gen > 5 &&
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                ret = -EINVAL;
-                               goto pre_struct_lock_err;
+                               goto pre_mutex_err;
                        }
 
                        /* The HW changed the meaning on this bit on gen6 */
@@ -1209,57 +1212,57 @@ i915_gem_do_execbuffer(struct drm_device
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                ret = -EINVAL;
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
        }
 
        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                ret = -EINVAL;
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
        }
 
        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->rings[RCS]) {
                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        ret = -EINVAL;
-                       goto pre_struct_lock_err;
+                       goto pre_mutex_err;
                }
 
                if (INTEL_INFO(dev)->gen >= 5) {
                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                        ret = -EINVAL;
-                       goto pre_struct_lock_err;
+                       goto pre_mutex_err;
                }
 
                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
                        DRM_DEBUG("execbuf with %u cliprects\n",
                                  args->num_cliprects);
                        ret = -EINVAL;
-                       goto pre_struct_lock_err;
+                       goto pre_mutex_err;
                }
-               cliprects = malloc( sizeof(*cliprects) * args->num_cliprects,
-                   DRM_I915_GEM, M_WAITOK | M_ZERO);
+               cliprects = malloc(args->num_cliprects * sizeof(*cliprects),
+                                   DRM_I915_GEM, M_WAITOK | M_ZERO);
                ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
                    sizeof(*cliprects) * args->num_cliprects);
                if (ret != 0)
-                       goto pre_struct_lock_err;
+                       goto pre_mutex_err;
        }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
 
        if (dev_priv->mm.suspended) {
                DRM_UNLOCK(dev);
                ret = -EBUSY;
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
        }
 
        eb = eb_create(args->buffer_count);
        if (eb == NULL) {
                DRM_UNLOCK(dev);
                ret = -ENOMEM;
-               goto pre_struct_lock_err;
+               goto pre_mutex_err;
        }
 
        /* Look up object handles */
@@ -1350,7 +1353,7 @@ i915_gem_do_execbuffer(struct drm_device
            mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
-                       goto err;
+                               goto err;
 
                intel_ring_emit(ring, MI_NOOP);
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -1375,9 +1378,6 @@ i915_gem_do_execbuffer(struct drm_device
                    args->batch_start_offset, args->batch_len);
        }
 
-       CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
-           exec_start, exec_len);
-
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
@@ -1397,6 +1397,9 @@ i915_gem_do_execbuffer(struct drm_device
                        goto err;
        }
 
+       CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
+           exec_start, exec_len);
+
        i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
        i915_gem_execbuffer_retire_commands(dev, file, ring);
 
@@ -1411,9 +1414,10 @@ err:
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }
+
        DRM_UNLOCK(dev);
 
-pre_struct_lock_err:
+pre_mutex_err:
        for (i = 0; i < args->buffer_count; i++) {
                if (relocs_ma[i] != NULL) {
                        vm_page_unhold_pages(relocs_ma[i], relocs_len[i]);
@@ -1461,7 +1465,7 @@ i915_gem_execbuffer(struct drm_device *d
                          args->buffer_count, ret);
                free(exec_list, DRM_I915_GEM);
                free(exec2_list, DRM_I915_GEM);
-               return (ret);
+               return ret;
        }
 
        for (i = 0; i < args->buffer_count; i++) {
@@ -1525,8 +1529,8 @@ i915_gem_execbuffer2(struct drm_device *
        }
 
        /* XXXKIB user-controllable malloc size */
-       exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
-           DRM_I915_GEM, M_WAITOK);
+       exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count,
+                            DRM_I915_GEM, M_WAITOK);
        ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
            sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {

Modified: head/sys/dev/drm2/i915/intel_pm.c
==============================================================================
--- head/sys/dev/drm2/i915/intel_pm.c   Sat Oct 31 10:16:44 2015        (r290227)
+++ head/sys/dev/drm2/i915/intel_pm.c   Sat Oct 31 15:09:31 2015        (r290228)
@@ -602,7 +602,7 @@ static void i915_ironlake_get_mem_freq(s
                dev_priv->mem_freq = 1600;
                break;
        default:
-               DRM_DEBUG("unknown memory frequency 0x%02x\n",
+               DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
@@ -633,7 +633,7 @@ static void i915_ironlake_get_mem_freq(s
                dev_priv->fsb_freq = 6400;
                break;
        default:
-               DRM_DEBUG("unknown fsb frequency 0x%04x\n",
+               DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
@@ -2706,7 +2706,7 @@ unsigned long i915_chipset_val(struct dr
         * zero and give the hw a chance to gather more samples.
         */
        if (diff1 <= 10)
-               return (dev_priv->chipset_power);
+               return dev_priv->chipset_power;
 
        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
@@ -2739,7 +2739,7 @@ unsigned long i915_chipset_val(struct dr
        dev_priv->last_time1 = now;
 
        dev_priv->chipset_power = ret;
-       return (ret);
+       return ret;
 }
 
 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
@@ -3192,6 +3192,18 @@ void intel_init_emon(struct drm_device *
        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /*
+        * On Ibex Peak and Cougar Point, we need to disable clock
+        * gating for the panel power sequencer or it will fail to
+        * start up when no ports are active.
+        */
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3261,6 +3273,24 @@ static void ironlake_init_clock_gating(s
                   _3D_CHICKEN2_WM_READ_PIPELINED);
 }
 
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe;
+
+       /*
+        * On Ibex Peak and Cougar Point, we need to disable clock
+        * gating for the panel power sequencer or it will fail to
+        * start up when no ports are active.
+        */
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+                  DPLS_EDP_PPS_FIX_DIS);
+       /* Without this, mode sets may fail silently on FDI */
+       for_each_pipe(pipe)
+               I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3510,36 +3540,6 @@ static void i830_init_clock_gating(struc
        I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 }
 
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /*
-        * On Ibex Peak and Cougar Point, we need to disable clock
-        * gating for the panel power sequencer or it will fail to
-        * start up when no ports are active.
-        */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
-
-       /*
-        * On Ibex Peak and Cougar Point, we need to disable clock
-        * gating for the panel power sequencer or it will fail to
-        * start up when no ports are active.
-        */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-       I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
-                  DPLS_EDP_PPS_FIX_DIS);
-       /* Without this, mode sets may fail silently on FDI */
-       for_each_pipe(pipe)
-               I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
 void intel_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;

Modified: head/sys/dev/drm2/i915/intel_ringbuffer.h
==============================================================================
--- head/sys/dev/drm2/i915/intel_ringbuffer.h   Sat Oct 31 10:16:44 2015        (r290227)
+++ head/sys/dev/drm2/i915/intel_ringbuffer.h   Sat Oct 31 15:09:31 2015        (r290228)
@@ -81,15 +81,15 @@ struct  intel_ring_buffer {
        int             (*init)(struct intel_ring_buffer *ring);
 
        void            (*write_tail)(struct intel_ring_buffer *ring,
-                                     uint32_t value);
+                                     u32 value);
        int             (*flush)(struct intel_ring_buffer *ring,
-                                 uint32_t      invalidate_domains,
-                                 uint32_t      flush_domains);
+                                 u32   invalidate_domains,
+                                 u32   flush_domains);
        int             (*add_request)(struct intel_ring_buffer *ring,
                                       uint32_t *seqno);
        uint32_t        (*get_seqno)(struct intel_ring_buffer *ring);
        int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
-                                              uint32_t offset, uint32_t length);
+                                              u32 offset, u32 length);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
        void            (*cleanup)(struct intel_ring_buffer *ring);
@@ -155,7 +155,7 @@ intel_ring_flag(struct intel_ring_buffer
        return 1 << ring->id;
 }
 
-static inline uint32_t
+static inline u32
 intel_ring_sync_index(struct intel_ring_buffer *ring,
                      struct intel_ring_buffer *other)
 {
@@ -180,7 +180,7 @@ intel_read_status_page(struct intel_ring
 {
        /* Ensure that the compiler doesn't optimize away the load. */
        __compiler_membar();
-       return (atomic_load_acq_32(ring->status_page.page_addr + reg));
+       return atomic_load_acq_32(ring->status_page.page_addr + reg);
 }
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
@@ -221,7 +221,6 @@ static inline u32 intel_ring_get_tail(st
 void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno);
 
 /* DRI warts */
-int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
-    uint32_t size);
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to