From: Russell King <rmk+ker...@arm.linux.org.uk>

The Linux kernel dislikes the use of stdint.h types.  In order to
prepare this code for merging, we need to convert to the preferred
types, which are u8/u16/u32/u64 for unsigned ints of the specified
size, and s8/s16/s32/s64 for signed ints of the same.
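
As a purely illustrative sketch (not part of the patch; the struct and
field names below are invented), the conversion replaces the stdint.h
spellings with the kernel's own types from <linux/types.h>.  Note that
the UAPI header uses the exported __-prefixed variants instead, since
the bare kernel names are not visible to userspace:

	#include <linux/types.h>	/* provides u32/s64 and __u32/__u64 */

	/* kernel-internal code, e.g. under drivers/: */
	struct example_fence_state {
		u32 fence;		/* was: uint32_t fence; */
		s64 offset;		/* was: int64_t offset; */
	};

	/* UAPI headers under include/uapi/ use the exported variants: */
	struct example_ioctl_args {
		__u32 handle;		/* was: uint32_t handle; */
		__u64 value;		/* was: uint64_t value; */
	};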

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/staging/etnaviv/etnaviv_buffer.c     | 10 ++--
 drivers/staging/etnaviv/etnaviv_drv.c        |  2 +-
 drivers/staging/etnaviv/etnaviv_drv.h        | 31 +++++-----
 drivers/staging/etnaviv/etnaviv_gem.c        | 28 ++++-----
 drivers/staging/etnaviv/etnaviv_gem.h        | 24 ++++----
 drivers/staging/etnaviv/etnaviv_gem_submit.c | 14 ++---
 drivers/staging/etnaviv/etnaviv_gpu.c        | 19 +++----
 drivers/staging/etnaviv/etnaviv_gpu.h        | 59 ++++++++++---------
 drivers/staging/etnaviv/etnaviv_iommu.c      | 12 ++--
 drivers/staging/etnaviv/etnaviv_mmu.c        | 10 ++--
 drivers/staging/etnaviv/etnaviv_mmu.h        |  8 +--
 include/uapi/drm/etnaviv_drm.h               | 85 ++++++++++++++--------------
 12 files changed, 150 insertions(+), 152 deletions(-)

diff --git a/drivers/staging/etnaviv/etnaviv_buffer.c b/drivers/staging/etnaviv/etnaviv_buffer.c
index 816dc0c4a287..01a29793aed3 100644
--- a/drivers/staging/etnaviv/etnaviv_buffer.c
+++ b/drivers/staging/etnaviv/etnaviv_buffer.c
@@ -28,7 +28,7 @@
  */


-static inline void OUT(struct etnaviv_gem_object *buffer, uint32_t data)
+static inline void OUT(struct etnaviv_gem_object *buffer, u32 data)
 {
        u32 *vaddr = (u32 *)buffer->vaddr;

@@ -183,7 +183,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
         * if we are going to completely overflow the buffer, we need to wrap.
         */
        if (buffer->offset + reserve_size >
-           buffer->base.size / sizeof(uint32_t))
+           buffer->base.size / sizeof(u32))
                buffer->offset = 0;

        /* save offset back into main buffer */
@@ -192,7 +192,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        link_size = 6;

        /* Skip over any extra instructions */
-       link_target += extra_size * sizeof(uint32_t);
+       link_target += extra_size * sizeof(u32);

        /* update offset for every cmd stream */
        for (i = submit->nr_cmds; i--; ) {
@@ -232,8 +232,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        }

        if (gpu->mmu->need_flush || gpu->switch_context) {
-               uint32_t new_target = gpu_va(gpu, buffer) + buffer->offset *
-                                       sizeof(uint32_t);
+               u32 new_target = gpu_va(gpu, buffer) + buffer->offset *
+                                       sizeof(u32);

                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
diff --git a/drivers/staging/etnaviv/etnaviv_drv.c b/drivers/staging/etnaviv/etnaviv_drv.c
index 4f97bb667278..d7133e4450c6 100644
--- a/drivers/staging/etnaviv/etnaviv_drv.c
+++ b/drivers/staging/etnaviv/etnaviv_drv.c
@@ -434,7 +434,7 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,

        if (offset_in_page(args->user_ptr | args->user_size) ||
            (uintptr_t)args->user_ptr != args->user_ptr ||
-           (uint32_t)args->user_size != args->user_size)
+           (u32)args->user_size != args->user_size)
                return -EINVAL;

        if (args->flags & ETNA_USERPTR_WRITE)
diff --git a/drivers/staging/etnaviv/etnaviv_drv.h b/drivers/staging/etnaviv/etnaviv_drv.h
index 8f9c1f747b4e..b98db8c11ef3 100644
--- a/drivers/staging/etnaviv/etnaviv_drv.h
+++ b/drivers/staging/etnaviv/etnaviv_drv.h
@@ -53,7 +53,7 @@ struct etnaviv_drm_private {
        int num_gpus;
        struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];

-       uint32_t next_fence;
+       u32 next_fence;

        /* list of GEM objects: */
        struct list_head inactive_list;
@@ -61,9 +61,10 @@ struct etnaviv_drm_private {
        struct workqueue_struct *wq;
 };

-static void etnaviv_queue_work(struct drm_device *dev, struct work_struct *w)
+static inline void etnaviv_queue_work(struct drm_device *dev,
+       struct work_struct *w)
 {
-       struct etnaviv_drm_private *priv = drm->dev_private;
+       struct etnaviv_drm_private *priv = dev->dev_private;

        queue_work(priv->wq, w);
 }
@@ -73,11 +74,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,

 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, uint64_t *offset);
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
-       struct drm_gem_object *obj, uint32_t *iova);
+       struct drm_gem_object *obj, u32 *iova);
 int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
-       int id, uint32_t *iova);
+       int id, u32 *iova);
 void etnaviv_gem_put_iova(struct drm_gem_object *obj);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
@@ -90,18 +91,18 @@ void *etnaviv_gem_vaddr_locked(struct drm_gem_object *obj);
 void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
 dma_addr_t etnaviv_gem_paddr_locked(struct drm_gem_object *obj);
 void etnaviv_gem_move_to_active(struct drm_gem_object *obj,
-               struct etnaviv_gpu *gpu, uint32_t access, uint32_t fence);
+               struct etnaviv_gpu *gpu, u32 access, u32 fence);
 void etnaviv_gem_move_to_inactive(struct drm_gem_object *obj);
-int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout);
 int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
 void etnaviv_gem_free_object(struct drm_gem_object *obj);
 int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-               uint32_t size, uint32_t flags, uint32_t *handle);
+               u32 size, u32 flags, u32 *handle);
 struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
-               uint32_t size, uint32_t flags);
+               u32 size, u32 flags);
 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
-       uintptr_t ptr, uint32_t size, uint32_t flags, uint32_t *handle);
+       uintptr_t ptr, u32 size, u32 flags, u32 *handle);
 u32 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
 void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
@@ -122,14 +123,14 @@ u32 etnaviv_readl(const void __iomem *addr);
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)

 /* returns true if fence a comes after fence b */
-static inline bool fence_after(uint32_t a, uint32_t b)
+static inline bool fence_after(u32 a, u32 b)
 {
-       return (int32_t)(a - b) > 0;
+       return (s32)(a - b) > 0;
 }

-static inline bool fence_after_eq(uint32_t a, uint32_t b)
+static inline bool fence_after_eq(u32 a, u32 b)
 {
-       return (int32_t)(a - b) >= 0;
+       return (s32)(a - b) >= 0;
 }

 static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
diff --git a/drivers/staging/etnaviv/etnaviv_gem.c b/drivers/staging/etnaviv/etnaviv_gem.c
index 6e114a390101..23e655de7925 100644
--- a/drivers/staging/etnaviv/etnaviv_gem.c
+++ b/drivers/staging/etnaviv/etnaviv_gem.c
@@ -270,7 +270,7 @@ out:
 }

 /* get mmap offset - must be called under struct_mutex */
-int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, uint64_t *offset)
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
 {
        int ret;

@@ -292,7 +292,7 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, uint64_t *offset)
  * the refcnt counter needs to be atomic_t.
  */
 int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
-       struct drm_gem_object *obj, uint32_t *iova)
+       struct drm_gem_object *obj, u32 *iova)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping =
@@ -319,7 +319,7 @@ int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
 }

 int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
-       int id, uint32_t *iova)
+       int id, u32 *iova)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping =
@@ -393,7 +393,7 @@ dma_addr_t etnaviv_gem_paddr_locked(struct drm_gem_object *obj)
 }

 void etnaviv_gem_move_to_active(struct drm_gem_object *obj,
-       struct etnaviv_gpu *gpu, uint32_t access, uint32_t fence)
+       struct etnaviv_gpu *gpu, u32 access, u32 fence)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

@@ -426,7 +426,7 @@ void etnaviv_gem_move_to_inactive(struct drm_gem_object *obj)
        list_add_tail(&etnaviv_obj->mm_list, &priv->inactive_list);
 }

-int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
@@ -434,7 +434,7 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,

        if (is_active(etnaviv_obj)) {
                struct etnaviv_gpu *gpu = etnaviv_obj->gpu;
-               uint32_t fence = 0;
+               u32 fence = 0;

                if (op & ETNA_PREP_READ)
                        fence = etnaviv_obj->write_fence;
@@ -462,11 +462,11 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-       uint64_t off = drm_vma_node_start(&obj->vma_node);
+       unsigned long off = drm_vma_node_start(&obj->vma_node);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

-       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zd\n",
+       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        etnaviv_obj->read_fence, etnaviv_obj->write_fence,
                        obj->name, obj->refcount.refcount.counter,
@@ -565,7 +565,7 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
 }

 static int etnaviv_gem_new_impl(struct drm_device *dev,
-               uint32_t size, uint32_t flags,
+               u32 size, u32 flags,
                struct drm_gem_object **obj)
 {
        struct etnaviv_gem_object *etnaviv_obj;
@@ -623,7 +623,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev,
 }

 static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
-               uint32_t size, uint32_t flags)
+               u32 size, u32 flags)
 {
        struct drm_gem_object *obj = NULL;
        int ret;
@@ -657,7 +657,7 @@ fail:

 /* convenience method to construct a GEM buffer object, and userspace handle */
 int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-               uint32_t size, uint32_t flags, uint32_t *handle)
+               u32 size, u32 flags, u32 *handle)
 {
        struct drm_gem_object *obj;
        int ret;
@@ -681,7 +681,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 }

 struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
-               uint32_t size, uint32_t flags)
+               u32 size, u32 flags)
 {
        struct drm_gem_object *obj;
        int ret;
@@ -699,7 +699,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
        return obj;
 }

-int etnaviv_gem_new_private(struct drm_device *dev, size_t size, uint32_t flags,
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        struct etnaviv_gem_object **res)
 {
        struct drm_gem_object *obj;
@@ -885,7 +885,7 @@ static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
 };

 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
-       uintptr_t ptr, uint32_t size, uint32_t flags, uint32_t *handle)
+       uintptr_t ptr, u32 size, u32 flags, u32 *handle)
 {
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;
diff --git a/drivers/staging/etnaviv/etnaviv_gem.h b/drivers/staging/etnaviv/etnaviv_gem.h
index 397b30f39708..08f0e6464cc7 100644
--- a/drivers/staging/etnaviv/etnaviv_gem.h
+++ b/drivers/staging/etnaviv/etnaviv_gem.h
@@ -33,14 +33,14 @@ struct etnaviv_vram_mapping {
        struct list_head obj_head;
        struct etnaviv_iommu *mmu;
        struct drm_mm_node vram_node;
-       uint32_t iova;
+       u32 iova;
 };

 struct etnaviv_gem_object {
        struct drm_gem_object base;
        const struct etnaviv_gem_ops *ops;

-       uint32_t flags;
+       u32 flags;

        /* And object is either:
         *  inactive - on priv->inactive_list
@@ -52,8 +52,8 @@ struct etnaviv_gem_object {
         */
        struct list_head mm_list;
        struct etnaviv_gpu *gpu;     /* non-null if active */
-       uint32_t access;
-       uint32_t read_fence, write_fence;
+       u32 access;
+       u32 read_fence, write_fence;

        /* Transiently in the process of submit ioctl, objects associated
         * with the submit are on submit->bo_list.. this only lasts for
@@ -108,29 +108,29 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
 struct etnaviv_gem_submit {
        struct drm_device *dev;
        struct etnaviv_gpu *gpu;
-       uint32_t exec_state;
+       u32 exec_state;
        struct list_head bo_list;
        struct ww_acquire_ctx ticket;
-       uint32_t fence;
+       u32 fence;
        unsigned int nr_cmds;
        unsigned int nr_bos;
        struct {
-               uint32_t type;
-               uint32_t offset; /* in dwords */
-               uint32_t size;  /* in dwords */
+               u32 type;
+               u32 offset; /* in dwords */
+               u32 size;  /* in dwords */
                struct etnaviv_gem_object *obj;
        } cmd[MAX_CMDS];
        struct {
-               uint32_t flags;
+               u32 flags;
                struct etnaviv_gem_object *obj;
-               uint32_t iova;
+               u32 iova;
        } bos[0];
 };

 struct etnaviv_vram_mapping *
 etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu *mmu);
-int etnaviv_gem_new_private(struct drm_device *dev, size_t size, uint32_t flags,
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        struct etnaviv_gem_object **res);
 int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
 struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
diff --git a/drivers/staging/etnaviv/etnaviv_gem_submit.c b/drivers/staging/etnaviv/etnaviv_gem_submit.c
index 647d16fee98b..e2c94f476810 100644
--- a/drivers/staging/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/staging/etnaviv/etnaviv_gem_submit.c
@@ -139,7 +139,7 @@ static int submit_validate_objects(struct etnaviv_gem_submit *submit)
 retry:
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-               uint32_t iova;
+               u32 iova;

                if (slow_locked == i)
                        slow_locked = -1;
@@ -201,8 +201,8 @@ fail:
        return ret;
 }

-static int submit_bo(struct etnaviv_gem_submit *submit, uint32_t idx,
-               struct etnaviv_gem_object **obj, uint32_t *iova)
+static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
+               struct etnaviv_gem_object **obj, u32 *iova)
 {
        if (idx >= submit->nr_bos) {
                DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -220,10 +220,10 @@ static int submit_bo(struct etnaviv_gem_submit *submit, uint32_t idx,

 /* process the reloc's and patch up the cmdstream as needed: */
 static int submit_reloc(struct etnaviv_gem_submit *submit, struct etnaviv_gem_object *obj,
-               uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+               u32 offset, u32 nr_relocs, u64 relocs)
 {
-       uint32_t i, last_offset = 0;
-       uint32_t *ptr = obj->vaddr;
+       u32 i, last_offset = 0;
+       u32 *ptr = obj->vaddr;
        int ret;

        if (offset % 4) {
@@ -236,7 +236,7 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, struct etnaviv_gem_ob
                struct etnaviv_gem_object *bobj;
                void __user *userptr =
                        to_user_ptr(relocs + (i * sizeof(submit_reloc)));
-               uint32_t iova, off;
+               u32 iova, off;

                ret = copy_from_user(&submit_reloc, userptr,
                                     sizeof(submit_reloc));
diff --git a/drivers/staging/etnaviv/etnaviv_gpu.c b/drivers/staging/etnaviv/etnaviv_gpu.c
index 4f2174431c2b..233b2197eb37 100644
--- a/drivers/staging/etnaviv/etnaviv_gpu.c
+++ b/drivers/staging/etnaviv/etnaviv_gpu.c
@@ -35,8 +35,7 @@ static const struct platform_device_id gpu_ids[] = {
  * Driver functions:
  */

-int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param,
-       uint64_t *value)
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
 {
        switch (param) {
        case ETNAVIV_PARAM_GPU_MODEL:
@@ -728,7 +727,7 @@ static void recover_worker(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);

        /* Retire the buffer objects in a work */
-       etnaviv_queue_work(gpu->dev, &gpu->retire_work);
+       etnaviv_queue_work(gpu->drm, &gpu->retire_work);
 }

 static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
@@ -741,7 +740,7 @@ static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
 static void hangcheck_handler(unsigned long data)
 {
        struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
-       uint32_t fence = gpu->retired_fence;
+       u32 fence = gpu->retired_fence;
        bool progress = false;

        if (fence != gpu->hangcheck_fence) {
@@ -750,7 +749,7 @@ static void hangcheck_handler(unsigned long data)
        }

        if (!progress) {
-               uint32_t dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+               u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
                int change = dma_addr - gpu->hangcheck_dma_addr;

                if (change < 0 || change > 16) {
@@ -764,7 +763,7 @@ static void hangcheck_handler(unsigned long data)
                dev_err(gpu->dev, "     completed fence: %u\n", fence);
                dev_err(gpu->dev, "     submitted fence: %u\n",
                        gpu->submitted_fence);
-               etnaviv_queue_work(gpu->dev, &gpu->recover_work);
+               etnaviv_queue_work(gpu->drm, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
@@ -835,7 +834,7 @@ static void retire_worker(struct work_struct *work)
        struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
                                               retire_work);
        struct drm_device *dev = gpu->drm;
-       uint32_t fence = gpu->retired_fence;
+       u32 fence = gpu->retired_fence;

        mutex_lock(&dev->struct_mutex);

@@ -864,7 +863,7 @@ static void retire_worker(struct work_struct *work)
 }

 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-       uint32_t fence, struct timespec *timeout)
+       u32 fence, struct timespec *timeout)
 {
        int ret;

@@ -963,7 +962,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
                WARN_ON(is_active(etnaviv_obj) && (etnaviv_obj->gpu != gpu));

                if (!is_active(etnaviv_obj)) {
-                       uint32_t iova;
+                       u32 iova;

                        /* ring takes a reference to the bo and iova: */
                        drm_gem_object_reference(&etnaviv_obj->base);
@@ -1036,7 +1035,7 @@ static irqreturn_t irq_handler(int irq, void *data)
                }

                /* Retire the buffer objects in a work */
-               etnaviv_queue_work(gpu->dev, &gpu->retire_work);
+               etnaviv_queue_work(gpu->drm, &gpu->retire_work);

                ret = IRQ_HANDLED;
        }
diff --git a/drivers/staging/etnaviv/etnaviv_gpu.h b/drivers/staging/etnaviv/etnaviv_gpu.h
index 49f369b5a4b5..44af5ca3f633 100644
--- a/drivers/staging/etnaviv/etnaviv_gpu.h
+++ b/drivers/staging/etnaviv/etnaviv_gpu.h
@@ -26,61 +26,61 @@ struct etnaviv_gem_submit;

 struct etnaviv_chip_identity {
        /* Chip model. */
-       uint32_t model;
+       u32 model;

        /* Revision value.*/
-       uint32_t revision;
+       u32 revision;

        /* Supported feature fields. */
-       uint32_t features;
+       u32 features;

        /* Supported minor feature fields. */
-       uint32_t minor_features0;
+       u32 minor_features0;

        /* Supported minor feature 1 fields. */
-       uint32_t minor_features1;
+       u32 minor_features1;

        /* Supported minor feature 2 fields. */
-       uint32_t minor_features2;
+       u32 minor_features2;

        /* Supported minor feature 3 fields. */
-       uint32_t minor_features3;
+       u32 minor_features3;

        /* Number of streams supported. */
-       uint32_t stream_count;
+       u32 stream_count;

        /* Total number of temporary registers per thread. */
-       uint32_t register_max;
+       u32 register_max;

        /* Maximum number of threads. */
-       uint32_t thread_count;
+       u32 thread_count;

        /* Number of shader cores. */
-       uint32_t shader_core_count;
+       u32 shader_core_count;

        /* Size of the vertex cache. */
-       uint32_t vertex_cache_size;
+       u32 vertex_cache_size;

        /* Number of entries in the vertex output buffer. */
-       uint32_t vertex_output_buffer_size;
+       u32 vertex_output_buffer_size;

        /* Number of pixel pipes. */
-       uint32_t pixel_pipes;
+       u32 pixel_pipes;

        /* Number of instructions. */
-       uint32_t instruction_count;
+       u32 instruction_count;

        /* Number of constants. */
-       uint32_t num_constants;
+       u32 num_constants;

        /* Buffer size */
-       uint32_t buffer_size;
+       u32 buffer_size;
 };

 struct etnaviv_event {
        bool used;
-       uint32_t fence;
-       uint32_t ring_pos;
+       u32 fence;
+       u32 ring_pos;
 };

 struct etnaviv_gpu {
@@ -94,7 +94,7 @@ struct etnaviv_gpu {
        struct drm_gem_object *buffer;

        /* bus base address of memory  */
-       uint32_t memory_base;
+       u32 memory_base;

        /* event management: */
        struct etnaviv_event event[30];
@@ -104,12 +104,12 @@ struct etnaviv_gpu {
        /* list of GEM active objects: */
        struct list_head active_list;

-       uint32_t idle_mask;
-       uint32_t last_ring_pos;
+       u32 idle_mask;
+       u32 last_ring_pos;

        /* Fencing support */
-       uint32_t submitted_fence;
-       uint32_t retired_fence;
+       u32 submitted_fence;
+       u32 retired_fence;
        wait_queue_head_t fence_event;

        /* worker for handling active-list retiring: */
@@ -129,8 +129,8 @@ struct etnaviv_gpu {
 #define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
 #define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
        struct timer_list hangcheck_timer;
-       uint32_t hangcheck_fence;
-       uint32_t hangcheck_dma_addr;
+       u32 hangcheck_fence;
+       u32 hangcheck_dma_addr;
        struct work_struct recover_work;
 };

@@ -144,13 +144,12 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
        return etnaviv_readl(gpu->mmio + reg);
 }

-static inline bool fence_completed(struct etnaviv_gpu *gpu, uint32_t fence)
+static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
 {
        return fence_after_eq(gpu->retired_fence, fence);
 }

-int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param,
-       uint64_t *value);
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);

@@ -160,7 +159,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);

 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-       uint32_t fence, struct timespec *timeout);
+       u32 fence, struct timespec *timeout);
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        struct etnaviv_gem_submit *submit, struct etnaviv_file_private *ctx);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
diff --git a/drivers/staging/etnaviv/etnaviv_iommu.c b/drivers/staging/etnaviv/etnaviv_iommu.c
index 8aff23a310fb..73ba65205705 100644
--- a/drivers/staging/etnaviv/etnaviv_iommu.c
+++ b/drivers/staging/etnaviv/etnaviv_iommu.c
@@ -26,12 +26,12 @@
 #include "state_hi.xml.h"

 #define PT_SIZE                SZ_512K
-#define PT_ENTRIES     (PT_SIZE / sizeof(uint32_t))
+#define PT_ENTRIES     (PT_SIZE / sizeof(u32))

 #define GPU_MEM_START  0x80000000

 struct etnaviv_iommu_domain_pgtable {
-       uint32_t *pgtable;
+       u32 *pgtable;
        dma_addr_t paddr;
 };

@@ -65,7 +65,7 @@ static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
        dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
 }

-static uint32_t pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
+static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
                           unsigned long iova)
 {
        /* calcuate index into page table */
@@ -88,7 +88,7 @@ static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,

 static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
 {
-       uint32_t iova, *p;
+       u32 iova, *p;
        int ret, i;

        etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
@@ -183,10 +183,10 @@ void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
        struct iommu_domain *domain)
 {
        struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
-       uint32_t pgtable;
+       u32 pgtable;

        /* set page table address in MC */
-       pgtable = (uint32_t)etnaviv_domain->pgtable.paddr;
+       pgtable = (u32)etnaviv_domain->pgtable.paddr;

        gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
diff --git a/drivers/staging/etnaviv/etnaviv_mmu.c b/drivers/staging/etnaviv/etnaviv_mmu.c
index ec01c8a560f4..754bcbc0c19a 100644
--- a/drivers/staging/etnaviv/etnaviv_mmu.c
+++ b/drivers/staging/etnaviv/etnaviv_mmu.c
@@ -25,7 +25,7 @@ static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
        return 0;
 }

-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, uint32_t iova,
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len, int prot)
 {
        struct iommu_domain *domain = iommu->domain;
@@ -64,7 +64,7 @@ fail:
        return ret;
 }

-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova,
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                struct sg_table *sgt, unsigned len)
 {
        struct iommu_domain *domain = iommu->domain;
@@ -91,7 +91,7 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova,
 }

 int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
-       struct etnaviv_gem_object *etnaviv_obj, uint32_t memory_base,
+       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping **out_mapping)
 {
        struct etnaviv_drm_private *priv = etnaviv_obj->base.dev->dev_private;
@@ -109,7 +109,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
-               uint32_t iova;
+               u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
@@ -227,7 +227,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
                             struct etnaviv_vram_mapping *mapping)
 {
        if (mapping) {
-               uint32_t offset = mapping->vram_node.start;
+               u32 offset = mapping->vram_node.start;

                if (mapping->iova >= 0x80000000) {
                        etnaviv_iommu_unmap(mmu, offset, etnaviv_obj->sgt,
diff --git a/drivers/staging/etnaviv/etnaviv_mmu.h b/drivers/staging/etnaviv/etnaviv_mmu.h
index 94dbab736fc1..1d619e91f457 100644
--- a/drivers/staging/etnaviv/etnaviv_mmu.h
+++ b/drivers/staging/etnaviv/etnaviv_mmu.h
@@ -34,7 +34,7 @@ struct etnaviv_iommu {

        /* memory manager for GPU address area */
        struct drm_mm mm;
-       uint32_t last_iova;
+       u32 last_iova;
        bool need_flush;
 };

@@ -42,12 +42,12 @@ struct etnaviv_gem_object;

 int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
        int cnt);
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, uint32_t iova,
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
        struct sg_table *sgt, unsigned len, int prot);
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova,
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
        struct sg_table *sgt, unsigned len);
 int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
-       struct etnaviv_gem_object *etnaviv_obj, uint32_t memory_base,
+       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping **mapping);
 void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj,
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index c6ce72ae4dbe..0aca6b189e63 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -17,12 +17,11 @@
 #ifndef __ETNAVIV_DRM_H__
 #define __ETNAVIV_DRM_H__

-#include <stddef.h>
 #include <drm/drm.h>

 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints:
- *  1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
+ *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
  *     user/kernel compatibility
  *  2) Keep fields aligned to their size
  *  3) Because of how drm_ioctl() works, we can add new fields at
@@ -38,8 +37,8 @@
  * same as 'struct timespec' but 32/64b ABI safe.
  */
 struct drm_etnaviv_timespec {
-       int64_t tv_sec;          /* seconds */
-       int64_t tv_nsec;         /* nanoseconds */
+       __s64 tv_sec;          /* seconds */
+       __s64 tv_nsec;         /* nanoseconds */
 };

 #define ETNAVIV_PARAM_GPU_MODEL                     0x01
@@ -64,9 +63,9 @@ struct drm_etnaviv_timespec {
 #define ETNA_MAX_PIPES 4

 struct drm_etnaviv_param {
-       uint32_t pipe;           /* in */
-       uint32_t param;          /* in, ETNAVIV_PARAM_x */
-       uint64_t value;          /* out (get_param) or in (set_param) */
+       __u32 pipe;           /* in */
+       __u32 param;          /* in, ETNAVIV_PARAM_x */
+       __u64 value;          /* out (get_param) or in (set_param) */
 };

 /*
@@ -83,15 +82,15 @@ struct drm_etnaviv_param {
 #define ETNA_BO_FORCE_MMU    0x00100000

 struct drm_etnaviv_gem_new {
-       uint64_t size;           /* in */
-       uint32_t flags;          /* in, mask of ETNA_BO_x */
-       uint32_t handle;         /* out */
+       __u64 size;           /* in */
+       __u32 flags;          /* in, mask of ETNA_BO_x */
+       __u32 handle;         /* out */
 };

 struct drm_etnaviv_gem_info {
-       uint32_t handle;         /* in */
-       uint32_t pad;
-       uint64_t offset;         /* out, offset to pass to mmap() */
+       __u32 handle;         /* in */
+       __u32 pad;
+       __u64 offset;         /* out, offset to pass to mmap() */
 };

 #define ETNA_PREP_READ        0x01
@@ -99,13 +98,13 @@ struct drm_etnaviv_gem_info {
 #define ETNA_PREP_NOSYNC      0x04

 struct drm_etnaviv_gem_cpu_prep {
-       uint32_t handle;         /* in */
-       uint32_t op;             /* in, mask of ETNA_PREP_x */
+       __u32 handle;         /* in */
+       __u32 op;             /* in, mask of ETNA_PREP_x */
        struct drm_etnaviv_timespec timeout;   /* in */
 };

 struct drm_etnaviv_gem_cpu_fini {
-       uint32_t handle;         /* in */
+       __u32 handle;         /* in */
 };

 /*
@@ -119,9 +118,9 @@ struct drm_etnaviv_gem_cpu_fini {
  * otherwise EINVAL.
  */
 struct drm_etnaviv_gem_submit_reloc {
-       uint32_t submit_offset;  /* in, offset from submit_bo */
-       uint32_t reloc_idx;      /* in, index of reloc_bo buffer */
-       uint64_t reloc_offset;   /* in, offset from start of reloc_bo */
+       __u32 submit_offset;  /* in, offset from submit_bo */
+       __u32 reloc_idx;      /* in, index of reloc_bo buffer */
+       __u64 reloc_offset;   /* in, offset from start of reloc_bo */
 };

 /* submit-types:
@@ -132,13 +131,13 @@ struct drm_etnaviv_gem_submit_reloc {
 #define ETNA_SUBMIT_CMD_BUF             0x0001
 #define ETNA_SUBMIT_CMD_CTX_RESTORE_BUF 0x0002
 struct drm_etnaviv_gem_submit_cmd {
-       uint32_t type;           /* in, one of ETNA_SUBMIT_CMD_x */
-       uint32_t submit_idx;     /* in, index of submit_bo cmdstream buffer */
-       uint32_t submit_offset;  /* in, offset into submit_bo */
-       uint32_t size;           /* in, cmdstream size */
-       uint32_t pad;
-       uint32_t nr_relocs;      /* in, number of submit_reloc's */
-       uint64_t relocs;         /* in, ptr to array of submit_reloc's */
+       __u32 type;           /* in, one of ETNA_SUBMIT_CMD_x */
+       __u32 submit_idx;     /* in, index of submit_bo cmdstream buffer */
+       __u32 submit_offset;  /* in, offset into submit_bo */
+       __u32 size;           /* in, cmdstream size */
+       __u32 pad;
+       __u32 nr_relocs;      /* in, number of submit_reloc's */
+       __u64 relocs;         /* in, ptr to array of submit_reloc's */
 };

 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -155,9 +154,9 @@ struct drm_etnaviv_gem_submit_cmd {
 #define ETNA_SUBMIT_BO_READ             0x0001
 #define ETNA_SUBMIT_BO_WRITE            0x0002
 struct drm_etnaviv_gem_submit_bo {
-       uint32_t flags;          /* in, mask of ETNA_SUBMIT_BO_x */
-       uint32_t handle;         /* in, GEM handle */
-       uint64_t presumed;       /* in/out, presumed buffer address */
+       __u32 flags;          /* in, mask of ETNA_SUBMIT_BO_x */
+       __u32 handle;         /* in, GEM handle */
+       __u64 presumed;       /* in/out, presumed buffer address */
 };

 /* Each cmdstream submit consists of a table of buffers involved, and
@@ -168,14 +167,14 @@ struct drm_etnaviv_gem_submit_bo {
 #define ETNA_PIPE_2D      0x01
 #define ETNA_PIPE_VG      0x02
 struct drm_etnaviv_gem_submit {
-       uint32_t pipe;           /* in */
-       uint32_t exec_state;     /* in, initial execution state (ETNA_PIPE_x) */
-       uint32_t fence;          /* out */
-       uint32_t nr_bos;         /* in, number of submit_bo's */
-       uint32_t nr_cmds;        /* in, number of submit_cmd's */
-       uint32_t pad;
-       uint64_t bos;            /* in, ptr to array of submit_bo's */
-       uint64_t cmds;           /* in, ptr to array of submit_cmd's */
+       __u32 pipe;           /* in */
+       __u32 exec_state;     /* in, initial execution state (ETNA_PIPE_x) */
+       __u32 fence;          /* out */
+       __u32 nr_bos;         /* in, number of submit_bo's */
+       __u32 nr_cmds;        /* in, number of submit_cmd's */
+       __u32 pad;
+       __u64 bos;            /* in, ptr to array of submit_bo's */
+       __u64 cmds;           /* in, ptr to array of submit_cmd's */
 };

 /* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -186,18 +185,18 @@ struct drm_etnaviv_gem_submit {
  * APIs without requiring a dummy bo to synchronize on.
  */
 struct drm_etnaviv_wait_fence {
-       uint32_t pipe;           /* in */
-       uint32_t fence;          /* in */
+       __u32 pipe;           /* in */
+       __u32 fence;          /* in */
        struct drm_etnaviv_timespec timeout;   /* in */
 };

 #define ETNA_USERPTR_READ      0x01
 #define ETNA_USERPTR_WRITE     0x02
 struct drm_etnaviv_gem_userptr {
-       uint64_t user_ptr;      /* in, page aligned user pointer */
-       uint64_t user_size;     /* in, page aligned user size */
-       uint32_t flags;         /* in, flags */
-       uint32_t handle;        /* out, non-zero handle */
+       __u64 user_ptr; /* in, page aligned user pointer */
+       __u64 user_size;        /* in, page aligned user size */
+       __u32 flags;            /* in, flags */
+       __u32 handle;   /* out, non-zero handle */
 };

 #define DRM_ETNAVIV_GET_PARAM          0x00
-- 
2.5.1
