On 02.06.21 14:34, Daniel Vetter wrote:
On Wed, Jun 02, 2021 at 01:17:09PM +0200, Christian König wrote:
No functional change.

Signed-off-by: Christian König <christian.koe...@amd.com>
Acked-by: Daniel Vetter <daniel.vet...@ffwll.ch>

Maybe add the checkpatch warnings you're fixing here to the commit
message. I didn't know that initcalls should be at the bottom ...

Well, dma_resv_lockdep() had some tab/space mixup, and moving it around was the easiest way to fix that in the editor :)

Moving it to the end seemed logical to me.

Christian.

-Daniel


---
  drivers/dma-buf/dma-resv.c | 128 +++++++++++++++++++------------------
  1 file changed, 65 insertions(+), 63 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6ddbeb5dfbf6..87f5d82d992a 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
  /*
   * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
   *
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
        kfree_rcu(list, rcu);
  }

-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
-       struct mm_struct *mm = mm_alloc();
-       struct ww_acquire_ctx ctx;
-       struct dma_resv obj;
-       struct address_space mapping;
-       int ret;
-
-       if (!mm)
-               return -ENOMEM;
-
-       dma_resv_init(&obj);
-       address_space_init_once(&mapping);
-
-       mmap_read_lock(mm);
-       ww_acquire_init(&ctx, &reservation_ww_class);
-       ret = dma_resv_lock(&obj, &ctx);
-       if (ret == -EDEADLK)
-               dma_resv_lock_slow(&obj, &ctx);
-       fs_reclaim_acquire(GFP_KERNEL);
-       /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
-       i_mmap_lock_write(&mapping);
-       i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
-       lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-       __dma_fence_might_wait();
-       lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
-       __dma_fence_might_wait();
-#endif
-       fs_reclaim_release(GFP_KERNEL);
-       ww_mutex_unlock(&obj.lock);
-       ww_acquire_fini(&ctx);
-       mmap_read_unlock(mm);
-       
-       mmput(mm);
-
-       return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
  /**
   * dma_resv_init - initialize a reservation object
   * @obj: the reservation object
@@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
        if (old && old->shared_max) {
                if ((old->shared_count + num_fences) <= old->shared_max)
                        return 0;
-               else
-                       max = max(old->shared_count + num_fences,
-                                 old->shared_max * 2);
+               max = max(old->shared_count + num_fences, old->shared_max * 2);
        } else {
                max = max(4ul, roundup_pow_of_two(num_fences));
        }
@@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
  EXPORT_SYMBOL(dma_resv_add_excl_fence);

  /**
-* dma_resv_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
  {
        struct dma_resv_list *src_list, *dst_list;
        struct dma_fence *old, *new;
-       unsigned i;
+       unsigned int i;

        dma_resv_assert_held(dst);

@@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)

 retry:
        if (src_list) {
-               unsigned shared_count = src_list->shared_count;
+               unsigned int shared_count = src_list->shared_count;

                rcu_read_unlock();

@@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)

                dst_list->shared_count = 0;
                for (i = 0; i < src_list->shared_count; ++i) {
+                       struct dma_fence __rcu **dst;
                        struct dma_fence *fence;

                        fence = rcu_dereference(src_list->shared[i]);
@@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
                                continue;
                        }

-                       rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+                       dst = &dst_list->shared[dst_list->shared_count++];
+                       rcu_assign_pointer(*dst, fence);
                }
        } else {
                dst_list = NULL;
@@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
   */
  int dma_resv_get_fences_rcu(struct dma_resv *obj,
                            struct dma_fence **pfence_excl,
-                           unsigned *pshared_count,
+                           unsigned int *pshared_count,
                            struct dma_fence ***pshared)
  {
        struct dma_fence **shared = NULL;
@@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
                               bool wait_all, bool intr,
                               unsigned long timeout)
  {
-       struct dma_fence *fence;
-       unsigned seq, shared_count;
        long ret = timeout ? timeout : 1;
+       unsigned int seq, shared_count;
+       struct dma_fence *fence;
        int i;

 retry:
@@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
                        shared_count = fobj->shared_count;

                for (i = 0; !fence && i < shared_count; ++i) {
-                       struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+                       struct dma_fence *lfence;

+                       lfence = rcu_dereference(fobj->shared[i]);
                        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                     &lfence->flags))
                                continue;
@@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
   */
  bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
  {
-       unsigned seq, shared_count;
+       unsigned int seq, shared_count;
        int ret;

        rcu_read_lock();
@@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
        seq = read_seqcount_begin(&obj->seq);

        if (test_all) {
-               unsigned i;
-
                struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+               unsigned int i;

                if (fobj)
                        shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; ++i) {
-                       struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+                       struct dma_fence *fence;

+                       fence = rcu_dereference(fobj->shared[i]);
                        ret = dma_resv_test_signaled_single(fence);
                        if (ret < 0)
                                goto retry;
@@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
        return ret;
  }
  EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+       struct mm_struct *mm = mm_alloc();
+       struct ww_acquire_ctx ctx;
+       struct dma_resv obj;
+       struct address_space mapping;
+       int ret;
+
+       if (!mm)
+               return -ENOMEM;
+
+       dma_resv_init(&obj);
+       address_space_init_once(&mapping);
+
+       mmap_read_lock(mm);
+       ww_acquire_init(&ctx, &reservation_ww_class);
+       ret = dma_resv_lock(&obj, &ctx);
+       if (ret == -EDEADLK)
+               dma_resv_lock_slow(&obj, &ctx);
+       fs_reclaim_acquire(GFP_KERNEL);
+       /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+       i_mmap_lock_write(&mapping);
+       i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+       lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+       __dma_fence_might_wait();
+       lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+       __dma_fence_might_wait();
+#endif
+       fs_reclaim_release(GFP_KERNEL);
+       ww_mutex_unlock(&obj.lock);
+       ww_acquire_fini(&ctx);
+       mmap_read_unlock(mm);
+
+       mmput(mm);
+
+       return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
--
2.25.1

