The amdgpu part looks good to me.

A minor nit-pick in mmu_notifier.c (inline).

Either way, the series is Acked-by: Felix Kuehling <felix.kuehl...@amd.com>

On 2018-12-05 12:36 a.m., jgli...@redhat.com wrote:
> From: Jérôme Glisse <jgli...@redhat.com>
>
> To avoid having to change many callback definitions every time we want
> to add a parameter use a structure to group all parameters for the
> mmu_notifier invalidate_range_start/end callback. No functional changes
> with this patch.
>
> Signed-off-by: Jérôme Glisse <jgli...@redhat.com>
> Cc: Andrew Morton <a...@linux-foundation.org>
> Cc: Matthew Wilcox <mawil...@microsoft.com>
> Cc: Ross Zwisler <zwis...@kernel.org>
> Cc: Jan Kara <j...@suse.cz>
> Cc: Dan Williams <dan.j.willi...@intel.com>
> Cc: Paolo Bonzini <pbonz...@redhat.com>
> Cc: Radim Krčmář <rkrc...@redhat.com>
> Cc: Michal Hocko <mho...@kernel.org>
> Cc: Christian Koenig <christian.koe...@amd.com>
> Cc: Felix Kuehling <felix.kuehl...@amd.com>
> Cc: Ralph Campbell <rcampb...@nvidia.com>
> Cc: John Hubbard <jhubb...@nvidia.com>
> Cc: k...@vger.kernel.org
> Cc: dri-devel@lists.freedesktop.org
> Cc: linux-r...@vger.kernel.org
> Cc: linux-fsde...@vger.kernel.org
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  | 43 +++++++++++--------------
>  drivers/gpu/drm/i915/i915_gem_userptr.c | 14 ++++----
>  drivers/gpu/drm/radeon/radeon_mn.c      | 16 ++++-----
>  drivers/infiniband/core/umem_odp.c      | 20 +++++-------
>  drivers/infiniband/hw/hfi1/mmu_rb.c     | 13 +++-----
>  drivers/misc/mic/scif/scif_dma.c        | 11 ++-----
>  drivers/misc/sgi-gru/grutlbpurge.c      | 14 ++++----
>  drivers/xen/gntdev.c                    | 12 +++----
>  include/linux/mmu_notifier.h            | 14 +++++---
>  mm/hmm.c                                | 23 ++++++-------
>  mm/mmu_notifier.c                       | 21 ++++++++++--
>  virt/kvm/kvm_main.c                     | 14 +++-----
>  12 files changed, 102 insertions(+), 113 deletions(-)
>
[snip]
> diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
> index 5119ff846769..5f6665ae3ee2 100644
> --- a/mm/mmu_notifier.c
> +++ b/mm/mmu_notifier.c
> @@ -178,14 +178,20 @@ int __mmu_notifier_invalidate_range_start(struct 
> mm_struct *mm,
>                                 unsigned long start, unsigned long end,
>                                 bool blockable)
>  {
> +     struct mmu_notifier_range _range, *range = &_range;

I'm not sure why you need to access _range indirectly through a pointer.
See below.


>       struct mmu_notifier *mn;
>       int ret = 0;
>       int id;
>  
> +     range->blockable = blockable;
> +     range->start = start;
> +     range->end = end;
> +     range->mm = mm;

This could just assign _range.blockable, _range.start, etc. without the
indirection. Or you could even use an initializer instead:

struct mmu_notifier_range range = {
    .blockable = blockable,
    .start = start,
    ...
};


> +
>       id = srcu_read_lock(&srcu);
>       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
>               if (mn->ops->invalidate_range_start) {
> -                     int _ret = mn->ops->invalidate_range_start(mn, mm, 
> start, end, blockable);
> +                     int _ret = mn->ops->invalidate_range_start(mn, range);

This could just use &_range without the indirection.

Same in ..._invalidate_range_end below.

Regards,
  Felix


>                       if (_ret) {
>                               pr_info("%pS callback failed with %d in 
> %sblockable context.\n",
>                                               
> mn->ops->invalidate_range_start, _ret,
> @@ -205,9 +211,20 @@ void __mmu_notifier_invalidate_range_end(struct 
> mm_struct *mm,
>                                        unsigned long end,
>                                        bool only_end)
>  {
> +     struct mmu_notifier_range _range, *range = &_range;
>       struct mmu_notifier *mn;
>       int id;
>  
> +     /*
> +      * The end callback will never be called if the start callback refused
> +      * to go through because blockable was false, so here we assume that
> +      * we can block.
> +      */
> +     range->blockable = true;
> +     range->start = start;
> +     range->end = end;
> +     range->mm = mm;
> +
>       id = srcu_read_lock(&srcu);
>       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
>               /*
> @@ -226,7 +243,7 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct 
> *mm,
>               if (!only_end && mn->ops->invalidate_range)
>                       mn->ops->invalidate_range(mn, mm, start, end);
>               if (mn->ops->invalidate_range_end)
> -                     mn->ops->invalidate_range_end(mn, mm, start, end);
> +                     mn->ops->invalidate_range_end(mn, range);
>       }
>       srcu_read_unlock(&srcu, id);
>  }
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 2679e476b6c3..f829f63f2b16 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -360,10 +360,7 @@ static void kvm_mmu_notifier_change_pte(struct 
> mmu_notifier *mn,
>  }
>  
>  static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> -                                                 struct mm_struct *mm,
> -                                                 unsigned long start,
> -                                                 unsigned long end,
> -                                                 bool blockable)
> +                                     const struct mmu_notifier_range *range)
>  {
>       struct kvm *kvm = mmu_notifier_to_kvm(mn);
>       int need_tlb_flush = 0, idx;
> @@ -377,7 +374,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct 
> mmu_notifier *mn,
>        * count is also read inside the mmu_lock critical section.
>        */
>       kvm->mmu_notifier_count++;
> -     need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
> +     need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
>       need_tlb_flush |= kvm->tlbs_dirty;
>       /* we've to flush the tlb before the pages can be freed */
>       if (need_tlb_flush)
> @@ -385,7 +382,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct 
> mmu_notifier *mn,
>  
>       spin_unlock(&kvm->mmu_lock);
>  
> -     ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, 
> blockable);
> +     ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
> +                                     range->end, range->blockable);
>  
>       srcu_read_unlock(&kvm->srcu, idx);
>  
> @@ -393,9 +391,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct 
> mmu_notifier *mn,
>  }
>  
>  static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
> -                                               struct mm_struct *mm,
> -                                               unsigned long start,
> -                                               unsigned long end)
> +                                     const struct mmu_notifier_range *range)
>  {
>       struct kvm *kvm = mmu_notifier_to_kvm(mn);
>  
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to