On Tue, Oct 20, 2020 at 09:18:54AM +0300, Kirill A. Shutemov wrote:
> +int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect)
> +{
> +     struct mm_struct *mm = current->mm;
> +     struct vm_area_struct *vma, *prev;
> +     int ret;
> +
> +     if (mmap_write_lock_killable(mm))
> +             return -EINTR;
> +
> +     ret = -ENOMEM;
> +     vma = find_vma(current->mm, start);
> +     if (!vma)
> +             goto out;
> +
> +     ret = -EINVAL;
> +     if (vma->vm_start > start)
> +             goto out;
> +
> +     if (start > vma->vm_start)
> +             prev = vma;
> +     else
> +             prev = vma->vm_prev;
> +
> +     ret = 0;
> +     while (true) {
> +             unsigned long newflags, tmp;
> +
> +             tmp = vma->vm_end;
> +             if (tmp > end)
> +                     tmp = end;
> +
> +             newflags = vma->vm_flags;
> +             if (protect)
> +                     newflags |= VM_KVM_PROTECTED;
> +             else
> +                     newflags &= ~VM_KVM_PROTECTED;
> +
> +             /* The VMA has been handled as part of another memslot */
> +             if (newflags == vma->vm_flags)
> +                     goto next;
> +
> +             ret = mprotect_fixup(vma, &prev, start, tmp, newflags);
> +             if (ret)
> +                     goto out;
> +
> +next:
> +             start = tmp;
> +             if (start < prev->vm_end)
> +                     start = prev->vm_end;
> +
> +             if (start >= end)
> +                     goto out;
> +
> +             vma = prev->vm_next;
> +             if (!vma || vma->vm_start != start) {
> +                     ret = -ENOMEM;
> +                     goto out;
> +             }
> +     }
> +out:
> +     mmap_write_unlock(mm);
> +     return ret;
> +}
> +EXPORT_SYMBOL_GPL(__kvm_protect_memory);

Since migration will be disabled after this, should the above not (at
the very least) force compaction before proceeding to lock the pages in?
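
Roughly what I have in mind, purely as illustration -- the
page_is_movable_or_cma() and migrate_page_out_of_movable() helpers below
are made-up names, not existing kernel interfaces; a real version would
presumably go through page isolation plus migrate_pages(), much like
alloc_contig_range() or the long-term GUP pin path already do:

#include <linux/mm.h>
#include <linux/migrate.h>

/*
 * Illustrative sketch only, not part of the patch.  Caller is assumed
 * to hold mmap_write_lock(mm), as __kvm_protect_memory() above does.
 * Walk the range and try to move pages out of movable placements
 * before VM_KVM_PROTECTED makes them effectively unmovable.
 */
static int kvm_compact_range_before_protect(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	unsigned long addr;
	int ret = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = find_vma(mm, addr);
		struct page *page;

		if (!vma || addr < vma->vm_start)
			continue;

		/* Grab the page currently backing @addr, if any. */
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR_OR_NULL(page))
			continue;

		/*
		 * Hypothetical helpers: if the page sits in ZONE_MOVABLE
		 * or a CMA region, isolate it and migrate it to an
		 * unmovable placement before the range gets locked in.
		 */
		if (page_is_movable_or_cma(page))
			ret = migrate_page_out_of_movable(page);

		put_page(page);
		if (ret)
			break;
	}
	return ret;
}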
