We are going to unmap guest pages from the direct mapping and can no longer
rely on it for guest memory access. Use a temporary kmap_atomic()-style
mapping to access guest memory instead.
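
As a minimal illustration of the resulting access pattern (a sketch only, not
part of the patch; it mirrors the copy_from_guest() hunk below and assumes the
page has already been pinned with get_user_pages_unlocked()):

	void *vaddr;

	/* Disables preemption and installs the page into a per-CPU kernel PTE */
	vaddr = kvm_map_page_atomic(page);
	memcpy(data, vaddr + offset, seg);
	/* Clears the PTE, flushes the local TLB entry and re-enables preemption */
	kvm_unmap_page_atomic(vaddr);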

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
 virt/kvm/kvm_main.c      |  27 ++++++++++-
 virt/lib/mem_protected.c | 101 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 126 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4c008c7b4974..9b569b78874a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,7 @@
 #include <linux/io.h>
 #include <linux/lockdep.h>
 #include <linux/kthread.h>
+#include <linux/pagewalk.h>
 
 #include <asm/processor.h>
 #include <asm/ioctl.h>
@@ -154,6 +155,12 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+void *kvm_map_page_atomic(struct page *page);
+void kvm_unmap_page_atomic(void *vaddr);
+
+int kvm_init_protected_memory(void);
+void kvm_exit_protected_memory(void);
+
 int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect);
 
 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -2329,6 +2336,7 @@ int copy_from_guest(void *data, unsigned long hva, int len, bool protected)
        int offset = offset_in_page(hva);
        struct page *page;
        int npages, seg;
+       void *vaddr;
 
        if (!protected)
                return __copy_from_user(data, (void __user *)hva, len);
@@ -2341,7 +2349,11 @@ int copy_from_guest(void *data, unsigned long hva, int len, bool protected)
                npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
                if (npages != 1)
                        return -EFAULT;
-               memcpy(data, page_address(page) + offset, seg);
+
+               vaddr = kvm_map_page_atomic(page);
+               memcpy(data, vaddr + offset, seg);
+               kvm_unmap_page_atomic(vaddr);
+
                put_page(page);
                len -= seg;
                hva += seg;
@@ -2356,6 +2368,7 @@ int copy_to_guest(unsigned long hva, const void *data, int len, bool protected)
        int offset = offset_in_page(hva);
        struct page *page;
        int npages, seg;
+       void *vaddr;
 
        if (!protected)
                return __copy_to_user((void __user *)hva, data, len);
@@ -2369,7 +2382,11 @@ int copy_to_guest(unsigned long hva, const void *data, int len, bool protected)
                                                 FOLL_WRITE | FOLL_KVM);
                if (npages != 1)
                        return -EFAULT;
-               memcpy(page_address(page) + offset, data, seg);
+
+               vaddr = kvm_map_page_atomic(page);
+               memcpy(vaddr + offset, data, seg);
+               kvm_unmap_page_atomic(vaddr);
+
                put_page(page);
                len -= seg;
                hva += seg;
@@ -4945,6 +4962,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        if (r)
                goto out_free;
 
+       if (IS_ENABLED(CONFIG_HAVE_KVM_PROTECTED_MEMORY) &&
+           kvm_init_protected_memory())
+               goto out_unreg;
+
        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;
@@ -4968,6 +4989,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        return 0;
 
 out_unreg:
+       kvm_exit_protected_memory();
        kvm_async_pf_deinit();
 out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
@@ -4989,6 +5011,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+       kvm_exit_protected_memory();
        debugfs_remove_recursive(kvm_debugfs_dir);
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
diff --git a/virt/lib/mem_protected.c b/virt/lib/mem_protected.c
index 0b01dd74f29c..1dfe82534242 100644
--- a/virt/lib/mem_protected.c
+++ b/virt/lib/mem_protected.c
@@ -5,6 +5,100 @@
 #include <linux/vmalloc.h>
 #include <asm/tlbflush.h>
 
+static pte_t **guest_map_ptes;
+static struct vm_struct *guest_map_area;
+
+void *kvm_map_page_atomic(struct page *page)
+{
+       pte_t *pte;
+       void *vaddr;
+
+       preempt_disable();
+       pte = guest_map_ptes[smp_processor_id()];
+       vaddr = guest_map_area->addr + smp_processor_id() * PAGE_SIZE;
+       set_pte(pte, mk_pte(page, PAGE_KERNEL));
+       return vaddr;
+}
+EXPORT_SYMBOL_GPL(kvm_map_page_atomic);
+
+void kvm_unmap_page_atomic(void *vaddr)
+{
+       pte_t *pte = guest_map_ptes[smp_processor_id()];
+       set_pte(pte, __pte(0));
+       flush_tlb_one_kernel((unsigned long)vaddr);
+       preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_page_atomic);
+
+int kvm_init_protected_memory(void)
+{
+       guest_map_ptes = kmalloc_array(num_possible_cpus(),
+                                      sizeof(pte_t *), GFP_KERNEL);
+       if (!guest_map_ptes)
+               return -ENOMEM;
+
+       guest_map_area = alloc_vm_area(PAGE_SIZE * num_possible_cpus(),
+                                      guest_map_ptes);
+       if (!guest_map_area) {
+               kfree(guest_map_ptes);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_init_protected_memory);
+
+void kvm_exit_protected_memory(void)
+{
+       if (guest_map_area)
+               free_vm_area(guest_map_area);
+       if (guest_map_ptes)
+               kfree(guest_map_ptes);
+}
+EXPORT_SYMBOL_GPL(kvm_exit_protected_memory);
+
+static int adjust_direct_mapping_pte_range(pmd_t *pmd, unsigned long addr,
+                                          unsigned long end,
+                                          struct mm_walk *walk)
+{
+       bool protect = (bool)walk->private;
+       pte_t *pte;
+       struct page *page;
+
+       if (pmd_trans_huge(*pmd)) {
+               page = pmd_page(*pmd);
+               if (is_huge_zero_page(page))
+                       return 0;
+               VM_BUG_ON_PAGE(total_mapcount(page) != 1, page);
+               /* XXX: Would it fail with direct device assignment? */
+               VM_BUG_ON_PAGE(page_count(page) != 1, page);
+               kernel_map_pages(page, HPAGE_PMD_NR, !protect);
+               return 0;
+       }
+
+       pte = pte_offset_map(pmd, addr);
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               pte_t entry = *pte;
+
+               if (!pte_present(entry))
+                       continue;
+
+               if (is_zero_pfn(pte_pfn(entry)))
+                       continue;
+
+               page = pte_page(entry);
+
+               VM_BUG_ON_PAGE(page_mapcount(page) != 1, page);
+               kernel_map_pages(page, 1, !protect);
+       }
+
+       return 0;
+}
+
+static const struct mm_walk_ops adjust_direct_mapping_ops = {
+       .pmd_entry      = adjust_direct_mapping_pte_range,
+};
+
 int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect)
 {
        struct mm_struct *mm = current->mm;
@@ -50,6 +144,13 @@ int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect)
                if (ret)
                        goto out;
 
+               if (vma_is_anonymous(vma)) {
+                       ret = walk_page_range_novma(mm, start, tmp,
+                                                   &adjust_direct_mapping_ops, NULL,
+                                                   (void *) protect);
+                       if (ret)
+                               goto out;
+               }
 next:
                start = tmp;
                if (start < prev->vm_end)
-- 
2.26.2
