From: Thomas Hellstrom <thellst...@vmware.com>

With vmwgfx dirty-tracking we need a specialized huge_fault
callback: write faults on dirty-tracked or COW mappings must fall
back to PTE level so that dirty tracking and huge-entry splitting
keep working per page. Implement such a callback and hook it up.
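
For readers unfamiliar with the ->huge_fault() entry point this
builds on: the core mm invokes it for PMD- and PUD-level faults, and
the handler may return VM_FAULT_FALLBACK to have the fault retried at
the next smaller page-table level. A minimal sketch of that contract
follows; needs_pte_granularity() is a hypothetical stand-in for the
dirty-tracking / COW checks the real patch performs, not an existing
helper:

  #include <linux/mm.h>

  /*
   * Minimal sketch of the ->huge_fault() contract only. The
   * needs_pte_granularity() helper is hypothetical, standing in for
   * the dirty-tracking / COW checks done in the patch below.
   */
  static vm_fault_t sketch_huge_fault(struct vm_fault *vmf,
                                      enum page_entry_size pe_size)
  {
          bool write = vmf->flags & FAULT_FLAG_WRITE;

          /*
           * VM_FAULT_FALLBACK makes the core mm retry the fault at
           * the next smaller level, eventually reaching the ordinary
           * ->fault() PTE path.
           */
          if (write && needs_pte_granularity(vmf->vma->vm_private_data))
                  return VM_FAULT_FALLBACK;

          /* ... otherwise attempt to insert a huge PMD/PUD mapping ... */
          return VM_FAULT_NOPAGE;
  }

The handler below additionally reserves the buffer object and
write-protects dirty-tracked huge mappings, so that a subsequent
write faults again and the mapping can be split down to PTE
granularity.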

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Michal Hocko <mho...@suse.com>
Cc: "Matthew Wilcox (Oracle)" <wi...@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shute...@linux.intel.com>
Cc: Ralph Campbell <rcampb...@nvidia.com>
Cc: "Jérôme Glisse" <jgli...@redhat.com>
Cc: "Christian König" <christian.koe...@amd.com>
Cc: Dan Williams <dan.j.willi...@intel.com>
Signed-off-by: Thomas Hellstrom <thellst...@vmware.com>
Reviewed-by: Roland Scheidegger <srol...@vmware.com>
Acked-by: Christian König <christian.koe...@amd.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |  4 ++
 drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 74 +++++++++++++++++++++-
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c   |  5 +-
 3 files changed, 81 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 86b69397d166..bb2757c98f0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1430,6 +1430,10 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
                        pgoff_t start, pgoff_t end);
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+                               enum page_entry_size pe_size);
+#endif
 
 /**
  * VMW_DEBUG_KMS - Debug output for kernel mode-setting
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 17a5dca7b921..cde3e07ebaf7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -473,7 +473,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
         * a lot of unnecessary write faults.
         */
        if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
-               prot = vma->vm_page_prot;
+               prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
        else
                prot = vm_get_page_prot(vma->vm_flags);
 
@@ -486,3 +486,75 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 
        return ret;
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+                               enum page_entry_size pe_size)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+           vma->vm_private_data;
+       struct vmw_buffer_object *vbo =
+               container_of(bo, struct vmw_buffer_object, base);
+       pgprot_t prot;
+       vm_fault_t ret;
+       pgoff_t fault_page_size;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
+       bool is_cow_mapping =
+               (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+       switch (pe_size) {
+       case PE_SIZE_PMD:
+               fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+               break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+       case PE_SIZE_PUD:
+               fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+               break;
+#endif
+       default:
+               WARN_ON_ONCE(1);
+               return VM_FAULT_FALLBACK;
+       }
+
+       /* Always do write dirty-tracking and COW on PTE level. */
+       if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+               return VM_FAULT_FALLBACK;
+
+       ret = ttm_bo_vm_reserve(bo, vmf);
+       if (ret)
+               return ret;
+
+       if (vbo->dirty) {
+               pgoff_t allowed_prefault;
+               unsigned long page_offset;
+
+               page_offset = vmf->pgoff -
+                       drm_vma_node_start(&bo->base.vma_node);
+               if (page_offset >= bo->num_pages ||
+                   vmw_resources_clean(vbo, page_offset,
+                                       page_offset + PAGE_SIZE,
+                                       &allowed_prefault)) {
+                       ret = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+
+               /*
+                * Write protect, so we get a new fault on write, and can
+                * split.
+                */
+               prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+       } else {
+               prot = vm_get_page_prot(vma->vm_flags);
+       }
+
+       ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+               return ret;
+
+out_unlock:
+       dma_resv_unlock(bo->base.resv);
+
+       return ret;
+}
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index aa7e50f63b94..3c03b1746661 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
                .page_mkwrite = vmw_bo_vm_mkwrite,
                .fault = vmw_bo_vm_fault,
                .open = ttm_bo_vm_open,
-               .close = ttm_bo_vm_close
+               .close = ttm_bo_vm_close,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               .huge_fault = vmw_bo_vm_huge_fault,
+#endif
        };
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
-- 
2.21.1
