On 11/16/21 16:38, Joao Martins wrote:
> On 11/15/21 17:49, Jason Gunthorpe wrote:
>> On Mon, Nov 15, 2021 at 01:11:32PM +0100, Joao Martins wrote:
>>> On 11/12/21 16:34, Jason Gunthorpe wrote:
>>>> On Fri, Nov 12, 2021 at 04:08:24PM +0100, Joao Martins wrote:
>>>>> diff --git a/drivers/dax/device.c b/drivers/dax/device.c
>>>>> index a65c67ab5ee0..0c2ac97d397d 100644
>>>>> --- a/drivers/dax/device.c
>>>>> +++ b/drivers/dax/device.c
>>>>> @@ -192,6 +192,42 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
>>>>>  }
>>>>>  #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
>>>>>  
>>>>> +static void set_page_mapping(struct vm_fault *vmf, pfn_t pfn,
>>>>> +			     unsigned long fault_size,
>>>>> +			     struct address_space *f_mapping)
>>>>> +{
>>>>> +	unsigned long i;
>>>>> +	pgoff_t pgoff;
>>>>> +
>>>>> +	pgoff = linear_page_index(vmf->vma, ALIGN(vmf->address, fault_size));
>>>>> +
>>>>> +	for (i = 0; i < fault_size / PAGE_SIZE; i++) {
>>>>> +		struct page *page;
>>>>> +
>>>>> +		page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
>>>>> +		if (page->mapping)
>>>>> +			continue;
>>>>> +		page->mapping = f_mapping;
>>>>> +		page->index = pgoff + i;
>>>>> +	}
>>>>> +}
>>>>> +
>>>>> +static void set_compound_mapping(struct vm_fault *vmf, pfn_t pfn,
>>>>> +				 unsigned long fault_size,
>>>>> +				 struct address_space *f_mapping)
>>>>> +{
>>>>> +	struct page *head;
>>>>> +
>>>>> +	head = pfn_to_page(pfn_t_to_pfn(pfn));
>>>>> +	head = compound_head(head);
>>>>> +	if (head->mapping)
>>>>> +		return;
>>>>> +
>>>>> +	head->mapping = f_mapping;
>>>>> +	head->index = linear_page_index(vmf->vma,
>>>>> +			ALIGN(vmf->address, fault_size));
>>>>> +}
>>>>
>>>> Should this stuff be setup before doing vmf_insert_pfn_XX?
>>>>
>>>
>>> Interestingly filesystem-dax does this, but not device-dax.
>>
>> I think it may be a bug?
>>
> Possibly.
> 
> Dan, any thoughts (see also below)? You probably hold all that history
> since its inception on commit 2232c6382a4 ("device-dax: Enable
> page_mapping()") and commit 35de299547d1 ("device-dax: Set page->index").
> 
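To make the ordering concern above concrete, here is an illustrative
sketch of the two orderings (simplified fault handlers;
set_page_mapping_and_index() is a hypothetical helper standing in for
the @mapping/@index setup, not actual fsdax or device-dax code):

/* device-dax today: insert the PTE first, set @mapping afterwards */
static vm_fault_t fault_insert_then_map(struct vm_fault *vmf, pfn_t pfn)
{
	vm_fault_t rc = vmf_insert_mixed(vmf->vma, vmf->address, pfn);

	if (rc == VM_FAULT_NOPAGE)
		set_page_mapping_and_index(vmf, pfn);	/* hypothetical */
	return rc;
}

/* fsdax-style: set @mapping before the page table entry is visible */
static vm_fault_t fault_map_then_insert(struct vm_fault *vmf, pfn_t pfn)
{
	set_page_mapping_and_index(vmf, pfn);		/* hypothetical */
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}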
Below is what I have staged so far as a precursor patch (see below the
scissors mark).

It also lets me simplify the compound page case of __dax_set_mapping()
in this patch, as in the diff below.

But I still wonder whether this ordering adjustment of the @mapping
setting is best placed as a precursor patch whenever the pgmap/page
refcount changes happen. Anyway, it's just a thought.

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 80824e460fbf..35706214778e 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -78,15 +78,21 @@ static void __dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
                              struct address_space *f_mapping)
 {
        struct address_space *c_mapping = vmf->vma->vm_file->f_mapping;
+       struct dev_dax *dev_dax = vmf->vma->vm_file->private_data;
        unsigned long i, nr_pages = fault_size / PAGE_SIZE;
        pgoff_t pgoff;

+       /* mapping is only set on the head */
+       if (dev_dax->pgmap->vmemmap_shift)
+               nr_pages = 1;
+
        pgoff = linear_page_index(vmf->vma,
                        ALIGN(vmf->address, fault_size));

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);

+               page = compound_head(page);
                if (page->mapping &&
                    (!f_mapping && page->mapping != c_mapping))
                        continue;
@@ -473,6 +479,9 @@ int dev_dax_probe(struct dev_dax *dev_dax)
        }

        pgmap->type = MEMORY_DEVICE_GENERIC;
+       if (dev_dax->align > PAGE_SIZE)
+               pgmap->vmemmap_shift =
+                       order_base_2(dev_dax->align >> PAGE_SHIFT);
        addr = devm_memremap_pages(dev, pgmap);
        if (IS_ERR(addr))
                return PTR_ERR(addr);
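
For the record, illustrative arithmetic behind the vmemmap_shift value
above (assuming 4K base pages, i.e. PAGE_SHIFT == 12; the 2M alignment
is just an example):

	unsigned long align = 2UL << 20;		/* 2M dev_dax alignment */
	unsigned int shift = order_base_2(align >> 12);	/* 512 pages -> 9 */

With vmemmap_shift == 9, each mapped unit is a compound page of 512 base
pages with a single head, which is why __dax_set_mapping() can get away
with nr_pages == 1 and compound_head() resolving every tail to the head.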

----------------------------------->8-------------------------------------

From: Joao Martins <joao.m.mart...@oracle.com>
Subject: [PATCH] device-dax: set mapping prior to vmf_insert_pfn{,_pmd,pud}()

Normally, the @page mapping is set prior to inserting the page into a
page table entry. Make device-dax adhere to the same ordering, rather
than setting the mapping after the PTE is inserted.

Care is taken to clear the mapping on a vmf_insert_pfn* failure (rc !=
VM_FAULT_NOPAGE). Thus the mapping is only cleared once we have a valid
@pfn, which is set right before vmf_insert_pfn*() is called, and only
if the mapping set on the page matches the one recorded in the fault
handler data (@vmf).

Suggested-by: Jason Gunthorpe <j...@nvidia.com>
Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
---
 drivers/dax/device.c | 79 +++++++++++++++++++++++++++++++-------------
 1 file changed, 56 insertions(+), 23 deletions(-)

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 630de5a795b0..80824e460fbf 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -73,6 +73,43 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
        return -1;
 }

+static void __dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+                             unsigned long fault_size,
+                             struct address_space *f_mapping)
+{
+       struct address_space *c_mapping = vmf->vma->vm_file->f_mapping;
+       unsigned long i, nr_pages = fault_size / PAGE_SIZE;
+       pgoff_t pgoff;
+
+       pgoff = linear_page_index(vmf->vma,
+                       ALIGN(vmf->address, fault_size));
+
+       for (i = 0; i < nr_pages; i++) {
+               struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+
+               if (page->mapping &&
+                   (!f_mapping && page->mapping != c_mapping))
+                       continue;
+
+               page->mapping = f_mapping;
+               page->index = pgoff + i;
+       }
+}
+
+static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+                           unsigned long fault_size)
+{
+       struct address_space *c_mapping = vmf->vma->vm_file->f_mapping;
+
+       __dax_set_mapping(vmf, pfn, fault_size, c_mapping);
+}
+
+static void dax_clear_mapping(struct vm_fault *vmf, pfn_t pfn,
+                             unsigned long fault_size)
+{
+       __dax_set_mapping(vmf, pfn, fault_size, NULL);
+}
+
 static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
                                struct vm_fault *vmf, pfn_t *pfn)
 {
@@ -100,6 +137,8 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,

        *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

+       dax_set_mapping(vmf, *pfn, fault_size);
+
        return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }

@@ -140,6 +179,8 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,

        *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

+       dax_set_mapping(vmf, *pfn, fault_size);
+
        return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }

@@ -182,6 +223,8 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,

        *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

+       dax_set_mapping(vmf, *pfn, fault_size);
+
        return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
@@ -199,7 +242,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
        unsigned long fault_size;
        vm_fault_t rc = VM_FAULT_SIGBUS;
        int id;
-       pfn_t pfn;
+       pfn_t pfn = { .val = 0 };
        struct dev_dax *dev_dax = filp->private_data;

        dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -224,28 +267,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
                rc = VM_FAULT_SIGBUS;
        }

-       if (rc == VM_FAULT_NOPAGE) {
-               unsigned long i;
-               pgoff_t pgoff;
-
-               /*
-                * In the device-dax case the only possibility for a
-                * VM_FAULT_NOPAGE result is when device-dax capacity is
-                * mapped. No need to consider the zero page, or racing
-                * conflicting mappings.
-                */
-               pgoff = linear_page_index(vmf->vma,
-                               ALIGN(vmf->address, fault_size));
-               for (i = 0; i < fault_size / PAGE_SIZE; i++) {
-                       struct page *page;
-
-                       page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
-                       if (page->mapping)
-                               continue;
-                       page->mapping = filp->f_mapping;
-                       page->index = pgoff + i;
-               }
-       }
+       /*
+        * In the device-dax case the only possibility for a
+        * VM_FAULT_NOPAGE result is when device-dax capacity is
+        * mapped. No need to consider the zero page, or racing
+        * conflicting mappings.
+        * We could get VM_FAULT_FALLBACK without even attempting
+        * to insert the page table entry. So make sure we test
+        * for the error code with a devmap @pfn value which is
+        * set right before vmf_insert_pfn*().
+        */
+       if (rc != VM_FAULT_NOPAGE && pfn_t_devmap(pfn))
+               dax_clear_mapping(vmf, pfn, fault_size);
        dax_read_unlock(id);

        return rc;
-- 
2.17.2
