To allow the DMA mapping logic to be reused, split it into two functions: hmm_mirror_dma_map_range(), which DMA-maps a run of HMM page table entries, and hmm_mirror_dma_map(), which walks the page table and calls the former on each populated directory.

Signed-off-by: Jérôme Glisse <jgli...@redhat.com>
---
 mm/hmm.c | 120 ++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 65 insertions(+), 55 deletions(-)
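
The split is intended so that later code can call hmm_mirror_dma_map_range()
directly on a run of HMM page table entries without going through the
hmm_pt_iter walk. A minimal sketch of such a caller is below; the function
name hmm_mirror_dma_map_pfns() is a hypothetical illustration, not something
added by this patch. Note that the lock argument may be NULL when the entries
are not reachable by concurrent updaters, which is why the helper takes a
spinlock_t * instead of locking through the iterator:

    /*
     * Illustrative only -- not part of this patch. A caller that owns a
     * private array of HMM page table entries (not visible to other
     * threads) can skip the directory lock by passing NULL.
     */
    static int hmm_mirror_dma_map_pfns(struct hmm_mirror *mirror,
                                       dma_addr_t *hmm_pte,
                                       unsigned long npages)
    {
            /* No concurrent updater, so no spinlock is required. */
            return hmm_mirror_dma_map_range(mirror, hmm_pte, NULL, npages);
    }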

diff --git a/mm/hmm.c b/mm/hmm.c
index d26abe4..07f1ab6 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -910,76 +910,86 @@ static int hmm_mirror_fault_hugetlb_entry(pte_t *ptep,
        return 0;
 }
 
+static int hmm_mirror_dma_map_range(struct hmm_mirror *mirror,
+                                   dma_addr_t *hmm_pte,
+                                   spinlock_t *lock,
+                                   unsigned long npages)
+{
+       struct device *dev = mirror->device->dev;
+       unsigned long i;
+       int ret = 0;
+
+       for (i = 0; i < npages; i++) {
+               dma_addr_t dma_addr, pte;
+               struct page *page;
+
+again:
+               pte = ACCESS_ONCE(hmm_pte[i]);
+               if (!hmm_pte_test_valid_pfn(&pte) || !hmm_pte_test_select(&pte))
+                       continue;
+
+               page = pfn_to_page(hmm_pte_pfn(pte));
+               VM_BUG_ON(!page);
+               dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+                                       DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(dev, dma_addr)) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               /*
+                * Make sure we transfer the dirty bit. Note that there
+                * might still be a window for another thread to set
+                * the dirty bit before we check for pte equality. This
+                * will just lead to a useless retry so it is not the
+                * end of the world here.
+                */
+               if (lock)
+                       spin_lock(lock);
+               if (hmm_pte_test_dirty(&hmm_pte[i]))
+                       hmm_pte_set_dirty(&pte);
+               if (ACCESS_ONCE(hmm_pte[i]) != pte) {
+                       if (lock)
+                               spin_unlock(lock);
+                       dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+                                      DMA_BIDIRECTIONAL);
+                       if (hmm_pte_test_valid_pfn(&hmm_pte[i]))
+                               goto again;
+                       continue;
+               }
+               hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
+               if (hmm_pte_test_write(&pte))
+                       hmm_pte_set_write(&hmm_pte[i]);
+               if (hmm_pte_test_dirty(&pte))
+                       hmm_pte_set_dirty(&hmm_pte[i]);
+               if (lock)
+                       spin_unlock(lock);
+       }
+
+       return ret;
+}
+
 static int hmm_mirror_dma_map(struct hmm_mirror *mirror,
                              struct hmm_pt_iter *iter,
                              unsigned long start,
                              unsigned long end)
 {
-       struct device *dev = mirror->device->dev;
        unsigned long addr;
        int ret;
 
        for (ret = 0, addr = start; !ret && addr < end;) {
-               unsigned long i = 0, next = end;
+               unsigned long next = end, npages;
                dma_addr_t *hmm_pte;
+               spinlock_t *lock;
 
                hmm_pte = hmm_pt_iter_populate(iter, addr, &next);
                if (!hmm_pte)
                        return -ENOENT;
 
-               do {
-                       dma_addr_t dma_addr, pte;
-                       struct page *page;
-
-again:
-                       pte = ACCESS_ONCE(hmm_pte[i]);
-                       if (!hmm_pte_test_valid_pfn(&pte) ||
-                           !hmm_pte_test_select(&pte)) {
-                               if (!hmm_pte_test_valid_dma(&pte)) {
-                                       ret = -ENOENT;
-                                       break;
-                               }
-                               continue;
-                       }
-
-                       page = pfn_to_page(hmm_pte_pfn(pte));
-                       VM_BUG_ON(!page);
-                       dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
-                                               DMA_BIDIRECTIONAL);
-                       if (dma_mapping_error(dev, dma_addr)) {
-                               ret = -ENOMEM;
-                               break;
-                       }
-
-                       hmm_pt_iter_directory_lock(iter);
-                       /*
-                        * Make sure we transfer the dirty bit. Note that there
-                        * might still be a window for another thread to set
-                        * the dirty bit before we check for pte equality. This
-                        * will just lead to a useless retry so it is not the
-                        * end of the world here.
-                        */
-                       if (hmm_pte_test_dirty(&hmm_pte[i]))
-                               hmm_pte_set_dirty(&pte);
-                       if (ACCESS_ONCE(hmm_pte[i]) != pte) {
-                               hmm_pt_iter_directory_unlock(iter);
-                               dma_unmap_page(dev, dma_addr, PAGE_SIZE,
-                                              DMA_BIDIRECTIONAL);
-                               if (hmm_pte_test_valid_pfn(&pte))
-                                       goto again;
-                               if (!hmm_pte_test_valid_dma(&pte)) {
-                                       ret = -ENOENT;
-                                       break;
-                               }
-                       } else {
-                               hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
-                               if (hmm_pte_test_write(&pte))
-                                       hmm_pte_set_write(&hmm_pte[i]);
-                               if (hmm_pte_test_dirty(&pte))
-                                       hmm_pte_set_dirty(&hmm_pte[i]);
-                               hmm_pt_iter_directory_unlock(iter);
-                       }
-               } while (addr += PAGE_SIZE, i++, addr != next && !ret);
+               npages = (next - addr) >> PAGE_SHIFT;
+               lock = hmm_pt_iter_directory_lock_ptr(iter);
+               ret = hmm_mirror_dma_map_range(mirror, hmm_pte, lock, npages);
+               addr = next;
        }
 
        return ret;
-- 
2.4.3
