Factor out a helper for the "manual" zeroing of a DAX range to clean
up dax_iomap_zero a lot.

Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 fs/dax.c | 36 +++++++++++++++++++-----------------
 1 file changed, 19 insertions(+), 17 deletions(-)

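[Reviewer aid, not part of the commit: the two functions below are
reconstructed directly from the hunk that follows, with a couple of
explanatory comments added, so the post-patch shape of the code is
easier to read at a glance.]

static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
		unsigned int offset, size_t size)
{
	void *kaddr;
	long rc;

	/* Map the page and zero the sub-page range by hand. */
	rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc >= 0) {
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
	}
	return rc;
}

s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	long rc, id;
	unsigned offset = offset_in_page(pos);
	unsigned size = min_t(u64, PAGE_SIZE - offset, length);

	id = dax_read_lock();
	/* Whole-page zeroing can go through the device's zero_page_range op. */
	if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
	else
		rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
	dax_read_unlock(id);

	if (rc < 0)
		return rc;
	return size;
}
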
diff --git a/fs/dax.c b/fs/dax.c
index d7a923d152240..dc9ebeff850ab 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1121,34 +1121,36 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
+static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
+               unsigned int offset, size_t size)
+{
+       void *kaddr;
+       long rc;
+
+       rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+       if (rc >= 0) {
+               memset(kaddr + offset, 0, size);
+               dax_flush(dax_dev, kaddr + offset, size);
+       }
+       return rc;
+}
+
 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
        pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
        long rc, id;
-       void *kaddr;
-       bool page_aligned = false;
        unsigned offset = offset_in_page(pos);
        unsigned size = min_t(u64, PAGE_SIZE - offset, length);
 
-       if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
-               page_aligned = true;
-
        id = dax_read_lock();
-
-       if (page_aligned)
+       if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
                rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
        else
-               rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
-       if (rc < 0) {
-               dax_read_unlock(id);
-               return rc;
-       }
-
-       if (!page_aligned) {
-               memset(kaddr + offset, 0, size);
-               dax_flush(iomap->dax_dev, kaddr + offset, size);
-       }
+               rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
        dax_read_unlock(id);
+
+       if (rc < 0)
+               return rc;
        return size;
 }
 
-- 
2.30.2

