Notes about this commit:

* These methods are based on pmem_dax_ops from drivers/nvdimm/pmem.c

* dev_dax_direct_access() returns the hpa, pfn and kva. The kva is
  newly stored as dev_dax->virt_addr by dev_dax_probe().

* The hpa/pfn are used for mmap (dax_iomap_fault()), and the kva is used
  for read/write (dax_iomap_rw()); a usage sketch follows these notes.

* dev_dax_recovery_write() and dev_dax_zero_page_range() have not been
  tested yet. I'm looking for suggestions as to how to test those.
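
For context, here is a rough usage sketch. It is illustrative only and not
part of this patch: example_read() is a hypothetical caller standing in for
what the fs/dax.c read path does under dax_iomap_rw(). For a dev_dax
dax_device, dax_direct_access() resolves to dev_dax_direct_access(); the
kaddr it returns (dev_dax->virt_addr plus the offset) is what read/write
copies through, while dax_iomap_fault() inserts the pfn and never touches
the kva.

  /*
   * Hypothetical caller, not part of this patch.
   * Assumes <linux/dax.h>, <linux/uio.h> and <linux/pfn_t.h>.
   */
  static ssize_t example_read(struct dax_device *dax_dev, pgoff_t pgoff,
                              long nr_pages, struct iov_iter *iter)
  {
          void *kaddr;
          pfn_t pfn;
          long avail;

          /* resolves to dev_dax_direct_access() for a dev_dax device */
          avail = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
                                    &kaddr, &pfn);
          if (avail < 0)
                  return avail;

          /* read/write goes through the kva; mmap faults use the pfn */
          return copy_to_iter(kaddr, PFN_PHYS(avail), iter);
  }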

Signed-off-by: John Groves <j...@groves.net>
---
 drivers/dax/bus.c | 120 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 115 insertions(+), 5 deletions(-)

diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 9d9a4ae7bbc0..61a8d1b3c07a 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -7,6 +7,10 @@
 #include <linux/slab.h>
 #include <linux/dax.h>
 #include <linux/io.h>
+#include <linux/backing-dev.h>
+#include <linux/pfn_t.h>
+#include <linux/range.h>
+#include <linux/uio.h>
 #include "dax-private.h"
 #include "bus.h"
 
@@ -1441,6 +1445,105 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 }
 EXPORT_SYMBOL_GPL(dax_pgoff_to_phys);
 
+#if IS_ENABLED(CONFIG_DEV_DAX_IOMAP)
+
+static void write_dax(void *pmem_addr, struct page *page,
+               unsigned int off, unsigned int len)
+{
+       unsigned int chunk;
+       void *mem;
+
+       while (len) {
+               mem = kmap_local_page(page);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
+               memcpy_flushcache(pmem_addr, mem + off, chunk);
+               kunmap_local(mem);
+               len -= chunk;
+               off = 0;
+               page++;
+               pmem_addr += chunk;
+       }
+}
+
+static long __dev_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+                       long nr_pages, enum dax_access_mode mode, void **kaddr,
+                       pfn_t *pfn)
+{
+       struct dev_dax *dev_dax = dax_get_private(dax_dev);
+       size_t size = nr_pages << PAGE_SHIFT;
+       size_t offset = pgoff << PAGE_SHIFT;
+       void *virt_addr = dev_dax->virt_addr + offset;
+       u64 flags = PFN_DEV|PFN_MAP;
+       phys_addr_t phys;
+       pfn_t local_pfn;
+       size_t dax_size;
+
+       WARN_ON(!dev_dax->virt_addr);
+
+       if (down_read_interruptible(&dax_dev_rwsem))
+               return 0; /* no valid data since we were killed */
+       dax_size = dev_dax_size(dev_dax);
+       up_read(&dax_dev_rwsem);
+
+       phys = dax_pgoff_to_phys(dev_dax, pgoff, nr_pages << PAGE_SHIFT);
+
+       if (kaddr)
+               *kaddr = virt_addr;
+
+       local_pfn = phys_to_pfn_t(phys, flags); /* are flags correct? */
+       if (pfn)
+               *pfn = local_pfn;
+
+       /* This is the valid size at the specified address */
+       return PHYS_PFN(min_t(size_t, size, dax_size - offset));
+}
+
+static int dev_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+                                   size_t nr_pages)
+{
+       long resid = nr_pages << PAGE_SHIFT;
+       long offset = pgoff << PAGE_SHIFT;
+
+       /* Zero one page at a time */
+       while (resid > 0) {
+               void *kaddr;
+               pgoff_t poff = offset >> PAGE_SHIFT;
+               long len = __dev_dax_direct_access(dax_dev, poff,
+                                                  nr_pages, DAX_ACCESS, &kaddr, NULL);
+               len = min_t(long, len, PAGE_SIZE);
+               write_dax(kaddr, ZERO_PAGE(0), 0, len);
+
+               offset += len;
+               resid  -= len;
+       }
+       return 0;
+}
+
+static long dev_dax_direct_access(struct dax_device *dax_dev,
+               pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
+               void **kaddr, pfn_t *pfn)
+{
+       return __dev_dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
+}
+
+static size_t dev_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i)
+{
+       size_t off;
+
+       off = offset_in_page(addr);
+
+       return _copy_from_iter_flushcache(addr, bytes, i);
+}
+
+static const struct dax_operations dev_dax_ops = {
+       .direct_access = dev_dax_direct_access,
+       .zero_page_range = dev_dax_zero_page_range,
+       .recovery_write = dev_dax_recovery_write,
+};
+
+#endif /* IS_ENABLED(CONFIG_DEV_DAX_IOMAP) */
+
 static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
 {
        struct dax_region *dax_region = data->dax_region;
@@ -1496,11 +1599,18 @@ static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
                }
        }
 
-       /*
-        * No dax_operations since there is no access to this device outside of
-        * mmap of the resulting character device.
-        */
-       dax_dev = alloc_dax(dev_dax, NULL);
+       if (IS_ENABLED(CONFIG_DEV_DAX_IOMAP))
+               /* holder_ops currently populated separately in a slightly
+                * hacky way
+                */
+               dax_dev = alloc_dax(dev_dax, &dev_dax_ops);
+       else
+               /*
+                * No dax_operations since there is no access to this device
+                * outside of mmap of the resulting character device.
+                */
+               dax_dev = alloc_dax(dev_dax, NULL);
+
        if (IS_ERR(dax_dev)) {
                rc = PTR_ERR(dax_dev);
                goto err_alloc_dax;
-- 
2.49.0

