Implement a replacement for launder_folio.  The key feature of
invalidate_inode_pages2() is that it locks each folio individually, unmaps
it to prevent mmap'd accesses from interfering and calls the
->launder_folio() address_space op to flush it.  This has problems:
firstly, each folio is written individually as one or more small writes;
secondly, adjacent folios cannot easily be combined into a single
laundering write; thirdly, it's yet another op to implement.
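
For illustration, the per-folio pattern being replaced looks roughly like
this (a simplified sketch of what invalidate_inode_pages2_range() does for
each folio, not the actual mm/truncate.c code):

        folio_lock(folio);
        if (folio_mapped(folio))
                /* Unmap so that mmap'd accesses can't interfere. */
                unmap_mapping_pages(mapping, folio->index,
                                    folio_nr_pages(folio), false);
        if (folio_test_dirty(folio))
                /* Each dirty folio becomes its own small write. */
                ret = mapping->a_ops->launder_folio(folio);
        folio_unlock(folio);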

Instead, take the invalidate lock to make anyone wanting to add a folio to
the inode wait, unmap all the folios if there are any mmaps, then,
conditionally, use ->writepages() to flush any dirty data back, and
finally discard all the pages.

The invalidate lock prevents ->read_iter(), ->write_iter() and faults
through mmap from adding pages for the duration.
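
The fault path, for example, excludes against this by taking the same lock
shared around pagecache instantiation (a minimal sketch; filemap_fault()
does this internally):

        filemap_invalidate_lock_shared(mapping);
        /* ... look up or create the folio in the pagecache ... */
        filemap_invalidate_unlock_shared(mapping);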

This is then used from netfslib to handle the flushing in unbuffered and
direct writes.
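
As a usage sketch (a hypothetical caller; "pos" and "len" are illustrative
names, not taken from the patch below), an unbuffered write path would
flush and discard the affected range with:

        /* Write back and drop any folios overlapping the write range. */
        ret = filemap_invalidate_inode(inode, true, pos, pos + len - 1);
        if (ret < 0)
                return ret;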

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Matthew Wilcox <wi...@infradead.org>
cc: Miklos Szeredi <mik...@szeredi.hu>
cc: Trond Myklebust <trond.mykleb...@hammerspace.com>
cc: Christoph Hellwig <h...@lst.de>
cc: Andrew Morton <a...@linux-foundation.org>
cc: Alexander Viro <v...@zeniv.linux.org.uk>
cc: Christian Brauner <brau...@kernel.org>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux...@kvack.org
cc: linux-fsde...@vger.kernel.org
cc: ne...@lists.linux.dev
cc: v...@lists.linux.dev
cc: linux-...@lists.infradead.org
cc: ceph-de...@vger.kernel.org
cc: linux-c...@vger.kernel.org
cc: linux-...@vger.kernel.org
cc: de...@lists.orangefs.org
---

Notes:
    Changes
    =======
    ver #2)
     - Make filemap_invalidate_inode() take a range.
     - Make netfs_unbuffered_write_iter() use filemap_invalidate_inode().

 fs/netfs/direct_write.c | 28 ++++++++++++++++++---
 include/linux/pagemap.h |  2 ++
 mm/filemap.c            | 54 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+), 4 deletions(-)

diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index bee047e20f5d..2b81cd4aae6e 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -132,12 +132,14 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = file->f_mapping->host;
+       struct address_space *mapping = file->f_mapping;
+       struct inode *inode = mapping->host;
        struct netfs_inode *ictx = netfs_inode(inode);
-       unsigned long long end;
        ssize_t ret;
+       loff_t pos = iocb->ki_pos;
+       unsigned long long end = pos + iov_iter_count(from) - 1;
 
-       _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+       _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
 
        if (!iov_iter_count(from))
                return 0;
@@ -157,7 +159,25 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ret = file_update_time(file);
        if (ret < 0)
                goto out;
-       ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               /* We could block if there are any pages in the range. */
+               ret = -EAGAIN;
+               if (filemap_range_has_page(mapping, pos, end))
+                       if (filemap_invalidate_inode(inode, true, pos, end))
+                               goto out;
+       } else {
+               ret = filemap_write_and_wait_range(mapping, pos, end);
+               if (ret < 0)
+                       goto out;
+       }
+
+       /*
+        * After a write we want buffered reads to be sure to go to disk to get
+        * the new data.  We invalidate clean cached pages from the region we're
+        * about to write.  We do this *before* the write so that we can return
+        * without clobbering -EIOCBQUEUED from ->direct_IO().
+        */
+       ret = filemap_invalidate_inode(inode, true, pos, end);
        if (ret < 0)
                goto out;
        end = iocb->ki_pos + iov_iter_count(from);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2df35e65557d..c5e33e2ca48a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping);
 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
                loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+                            loff_t start, loff_t end);
 
 static inline int filemap_fdatawait(struct address_space *mapping)
 {
diff --git a/mm/filemap.c b/mm/filemap.c
index 9a2e28bf298a..53516305b4b4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4134,6 +4134,60 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
 }
 EXPORT_SYMBOL(filemap_release_folio);
 
+/**
+ * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
+ * @inode: The inode to flush
+ * @flush: Set to write back rather than simply invalidate.
+ * @start: First byte in range.
+ * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
+ *       onwards.
+ *
+ * Invalidate all the folios on an inode that contribute to the specified
+ * range, possibly writing them back first.  Whilst the operation is
+ * undertaken, the invalidate lock is held to prevent new folios from being
+ * installed.
+ */
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+                            loff_t start, loff_t end)
+{
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t first = start >> PAGE_SHIFT;
+       pgoff_t last = end >> PAGE_SHIFT;
+       pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;
+
+       if (!mapping || !mapping->nrpages || end < start)
+               goto out;
+
+       /* Prevent new folios from being added to the inode. */
+       filemap_invalidate_lock(mapping);
+
+       if (!mapping->nrpages)
+               goto unlock;
+
+       unmap_mapping_pages(mapping, first, nr, false);
+
+       /* Write back the data if we're asked to. */
+       if (flush) {
+               struct writeback_control wbc = {
+                       .sync_mode      = WB_SYNC_ALL,
+                       .nr_to_write    = LONG_MAX,
+                       .range_start    = start,
+                       .range_end      = end,
+               };
+
+               filemap_fdatawrite_wbc(mapping, &wbc);
+       }
+
+       /* Wait for writeback to complete on all folios and discard. */
+       truncate_inode_pages_range(mapping, start, end);
+
+unlock:
+       filemap_invalidate_unlock(mapping);
+out:
+       return filemap_check_errors(mapping);
+}
+EXPORT_SYMBOL(filemap_invalidate_inode);
+
 #ifdef CONFIG_CACHESTAT_SYSCALL
 /**
  * filemap_cachestat() - compute the page cache statistics of a mapping
