There are two types of I/O activity that can be "in progress" at the
time fuse_release() executes: asynchronous read-ahead and write-back.
The patch ensures that both are completed before fuse_release_common()
sends FUSE_RELEASE to userspace.
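
In outline, the synchronization works as sketched below (condensed from
the hunks that follow; only the close_wait case is shown and locking
details are omitted):

	/* release side: fuse_release() with fc->close_wait set */
	list_del_init(&ff->write_entry);	/* no more rerouted writeback */
	wait_event(fi->page_waitq, atomic_read(&ff->count) == 1);
	fuse_release_common(file, FUSE_RELEASE);	/* no async users left */

	/* async I/O side: fuse_readpages_end() / fuse_writepage_finish(),
	 * both running under fc->lock */
	__fuse_file_put(req->ff);	/* never drops the last reference */
	wake_up(&fi->page_waitq);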

Since fuse_release() waits for async I/O to complete, its callbacks
(fuse_readpages_end() and fuse_writepage_finish()), which call
fuse_file_put(), can no longer be the last holders of the fuse file.
To emphasize this fact, the patch replaces fuse_file_put() with
__fuse_file_put() there.
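
To see why the callbacks can never be the last holders: the reference
taken at open time is dropped only later, by fuse_release_common(), so
while fuse_release() waits:

	open file           -> ff->count == 1      (base reference)
	+ N async requests  -> ff->count == 1 + N
	callbacks complete  -> ff->count: 1 + N -> ... -> 1, never 0

Hence atomic_dec_and_test() in __fuse_file_put() reaching zero would
indicate a reference-counting bug, which is exactly what its BUG()
catches.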

Signed-off-by: Maxim Patlasov <mpatla...@parallels.com>
---
 fs/fuse/file.c |   71 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 66 insertions(+), 5 deletions(-)

diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b81a945..d50af99 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -149,6 +149,17 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
        }
 }
 
+/*
+ * Asynchronous callbacks may use this instead of fuse_file_put() because
+ * we guarantee that they are never the last holders of ff. Hitting the
+ * BUG() below will make any violation of that guarantee obvious.
+ */
+static void __fuse_file_put(struct fuse_file *ff)
+{
+       if (atomic_dec_and_test(&ff->count))
+               BUG();
+}
+
 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                 bool isdir)
 {
@@ -302,6 +313,13 @@ void fuse_release_common(struct file *file, int opcode)
        req->misc.release.path = file->f_path;
 
        /*
+        * No asynchronous READ or WRITE requests may still be in flight
+        * if fuse file release is synchronous (close_wait is set).
+        */
+       if (ff->fc->close_wait)
+               BUG_ON(atomic_read(&ff->count) != 1);
+
+       /*
         * Normally this will send the RELEASE request, however if
         * some asynchronous READ or WRITE requests are outstanding,
         * the sending will be delayed.
@@ -321,11 +339,34 @@ static int fuse_open(struct inode *inode, struct file *file)
 static int fuse_release(struct inode *inode, struct file *file)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_file *ff = file->private_data;
 
        /* see fuse_vma_close() for !writeback_cache case */
        if (fc->writeback_cache)
                write_inode_now(inode, 1);
 
+       if (ff->fc->close_wait) {
+               struct fuse_inode *fi = get_fuse_inode(inode);
+
+               /*
+                * Must remove the file from the write list. Otherwise it
+                * is possible that this file will get more writeback
+                * rerouted from other files via write_files.
+                */
+               spin_lock(&ff->fc->lock);
+               list_del_init(&ff->write_entry);
+               spin_unlock(&ff->fc->lock);
+
+               wait_event(fi->page_waitq, atomic_read(&ff->count) == 1);
+
+               /*
+                * spin_unlock_wait(&ff->fc->lock) would be natural here, to
+                * wait for threads that have just released ff to leave their
+                * critical sections. But taking the spinlock is the first
+                * thing fuse_release_common() does, so it is unnecessary.
+                */
+       }
+
        fuse_release_common(file, FUSE_RELEASE);
 
        /* return value is ignored by VFS */
@@ -823,8 +864,17 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
                unlock_page(page);
                page_cache_release(page);
        }
-       if (req->ff)
-               fuse_file_put(req->ff, false);
+       if (req->ff) {
+               if (fc->close_wait) {
+                       struct fuse_inode *fi = get_fuse_inode(req->inode);
+
+                       spin_lock(&fc->lock);
+                       __fuse_file_put(req->ff);
+                       wake_up(&fi->page_waitq);
+                       spin_unlock(&fc->lock);
+               } else
+                       fuse_file_put(req->ff, false);
+       }
 }
 
 struct fuse_fill_data {
@@ -851,6 +901,7 @@ static void fuse_send_readpages(struct fuse_fill_data *data)
        if (fc->async_read) {
                req->ff = fuse_file_get(ff);
                req->end = fuse_readpages_end;
+               req->inode = data->inode;
                fuse_request_send_background(fc, req);
        } else {
                fuse_request_send(fc, req);
@@ -1537,7 +1588,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
        for (i = 0; i < req->num_pages; i++)
                __free_page(req->pages[i]);
 
-       if (req->ff)
+       if (req->ff && !fc->close_wait)
                fuse_file_put(req->ff, false);
 }
 
@@ -1554,6 +1605,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
                dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
                bdi_writeout_inc(bdi);
        }
+       if (fc->close_wait)
+               __fuse_file_put(req->ff);
        wake_up(&fi->page_waitq);
 }
 
@@ -1694,8 +1747,16 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
 
        ff = __fuse_write_file_get(fc, fi);
        err = fuse_flush_times(inode, ff);
-       if (ff)
-               fuse_file_put(ff, 0);
+       if (ff) {
+               if (fc->close_wait) {
+                       spin_lock(&fc->lock);
+                       __fuse_file_put(ff);
+                       wake_up(&fi->page_waitq);
+                       spin_unlock(&fc->lock);
+
+               } else
+                       fuse_file_put(ff, false);
+       }
 
        return err;
 }
