Author: jhb
Date: Fri Aug  5 22:23:04 2016
New Revision: 303787
URL: https://svnweb.freebsd.org/changeset/base/303787

Log:
  MFC 303406,303501: Fix panic when using aio_fsync().
  
  303406:
  Adjust tests in fsync job scheduling loop to reduce indentation.
  
  303501:
  Fix locking issues with aio_fsync().
  
  - Use correct lock in aio_cancel_sync when dequeueing job.
  - Add _locked variants of aio_set/clear_cancel_function and use those
    to avoid lock recursion when adding and removing fsync jobs to the
    per-process sync queue.
  - While here, add a basic test for aio_fsync().
  
  PR:           211390
  Approved by:  re (kib)

Modified:
  stable/11/sys/kern/vfs_aio.c
  stable/11/tests/sys/aio/aio_test.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/vfs_aio.c
==============================================================================
--- stable/11/sys/kern/vfs_aio.c        Fri Aug  5 21:34:09 2016        (r303786)
+++ stable/11/sys/kern/vfs_aio.c        Fri Aug  5 22:23:04 2016        (r303787)
@@ -311,6 +311,7 @@ static void aio_proc_rundown_exec(void *
 static int     aio_qphysio(struct proc *p, struct kaiocb *job);
 static void    aio_daemon(void *param);
 static void    aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
+static bool    aio_clear_cancel_function_locked(struct kaiocb *job);
 static int     aio_kick(struct proc *userp);
 static void    aio_kick_nowait(struct proc *userp);
 static void    aio_kick_helper(void *context, int pending);
@@ -913,18 +914,16 @@ notification_done:
        if (job->jobflags & KAIOCB_CHECKSYNC) {
                schedule_fsync = false;
                TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
-                       if (job->fd_file == sjob->fd_file &&
-                           job->seqno < sjob->seqno) {
-                               if (--sjob->pending == 0) {
-                                       TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
-                                           list);
-                                       if (!aio_clear_cancel_function(sjob))
-                                               continue;
-                                       TAILQ_INSERT_TAIL(&ki->kaio_syncready,
-                                           sjob, list);
-                                       schedule_fsync = true;
-                               }
-                       }
+                       if (job->fd_file != sjob->fd_file ||
+                           job->seqno >= sjob->seqno)
+                               continue;
+                       if (--sjob->pending > 0)
+                               continue;
+                       TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
+                       if (!aio_clear_cancel_function_locked(sjob))
+                               continue;
+                       TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
+                       schedule_fsync = true;
                }
                if (schedule_fsync)
                        taskqueue_enqueue(taskqueue_aiod_kick,
@@ -969,40 +968,57 @@ aio_cancel_cleared(struct kaiocb *job)
        return ((job->jobflags & KAIOCB_CLEARED) != 0);
 }
 
-bool
-aio_clear_cancel_function(struct kaiocb *job)
+static bool
+aio_clear_cancel_function_locked(struct kaiocb *job)
 {
-       struct kaioinfo *ki;
 
-       ki = job->userproc->p_aioinfo;
-       AIO_LOCK(ki);
+       AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
        MPASS(job->cancel_fn != NULL);
        if (job->jobflags & KAIOCB_CANCELLING) {
                job->jobflags |= KAIOCB_CLEARED;
-               AIO_UNLOCK(ki);
                return (false);
        }
        job->cancel_fn = NULL;
-       AIO_UNLOCK(ki);
        return (true);
 }
 
 bool
-aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
+aio_clear_cancel_function(struct kaiocb *job)
 {
        struct kaioinfo *ki;
+       bool ret;
 
        ki = job->userproc->p_aioinfo;
        AIO_LOCK(ki);
-       if (job->jobflags & KAIOCB_CANCELLED) {
-               AIO_UNLOCK(ki);
+       ret = aio_clear_cancel_function_locked(job);
+       AIO_UNLOCK(ki);
+       return (ret);
+}
+
+static bool
+aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
+{
+
+       AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
+       if (job->jobflags & KAIOCB_CANCELLED)
                return (false);
-       }
        job->cancel_fn = func;
-       AIO_UNLOCK(ki);
        return (true);
 }
 
+bool
+aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
+{
+       struct kaioinfo *ki;
+       bool ret;
+
+       ki = job->userproc->p_aioinfo;
+       AIO_LOCK(ki);
+       ret = aio_set_cancel_function_locked(job, func);
+       AIO_UNLOCK(ki);
+       return (ret);
+}
+
 void
 aio_complete(struct kaiocb *job, long status, int error)
 {
@@ -1657,10 +1673,10 @@ aio_cancel_sync(struct kaiocb *job)
        struct kaioinfo *ki;
 
        ki = job->userproc->p_aioinfo;
-       mtx_lock(&aio_job_mtx);
+       AIO_LOCK(ki);
        if (!aio_cancel_cleared(job))
                TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
-       mtx_unlock(&aio_job_mtx);
+       AIO_UNLOCK(ki);
        aio_cancel(job);
 }
 
@@ -1720,7 +1736,8 @@ queueit:
                        }
                }
                if (job->pending != 0) {
-                       if (!aio_set_cancel_function(job, aio_cancel_sync)) {
+                       if (!aio_set_cancel_function_locked(job,
+                               aio_cancel_sync)) {
                                AIO_UNLOCK(ki);
                                aio_cancel(job);
                                return (0);

Modified: stable/11/tests/sys/aio/aio_test.c
==============================================================================
--- stable/11/tests/sys/aio/aio_test.c  Fri Aug  5 21:34:09 2016        (r303786)
+++ stable/11/tests/sys/aio/aio_test.c  Fri Aug  5 22:23:04 2016        (r303787)
@@ -924,6 +924,88 @@ ATF_TC_BODY(aio_socket_short_write_cance
        close(s[0]);
 }
 
+/*
+ * This test just performs a basic test of aio_fsync().
+ */
+ATF_TC_WITHOUT_HEAD(aio_fsync_test);
+ATF_TC_BODY(aio_fsync_test, tc)
+{
+       struct aiocb synccb, *iocbp;
+       struct {
+               struct aiocb iocb;
+               bool done;
+               char *buffer;
+       } buffers[16];
+       struct stat sb;
+       char pathname[PATH_MAX];
+       ssize_t rval;
+       unsigned i;
+       int fd;
+
+       ATF_REQUIRE_KERNEL_MODULE("aio");
+       ATF_REQUIRE_UNSAFE_AIO();
+
+       strcpy(pathname, PATH_TEMPLATE);
+       fd = mkstemp(pathname);
+       ATF_REQUIRE_MSG(fd != -1, "mkstemp failed: %s", strerror(errno));
+       unlink(pathname);
+
+       ATF_REQUIRE(fstat(fd, &sb) == 0);
+       ATF_REQUIRE(sb.st_blksize != 0);
+       ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);
+
+       /*
+        * Queue several asynchronous write requests.  Hopefully this
+        * forces the aio_fsync() request to be deferred.  There is no
+        * reliable way to guarantee that however.
+        */
+       srandomdev();
+       for (i = 0; i < nitems(buffers); i++) {
+               buffers[i].done = false;
+               memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
+               buffers[i].buffer = malloc(sb.st_blksize);
+               aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
+               buffers[i].iocb.aio_fildes = fd;
+               buffers[i].iocb.aio_buf = buffers[i].buffer;
+               buffers[i].iocb.aio_nbytes = sb.st_blksize;
+               buffers[i].iocb.aio_offset = sb.st_blksize * i;
+               ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
+       }
+
+       /* Queue the aio_fsync request. */
+       memset(&synccb, 0, sizeof(synccb));
+       synccb.aio_fildes = fd;
+       ATF_REQUIRE(aio_fsync(O_SYNC, &synccb) == 0);
+
+       /* Wait for requests to complete. */
+       for (;;) {
+       next:
+               rval = aio_waitcomplete(&iocbp, NULL);
+               ATF_REQUIRE(iocbp != NULL);
+               if (iocbp == &synccb) {
+                       ATF_REQUIRE(rval == 0);
+                       break;
+               }
+
+               for (i = 0; i < nitems(buffers); i++) {
+                       if (iocbp == &buffers[i].iocb) {
+                               ATF_REQUIRE(buffers[i].done == false);
+                               ATF_REQUIRE(rval == sb.st_blksize);
+                               buffers[i].done = true;
+                               goto next;
+                       }
+               }
+
+               ATF_REQUIRE_MSG(false, "unmatched AIO request");
+       }
+
+       for (i = 0; i < nitems(buffers); i++)
+               ATF_REQUIRE_MSG(buffers[i].done,
+                   "AIO request %u did not complete", i);
+
+       close(fd);
+}
+
 ATF_TP_ADD_TCS(tp)
 {
 
@@ -937,6 +1019,7 @@ ATF_TP_ADD_TCS(tp)
        ATF_TP_ADD_TC(tp, aio_socket_two_reads);
        ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
        ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
+       ATF_TP_ADD_TC(tp, aio_fsync_test);
 
        return (atf_no_error());
 }
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to