The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=6df6facf44f952f64753bee00831fd93f16c99a9

commit 6df6facf44f952f64753bee00831fd93f16c99a9
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2023-08-18 12:38:28 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2023-09-09 22:00:06 +0000

    shmfd: hide direct rangelock(9) use under a wrapper
    
    Sponsored by:   The FreeBSD Foundation
    MFC after:      1 week
---
 sys/kern/uipc_shm.c | 69 +++++++++++++++++++++++++----------------------------
 1 file changed, 33 insertions(+), 36 deletions(-)
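
[Editor's note, not part of the commit: the wrappers below remove the need to spell out the shmfd's range lock and its interlock mutex at every call site. The following standalone C sketch illustrates the same pattern with made-up names (struct obj, obj_rangelock_*); the stub functions only mimic the shape of rangelock(9) and are not the kernel implementation.]

	#include <stdio.h>

	/* Stand-ins for the kernel types; purely illustrative. */
	struct mtx { int unused; };
	struct rangelock { int unused; };

	struct obj {
		struct rangelock obj_rl;	/* range lock over the object's bytes */
		struct mtx	 obj_mtx;	/* interlock the range lock requires */
	};

	/* Generic API: every caller must pass both the lock and its interlock. */
	static void *
	rangelock_wlock(struct rangelock *rl, long start, long end, struct mtx *ilk)
	{
		(void)ilk;
		printf("write-lock [%ld, %ld)\n", start, end);
		return (rl);			/* cookie identifying the held range */
	}

	static void
	rangelock_unlock(struct rangelock *rl, void *cookie, struct mtx *ilk)
	{
		(void)rl; (void)cookie; (void)ilk;
		printf("unlock\n");
	}

	/* Per-object wrappers: the object supplies its own lock and interlock. */
	#define	obj_rangelock_wlock(o, start, end)				\
		rangelock_wlock(&(o)->obj_rl, (start), (end), &(o)->obj_mtx)
	#define	obj_rangelock_unlock(o, cookie)					\
		rangelock_unlock(&(o)->obj_rl, (cookie), &(o)->obj_mtx)

	int
	main(void)
	{
		struct obj o;
		void *cookie;

		cookie = obj_rangelock_wlock(&o, 0, 4096);	/* call site stays short */
		obj_rangelock_unlock(&o, cookie);
		return (0);
	}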

diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 1975f0d60f89..a8e2502808a0 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -182,6 +182,15 @@ SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
     "Number of contig reclaims before giving up for default alloc policy");
 
+#define	shm_rangelock_unlock(shmfd, cookie)				\
+	rangelock_unlock(&(shmfd)->shm_rl, (cookie), &(shmfd)->shm_mtx)
+#define	shm_rangelock_rlock(shmfd, start, end)				\
+	rangelock_rlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
+#define	shm_rangelock_tryrlock(shmfd, start, end)			\
+	rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
+#define	shm_rangelock_wlock(shmfd, start, end)				\
+	rangelock_wlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
+
 static int
 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 {
@@ -456,10 +465,10 @@ shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
                return (error);
 #endif
        foffset_lock_uio(fp, uio, flags);
-       rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
-           uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
+       rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
+           uio->uio_offset + uio->uio_resid);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
 }
@@ -497,13 +506,10 @@ shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
        } else {
                size = uio->uio_offset + uio->uio_resid;
        }
-       if ((flags & FOF_OFFSET) == 0) {
-               rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
-                   &shmfd->shm_mtx);
-       } else {
-               rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
-                   size, &shmfd->shm_mtx);
-       }
+       if ((flags & FOF_OFFSET) == 0)
+               rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
+       else
+               rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
        if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
                error = EPERM;
        } else {
@@ -516,7 +522,7 @@ shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
                        error = uiomove_object(shmfd->shm_object,
                            shmfd->shm_size, uio);
        }
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
 }
@@ -571,22 +577,20 @@ shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
                        return (EINVAL);
 
-               rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
-                   &shmfd->shm_mtx);
+               rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
                shmfd->shm_lp_psind = conf->psind;
                shmfd->shm_lp_alloc_policy = conf->alloc_policy;
                shmfd->shm_object->un_pager.phys.data_val = conf->psind;
-               rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+               shm_rangelock_unlock(shmfd, rl_cookie);
                return (0);
        case FIOGSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
-               rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
-                   &shmfd->shm_mtx);
+               rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
                conf->psind = shmfd->shm_lp_psind;
                conf->alloc_policy = shmfd->shm_lp_alloc_policy;
-               rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+               shm_rangelock_unlock(shmfd, rl_cookie);
                return (0);
        default:
                return (ENOTTY);
@@ -917,10 +921,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
        void *rl_cookie;
        int error;
 
-       rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
-           &shmfd->shm_mtx);
+       rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
        error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        return (error);
 }
 
@@ -1258,8 +1261,7 @@ kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
                                error = ENOENT;
                        }
                } else {
-                       rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
-                           &shmfd->shm_mtx);
+                       rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 
                        /*
                         * kern_shm_open() likely shouldn't ever error out on
@@ -1331,8 +1333,7 @@ kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
                                shmfd->shm_seals |= initial_seals;
                                shm_hold(shmfd);
                        }
-                       rangelock_unlock(&shmfd->shm_rl, rl_cookie,
-                           &shmfd->shm_mtx);
+                       shm_rangelock_unlock(shmfd, rl_cookie);
                }
                sx_xunlock(&shm_dict_lock);
 
@@ -1648,8 +1649,7 @@ shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
        shmfd = fp->f_data;
        maxprot = VM_PROT_NONE;
 
-       rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
-           &shmfd->shm_mtx);
+       rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
        /* FREAD should always be set. */
        if ((fp->f_flag & FREAD) != 0)
                maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
@@ -1723,7 +1723,7 @@ shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
                vm_object_deallocate(shmfd->shm_object);
        }
 out:
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        return (error);
 }
 
@@ -1941,8 +1941,7 @@ shm_add_seals(struct file *fp, int seals)
 
        error = 0;
        shmfd = fp->f_data;
-       rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
-           &shmfd->shm_mtx);
+       rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 
        /* Even already-set seals should result in EPERM. */
        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
@@ -1973,7 +1972,7 @@ shm_add_seals(struct file *fp, int seals)
        }
        shmfd->shm_seals |= nseals;
 out:
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        return (error);
 }
 
@@ -2067,8 +2066,7 @@ shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
        off = *offset;
        len = *length;
 
-       rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len,
-           &shmfd->shm_mtx);
+       rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
        switch (cmd) {
        case SPACECTL_DEALLOC:
                if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
@@ -2082,7 +2080,7 @@ shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
        default:
                __assert_unreachable();
        }
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        return (error);
 }
 
@@ -2109,11 +2107,10 @@ shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
         * the shmfd is not necessarily a concern.  If other mechanisms are
         * added to grow a shmfd, this may need to be re-evaluated.
         */
-       rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
-           &shmfd->shm_mtx);
+       rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
        if (size > shmfd->shm_size)
                error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
-       rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
+       shm_rangelock_unlock(shmfd, rl_cookie);
        /* Translate to posix_fallocate(2) return value as needed. */
        if (error == ENOMEM)
                error = ENOSPC;
