Author: mjg
Date: Wed Dec 11 23:11:21 2019
New Revision: 355633
URL: https://svnweb.freebsd.org/changeset/base/355633

Log:
  vfs: locking primitives which elide ->v_vnlock and shared locking disablement

  Neither of these features is needed by many consumers, and supporting them
  results in avoidable reads which in turn put these routines on profiles due
  to cache-line ping-ponging.

  On top of that, the current lockmgr entry point is slower than necessary in
  the single-threaded case. As an attempted cleanup in preparation for other
  changes, provide new routines which do not support any of the aforementioned
  features.

  With these patches in place vop_stdlock and vop_stdunlock disappear from
  flamegraphs during -j 104 buildkernel.

  Reviewed by:	jeff (previous version)
  Tested by:	pho
  Differential Revision:	https://reviews.freebsd.org/D22665

Modified:
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  head/sys/fs/tmpfs/tmpfs_subr.c
  head/sys/fs/tmpfs/tmpfs_vnops.c
  head/sys/kern/kern_lock.c
  head/sys/kern/vfs_default.c
  head/sys/sys/lockmgr.h
  head/sys/sys/vnode.h

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Wed Dec 11 23:11:21 2019	(r355633)
@@ -5932,7 +5932,7 @@ zfs_lock(ap)
 	znode_t	*zp;
 	int	err;
 
-	err = vop_stdlock(ap);
+	err = vop_lock(ap);
 	if (err == 0 && (ap->a_flags & LK_NOWAIT) == 0) {
 		vp = ap->a_vp;
 		zp = vp->v_data;
@@ -5989,7 +5989,11 @@ struct vop_vector zfs_vnodeops = {
 	.vop_vptocnp =		zfs_vptocnp,
 #ifdef DIAGNOSTIC
 	.vop_lock1 =		zfs_lock,
+#else
+	.vop_lock1 =		vop_lock,
 #endif
+	.vop_unlock =		vop_unlock,
+	.vop_islocked =		vop_islocked,
 };
 
 struct vop_vector zfs_fifoops = {

Modified: head/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/fs/tmpfs/tmpfs_subr.c	Wed Dec 11 23:11:21 2019	(r355633)
@@ -671,7 +671,7 @@ loop:
 	MPASS(vp != NULL);
 
 	/* lkflag is ignored, the lock is exclusive */
-	(void) vn_lock(vp, lkflag | LK_RETRY);
+	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 
 	vp->v_data = node;
 	vp->v_type = node->tn_type;

Modified: head/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_vnops.c	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/fs/tmpfs/tmpfs_vnops.c	Wed Dec 11 23:11:21 2019	(r355633)
@@ -1632,6 +1632,9 @@ struct vop_vector tmpfs_vnodeop_entries = {
 	.vop_whiteout =			tmpfs_whiteout,
 	.vop_bmap =			VOP_EOPNOTSUPP,
 	.vop_vptocnp =			tmpfs_vptocnp,
+	.vop_lock1 =			vop_lock,
+	.vop_unlock =			vop_unlock,
+	.vop_islocked =			vop_islocked,
 };
 
 /*
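
The hunks above show the adoption pattern: a filesystem whose vnodes always use
the embedded v_lock (i.e. one that never re-points v_vnlock the way stacked
filesystems such as nullfs do) opts in by filling three vop_vector slots. As a
minimal sketch, using a made-up examplefs that is not part of this commit:

struct vop_vector examplefs_vnodeops = {
	.vop_default =		&default_vnodeops,
	/* ... filesystem-specific operations ... */
	.vop_lock1 =		vop_lock,	/* instead of vop_stdlock */
	.vop_unlock =		vop_unlock,	/* instead of vop_stdunlock */
	.vop_islocked =		vop_islocked,	/* instead of vop_stdislocked */
};

If the slots are left unset, the operations fall through to vop_default, which
for &default_vnodeops still resolves to the vop_std* routines.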

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/kern/kern_lock.c	Wed Dec 11 23:11:21 2019	(r355633)
@@ -1156,6 +1156,88 @@ lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
 	return (0);
 }
 
+/*
+ * Lightweight entry points for common operations.
+ *
+ * Functionality is similar to sx locks, in that none of the additional lockmgr
+ * features are supported. To be clear, these are NOT supported:
+ * 1. shared locking disablement
+ * 2. returning with an error after sleep
+ * 3. unlocking the interlock
+ *
+ * If in doubt, use lockmgr_*_fast_path.
+ */
+int
+lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
+{
+	uintptr_t x;
+
+	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
+	MPASS((flags & LK_INTERLOCK) == 0);
+	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
+
+	if (LK_CAN_WITNESS(flags))
+		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
+		    file, line, NULL);
+	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
+		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
+		return (0);
+	}
+
+	return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
+}
+
+int
+lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
+{
+	uintptr_t tid;
+
+	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
+	MPASS((flags & LK_INTERLOCK) == 0);
+
+	if (LK_CAN_WITNESS(flags))
+		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
+		    LOP_EXCLUSIVE, file, line, NULL);
+	tid = (uintptr_t)curthread;
+	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
+		    flags);
+		return (0);
+	}
+
+	return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
+}
+
+int
+lockmgr_unlock(struct lock *lk)
+{
+	uintptr_t x, tid;
+	const char *file;
+	int line;
+
+	file = __FILE__;
+	line = __LINE__;
+
+	_lockmgr_assert(lk, KA_LOCKED, file, line);
+	x = lk->lk_lock;
+	if (__predict_true(x & LK_SHARE) != 0) {
+		if (lockmgr_sunlock_try(lk, &x)) {
+			lockmgr_note_shared_release(lk, file, line);
+		} else {
+			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
+		}
+	} else {
+		tid = (uintptr_t)curthread;
+		if (!lockmgr_recursed(lk) &&
+		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
+			lockmgr_note_exclusive_release(lk, file, line);
+		} else {
+			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
+		}
+	}
+	return (0);
+}
+
 int
 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
     const char *wmesg, int pri, int timo, const char *file, int line)
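
A minimal usage sketch for the new entry points, assuming a lockmgr lock that
was not initialized with LK_NOSHARE; the function below is made up and not part
of this commit. The requested lock type must be passed and LK_INTERLOCK must
not be:

static void
example_use(struct lock *lk)
{

	/* Shared acquire; the uncontended case stays on the fast path. */
	(void)lockmgr_slock(lk, LK_SHARED, __FILE__, __LINE__);
	/* ... read-only access to the protected data ... */
	(void)lockmgr_unlock(lk);

	/* Exclusive acquire via a single cmpset when the lock is free. */
	(void)lockmgr_xlock(lk, LK_EXCLUSIVE, __FILE__, __LINE__);
	/* ... modify the protected data ... */
	(void)lockmgr_unlock(lk);
}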

Modified: head/sys/kern/vfs_default.c
==============================================================================
--- head/sys/kern/vfs_default.c	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/kern/vfs_default.c	Wed Dec 11 23:11:21 2019	(r355633)
@@ -545,6 +545,71 @@ vop_stdislocked(ap)
 }
 
 /*
+ * Variants of the above set.
+ *
+ * Differences are:
+ * - shared locking disablement is not supported
+ * - v_vnlock pointer is not honored
+ */
+int
+vop_lock(ap)
+	struct vop_lock1_args /* {
+		struct vnode *a_vp;
+		int a_flags;
+		char *file;
+		int line;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+	int flags = ap->a_flags;
+	struct mtx *ilk;
+
+	MPASS(vp->v_vnlock == &vp->v_lock);
+
+	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
+		goto other;
+
+	switch (flags & LK_TYPE_MASK) {
+	case LK_SHARED:
+		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
+	case LK_EXCLUSIVE:
+		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
+	}
+other:
+	ilk = VI_MTX(vp);
+	return (lockmgr_lock_fast_path(&vp->v_lock, flags,
+	    &ilk->lock_object, ap->a_file, ap->a_line));
+}
+
+int
+vop_unlock(ap)
+	struct vop_unlock_args /* {
+		struct vnode *a_vp;
+		int a_flags;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+
+	MPASS(vp->v_vnlock == &vp->v_lock);
+	MPASS(ap->a_flags == 0);
+
+	return (lockmgr_unlock(&vp->v_lock));
+}
+
+int
+vop_islocked(ap)
+	struct vop_islocked_args /* {
+		struct vnode *a_vp;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+
+	MPASS(vp->v_vnlock == &vp->v_lock);
+
+	return (lockstatus(&vp->v_lock));
+}
+
+/*
  * Return true for select/poll.
  */
 int

Modified: head/sys/sys/lockmgr.h
==============================================================================
--- head/sys/sys/lockmgr.h	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/sys/lockmgr.h	Wed Dec 11 23:11:21 2019	(r355633)
@@ -74,6 +74,10 @@ int lockmgr_lock_fast_path(struct lock *lk, u_int fla
 	    struct lock_object *ilk, const char *file, int line);
 int	lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
 	    struct lock_object *ilk);
+int	lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line);
+int	lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line);
+int	lockmgr_unlock(struct lock *lk);
+
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void	_lockmgr_assert(const struct lock *lk, int what, const char *file, int line);
 #endif

Modified: head/sys/sys/vnode.h
==============================================================================
--- head/sys/sys/vnode.h	Wed Dec 11 23:09:12 2019	(r355632)
+++ head/sys/sys/vnode.h	Wed Dec 11 23:11:21 2019	(r355633)
@@ -761,11 +761,14 @@ int vop_stdgetwritemount(struct vop_getwritemount_args
 int	vop_stdgetpages(struct vop_getpages_args *);
 int	vop_stdinactive(struct vop_inactive_args *);
 int	vop_stdneed_inactive(struct vop_need_inactive_args *);
-int	vop_stdislocked(struct vop_islocked_args *);
 int	vop_stdkqfilter(struct vop_kqfilter_args *);
 int	vop_stdlock(struct vop_lock1_args *);
-int	vop_stdputpages(struct vop_putpages_args *);
 int	vop_stdunlock(struct vop_unlock_args *);
+int	vop_stdislocked(struct vop_islocked_args *);
+int	vop_lock(struct vop_lock1_args *);
+int	vop_unlock(struct vop_unlock_args *);
+int	vop_islocked(struct vop_islocked_args *);
+int	vop_stdputpages(struct vop_putpages_args *);
 int	vop_nopoll(struct vop_poll_args *);
 int	vop_stdaccess(struct vop_access_args *ap);
 int	vop_stdaccessx(struct vop_accessx_args *ap);
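
For contrast, an abridged sketch of the indirection the new routines elide;
this is not a verbatim copy of vfs_default.c and the helper names are made up.
vop_stdlock operates on whatever vp->v_vnlock points at, which stacked
filesystems such as nullfs re-point at the lower vnode's lock, while vop_lock
always operates on the embedded v_lock and asserts that v_vnlock was never
redirected:

/* Roughly the vop_std* path: a dependent load of v_vnlock before the lock. */
static int
stdlock_style(struct vnode *vp, int flags, const char *file, int line)
{
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_fast_path(vp->v_vnlock, flags,
	    &ilk->lock_object, file, line));
}

/* The elided-indirection path: the lock is always the embedded v_lock. */
static int
vop_lock_style(struct vnode *vp, const char *file, int line)
{

	MPASS(vp->v_vnlock == &vp->v_lock);
	return (lockmgr_xlock(&vp->v_lock, LK_EXCLUSIVE, file, line));
}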