#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m)	((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
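
/*
 * Illustrative note (not part of the original file): mtx_lock is a single
 * word that holds either MTX_UNOWNED or the owning thread pointer, with the
 * MTX_RECURSED/MTX_CONTESTED flags packed into its low bits, so that:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & ~MTX_FLAGMASK);
 *	int contested = (v & MTX_CONTESTED) != 0;
 *
 * Thread structures are sufficiently aligned that the low bits of the
 * pointer are always zero, which is what makes this packing safe.
 */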
static void	assert_mtx(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(struct lock_object *lock, int what)
{

	mtx_assert((struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(struct lock_object *lock, struct thread **owner)
{
	struct mtx *m = (struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
}
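
/*
 * Illustrative usage (hypothetical "foo" names, not from this file):
 * consumers use the mtx_lock()/mtx_unlock() macros, which resolve to the
 * functions above in kernels built with LOCK_DEBUG (and from modules), or
 * to the inline fast path otherwise:
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	foo_count++;		...access state protected by foo_mtx...
 *	mtx_unlock(&foo_mtx);
 */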
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
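
/*
 * Illustrative usage (hypothetical names): spin mutexes protect data shared
 * with interrupt filters and the scheduler; acquiring one disables
 * interrupts on the local CPU via spinlock_enter():
 *
 *	static struct mtx foo_intr_mtx;
 *
 *	mtx_init(&foo_intr_mtx, "foo intr", NULL, MTX_SPIN);
 *	mtx_lock_spin(&foo_intr_mtx);
 *	...touch state shared with the interrupt path...
 *	mtx_unlock_spin(&foo_intr_mtx);
 */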
/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
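
/*
 * Illustrative usage (hypothetical "foo" names, not from this file):
 * mtx_trylock() returns non-zero on success, so callers can avoid blocking
 * and fall back to deferred work when the lock is busy:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_process();
 *		mtx_unlock(&foo_mtx);
 *	} else
 *		foo_defer();
 */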
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing on
		 * another CPU (or the lock could have changed owners) while
		 * we were waiting on the turnstile chain lock.  If so, drop
		 * the turnstile lock and try again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
#endif
}
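
/*
 * Illustrative note (simplified; the real macro lives in sys/mutex.h): the
 * inlined fast path tried before entering _mtx_lock_sleep() above is a
 * single acquire compare-and-swap of MTX_UNOWNED to the owning thread,
 * roughly:
 *
 *	if (!_mtx_obtain_lock(m, tid))
 *		_mtx_lock_sleep(m, tid, opts, file, line);
 *
 * so the slow path only runs when the lock is held or recursed, and an
 * uncontested acquisition costs one atomic operation.
 */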
/*
 * Report (and under WITNESS, display) the owner of a spin lock that has
 * been spun on for too long, then panic.  Called from the spin loops once
 * their iteration limits are exceeded.
 */
static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
#endif
}
/*
 * Acquire td's thread lock, spinning until td_lock is stable; td_lock may
 * change underneath us while the thread is migrated between run queues.
 */
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
}
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
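
/*
 * Illustrative sketch (an assumption, loosely based on how the schedulers
 * use these helpers): when migrating a thread between run queues, its lock
 * is parked on blocked_lock, the thread is moved while holding the
 * destination lock, and the new lock pointer is then published:
 *
 *	lock = thread_lock_block(td);
 *	...lock the destination queue and move td onto it...
 *	thread_lock_unblock(td, &destination_queue_lock);
 *
 * Readers that find td_lock == &blocked_lock spin until the release store
 * in thread_lock_unblock() makes the new lock pointer visible.
 */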
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
#ifdef INVARIANT_SUPPORT
/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		/* Check the whole structure, not just the pointer. */
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
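
/*
 * Illustrative usage (hypothetical names): MTX_SYSINIT() registers a
 * SYSINIT hook that calls mtx_sysinit() above during boot, so file-scope
 * mutexes are initialized without an explicit call site:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo global lock", MTX_DEF);
 */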
/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}
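
/*
 * Illustrative lifecycle (hypothetical softc, not from this file): a driver
 * typically embeds a mutex in its softc, initializes it at attach, and
 * destroys it, unheld, at detach:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_busy;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 */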
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif