#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);

#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

#define ASX_RETRIES             10
#define ASX_LOOPS               10000
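/*
 * Tunables for adaptive spinning when a writer finds the lock held by
 * readers: ASX_RETRIES bounds how many rounds of spinning are attempted and
 * ASX_LOOPS bounds the iterations per round (interpretation based on how
 * these constants are used in _sx_xlock_hard() below).
 */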
#define GIANT_DECLARE \
        WITNESS_SAVE_DECL(Giant) \

#define GIANT_SAVE() do { \
        if (mtx_owned(&Giant)) { \
                WITNESS_SAVE(&Giant.lock_object, Giant); \
                while (mtx_owned(&Giant)) { \

#define GIANT_RESTORE() do { \
        if (_giantcnt > 0) { \
                mtx_assert(&Giant, MA_NOTOWNED); \
                while (_giantcnt--) \
                WITNESS_RESTORE(&Giant.lock_object, Giant); \
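/*
 * Acquiring an sx lock can sleep, and a thread may legitimately hold Giant
 * when it does so.  The GIANT_SAVE()/GIANT_RESTORE() macros above (shown
 * only in part here) record how many times Giant is recursively held in
 * _giantcnt, release it before blocking, and reacquire it afterwards so
 * that Giant is never held across a sleep.
 */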
#define sx_recurse      lock_object.lo_data
#define sx_recursed(sx) ((sx)->sx_recurse != 0)
static void     assert_sx(struct lock_object *lock, int what);
static void     db_show_sx(struct lock_object *lock);
static void     lock_sx(struct lock_object *lock, int how);
static int      owner_sx(struct lock_object *lock, struct thread **owner);
static int      unlock_sx(struct lock_object *lock);
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_ddb_show = db_show_sx,
        .lc_owner = owner_sx,
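/*
 * These lock_class_sx methods let lock-agnostic kernel code operate on sx
 * locks through the generic struct lock_class interface: assert_sx() backs
 * generic lock assertions, db_show_sx() backs DDB's "show lock", lock_sx()
 * and unlock_sx() allow code that takes an arbitrary lock_object (such as
 * the sleep/condvar paths) to drop and reacquire an sx, and owner_sx()
 * answers "who owns this lock?" for consumers of the class interface.
 */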
#define _sx_assert(sx, what, file, line)

        sx_assert((struct sx *)lock, what);

        sx = (struct sx *)lock;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
owner_sx(struct lock_object *lock, struct thread **owner)
        struct sx *sx = (struct sx *)lock;
        uintptr_t x = sx->sx_lock;

        *owner = (struct thread *)SX_OWNER(x);
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
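/*
 * owner_sx() stores the exclusive holder (if any) in *owner and returns
 * nonzero when the lock is held at all: for a shared hold that means at
 * least one sharer exists, and for an exclusive hold that an owning thread
 * is recorded in the lock word.
 */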
        struct sx_args *sargs = arg;

        sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
            ("%s: sx_lock not aligned for %s: %p", __func__, description,

        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
        if (opts & SX_RECURSE)
                flags |= LO_RECURSABLE;

        flags |= opts & SX_NOADAPTIVE;
        sx->sx_lock = SX_LOCK_UNLOCKED;
        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
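/*
 * Illustrative sketch only (not part of this file): how a consumer would
 * typically use the sx(9) API that sx_init_flags() and sx_destroy() set up
 * and tear down.  The names example_data_lock, example_count, and the
 * example_* functions are hypothetical.
 */
#if 0
static struct sx example_data_lock;
static int example_count;

static void
example_setup(void)
{

        sx_init(&example_data_lock, "example data");
}

static void
example_write(void)
{

        sx_xlock(&example_data_lock);           /* exclusive: mutates state */
        example_count++;
        sx_xunlock(&example_data_lock);
}

static int
example_read(void)
{
        int v;

        sx_slock(&example_data_lock);           /* shared: read-only access */
        v = example_count;
        sx_sunlock(&example_data_lock);
        return (v);
}

static void
example_teardown(void)
{

        sx_destroy(&example_data_lock);
}
#endif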
        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
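/*
 * sx_destroy() requires the lock to be unheld and unrecursed; writing
 * SX_LOCK_DESTROYED into the lock word afterwards makes any later use trip
 * the "destroyed sx" KASSERTs in the lock and unlock paths below.
 */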
_sx_slock(struct sx *sx, int opts, const char *file, int line)

        if (SCHEDULER_STOPPED())
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
        error = __sx_slock(sx, opts, file, line);
        LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&sx->lock_object, 0, file, line);
        curthread->td_locks++;
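/*
 * The TD_IS_IDLETHREAD() assertions here and in the other entry points
 * exist because taking an sx lock may sleep and idle threads must never
 * sleep; the check is bypassed while kdb_active so the debugger can still
 * call into this code.  WITNESS_CHECKORDER() runs before the acquisition
 * attempt, while WITNESS_LOCK() and the td_locks accounting run only after
 * __sx_slock() succeeds.
 */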
        if (SCHEDULER_STOPPED())

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));

        KASSERT(x != SX_LOCK_DESTROYED,
            ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
        if (!(x & SX_LOCK_SHARED))
        if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
                LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE,
                    sx, 0, 0, file, line);
                curthread->td_locks++;
        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
_sx_xlock(struct sx *sx, int opts, const char *file, int line)

        if (SCHEDULER_STOPPED())
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
        error = __sx_xlock(sx, curthread, opts, file, line);
        LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
        WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
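/*
 * __sx_xlock() and __sx_slock() are the inline fast paths from sys/sx.h:
 * they attempt a single uncontended atomic update of sx_lock and fall back
 * to _sx_xlock_hard()/_sx_slock_hard() below when the lock is busy.
 */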
        if (SCHEDULER_STOPPED())

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        if (sx_xlocked(sx) &&
            (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
                    (uintptr_t)curthread);
        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
                    sx, 0, 0, file, line);
                curthread->td_locks++;
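/*
 * sx_try_xlock() has two success cases: if the calling thread already owns
 * the lock exclusively and it was created with SX_RECURSE (LO_RECURSABLE),
 * the recursion count is bumped and SX_LOCK_RECURSED is set; otherwise the
 * lock is taken only if a single compare-and-set moves it from
 * SX_LOCK_UNLOCKED to being owned by curthread.  It never blocks.
 */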
        if (SCHEDULER_STOPPED())
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));

        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
        __sx_sunlock(sx, file, line);
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
        if (SCHEDULER_STOPPED())
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));

        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
        __sx_xunlock(sx, curthread, file, line);
        if (SCHEDULER_STOPPED())

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));

        x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
        success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
            (uintptr_t)curthread | x);
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
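/*
 * sx_try_upgrade() can only succeed when the caller is the sole shared
 * holder: the compare-and-set exchanges a lock word containing exactly one
 * sharer for one naming curthread as the exclusive owner, while the
 * SX_LOCK_EXCLUSIVE_WAITERS bit is carried over in both the expected and
 * the new value so queued writers are not forgotten.
 */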
        if (SCHEDULER_STOPPED())

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
                panic("downgrade of a recursed lock");

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
                LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);

        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
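/*
 * sx_downgrade() converts the exclusive owner into a single shared holder
 * (SX_SHARERS_LOCK(1)) while preserving SX_LOCK_EXCLUSIVE_WAITERS.  When no
 * readers are queued a lone compare-and-set suffices; otherwise the lock
 * word is rewritten with the sleep queue held and the elided code guarded
 * by the SX_LOCK_SHARED_WAITERS check wakes the queued readers, since they
 * can now share the lock with the downgrading thread.
 */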
        volatile struct thread *owner;
        u_int i, spintries = 0;
#ifdef LOCK_PROFILING
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;

        if (SCHEDULER_STOPPED())

        if (sx_xlocked(sx)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
                    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

        all_time -= lockstat_nsecs(&sx->lock_object);
        while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
                PMC_SOFT_CALL( , , lock, failed);
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,

                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        if ((x & SX_LOCK_SHARED) == 0) {
                                owner = (struct thread *)x;
                                if (TD_IS_RUNNING(owner)) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                        while (SX_OWNER(sx->sx_lock) == x &&
                                            TD_IS_RUNNING(owner)) {
                        } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
                                for (i = 0; i < ASX_LOOPS; i++) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                            "%s: shared spinning on %p with %u and %u",
                                            __func__, sx, spintries, i);
                                        if ((x & SX_LOCK_SHARED) == 0 ||
                if (x == SX_LOCK_UNLOCKED) {

                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {

                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock,
                            SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
                            tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                CTR2(KTR_LOCK, "%s: %p claimed by new writer",

                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",

                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",

                sleep_time -= lockstat_nsecs(&sx->lock_object);
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                if (!(opts & SX_INTERRUPTIBLE))
                sleep_time += lockstat_nsecs(&sx->lock_object);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                            "%s: interruptible sleep by %p suspended by signal",
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",

        all_time += lockstat_nsecs(&sx->lock_object);
        LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
            LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
            (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
            contested, waittime, file, line);
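/*
 * Taken together, the loop above implements the contested exclusive
 * acquisition: spin adaptively while the current exclusive owner is running
 * on another CPU, spin a bounded number of times (ASX_RETRIES rounds of
 * ASX_LOOPS iterations) while the lock is held by readers, and otherwise
 * set SX_LOCK_EXCLUSIVE_WAITERS and block on the exclusive sleep queue,
 * optionally interruptibly when SX_INTERRUPTIBLE was passed.  The LOCKSTAT
 * records split the measured wait into sleep time and spin time.
 */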
        int queue, wakeup_swapper;

        if (SCHEDULER_STOPPED())

        MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);

        MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
            SX_LOCK_EXCLUSIVE_WAITERS));
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        x = SX_LOCK_UNLOCKED;

        if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
                x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
        atomic_store_rel_ptr(&sx->sx_lock, x);
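/*
 * The wakeup policy in _sx_xunlock_hard() prefers queued readers: when the
 * shared-waiters bit is set (and, in the elided part of the condition, the
 * shared sleep queue is actually non-empty) the shared queue is chosen and
 * the exclusive-waiters bit is folded back into the new lock word so
 * writers are not lost; otherwise the exclusive queue is woken.  The lock
 * word is released with atomic_store_rel_ptr() before the chosen queue is
 * broadcast.
 */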
        volatile struct thread *owner;
#ifdef LOCK_PROFILING
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;

        if (SCHEDULER_STOPPED())

        all_time -= lockstat_nsecs(&sx->lock_object);

                if (x & SX_LOCK_SHARED) {
                        MPASS(!(x & SX_LOCK_SHARED_WAITERS));
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
                            x + SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                            "%s: %p succeed %p -> %p", __func__,
                                            (void *)(x + SX_ONE_SHARER));

                PMC_SOFT_CALL( , , lock, failed);
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,

                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)x;
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner)) {
                if (x & SX_LOCK_SHARED) {

                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {

                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",

                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",

                sleep_time -= lockstat_nsecs(&sx->lock_object);
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                if (!(opts & SX_INTERRUPTIBLE))
                sleep_time += lockstat_nsecs(&sx->lock_object);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                            "%s: interruptible sleep by %p suspended by signal",
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",

        all_time += lockstat_nsecs(&sx->lock_object);
        LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
            LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
            (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
            contested, waittime, file, line);
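/*
 * _sx_slock_hard() mirrors the exclusive slow path for readers: while the
 * lock is in shared mode a compare-and-set simply adds SX_ONE_SHARER to the
 * sharer count; when a writer owns the lock and is running on another CPU
 * the reader spins adaptively (unless SX_NOADAPTIVE); otherwise it sets
 * SX_LOCK_SHARED_WAITERS and sleeps on the shared queue, again splitting
 * the measured wait into sleep and spin time for LOCKSTAT.
 */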
        if (SCHEDULER_STOPPED())

        KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
            ("%s: waiting sharers", __func__));

                if (SX_SHARERS(x) > 1) {
                        if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
                            x - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)x,
                                            (void *)(x - SX_ONE_SHARER));

                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        MPASS(x == SX_SHARERS_LOCK(1));
                        if (atomic_cmpset_rel_ptr(&sx->sx_lock,
                            SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",

                MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

                if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
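/*
 * The shared-unlock slow path handles three cases in turn: if other sharers
 * remain it just drops the count by SX_ONE_SHARER; if this is the last
 * sharer and no writers are queued it swings the word back to
 * SX_LOCK_UNLOCKED; and if this is the last sharer with exclusive waiters
 * pending it clears the lock word under the sleep queue lock and wakes the
 * exclusive queue.
 */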
#ifdef INVARIANT_SUPPORT

_sx_assert(struct sx *sx, int what, const char *file, int line)

        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:

                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file,
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);

        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);

                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);

                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
db_show_sx(struct lock_object *lock)

        sx = (struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                        db_printf(" recursed: %d\n", sx->sx_recurse);

        db_printf(" waiters: ");
        switch (sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                db_printf("none\n");
sx_chain(struct thread *td, struct thread **ownerp)

        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)

        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
                db_printf("XLOCK\n");