#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif
static void	assert_rm(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};
static void
assert_rm(struct lock_object *lock, int what)
{

	rm_assert((struct rmlock *)lock, what);
}
static void
lock_rm(struct lock_object *lock, int how)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	if (how)
		rm_wlock(rm);
	else
		panic("lock_rm called in read mode");
}
static int
unlock_rm(struct lock_object *lock)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	rm_wunlock(rm);
	return (1);
}
#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{
	struct rmlock *rm;
	struct lock_class *lc;

	rm = (struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
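/*
 * Spin mutex protecting the rm_activeReaders lists; used by the IPI
 * handler and the unlock slow path below.
 */
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);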
/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
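/*
 * Usage sketch (illustrative only, not part of the original file): a
 * reader supplies storage for a priority tracker, typically on the
 * stack, and passes it to both the lock and unlock calls.  The names
 * "foo_lock" and "foo_data" are hypothetical.
 *
 *	static struct rmlock foo_lock;
 *
 *	rm_init_flags(&foo_lock, "foo", RM_RECURSE);
 *
 *	struct rm_priotracker tracker;
 *	rm_rlock(&foo_lock, &tracker);
 *	(read foo_data)
 *	rm_runlock(&foo_lock, &tracker);
 *
 *	rm_wlock(&foo_lock);
 *	(modify foo_data)
 *	rm_wunlock(&foo_lock);
 */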
void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}
int
rm_wowned(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}
void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}
void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}
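/*
 * Illustrative only, assuming the RM_SYSINIT wrapper in <sys/rmlock.h>:
 * these SYSINIT helpers are normally reached through that macro, which
 * builds the rm_args structure at compile time (hypothetical lock name):
 *
 *	static struct rmlock bar_lock;
 *	RM_SYSINIT(bar_lock_init, &bar_lock, "bar rmlock");
 */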
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0 &&
	    rm_trackers_present(pc, rm, curthread) != 0) {
		mtx_lock_spin(&rm_spinlock);
		LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
		    rmp_qentry);
		tracker->rmp_flags = RMPF_ONQUEUE;
		mtx_unlock_spin(&rm_spinlock);
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);
#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}
void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}
#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	curthread->td_locks--;
}
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
}
#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions as witness does not track read locks in rmlocks.
 */
void
_rm_assert(struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}
static void
db_show_rm(struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif
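/*
 * Note (assumption, not stated in this file): db_show_rm() is reached
 * from the DDB "show lock <address>" command, which dispatches to the
 * lc_ddb_show hook of the lock's class; see kern/subr_lock.c.
 */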