#include <sys/cdefs.h>

#include "opt_sleepqueue_profiling.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

/*
 * Hash table of sleep queue chains: a wait channel pointer hashes to one of
 * SC_TABLESIZE chains.  SC_TABLESIZE must be a power of two so that SC_MASK
 * selects a valid index.
 */
#define SC_TABLESIZE    128
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
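/*
 * Illustrative sketch (not part of the original file): how a wait channel
 * pointer selects the chain whose spin lock protects its sleep queue.
 * SC_SHIFT is defined elsewhere in the file and is not shown in this
 * excerpt; the helper name below is hypothetical.
 */
#if 0   /* example only */
static __inline struct sleepqueue_chain *
example_wchan_chain(void *wchan)
{

        /* Discard the low-order bits, then mask into the chain table. */
        return (SC_LOOKUP(wchan));
}
#endif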
        struct lock_object *sq_lock;    /* Associated lock (struct sleepqueue member). */

#ifdef SLEEPQUEUE_PROFILING
        u_int   sc_depth;               /* Length of sc_queues (struct sleepqueue_chain). */
        u_int   sc_max_depth;           /* Maximum length of sc_queues. */
#endif

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0,
    "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void     sleepq_profile(const char *wmesg);
static int      prof_enabled;
#endif

static void     sleepq_dtor(void *mem, int size, void *arg);
static int      sleepq_init(void *mem, int size, int flags);
/*
 * From init_sleepqueues(): set up each sleep queue chain and create the UMA
 * zone used for per-thread sleepqueue objects.
 */
#ifdef SLEEPQUEUE_PROFILING
        struct sysctl_oid *chain_oid;
        char chain_name[10];
#endif

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
                    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
                    NULL);
#endif
        }

#ifdef INVARIANTS
        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
            NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
            NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif
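/*
 * Illustrative sketch (an assumption, not shown in this excerpt): per-thread
 * sleepqueue objects come from the sleepq_zone created above, roughly the way
 * sleepq_alloc() and sleepq_free() use it elsewhere in this file.  The
 * example_* names are hypothetical.
 */
#if 0   /* example only */
static struct sleepqueue *
example_sleepq_alloc(void)
{

        return (uma_zalloc(sleepq_zone, M_WAITOK));
}

static void
example_sleepq_free(struct sleepqueue *sq)
{

        uma_zfree(sleepq_zone, sq);
}
#endif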
/* sleepq_lock(): lock the chain that covers the wait channel. */
        mtx_lock_spin(&sc->sc_lock);

/* sleepq_lookup(): find the sleep queue, if any, for a wait channel. */
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        mtx_assert(&sc->sc_lock, MA_OWNED);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);

/* sleepq_release(): unlock the chain. */
        mtx_unlock_spin(&sc->sc_lock);

void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue != NULL);
        MPASS(wchan != NULL);
        KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
            ("Trying sleep, but thread marked as sleeping prohibited"));

        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel has no sleep queue yet, donate this thread's
         * spare queue; otherwise park the spare queue on the free list.
         */
        if (sq == NULL) {
#ifdef INVARIANTS
                sq = td->td_sleepqueue;
                for (i = 0; i < NR_SLEEPQS; i++) {
                        KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
                            ("thread's sleep queue %d is not empty", i));
                        KASSERT(sq->sq_blockedcnt[i] == 0,
                            ("thread's sleep queue %d count mismatches", i));
                }
                KASSERT(LIST_EMPTY(&sq->sq_free),
                    ("thread's sleep queue has a non-empty free list"));
                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth++;
                if (sc->sc_depth > sc->sc_max_depth) {
                        sc->sc_max_depth = sc->sc_depth;
                        if (sc->sc_max_depth > sleepq_max_depth)
                                sleepq_max_depth = sc->sc_max_depth;
                }
#endif
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
                sq->sq_type = flags & SLEEPQ_TYPE;
        } else {
                MPASS(wchan == sq->sq_wchan);
                MPASS(lock == sq->sq_lock);
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
        sq->sq_blockedcnt[queue]++;
        td->td_sleepqueue = NULL;
        td->td_sqqueue = queue;
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
        if (flags & SLEEPQ_INTERRUPTIBLE) {
                td->td_flags |= TDF_SINTR;
                td->td_flags &= ~TDF_SLEEPABORT;
        }
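/*
 * Illustrative sketch (not part of the original file): the canonical calling
 * sequence a higher-level sleep primitive uses around sleepq_add().  The wait
 * channel, interlock, and wait message here are placeholders; a real caller
 * such as _sleep() also handles timeouts, signals, and priority.
 */
#if 0   /* example only */
static void
example_sleep_on(void *wchan, struct lock_object *interlock)
{

        sleepq_lock(wchan);                     /* Lock the chain for wchan. */
        sleepq_add(wchan, interlock, "examp", SLEEPQ_SLEEP, 0);
        sleepq_wait(wchan, 0);                  /* Block until woken up. */
}
#endif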
/* From sleepq_set_timeout(): arm the per-thread sleep callout. */
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);

/* From sleepq_sleepcnt(): report how many threads are blocked on a queue. */
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        return (sq->sq_blockedcnt[queue]);
/*
 * sleepq_catch_signals(): check for pending signals before going to sleep;
 * returns 0, EINTR, or ERESTART.
 */
        int sig, ret, stop_allowed;

        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(wchan != NULL);
        if ((td->td_pflags & TDP_WAKEUP) != 0) {
                td->td_pflags &= ~TDP_WAKEUP;
                ret = EINTR;
                goto out;
        }

        /* If no signal or suspension check is pending, switch immediately. */
        if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
        stop_allowed = (td->td_flags & TDF_SBDRY) ? SIG_STOP_NOT_ALLOWED :
            SIG_STOP_ALLOWED;
        mtx_unlock_spin(&sc->sc_lock);
        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
            (void *)td, (long)p->p_pid, td->td_name);
        mtx_lock(&ps->ps_mtx);
        sig = cursig(td, stop_allowed);
        if (sig == 0) {
                mtx_unlock(&ps->ps_mtx);
                ret = thread_suspend_check(1);
                MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
        } else {
                if (SIGISMEMBER(ps->ps_sigintr, sig))
                        ret = EINTR;
                else
                        ret = ERESTART;
                mtx_unlock(&ps->ps_mtx);
        }
        mtx_lock_spin(&sc->sc_lock);
        if (ret == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
out:
        /* Still on the sleep queue: take the thread back off it. */
        if (TD_ON_SLEEPQ(td)) {
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0))
                        panic("not waking up swapper");
        }
        mtx_unlock_spin(&sc->sc_lock);
        MPASS(td->td_lock != &sc->sc_lock);
        return (ret);
/* sleepq_switch(): put the thread to sleep on its queue and switch away. */
        mtx_assert(&sc->sc_lock, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /* If we still own a sleep queue, we have already been woken up. */
        if (td->td_sleepqueue != NULL) {
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }

        /* The sleep already timed out: dequeue the thread and return. */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_ON_SLEEPQ(td));
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0))
                        panic("not waking up swapper");
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }
#ifdef SLEEPQUEUE_PROFILING
        if (prof_enabled)
                sleepq_profile(td->td_wmesg);
#endif
        MPASS(td->td_sleepqueue == NULL);
        sched_sleep(td, pri);
        thread_lock_set(td, &sc->sc_lock);
        SDT_PROBE0(sched, , , sleep);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
/* sleepq_check_timeout(): did the sleep end because the callout fired? */
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /* TDF_TIMEOUT means the timeout ran while we slept. */
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                return (EWOULDBLOCK);
        }

        /* TDF_TIMOFAIL means the callout ran after we were already awake. */
        if (td->td_flags & TDF_TIMOFAIL)
                td->td_flags &= ~TDF_TIMOFAIL;
        /* If the callout cannot be stopped, it is running; wait for it. */
        else if (callout_stop(&td->td_slpcallout) == 0) {
                td->td_flags |= TDF_TIMEOUT;
                TD_SET_SLEEPING(td);
                mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
        }

/* sleepq_check_signals(): did a signal abort the sleep? */
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /* We are no longer in an interruptible sleep. */
        if (td->td_flags & TDF_SINTR)
                td->td_flags &= ~TDF_SINTR;
        if (td->td_flags & TDF_SLEEPABORT) {
                td->td_flags &= ~TDF_SLEEPABORT;
                return (td->td_intrval);
        }

/* From sleepq_wait(): an uninterruptible sleep must not set TDF_SINTR. */
        MPASS(!(td->td_flags & TDF_SINTR));

/* From sleepq_wait_sig(): drop the thread lock after the signal checks. */
        thread_unlock(curthread);

/* From sleepq_timedwait(): again, TDF_SINTR must not be set. */
        MPASS(!(td->td_flags & TDF_SINTR));
/* sleepq_timedwait_sig(): interruptible sleep with a timeout. */
        int rcatch, rvalt, rvals;

        rcatch = sleepq_catch_signals(wchan, pri);
        rvalt = sleepq_check_timeout();
        rvals = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        if (rvals)
                return (rvals);
        return (rvalt);

/* From sleepq_type(): look up the type of the queue for a wait channel. */
        MPASS(wchan != NULL);

/*
 * sleepq_resume_thread(): remove a thread from its sleep queue and make it
 * runnable; returns non-zero if the swapper must be woken up.
 */
        MPASS(sq->sq_wchan != NULL);
        MPASS(td->td_wchan == sq->sq_wchan);
        MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

        /* Remove the thread from the blocked queue. */
        sq->sq_blockedcnt[td->td_sqqueue]--;
        TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

        /* The last waiter reclaims the queue itself; otherwise take one off the free list. */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth--;
#endif
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);

        td->td_flags &= ~TDF_SINTR;
        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, td->td_name);

        /* Boost the thread's priority if the caller asked for it. */
        MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
        if (pri != 0 && td->td_priority > pri &&
            PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
                sched_prio(td, pri);

        /* The thread may not actually be sleeping yet; only wake it if it is. */
        if (TD_IS_SLEEPING(td)) {
                TD_CLR_SLEEPING(td);
                return (setrunnable(td));
        }
        return (0);
/* sleepq_dtor(): UMA destructor; assert that the sleepqueue is unused. */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
        struct sleepqueue *sq = mem;
        int i;

        for (i = 0; i < NR_SLEEPQS; i++) {
                MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
                MPASS(sq->sq_blockedcnt[i] == 0);
        }
}

/* From sleepq_init(): UMA init routine; reset the queues of a new sleepqueue. */
        for (i = 0; i < NR_SLEEPQS; i++) {
                TAILQ_INIT(&sq->sq_blocked[i]);
                sq->sq_blockedcnt[i] = 0;
        }
        LIST_INIT(&sq->sq_free);
/* sleepq_signal(): wake the highest-priority thread sleeping on wchan. */
        struct thread *td, *besttd;

        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Pick the highest-priority waiter; ties go to the earliest arrival. */
        besttd = NULL;
        TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
                if (besttd == NULL || td->td_priority < besttd->td_priority)
                        besttd = td;
        }
        MPASS(besttd != NULL);
        thread_lock(besttd);
        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
        thread_unlock(besttd);
        return (wakeup_swapper);
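/*
 * Illustrative sketch (an assumption, not from this file): callers such as
 * wakeup_one() act on the returned wakeup_swapper value only after dropping
 * the chain lock, kicking the swapper if a resumed thread was swapped out.
 */
#if 0   /* example only */
static void
example_wakeup_one(void *wchan)
{
        int wakeup_swapper;

        sleepq_lock(wchan);
        wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
        sleepq_release(wchan);
        if (wakeup_swapper)
                kick_proc0();
}
#endif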
/* sleepq_broadcast(): wake every thread sleeping on wchan. */
        struct thread *td, *tdn;

        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Resume every blocked thread on the queue. */
        wakeup_swapper = 0;
        TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
                thread_lock(td);
                if (sleepq_resume_thread(sq, td, pri))
                        wakeup_swapper = 1;
                thread_unlock(td);
        }
        return (wakeup_swapper);
/* sleepq_timeout(): callout handler that expires a timed sleep. */
        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

        /* The thread is asleep on its queue: time it out now. */
        if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                sc = SC_LOOKUP(wchan);
                THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
                sq = sleepq_lookup(wchan);
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                wakeup_swapper = sleepq_resume_thread(sq, td, 0);
                thread_unlock(td);
                if (wakeup_swapper)
                        kick_proc0();
                return;
        }

        /* On the sleep queue but not asleep yet: just flag the timeout. */
        if (TD_ON_SLEEPQ(td)) {
                td->td_flags |= TDF_TIMEOUT;
                thread_unlock(td);
                return;
        }

        /* Either the sleeper already yielded to us, or it has not resumed yet;
         * in the latter case TDF_TIMOFAIL tells it the timeout already ran. */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_IS_SLEEPING(td));
                td->td_flags &= ~TDF_TIMEOUT;
                TD_CLR_SLEEPING(td);
                wakeup_swapper = setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
/* sleepq_remove(): wake a specific thread sleeping on wchan. */
        MPASS(wchan != NULL);

        /* If the thread is not (or no longer) asleep on wchan, do nothing. */
        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                thread_unlock(td);
                sleepq_release(wchan);
                return;
        }
        MPASS(td->td_wchan == wchan);

/* sleepq_abort(): interrupt a sleeping thread with EINTR or ERESTART. */
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_flags & TDF_SINTR);
        MPASS(intrval == EINTR || intrval == ERESTART);

        /* If a timeout is already scheduled, let it do the wakeup. */
        if (td->td_flags & TDF_TIMEOUT)
                return (0);

        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
        td->td_intrval = intrval;
        td->td_flags |= TDF_SLEEPABORT;

        /* If not asleep yet, sleepq_catch_signals() will notice the abort. */
        if (!TD_IS_SLEEPING(td))
                return (0);
        wchan = td->td_wchan;
        MPASS(wchan != NULL);
#ifdef SLEEPQUEUE_PROFILING
#define SLEEPQ_PROF_LOCATIONS   1024
#define SLEEPQ_SBUFSIZE         512
struct sleepq_prof {
        LIST_ENTRY(sleepq_prof) sp_link;
        const char      *sp_wmesg;
        long            sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);
struct sqphead sleepq_hash[SC_TABLESIZE];
struct sqphead sleepq_prof_free;

static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

/* sleepq_profile(): count a sleep against its wait message. */
static void
sleepq_profile(const char *wmesg)
{
        struct sleepq_prof *sp;

        mtx_lock_spin(&sleepq_prof_lock);
        if (prof_enabled == 0)
                goto unlock;
        LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
                if (sp->sp_wmesg == wmesg)
                        goto done;
        sp = LIST_FIRST(&sleepq_prof_free);
        if (sp == NULL)
                goto unlock;
        sp->sp_wmesg = wmesg;
        LIST_REMOVE(sp, sp_link);
        LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
        sp->sp_count++;
unlock:
        mtx_unlock_spin(&sleepq_prof_lock);
}
/* sleepq_prof_reset(): clear all per-wmesg counters. */
static void
sleepq_prof_reset(void)
{
        struct sleepq_prof *sp;
        int enabled, i;

        mtx_lock_spin(&sleepq_prof_lock);
        enabled = prof_enabled;
        prof_enabled = 0;
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_INIT(&sleepq_hash[i]);
        LIST_INIT(&sleepq_prof_free);
        for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
                sp = &sleepq_profent[i];
                sp->sp_wmesg = NULL;
                sp->sp_count = 0;
                LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
        }
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);
}
/* Sysctl handler for debug.sleepq.enable: turn profiling on or off. */
static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
        if (req->newptr == NULL)
                return (error);
        if (v == prof_enabled)
                return (0);
        if (v == 1)
                sleepq_prof_reset();
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = !!v;
        mtx_unlock_spin(&sleepq_prof_lock);
        return (0);
}

/* Sysctl handler for debug.sleepq.reset: clear the statistics. */
static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        if (req->newptr == NULL)
                return (error);
        if (v != 0)
                sleepq_prof_reset();
        return (0);
}

/* Sysctl handler for debug.sleepq.stats: dump the per-wmesg sleep counts. */
static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sleepq_prof *sp;

        /* Disable profiling while the hash table is walked. */
        enabled = prof_enabled;
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = 0;
        mtx_unlock_spin(&sleepq_prof_lock);
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_FOREACH(sp, &sleepq_hash[i], sp_link)
                        sbuf_printf(sb, "%s\t%ld\n",
                            sp->sp_wmesg, sp->sp_count);
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);
SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A",
    "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I",
    "Enable sleepqueue profiling");
#endif
#ifdef DDB
/* "show sleepq <addr>": display the sleep queue for a wait channel. */
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
        struct lock_object *lock;

        /*
         * First look for an active sleep queue keyed by the given wait
         * channel; failing that, treat the address as a sleepqueue pointer.
         */
        wchan = (void *)addr;
        sc = SC_LOOKUP(wchan);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        goto found;

        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
                        if (sq == (struct sleepqueue *)addr)
                                goto found;
                }

        db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
        return;
found:
        db_printf("Wait channel: %p\n", sq->sq_wchan);
        db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
        if (sq->sq_lock) {
                lock = sq->sq_lock;
                db_printf("Associated Interlock: %p - (%s) %s\n", lock,
                    LOCK_CLASS(lock)->lc_name, lock->lo_name);
        }
#endif
        db_printf("Blocked threads:\n");
        for (i = 0; i < NR_SLEEPQS; i++) {
                db_printf("\nQueue[%d]:\n", i);
                if (TAILQ_EMPTY(&sq->sq_blocked[i]))
                        db_printf("\tempty\n");
                else
                        /* Walk queue i (not queue 0) so every queue is shown. */
                        TAILQ_FOREACH(td, &sq->sq_blocked[i], td_slpq) {
                                db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
                                    td->td_tid, td->td_proc->p_pid,
                                    td->td_name);
                        }
                db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
        }
}

DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif
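/*
 * Illustrative userland sketch (not part of this file): reading the
 * debug.sleepq.* profiling sysctls registered above with sysctlbyname(3).
 * These nodes exist only in kernels built with "options
 * SLEEPQUEUE_PROFILING"; the fixed-size buffer is a simplification.
 */
#if 0   /* example only; build as an ordinary userland program */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
        int enable = 1;
        char buf[65536];
        size_t len = sizeof(buf);

        if (sysctlbyname("debug.sleepq.enable", NULL, NULL, &enable,
            sizeof(enable)) != 0)
                err(1, "debug.sleepq.enable");
        if (sysctlbyname("debug.sleepq.stats", buf, &len, NULL, 0) != 0)
                err(1, "debug.sleepq.stats");
        printf("%.*s", (int)len, buf);
        return (0);
}
#endif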