#include <sys/cdefs.h>

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
71 "Average number of items examined per softclock call. Units = 1/1000");
74 "Average number of Giant callouts made per softclock call. Units = 1/1000");
77 "Average number of lock callouts made per softclock call. Units = 1/1000");
80 "Average number of MP callouts made per softclock call. Units = 1/1000");
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_ticks;
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks
#ifdef SMP
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)
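
/*
 * Illustrative sketch (not part of this file): the access pattern the
 * macros above are built for.  example_inspect_cpu() is a hypothetical
 * helper shown only for illustration.
 */
static void
example_inspect_cpu(int cpu)
{
	struct callout_cpu *cc;

	cc = CC_CPU(cpu);	/* per-CPU state under SMP, the single cc_cpu otherwise */
	CC_LOCK(cc);		/* all callwheel manipulation happens under cc_lock */
	/* ... examine or adjust the per-CPU callout state here ... */
	CC_UNLOCK(cc);
}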
static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;

	return (cc->cc_migration_cpu != CPUBLOCK);
	c->c_flags = CALLOUT_LOCAL_ALLOC;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)

	MPASS(c != NULL && cc != NULL);
		panic("died while creating standard software ithreads");
		panic("died while creating standard software ithreads");
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	struct callout_tailq *sc;

	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	while (skip < ncallout && skip < limit) {
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	if (cpu == CPUBLOCK) {
		while (c->c_cpu == CPUBLOCK)
    void (*func)(void *), void *arg, int cpu)

	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_time = ticks + to_ticks;
		(*callout_new_inserted)(cpu,
	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
    int *lockcalls, int *gcalls)

	void (*c_func)(void *);
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
	void (*new_func)(void *);
	int new_cpu, new_ticks;
	static uint64_t maxdt = 36893488147419102LL;
	static timeout_t *lastfunc;
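	/*
	 * Hedged note (not in the original source): the fractional part of a
	 * struct bintime counts 1/2^64 of a second, and 36893488147419102 is
	 * roughly 0.002 * 2^64, e.g.
	 *	(uint64_t)-1 / 500 == 36893488147419103
	 * so maxdt above is a ~2 ms threshold for flagging an expensive
	 * timeout(9) handler in the printf() further down.
	 */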
	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
			class->lc_unlock(c_lock);
		if (c_lock == &Giant.lock_object) {
			CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, kernel, , callout__start, c);
	SDT_PROBE1(callout_execute, kernel, , callout__end, c);
	THREAD_SLEEPING_OK();
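	/*
	 * Hedged note (not in the original source): these SDT probes surface
	 * through the callout_execute DTrace provider, with the usual
	 * "__" -> "-" translation of probe names.  Something along the lines
	 * of
	 *	dtrace -n 'callout_execute:::callout-start { @[arg0] = count(); }'
	 * is one way to watch handlers being dispatched; the exact probe
	 * spelling depends on the SDT macros of the kernel in question.
	 */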
		bintime_sub(&bt2, &bt1);
		if (bt2.frac > maxdt) {
			if (lastfunc != c_func || bt2.frac > maxdt * 2) {
				bintime2timespec(&bt2, &ts2);
				printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
				    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));

		c->c_flags &= ~CALLOUT_DFRMIGRATION;
		KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
		new_cpu = cc->cc_migration_cpu;
		new_ticks = cc->cc_migration_ticks;
		new_func = cc->cc_migration_func;
		new_arg = cc->cc_migration_arg;
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);

		c->c_flags &= ~CALLOUT_DFRMIGRATION;
		new_cc = callout_cpu_switch(c, cc, new_cpu);
		panic("migration should not happen");
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
	struct callout_tailq *bucket;

#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS	100
#endif /* MAX_SOFTCLOCK_STEPS */
		c = TAILQ_FIRST(bucket);
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				softclock_call_cc(c, cc, &mpcalls,
				    &lockcalls, &gcalls);
struct callout_handle

	struct callout_handle handle;
	new = SLIST_FIRST(&cc->cc_callfree);
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	struct callout_handle handle;

	if (handle.callout == NULL)
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);

	handle->callout = NULL;
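
/*
 * Illustrative sketch (not part of this file): typical use of the legacy
 * timeout(9)/untimeout(9) interface implemented above.  The names
 * my_softc, my_tick, my_start and my_stop are hypothetical.
 */
struct my_softc {
	struct callout_handle	ch;
};

static void
my_tick(void *arg)
{
	struct my_softc *sc = arg;

	/* ... periodic work ... */
	sc->ch = timeout(my_tick, sc, hz);	/* re-arm roughly one second out */
}

static void
my_start(struct my_softc *sc)
{

	callout_handle_init(&sc->ch);		/* mark the handle idle */
	sc->ch = timeout(my_tick, sc, hz);
}

static void
my_stop(struct my_softc *sc)
{

	untimeout(my_tick, sc, sc->ch);		/* best-effort cancellation */
}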
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)

		CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
		    cancelled ? "cancelled" : "failed to cancel",
		    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_PENDING) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	if (c->c_cpu != cpu) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			c->c_flags |= CALLOUT_DFRMIGRATION;
			CTR5(KTR_CALLOUT,
		    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
		cc = callout_cpu_switch(c, cc, cpu);

	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	struct lock_class *class;
	int use_lock, sq_locked;

	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
	if (sq_locked != 0 && cc != old_cc) {

			panic("migration should not happen");
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			    &cc->cc_lock.lock_object, "codrain",
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			    ("callout wrongly scheduled for migration"));
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	bzero(c, sizeof *c);
		c->c_flags = CALLOUT_RETURNUNLOCKED;
		c->c_lock = &Giant.lock_object;
	struct lock_object *lock;

	bzero(c, sizeof *c);
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
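
/*
 * Illustrative sketch (not part of this file): typical use of the
 * lock-aware initializer above through the callout_init_mtx() wrapper from
 * <sys/callout.h>.  my_softc, my_timer, my_attach and my_detach are
 * hypothetical names used only for illustration.
 */
struct my_softc {
	struct mtx	mtx;
	struct callout	timer;
};

static void
my_timer(void *arg)
{
	struct my_softc *sc = arg;

	mtx_assert(&sc->mtx, MA_OWNED);	/* handler runs with sc->mtx held */
	/* ... periodic work ... */
	callout_reset(&sc->timer, hz, my_timer, sc);	/* re-arm */
}

static void
my_attach(struct my_softc *sc)
{

	mtx_init(&sc->mtx, "my_softc", NULL, MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->mtx, 0);
	callout_reset(&sc->timer, hz, my_timer, sc);
}

static void
my_detach(struct my_softc *sc)
{

	callout_drain(&sc->timer);	/* wait out any running handler */
	mtx_destroy(&sc->mtx);
}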
#ifdef APM_FIXUP_CALLTODO

adjust_timeout_calltodo(time_change)
	struct timeval *time_change;

	register struct callout *p;
	unsigned long delta_ticks;
	if (time_change->tv_sec < 0)
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
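	/*
	 * Worked example (hedged; assumes hz = 1000, so tick = 1000 us):
	 * a 2.5 second adjustment arrives as time_change = { 2, 500000 }.
	 * The tv_sec <= LONG_MAX / 1000000 branch applies, giving
	 *	delta_ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501,
	 * so every entry on the calltodo list below is pulled ~2.5 s closer.
	 */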
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

			delta_ticks = -p->c_time;