#include <sys/cdefs.h>

#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
PMC_SOFT_DEFINE_EX( , , clock, prof, \
    cpu_startprofclock, cpu_stopprofclock);
#endif

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE2(sched, , , tick, "struct thread *", "struct proc *");

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

static struct mtx time_lock;
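/*
 * Sysctl handler for kern.cp_time: report the system-wide time accumulated
 * in each CPU state (user, nice, sys, intr, idle), summed over all CPUs.
 * 32-bit requests get the counters truncated to unsigned int.
 */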
static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif
	int error;

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");

static long empty[CPUSTATES];
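/*
 * Sysctl handler for kern.cp_times: the same CPU-state accounting, reported
 * separately for every CPU up to mp_maxid; slots for absent CPUs are
 * returned as zeroes.
 */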
static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	long *cp_time;
	int error, c;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0,
			    sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0,
			    sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32,
			    sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time,
			    sizeof(long) * CPUSTATES);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
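/*
 * Optional deadlock resolver: a kernel thread that periodically scans all
 * threads and panics if one has been blocked on a turnstile, or sleeping
 * on a lock-backed sleepqueue, for longer than the thresholds configured
 * below.  Wait channels listed in blessed[] are exempt from the sleepqueue
 * check.
 */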
#ifdef DEADLKRES
static const char *blessed[] = {
	"getblk", "so_snd_sx", "so_rcv_sx", NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;
static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	void *wchan;
	int blkticks, i, slpticks, slptype, tryl, tticks;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the allproc lock: if it cannot be
		 * acquired after repeated tries, assume it is deadlocked.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
				panic("%s: possible deadlock detected on allproc_lock\n",
				    __func__);
			tryl++;
			pause("allproc", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (TD_ON_LOCK(td) && ticks < td->td_blktick) {
					/* Blocked on a turnstile. */
					MPASS(td->td_blocked != NULL);
					tticks = ticks - td->td_blktick;
					thread_unlock(td);
					if (tticks > blkticks)
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
				} else if (TD_IS_SLEEPING(td) &&
				    ticks < td->td_blktick) {
					/* Sleeping on a sleepqueue. */
					wchan = td->td_wchan;
					tticks = ticks - td->td_slptick;
					thread_unlock(td);
					slptype = sleepq_type(wchan);
					if ((slptype == SLEEPQ_SX ||
					    slptype == SLEEPQ_LK) &&
					    tticks > slpticks) {
						/* Skip blessed wait channels. */
						for (i = 0; blessed[i] != NULL;
						    i++)
							if (!strcmp(blessed[i],
							    td->td_wmesg))
								break;
						if (blessed[i] == NULL)
							panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
							    __func__, td, tticks);
					}
				} else
					thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds before the next pass. */
		pause("-", sleepfreq * hz);
	}
}
static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);
static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0,
    "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds a thread may validly sleep on a sleepqueue");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds a thread may validly block on a turnstile");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif	/* DEADLKRES */
void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	bzero(cp_time, sizeof(long) * CPUSTATES);
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}
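/*
 * Software watchdog: watchdog_config() arms or disarms it from the
 * watchdog(9) event handler list, the hardclock code counts the interval
 * down, and watchdog_fire() reports interrupt counters and then enters the
 * debugger or panics once the interval expires.
 */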
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */
static void
initclocks(void *dummy)
{
	int i;

	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
	cpu_initclocks();

	/*
	 * Compute profhz and stathz, fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}
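/*
 * Per-CPU part of the periodic timer interrupt: charge ITIMER_VIRTUAL and
 * ITIMER_PROF time to the current process and post the corresponding AST
 * flags on the current thread.
 */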
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	callout_tick();
}
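/*
 * The global timer interrupt handler: advance the ticks counter, run the
 * per-CPU duties, feed the timecounter code and, when no separate
 * statistics clock exists, drive the statistics and profiling clocks too.
 */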
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int(&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock(1);
	cpu_tick_calibration();
	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

static DPCPU_DEFINE(int, pcputicks);	/* Per-CPU version of ticks. */
static int global_hardclock_run;
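/*
 * Batched variant of hardclock(), used when the CPU may have been idle and
 * several ticks are accounted at once.  Only the CPU that wins the race to
 * advance the global ticks value performs the global duties (timecounter,
 * polling, watchdog).
 */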
void
hardclock_cnt(int cnt, int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int *t = DPCPU_PTR(pcputicks);
	int flags, global, newticks;
#ifdef SW_WATCHDOG
	int i;
#endif /* SW_WATCHDOG */

	/* Update per-CPU and possibly global ticks values. */
	*t += cnt;
	do {
		global = ticks;
		newticks = *t - global;
		if (newticks <= 0) {
			if (newticks < -1)
				*t = global - 1;
			newticks = 0;
			break;
		}
	} while (!atomic_cmpset_int(&ticks, global, *t));

	/* Run current process's virtual and profile time, as needed. */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
		    tick * cnt) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
		    tick * cnt) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	/* This CPU won the race and handles the global tick duties. */
	if (newticks > 0) {
		if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
			tc_ticktock(newticks);
#ifdef DEVICE_POLLING
			hardclock_device_poll();
#endif /* DEVICE_POLLING */
			atomic_store_rel_int(&global_hardclock_run, 0);
		}
#ifdef SW_WATCHDOG
		if (watchdog_enabled > 0) {
			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
			if (i > 0 && i <= newticks)
				watchdog_fire();
		}
#endif /* SW_WATCHDOG */
	}
	if (curcpu == CPU_FIRST())
		cpu_tick_calibration();
}
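/*
 * Resynchronize a CPU's private tick counter with the global ticks value.
 */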
void
hardclock_sync(int cpu)
{
	int *t = DPCPU_ID_PTR(cpu, pcputicks);

	*t = ticks;
}
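/*
 * Compute the number of hz ticks in the given struct timeval, rounding up
 * and adding one extra tick so callers never sleep for less than the
 * requested time; the result is clamped to INT_MAX.
 */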
int
tvtohz(struct timeval *tv)
{
	register unsigned long ticks;
	register long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
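/*
 * Enable/disable profiling for a process.  The global profprocs count is
 * protected by time_lock; the profiling clock is started when the first
 * profiled process appears and stopped when the last one goes away.
 */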
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock(&time_lock);
	}
}
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock(&time_lock);
	}
}
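/*
 * Account cnt statistics-clock ticks to the current thread and CPU: update
 * the per-CPU cp_time[] state counters, the thread's tick counts and rusage
 * memory integrals, then call sched_clock() once per tick.
 */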
void
statclock_cnt(int cnt, int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks += cnt;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE] += cnt;
		else
			cp_time[CP_USER] += cnt;
	} else {
		/*
		 * Came from kernel mode: charge interrupt, system or idle
		 * time as appropriate.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks += cnt;
			cp_time[CP_INTR] += cnt;
		} else {
			td->td_pticks += cnt;
			td->td_sticks += cnt;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS] += cnt;
			else
				cp_time[CP_IDLE] += cnt;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
	ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
	ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	SDT_PROBE2(sched, , , tick, td, td->td_proc);
	thread_lock_flags(td, MTX_QUIET);
	for ( ; cnt > 0; cnt--)
		sched_clock(td);
	thread_unlock(td);
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
#endif
}
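/*
 * Account cnt profiling-clock ticks: user-mode samples are charged to the
 * process profile via addupc_intr() when P_PROFIL is set; kernel samples
 * feed the kernel gprof buffers when GPROF is configured.
 */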
void
profclock_cnt(int cnt, int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/* Record the tick against the profiled process. */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, cnt);
	}
#ifdef GPROF
	else {
		/* Kernel statistics are just like addupc_intr, only easier. */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize)
				KCOUNT(g, i) += cnt;
		}
	}
#endif
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, prof, td->td_intr_frame);
#endif
}
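/*
 * Sysctl handler for kern.clockrate: return hz, tick, profhz and stathz as
 * a struct clockinfo.
 */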
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");
#ifdef SW_WATCHDOG
static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}
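/*
 * Handle an expired software watchdog: dump the interrupt counters, then
 * enter the kernel debugger if it is available and attended, otherwise
 * panic.
 */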
static void
watchdog_fire(void)
{
	u_long *curintr;
	uint64_t inttotal;
	char *curname;
	int nintr;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = sintrcnt / sizeof(u_long);

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}
#endif /* SW_WATCHDOG */