FreeBSD kernel kern code
kern_clock.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

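/*
 * Software PMC events fired from the clock handlers below (see the
 * PMC_SOFT_CALL_TF() call sites); the "prof" event additionally hooks
 * the CPU profile clock start/stop routines via its callbacks.
 */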
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
PMC_SOFT_DEFINE_EX( , , clock, prof, \
    cpu_startprofclock, cpu_stopprofclock);
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE2(sched, , , tick, "struct thread *", "struct proc *");

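/*
 * Export the aggregated kern.cp_time statistics.  When the request has
 * SCTL_MASK32 set (a 32-bit consumer), each long counter is truncated to
 * an unsigned int before being copied out.
 */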
static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");

static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");

#ifdef DEADLKRES
static const char *blessed[] = {
	"getblk",
	"so_snd_sx",
	"so_rcv_sx",
	NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

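/*
 * Deadlock resolver kthread: periodically scan every thread in the system
 * and panic if one has been blocked on a turnstile, or sleeping on an
 * sx/lockmgr sleepqueue, for longer than the configured thresholds, unless
 * its wait message is on the blessed list above.
 */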
static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	void *wchan;
	int blkticks, i, slpticks, slptype, tryl, tticks;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx_lock in order to avoid a
		 * possible priority inversion problem leading to
		 * starvation.
		 * If the lock can't be held after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
				panic("%s: possible deadlock detected on allproc_lock\n",
				    __func__);
			tryl++;
			pause("allproc", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			FOREACH_THREAD_IN_PROC(p, td) {

				/*
				 * Once a thread is found in an
				 * "interesting" state, a possible
				 * ticks wrap-up needs to be checked.
				 */
				thread_lock(td);
				if (TD_ON_LOCK(td) && ticks < td->td_blktick) {

					/*
					 * The thread should be blocked on a
					 * turnstile, simply check if the
					 * turnstile channel is in good state.
					 */
					MPASS(td->td_blocked != NULL);

					tticks = ticks - td->td_blktick;
					thread_unlock(td);
					if (tticks > blkticks) {

						/*
						 * According to the provided
						 * thresholds, this thread has
						 * been stuck for too long on
						 * a turnstile.
						 */
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else if (TD_IS_SLEEPING(td) &&
				    TD_ON_SLEEPQ(td) &&
				    ticks < td->td_blktick) {

					/*
					 * Check if the thread is sleeping on
					 * a lock, otherwise skip the check.
					 * Drop the thread lock in order to
					 * avoid a LOR with the sleepqueue
					 * spinlock.
					 */
					wchan = td->td_wchan;
					tticks = ticks - td->td_slptick;
					thread_unlock(td);
					slptype = sleepq_type(wchan);
					if ((slptype == SLEEPQ_SX ||
					    slptype == SLEEPQ_LK) &&
					    tticks > slpticks) {

						/*
						 * According to the provided
						 * thresholds, this thread has
						 * been stuck for too long on
						 * a sleepqueue.
						 * However, being on a
						 * sleepqueue, we might still
						 * check the blessed list.
						 */
						tryl = 0;
						for (i = 0; blessed[i] != NULL;
						    i++) {
							if (!strcmp(blessed[i],
							    td->td_wmesg)) {
								tryl = 1;
								break;
							}
						}
						if (tryl != 0) {
							tryl = 0;
							continue;
						}
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
						panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else
					thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("-", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0,
    "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds a thread may sleep on a sleepqueue before it is considered stuck");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds a thread may block on a turnstile before it is considered stuck");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif	/* DEADLKRES */

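/*
 * Sum the per-CPU cp_time[] counters into the CPUSTATES-sized array
 * supplied by the caller.
 */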
void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;			/* statistics clock's frequency */
int	profhz;			/* profiling clock's frequency */
int	profprocs;		/* number of processes being profiled */
volatile int	ticks;
int	psratio;		/* ratio: prof / stat */

static DPCPU_DEFINE(int, pcputicks);	/* Per-CPU version of ticks. */
/* Set while one CPU runs the global once-per-tick duties in hardclock_cnt(). */
static int global_hardclock_run = 0;

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick(1);
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	callout_tick();
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int(&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock(1);

	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

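/*
 * Batched variant of hardclock(): account for cnt hardclock ticks at once.
 * Each CPU keeps a private tick counter; the CPU that first advances past
 * the global ticks value also performs the global once-per-tick work.
 */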
void
hardclock_cnt(int cnt, int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int *t = DPCPU_PTR(pcputicks);
	int flags, global, newticks;
#ifdef SW_WATCHDOG
	int i;
#endif /* SW_WATCHDOG */

	/*
	 * Update per-CPU and possibly global ticks values.
	 */
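	/*
	 * Merge loop: publish this CPU's counter as the new global ticks
	 * value using compare-and-set.  If another CPU has already advanced
	 * ticks past our value, newticks stays 0 and we skip the global
	 * duties; a counter lagging by more than one tick is resynced.
	 */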
	*t += cnt;
	do {
		global = ticks;
		newticks = *t - global;
		if (newticks <= 0) {
			if (newticks < -1)
				*t = global - 1;
			newticks = 0;
			break;
		}
	} while (!atomic_cmpset_int(&ticks, global, *t));

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
		    tick * cnt) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
		    tick * cnt) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick(cnt);
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	callout_tick();
	/* We are in charge of handling this tick's global duties. */
	if (newticks > 0) {
		/* It is dangerous and unnecessary to run these concurrently. */
		if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
			tc_ticktock(newticks);
#ifdef DEVICE_POLLING
			/* This is very short and quick. */
			hardclock_device_poll();
#endif /* DEVICE_POLLING */
			atomic_store_rel_int(&global_hardclock_run, 0);
		}
#ifdef SW_WATCHDOG
		if (watchdog_enabled > 0) {
			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
			if (i > 0 && i <= newticks)
				watchdog_fire();
		}
#endif /* SW_WATCHDOG */
	}
	if (curcpu == CPU_FIRST())
		cpu_tick_calibration();
}

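/*
 * Resynchronize a CPU's private tick counter with the global ticks value
 * (used, for instance, when a CPU's event timer has been stopped, so the
 * missed ticks are not replayed).
 */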
void
hardclock_sync(int cpu)
{
	int *t = DPCPU_ID_PTR(cpu, pcputicks);

	*t = ticks;
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
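	/*
	 * Worked example (assuming the classic hz = 100, so tick = 10000
	 * usec): for tv = { 1, 500000 }, the first case applies and
	 * ticks = (1 * 1000000 + 500000 + 9999) / 10000 + 1 = 151, i.e.
	 * 1.5 seconds rounds to 150 ticks, plus one for the current tick
	 * already in progress.
	 */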
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{

	statclock_cnt(1, usermode);
}

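/*
 * Batched variant: account for cnt statistics clock events at once, e.g.
 * when an event timer delivers several coalesced ticks.
 */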
void
statclock_cnt(int cnt, int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks += cnt;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE] += cnt;
		else
			cp_time[CP_USER] += cnt;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks += cnt;
			cp_time[CP_INTR] += cnt;
		} else {
			td->td_pticks += cnt;
			td->td_sticks += cnt;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS] += cnt;
			else
				cp_time[CP_IDLE] += cnt;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
	ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
	ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	SDT_PROBE2(sched, , , tick, td, td->td_proc);
	thread_lock_flags(td, MTX_QUIET);
	for ( ; cnt > 0; cnt--)
		sched_clock(td);
	thread_unlock(td);
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
#endif
}

void
profclock(int usermode, uintfptr_t pc)
{

	profclock_cnt(1, usermode, pc);
}

void
profclock_cnt(int cnt, int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, cnt);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i) += cnt;
			}
		}
	}
#endif
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, prof, td->td_intr_frame);
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");

#ifdef SW_WATCHDOG

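/*
 * Configure the software watchdog in response to a watchdog(4) event.
 * The WD_INTERVAL bits encode the timeout as a power of two; WD_TO_1SEC
 * corresponds to roughly one second, so each step above it doubles the
 * number of hardclock ticks before watchdog_fire() triggers.
 */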
static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	uint64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = sintrcnt / sizeof(u_long);

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */