FreeBSD kernel kern code
kern_clocksource.c
1 /*-
2  * Copyright (c) 2010-2012 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer,
10  * without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in the
13  * documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$BSDSUniX$");
29 
30 /*
31  * Common routines to manage event timer hardware.
32  */
33 
34 #include "opt_device_polling.h"
35 #include "opt_kdtrace.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/bus.h>
40 #include <sys/lock.h>
41 #include <sys/kdb.h>
42 #include <sys/ktr.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/sched.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49 #include <sys/timeet.h>
50 #include <sys/timetc.h>
51 
52 #include <machine/atomic.h>
53 #include <machine/clock.h>
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
56 
57 #ifdef KDTRACE_HOOKS
58 #include <sys/dtrace_bsd.h>
59 cyclic_clock_func_t cyclic_clock_func = NULL;
60 #endif
61 
62 int cpu_deepest_sleep = 0; /* Deepest Cx state available. */
63 int cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
64 int cpu_disable_c3_sleep = 0; /* Timer dies in C3. */
65 
66 static void setuptimer(void);
67 static void loadtimer(struct bintime *now, int first);
68 static int doconfigtimer(void);
69 static void configtimer(int start);
70 static int round_freq(struct eventtimer *et, int freq);
71 
72 static void getnextcpuevent(struct bintime *event, int idle);
73 static void getnextevent(struct bintime *event);
74 static int handleevents(struct bintime *now, int fake);
75 #ifdef SMP
76 static void cpu_new_callout(int cpu, int ticks);
77 #endif
78 
79 static struct mtx et_hw_mtx;
80 
81 #define ET_HW_LOCK(state) \
82  { \
83  if (timer->et_flags & ET_FLAGS_PERCPU) \
84  mtx_lock_spin(&(state)->et_hw_mtx); \
85  else \
86  mtx_lock_spin(&et_hw_mtx); \
87  }
88 
89 #define ET_HW_UNLOCK(state) \
90  { \
91  if (timer->et_flags & ET_FLAGS_PERCPU) \
92  mtx_unlock_spin(&(state)->et_hw_mtx); \
93  else \
94  mtx_unlock_spin(&et_hw_mtx); \
95  }
96 
97 static struct eventtimer *timer = NULL;
98 static struct bintime timerperiod; /* Timer period for periodic mode. */
99 static struct bintime hardperiod; /* hardclock() events period. */
100 static struct bintime statperiod; /* statclock() events period. */
101 static struct bintime profperiod; /* profclock() events period. */
102 static struct bintime nexttick; /* Next global timer tick time. */
103 static struct bintime nexthard; /* Next global hardclock() event. */
104 static u_int busy = 0; /* Reconfiguration is in progress. */
105 static int profiling = 0; /* Profiling events enabled. */
106 
107 static char timername[32]; /* Wanted timer. */
108 TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));
109 
110 static int singlemul = 0; /* Multiplier for periodic mode. */
111 TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
112 SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
113  0, "Multiplier for periodic mode");
114 
115 static u_int idletick = 0; /* Run periodic events when idle. */
116 TUNABLE_INT("kern.eventtimer.idletick", &idletick);
117 SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
118  0, "Run periodic events when idle");
119 
120 static u_int activetick = 1; /* Run all periodic events when active. */
121 TUNABLE_INT("kern.eventtimer.activetick", &activetick);
122 SYSCTL_UINT(_kern_eventtimer, OID_AUTO, activetick, CTLFLAG_RW, &activetick,
123  0, "Run all periodic events when active");
124 
125 static int periodic = 0; /* Periodic or one-shot mode. */
126 static int want_periodic = 0; /* What mode to prefer. */
127 TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);
128 
129 struct pcpu_state {
130  struct mtx et_hw_mtx; /* Per-CPU timer mutex. */
131  u_int action; /* Reconfiguration requests. */
132  u_int handle; /* Immediate handle requests. */
133  struct bintime now; /* Last tick time. */
134  struct bintime nextevent; /* Next scheduled event on this CPU. */
135  struct bintime nexttick; /* Next timer tick time. */
136  struct bintime nexthard; /* Next hardclock() event. */
137  struct bintime nextstat; /* Next statclock() event. */
138  struct bintime nextprof; /* Next profclock() event. */
139 #ifdef KDTRACE_HOOKS
140  struct bintime nextcyc; /* Next OpenSolaris cyclics event. */
141 #endif
142  int ipi; /* This CPU needs IPI. */
143  int idle; /* This CPU is in idle mode. */
144 };
145 
146 static DPCPU_DEFINE(struct pcpu_state, timerstate);
147 
148 #define FREQ2BT(freq, bt) \
149 { \
150  (bt)->sec = 0; \
151  (bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \
152 }
153 #define BT2FREQ(bt) \
154  (((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) / \
155  ((bt)->frac >> 1))
156 
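
/*
 * Editor's example (not part of the original file): a user-space sketch of
 * the FREQ2BT/BT2FREQ conversions above.  struct bintime stores fractions
 * of a second in units of 2^-64, so one period of a "freq" Hz timer is
 * roughly 2^64 / freq; 0x8000000000000000 is 2^63, and the shifts keep the
 * arithmetic within 64 bits.  The struct below is a stand-in for the
 * kernel's struct bintime.
 */
#include <stdio.h>
#include <stdint.h>

struct bt { int64_t sec; uint64_t frac; };

int
main(void)
{
	struct bt bt;
	int freq = 1000;	/* a 1 kHz timer */

	/* FREQ2BT: one period as a binary fraction of a second. */
	bt.sec = 0;
	bt.frac = ((uint64_t)0x8000000000000000 / freq) << 1;
	/* BT2FREQ: recover the frequency, rounding to nearest. */
	printf("frac = %#jx -> %ju Hz\n", (uintmax_t)bt.frac,
	    (uintmax_t)(((uint64_t)0x8000000000000000 + (bt.frac >> 2)) /
	    (bt.frac >> 1)));
	return (0);
}
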
157 /*
158  * Timer broadcast IPI handler.
159  */
160 int
161 hardclockintr(void)
162 {
163  struct bintime now;
164  struct pcpu_state *state;
165  int done;
166 
167  if (doconfigtimer() || busy)
168  return (FILTER_HANDLED);
169  state = DPCPU_PTR(timerstate);
170  now = state->now;
171  CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
172  curcpu, now.sec, (unsigned int)(now.frac >> 32),
173  (unsigned int)(now.frac & 0xffffffff));
174  done = handleevents(&now, 0);
175  return (done ? FILTER_HANDLED : FILTER_STRAY);
176 }
177 
178 /*
179  * Handle all events for the specified time on this CPU.
180  */
181 static int
182 handleevents(struct bintime *now, int fake)
183 {
184  struct bintime t;
185  struct trapframe *frame;
186  struct pcpu_state *state;
187  uintfptr_t pc;
188  int usermode;
189  int done, runs;
190 
191  CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
192  curcpu, now->sec, (unsigned int)(now->frac >> 32),
193  (unsigned int)(now->frac & 0xffffffff));
194  done = 0;
195  if (fake) {
196  frame = NULL;
197  usermode = 0;
198  pc = 0;
199  } else {
200  frame = curthread->td_intr_frame;
201  usermode = TRAPF_USERMODE(frame);
202  pc = TRAPF_PC(frame);
203  }
204 
205  state = DPCPU_PTR(timerstate);
206 
207  runs = 0;
208  while (bintime_cmp(now, &state->nexthard, >=)) {
209  bintime_add(&state->nexthard, &hardperiod);
210  runs++;
211  }
212  if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 &&
213  bintime_cmp(&state->nexthard, &nexthard, >))
214  nexthard = state->nexthard;
215  if (runs && fake < 2) {
216  hardclock_cnt(runs, usermode);
217  done = 1;
218  }
219  runs = 0;
220  while (bintime_cmp(now, &state->nextstat, >=)) {
221  bintime_add(&state->nextstat, &statperiod);
222  runs++;
223  }
224  if (runs && fake < 2) {
225  statclock_cnt(runs, usermode);
226  done = 1;
227  }
228  if (profiling) {
229  runs = 0;
230  while (bintime_cmp(now, &state->nextprof, >=)) {
231  bintime_add(&state->nextprof, &profperiod);
232  runs++;
233  }
234  if (runs && !fake) {
235  profclock_cnt(runs, usermode, pc);
236  done = 1;
237  }
238  } else
239  state->nextprof = state->nextstat;
240 
241 #ifdef KDTRACE_HOOKS
242  if (fake == 0 && cyclic_clock_func != NULL &&
243  state->nextcyc.sec != -1 &&
244  bintime_cmp(now, &state->nextcyc, >=)) {
245  state->nextcyc.sec = -1;
246  (*cyclic_clock_func)(frame);
247  }
248 #endif
249 
250  getnextcpuevent(&t, 0);
251  if (fake == 2) {
252  state->nextevent = t;
253  return (done);
254  }
255  ET_HW_LOCK(state);
256  if (!busy) {
257  state->idle = 0;
258  state->nextevent = t;
259  loadtimer(now, 0);
260  }
261  ET_HW_UNLOCK(state);
262  return (done);
263 }
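
/*
 * Editor's example (not part of the original file): the catch-up pattern
 * handleevents() uses above.  Rather than one callback per interrupt, it
 * counts how many whole periods fit between the last scheduled event and
 * "now", then delivers them in a single batch via hardclock_cnt(runs, ...).
 * Plain integers stand in for struct bintime here.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t now = 1037;	/* current time, arbitrary units */
	uint64_t next = 1000;	/* next scheduled hardclock() event */
	uint64_t period = 10;	/* hardperiod */
	int runs = 0;

	while (now >= next) {	/* same shape as the bintime_cmp() loops */
		next += period;
		runs++;
	}
	/* The kernel would now call hardclock_cnt(runs, usermode) once. */
	printf("runs = %d, next event at %ju\n", runs, (uintmax_t)next);
	return (0);
}
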
264 
265 /*
266  * Schedule binuptime of the next event on the current CPU.
267  */
268 static void
269 getnextcpuevent(struct bintime *event, int idle)
270 {
271  struct bintime tmp;
272  struct pcpu_state *state;
273  int skip;
274 
275  state = DPCPU_PTR(timerstate);
276  /* Handle hardclock() events. */
277  *event = state->nexthard;
278  if (idle || (!activetick && !profiling &&
279  (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
280  skip = idle ? 4 : (stathz / 2);
281  if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
282  skip = tc_min_ticktock_freq;
283  skip = callout_tickstofirst(hz / skip) - 1;
284  CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
285  tmp = hardperiod;
286  bintime_mul(&tmp, skip);
287  bintime_add(event, &tmp);
288  }
289  if (!idle) { /* If CPU is active - handle other types of events. */
290  if (bintime_cmp(event, &state->nextstat, >))
291  *event = state->nextstat;
292  if (profiling && bintime_cmp(event, &state->nextprof, >))
293  *event = state->nextprof;
294  }
295 #ifdef KDTRACE_HOOKS
296  if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
297  *event = state->nextcyc;
298 #endif
299 }
300 
301 /*
302  * Schedule binuptime of the next event on all CPUs.
303  */
304 static void
305 getnextevent(struct bintime *event)
306 {
307  struct pcpu_state *state;
308 #ifdef SMP
309  int cpu;
310 #endif
311  int c, nonidle;
312 
313  state = DPCPU_PTR(timerstate);
314  *event = state->nextevent;
315  c = curcpu;
316  nonidle = !state->idle;
317  if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
318 #ifdef SMP
319  CPU_FOREACH(cpu) {
320  if (curcpu == cpu)
321  continue;
322  state = DPCPU_ID_PTR(cpu, timerstate);
323  nonidle += !state->idle;
324  if (bintime_cmp(event, &state->nextevent, >)) {
325  *event = state->nextevent;
326  c = cpu;
327  }
328  }
329 #endif
330  if (nonidle != 0 && bintime_cmp(event, &nexthard, >))
331  *event = nexthard;
332  }
333  CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
334  curcpu, event->sec, (unsigned int)(event->frac >> 32),
335  (unsigned int)(event->frac & 0xffffffff), c);
336 }
337 
338 /* Hardware timer callback function. */
339 static void
340 timercb(struct eventtimer *et, void *arg)
341 {
342  struct bintime now;
343  struct bintime *next;
344  struct pcpu_state *state;
345 #ifdef SMP
346  int cpu, bcast;
347 #endif
348 
349  /* Do not touch anything if somebody is reconfiguring timers. */
350  if (busy)
351  return;
352  /* Update present and next tick times. */
353  state = DPCPU_PTR(timerstate);
354  if (et->et_flags & ET_FLAGS_PERCPU) {
355  next = &state->nexttick;
356  } else
357  next = &nexttick;
358  if (periodic) {
359  now = *next; /* Ex-next tick time becomes present time. */
360  bintime_add(next, &timerperiod); /* Next tick in 1 period. */
361  } else {
362  binuptime(&now); /* Get present time from hardware. */
363  next->sec = -1; /* Next tick is not scheduled yet. */
364  }
365  state->now = now;
366  CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
367  curcpu, now.sec, (unsigned int)(now.frac >> 32),
368  (unsigned int)(now.frac & 0xffffffff));
369 
370 #ifdef SMP
371  /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
372  bcast = 0;
373  if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
374  CPU_FOREACH(cpu) {
375  state = DPCPU_ID_PTR(cpu, timerstate);
376  ET_HW_LOCK(state);
377  state->now = now;
378  if (bintime_cmp(&now, &state->nextevent, >=)) {
379  state->nextevent.sec++;
380  if (curcpu != cpu) {
381  state->ipi = 1;
382  bcast = 1;
383  }
384  }
385  ET_HW_UNLOCK(state);
386  }
387  }
388 #endif
389 
390  /* Handle events for this time on this CPU. */
391  handleevents(&now, 0);
392 
393 #ifdef SMP
394  /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
395  if (bcast) {
396  CPU_FOREACH(cpu) {
397  if (curcpu == cpu)
398  continue;
399  state = DPCPU_ID_PTR(cpu, timerstate);
400  if (state->ipi) {
401  state->ipi = 0;
402  ipi_cpu(cpu, IPI_HARDCLOCK);
403  }
404  }
405  }
406 #endif
407 }
408 
409 /*
410  * Load new value into hardware timer.
411  */
412 static void
413 loadtimer(struct bintime *now, int start)
414 {
415  struct pcpu_state *state;
416  struct bintime new;
417  struct bintime *next;
418  uint64_t tmp;
419  int eq;
420 
421  if (timer->et_flags & ET_FLAGS_PERCPU) {
422  state = DPCPU_PTR(timerstate);
423  next = &state->nexttick;
424  } else
425  next = &nexttick;
426  if (periodic) {
427  if (start) {
428  /*
429  * Try to start all periodic timers aligned
430  * to period to make events synchronous.
431  */
432  tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
433  tmp = (tmp % (timerperiod.frac >> 28)) << 28;
434  new.sec = 0;
435  new.frac = timerperiod.frac - tmp;
436  if (new.frac < tmp) /* Left less than passed. */
437  bintime_add(&new, &timerperiod);
438  CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
439  curcpu, now->sec, (unsigned int)(now->frac >> 32),
440  new.sec, (unsigned int)(new.frac >> 32));
441  *next = new;
442  bintime_add(next, now);
443  et_start(timer, &new, &timerperiod);
444  }
445  } else {
446  getnextevent(&new);
447  eq = bintime_cmp(&new, next, ==);
448  CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
449  curcpu, new.sec, (unsigned int)(new.frac >> 32),
450  (unsigned int)(new.frac & 0xffffffff),
451  eq);
452  if (!eq) {
453  *next = new;
454  bintime_sub(&new, now);
455  et_start(timer, &new, NULL);
456  }
457  }
458 }
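
/*
 * Editor's example (not part of the original file): the phase-alignment
 * math in loadtimer() above.  ((sec << 36) + (frac >> 28)) folds a bintime
 * into 28.36 fixed point so that "now % period" fits in 64 bits; the first
 * tick then fires after (period - now % period), which starts every
 * periodic timer on a common period boundary.  The values below are
 * assumptions: now = 2.25 s and a 1 kHz (1 ms) period.
 */
#include <stdio.h>
#include <stdint.h>

struct bt { int64_t sec; uint64_t frac; };

int
main(void)
{
	struct bt now = { 2, (uint64_t)1 << 62 };	/* 2.25 s */
	struct bt period = { 0, ((uint64_t)0x8000000000000000 / 1000) << 1 };
	struct bt first;
	uint64_t tmp;

	tmp = ((uint64_t)now.sec << 36) + (now.frac >> 28);
	tmp = (tmp % (period.frac >> 28)) << 28;
	first.sec = 0;
	first.frac = period.frac - tmp;	/* time left to the boundary */
	printf("first tick in %.9f s\n",
	    first.frac / 18446744073709551616.0);	/* frac / 2^64 */
	return (0);
}
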
459 
460 /*
461  * Prepare event timer parameters after configuration changes.
462  */
463 static void
464 setuptimer(void)
465 {
466  int freq;
467 
468  if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
469  periodic = 0;
470  else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
471  periodic = 1;
472  singlemul = MIN(MAX(singlemul, 1), 20);
473  freq = hz * singlemul;
474  while (freq < (profiling ? profhz : stathz))
475  freq += hz;
476  freq = round_freq(timer, freq);
477  FREQ2BT(freq, &timerperiod);
478 }
479 
480 /*
481  * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
482  */
483 static int
484 doconfigtimer(void)
485 {
486  struct bintime now;
487  struct pcpu_state *state;
488 
489  state = DPCPU_PTR(timerstate);
490  switch (atomic_load_acq_int(&state->action)) {
491  case 1:
492  binuptime(&now);
493  ET_HW_LOCK(state);
494  loadtimer(&now, 1);
495  ET_HW_UNLOCK(state);
496  state->handle = 0;
497  atomic_store_rel_int(&state->action, 0);
498  return (1);
499  case 2:
500  ET_HW_LOCK(state);
501  et_stop(timer);
502  ET_HW_UNLOCK(state);
503  state->handle = 0;
504  atomic_store_rel_int(&state->action, 0);
505  return (1);
506  }
507  if (atomic_readandclear_int(&state->handle) && !busy) {
508  binuptime(&now);
509  handleevents(&now, 0);
510  return (1);
511  }
512  return (0);
513 }
514 
515 /*
516  * Reconfigure specified timer.
517  * For per-CPU timers, use an IPI to make the other CPUs reconfigure.
518  */
519 static void
520 configtimer(int start)
521 {
522  struct bintime now, next;
523  struct pcpu_state *state;
524  int cpu;
525 
526  if (start) {
527  setuptimer();
528  binuptime(&now);
529  }
530  critical_enter();
531  ET_HW_LOCK(DPCPU_PTR(timerstate));
532  if (start) {
533  /* Initialize time machine parameters. */
534  next = now;
535  bintime_add(&next, &timerperiod);
536  if (periodic)
537  nexttick = next;
538  else
539  nexttick.sec = -1;
540  CPU_FOREACH(cpu) {
541  state = DPCPU_ID_PTR(cpu, timerstate);
542  state->now = now;
543  state->nextevent = next;
544  if (periodic)
545  state->nexttick = next;
546  else
547  state->nexttick.sec = -1;
548  state->nexthard = next;
549  state->nextstat = next;
550  state->nextprof = next;
551  hardclock_sync(cpu);
552  }
553  busy = 0;
554  /* Start global timer or per-CPU timer of this CPU. */
555  loadtimer(&now, 1);
556  } else {
557  busy = 1;
558  /* Stop global timer or per-CPU timer of this CPU. */
559  et_stop(timer);
560  }
561  ET_HW_UNLOCK(DPCPU_PTR(timerstate));
562 #ifdef SMP
563  /* If the timer is global or there are no other CPUs yet, we are done. */
564  if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
565  critical_exit();
566  return;
567  }
568  /* Set reconfigure flags for other CPUs. */
569  CPU_FOREACH(cpu) {
570  state = DPCPU_ID_PTR(cpu, timerstate);
571  atomic_store_rel_int(&state->action,
572  (cpu == curcpu) ? 0 : ( start ? 1 : 2));
573  }
574  /* Broadcast reconfigure IPI. */
575  ipi_all_but_self(IPI_HARDCLOCK);
576  /* Wait for reconfiguration to complete. */
577 restart:
578  cpu_spinwait();
579  CPU_FOREACH(cpu) {
580  if (cpu == curcpu)
581  continue;
582  state = DPCPU_ID_PTR(cpu, timerstate);
583  if (atomic_load_acq_int(&state->action))
584  goto restart;
585  }
586 #endif
587  critical_exit();
588 }
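
/*
 * Editor's example (not part of the original file): the action/acknowledge
 * handshake used by configtimer() and doconfigtimer() above, reduced to two
 * threads.  The requester publishes an action code with release semantics
 * and spins until the target clears it; the target observes it with acquire
 * semantics (as atomic_load_acq_int() does), acts, and clears it to ack.
 * Build with -pthread.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_uint action;

static void *
target_cpu(void *arg)
{
	unsigned a;

	/* Stands in for the IPI handler that calls doconfigtimer(). */
	while ((a = atomic_load_explicit(&action, memory_order_acquire)) == 0)
		;
	printf("target: performing action %u\n", a);	/* 1=start, 2=stop */
	atomic_store_explicit(&action, 0, memory_order_release);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, target_cpu, NULL);
	atomic_store_explicit(&action, 1, memory_order_release);
	while (atomic_load_explicit(&action, memory_order_acquire) != 0)
		;	/* the cpu_spinwait() loop in configtimer() */
	printf("requester: action acknowledged\n");
	pthread_join(t, NULL);
	return (0);
}
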
589 
590 /*
591  * Calculate nearest frequency supported by hardware timer.
592  */
593 static int
594 round_freq(struct eventtimer *et, int freq)
595 {
596  uint64_t div;
597 
598  if (et->et_frequency != 0) {
599  div = lmax((et->et_frequency + freq / 2) / freq, 1);
600  if (et->et_flags & ET_FLAGS_POW2DIV)
601  div = 1 << (flsl(div + div / 2) - 1);
602  freq = (et->et_frequency + div / 2) / div;
603  }
604  if (et->et_min_period.sec > 0)
605  freq = 0;
606  else if (et->et_min_period.frac != 0)
607  freq = min(freq, BT2FREQ(&et->et_min_period));
608  if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
609  freq = max(freq, BT2FREQ(&et->et_max_period));
610  return (freq);
611 }
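
/*
 * Editor's example (not part of the original file): the divisor rounding
 * round_freq() performs above.  A timer driven by a fixed base clock can
 * only fire at et_frequency / div for integer div, so a requested rate is
 * rounded to the nearest reachable one.  The 14318182 Hz base is an
 * assumption (an HPET-like 14.318 MHz clock).
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t et_frequency = 14318182;	/* assumed base clock */
	int freq = 1000;			/* requested rate */
	uint64_t div;

	div = (et_frequency + freq / 2) / freq;	/* nearest divisor */
	if (div < 1)
		div = 1;
	freq = (et_frequency + div / 2) / div;	/* reachable rate */
	printf("div = %ju -> freq = %d Hz\n", (uintmax_t)div, freq);
	return (0);
}
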
612 
613 /*
614  * Configure and start event timers (BSP part).
615  */
616 void
617 cpu_initclocks_bsp(void)
618 {
619  struct pcpu_state *state;
620  int base, div, cpu;
621 
622  mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
623  CPU_FOREACH(cpu) {
624  state = DPCPU_ID_PTR(cpu, timerstate);
625  mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
626 #ifdef KDTRACE_HOOKS
627  state->nextcyc.sec = -1;
628 #endif
629  }
630 #ifdef SMP
631  callout_new_inserted = cpu_new_callout;
632 #endif
633  periodic = want_periodic;
634  /* Grab the requested timer or the best of those present. */
635  if (timername[0])
636  timer = et_find(timername, 0, 0);
637  if (timer == NULL && periodic) {
638  timer = et_find(NULL,
639  ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
640  }
641  if (timer == NULL) {
642  timer = et_find(NULL,
643  ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
644  }
645  if (timer == NULL && !periodic) {
646  timer = et_find(NULL,
647  ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
648  }
649  if (timer == NULL)
650  panic("No usable event timer found!");
651  et_init(timer, timercb, NULL, NULL);
652 
653  /* Adapt to timer capabilities. */
654  if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
655  periodic = 0;
656  else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
657  periodic = 1;
658  if (timer->et_flags & ET_FLAGS_C3STOP)
659  cpu_disable_c3_sleep++;
660 
661  /*
662  * We honor the requested 'hz' value.
663  * We want to run stathz in the neighborhood of 128hz.
664  * We would like profhz to run as often as possible.
665  */
666  if (singlemul <= 0 || singlemul > 20) {
667  if (hz >= 1500 || (hz % 128) == 0)
668  singlemul = 1;
669  else if (hz >= 750)
670  singlemul = 2;
671  else
672  singlemul = 4;
673  }
674  if (periodic) {
675  base = round_freq(timer, hz * singlemul);
676  singlemul = max((base + hz / 2) / hz, 1);
677  hz = (base + singlemul / 2) / singlemul;
678  if (base <= 128)
679  stathz = base;
680  else {
681  div = base / 128;
682  if (div >= singlemul && (div % singlemul) == 0)
683  div++;
684  stathz = base / div;
685  }
686  profhz = stathz;
687  while ((profhz + stathz) <= 128 * 64)
688  profhz += stathz;
689  profhz = round_freq(timer, profhz);
690  } else {
691  hz = round_freq(timer, hz);
692  stathz = round_freq(timer, 127);
693  profhz = round_freq(timer, stathz * 64);
694  }
695  tick = 1000000 / hz;
696  FREQ2BT(hz, &hardperiod);
697  FREQ2BT(stathz, &statperiod);
698  FREQ2BT(profhz, &profperiod);
699  ET_LOCK();
700  configtimer(1);
701  ET_UNLOCK();
702 }
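
/*
 * Editor's example (not part of the original file): the periodic-mode rate
 * derivation performed by cpu_initclocks_bsp() above, with round_freq()
 * assumed to be exact (a timer that can run at any frequency).  For
 * hz = 1000 this picks singlemul = 2, giving stathz = 133 and profhz = 8113.
 */
#include <stdio.h>

int
main(void)
{
	int hz = 1000, singlemul = 0, base, div, stathz, profhz;

	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	base = hz * singlemul;		/* round_freq() assumed exact */
	if (base <= 128)
		stathz = base;
	else {
		div = base / 128;
		if (div >= singlemul && (div % singlemul) == 0)
			div++;
		stathz = base / div;
	}
	profhz = stathz;
	while ((profhz + stathz) <= 128 * 64)
		profhz += stathz;
	printf("hz = %d, stathz = %d, profhz = %d\n", hz, stathz, profhz);
	return (0);
}
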
703 
704 /*
705  * Start per-CPU event timers on APs.
706  */
707 void
708 cpu_initclocks_ap(void)
709 {
710  struct bintime now;
711  struct pcpu_state *state;
712 
713  state = DPCPU_PTR(timerstate);
714  binuptime(&now);
715  ET_HW_LOCK(state);
716  if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 && periodic) {
717  state->now = nexttick;
718  bintime_sub(&state->now, &timerperiod);
719  } else
720  state->now = now;
721  hardclock_sync(curcpu);
722  handleevents(&state->now, 2);
723  if (timer->et_flags & ET_FLAGS_PERCPU)
724  loadtimer(&now, 1);
725  ET_HW_UNLOCK(state);
726 }
727 
728 /*
729  * Switch to profiling clock rates.
730  */
731 void
732 cpu_startprofclock(void)
733 {
734 
735  ET_LOCK();
736  if (profiling == 0) {
737  if (periodic) {
738  configtimer(0);
739  profiling = 1;
740  configtimer(1);
741  } else
742  profiling = 1;
743  } else
744  profiling++;
745  ET_UNLOCK();
746 }
747 
748 /*
749  * Switch to regular clock rates.
750  */
751 void
752 cpu_stopprofclock(void)
753 {
754 
755  ET_LOCK();
756  if (profiling == 1) {
757  if (periodic) {
758  configtimer(0);
759  profiling = 0;
760  configtimer(1);
761  } else
762  profiling = 0;
763  } else
764  profiling--;
765  ET_UNLOCK();
766 }
767 
768 /*
769  * Switch to idle mode (all ticks handled).
770  */
771 void
772 cpu_idleclock(void)
773 {
774  struct bintime now, t;
775  struct pcpu_state *state;
776 
777  if (idletick || busy ||
778  (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
779 #ifdef DEVICE_POLLING
780  || curcpu == CPU_FIRST()
781 #endif
782  )
783  return;
784  state = DPCPU_PTR(timerstate);
785  if (periodic)
786  now = state->now;
787  else
788  binuptime(&now);
789  CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
790  curcpu, now.sec, (unsigned int)(now.frac >> 32),
791  (unsigned int)(now.frac & 0xffffffff));
792  getnextcpuevent(&t, 1);
793  ET_HW_LOCK(state);
794  state->idle = 1;
795  state->nextevent = t;
796  if (!periodic)
797  loadtimer(&now, 0);
798  ET_HW_UNLOCK(state);
799 }
800 
801 /*
802  * Switch to active mode (skip empty ticks).
803  */
804 void
805 cpu_activeclock(void)
806 {
807  struct bintime now;
808  struct pcpu_state *state;
809  struct thread *td;
810 
811  state = DPCPU_PTR(timerstate);
812  if (state->idle == 0 || busy)
813  return;
814  if (periodic)
815  now = state->now;
816  else
817  binuptime(&now);
818  CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
819  curcpu, now.sec, (unsigned int)(now.frac >> 32),
820  (unsigned int)(now.frac & 0xffffffff));
821  spinlock_enter();
822  td = curthread;
823  td->td_intr_nesting_level++;
824  handleevents(&now, 1);
825  td->td_intr_nesting_level--;
826  spinlock_exit();
827 }
828 
829 #ifdef KDTRACE_HOOKS
830 void
831 clocksource_cyc_set(const struct bintime *t)
832 {
833  struct bintime now;
834  struct pcpu_state *state;
835 
836  state = DPCPU_PTR(timerstate);
837  if (periodic)
838  now = state->now;
839  else
840  binuptime(&now);
841 
842  CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
843  curcpu, now.sec, (unsigned int)(now.frac >> 32),
844  (unsigned int)(now.frac & 0xffffffff));
845  CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
846  curcpu, t->sec, (unsigned int)(t->frac >> 32),
847  (unsigned int)(t->frac & 0xffffffff));
848 
849  ET_HW_LOCK(state);
850  if (bintime_cmp(t, &state->nextcyc, ==)) {
851  ET_HW_UNLOCK(state);
852  return;
853  }
854  state->nextcyc = *t;
855  if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
856  ET_HW_UNLOCK(state);
857  return;
858  }
859  state->nextevent = state->nextcyc;
860  if (!periodic)
861  loadtimer(&now, 0);
862  ET_HW_UNLOCK(state);
863 }
864 #endif
865 
866 #ifdef SMP
867 static void
868 cpu_new_callout(int cpu, int ticks)
869 {
870  struct bintime tmp;
871  struct pcpu_state *state;
872 
873  CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
874  curcpu, cpu, ticks);
875  state = DPCPU_ID_PTR(cpu, timerstate);
876  ET_HW_LOCK(state);
877  if (state->idle == 0 || busy) {
878  ET_HW_UNLOCK(state);
879  return;
880  }
881  /*
881  * If the timer is periodic, just update the next event time for the target CPU.
882  * If the timer is global, there is a chance it is already programmed.
884  */
885  if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
886  tmp = hardperiod;
887  bintime_mul(&tmp, ticks - 1);
888  bintime_add(&tmp, &state->nexthard);
889  if (bintime_cmp(&tmp, &state->nextevent, <))
890  state->nextevent = tmp;
891  if (periodic ||
892  bintime_cmp(&state->nextevent, &nexttick, >=)) {
893  ET_HW_UNLOCK(state);
894  return;
895  }
896  }
897  /*
898  * Otherwise we have to wake that CPU up, as we can't get present
899  * bintime to reprogram global timer from here. If timer is per-CPU,
900  * we by definition can't do it from here.
901  */
902  ET_HW_UNLOCK(state);
903  if (timer->et_flags & ET_FLAGS_PERCPU) {
904  state->handle = 1;
905  ipi_cpu(cpu, IPI_HARDCLOCK);
906  } else {
907  if (!cpu_idle_wakeup(cpu))
908  ipi_cpu(cpu, IPI_AST);
909  }
910 }
911 #endif
912 
913 /*
914  * Report or change the active event timer hardware.
915  */
916 static int
917 sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
918 {
919  char buf[32];
920  struct eventtimer *et;
921  int error;
922 
923  ET_LOCK();
924  et = timer;
925  snprintf(buf, sizeof(buf), "%s", et->et_name);
926  ET_UNLOCK();
927  error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
928  ET_LOCK();
929  et = timer;
930  if (error != 0 || req->newptr == NULL ||
931  strcasecmp(buf, et->et_name) == 0) {
932  ET_UNLOCK();
933  return (error);
934  }
935  et = et_find(buf, 0, 0);
936  if (et == NULL) {
937  ET_UNLOCK();
938  return (ENOENT);
939  }
940  configtimer(0);
941  et_free(timer);
942  if (et->et_flags & ET_FLAGS_C3STOP)
943  cpu_disable_c3_sleep++;
944  if (timer->et_flags & ET_FLAGS_C3STOP)
945  cpu_disable_c3_sleep--;
946  periodic = want_periodic;
947  timer = et;
948  et_init(timer, timercb, NULL, NULL);
949  configtimer(1);
950  ET_UNLOCK();
951  return (error);
952 }
953 SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
954  CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
955  0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");
956 
957 /*
958  * Report or change the active event timer periodicity.
959  */
960 static int
961 sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
962 {
963  int error, val;
964 
965  val = periodic;
966  error = sysctl_handle_int(oidp, &val, 0, req);
967  if (error != 0 || req->newptr == NULL)
968  return (error);
969  ET_LOCK();
970  configtimer(0);
971  periodic = want_periodic = val;
972  configtimer(1);
973  ET_UNLOCK();
974  return (error);
975 }
976 SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
977  CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
978  0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");