27 #include <sys/cdefs.h>
32 #include <sys/param.h>
35 #include <sys/cpuset.h>
36 #include <sys/rtprio.h>
37 #include <sys/systm.h>
38 #include <sys/interrupt.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
42 #include <sys/limits.h>
44 #include <sys/malloc.h>
45 #include <sys/mutex.h>
48 #include <sys/random.h>
49 #include <sys/resourcevar.h>
50 #include <sys/sched.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <sys/unistd.h>
55 #include <sys/vmmeter.h>
56 #include <machine/atomic.h>
57 #include <machine/cpu.h>
58 #include <machine/md_var.h>
59 #include <machine/stdarg.h>
62 #include <ddb/db_sym.h>
/*
 * Private flags for struct intr_thread (it_flags).  No users of these bits
 * are visible in this chunk; presumably IT_DEAD marks an ithread that should
 * exit and IT_WAIT marks a wait-for-completion in progress -- TODO confirm
 * against the full file.
 */
76 #define IT_DEAD 0x000001
77 #define IT_WAIT 0x000002
/*
 * malloc(9) type tag under which all interrupt-event, interrupt-handler and
 * interrupt-thread structures in this file are allocated and freed (see the
 * malloc(..., M_ITHREAD, ...) / free(..., M_ITHREAD) call sites below).
 */
89 static MALLOC_DEFINE(M_ITHREAD,
"ithread",
"Interrupt Threads");
95 "Number of consecutive interrupts before storm protection is enabled");
97 TAILQ_HEAD_INITIALIZER(event_list);
98 static struct mtx event_lock;
99 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
105 static int intr_filter_loop(
struct intr_event *ie,
106 struct trapframe *frame,
struct intr_thread **ithd);
108 struct intr_handler *ih);
115 struct intr_event *ie);
117 static void priv_ithread_execute_handler(
struct proc *p,
118 struct intr_handler *ih);
/*
 * Translate an interrupt type bit (INTR_TYPE_*) into a scheduling priority
 * for the backing interrupt thread.  NOTE(review): the per-type mapping
 * (original lines 127-129 and 132-155) is elided from this chunk; only the
 * mask of recognized type bits and the "no type" panic are visible here.
 */
126 intr_priority(
enum intr_type flags)
130 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
131 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
/* Reached when no recognized INTR_TYPE_* bit survived the mask above. */
156 panic(
"intr_priority: no interrupt type in flags");
168 struct intr_event *ie;
176 if (TAILQ_EMPTY(&ie->ie_handlers))
179 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
182 strlcpy(td->td_name, ie->ie_fullname,
sizeof(td->td_name));
184 sched_clear_tdname(td);
197 struct intr_handler *ih;
202 mtx_assert(&ie->ie_lock, MA_OWNED);
203 strlcpy(ie->ie_fullname, ie->ie_name,
sizeof(ie->ie_fullname));
204 ie->ie_flags &= ~IE_ENTROPY;
209 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
210 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
211 sizeof(ie->ie_fullname)) {
212 strcat(ie->ie_fullname,
" ");
213 strcat(ie->ie_fullname, ih->ih_name);
217 if (ih->ih_flags & IH_ENTROPY)
218 ie->ie_flags |= IE_ENTROPY;
226 last = &ie->ie_fullname[
sizeof(ie->ie_fullname) - 2];
227 while (missed-- > 0) {
228 if (strlen(ie->ie_fullname) + 1 ==
sizeof(ie->ie_fullname)) {
235 strcat(ie->ie_fullname,
" +");
238 strcat(ie->ie_fullname,
"+");
245 if (ie->ie_thread != NULL)
247 CTR2(KTR_INTR,
"%s: updated %s", __func__, ie->ie_fullname);
252 void (*pre_ithread)(
void *),
void (*post_ithread)(
void *),
253 void (*post_filter)(
void *),
int (*assign_cpu)(
void *, u_char),
254 const char *fmt, ...)
256 struct intr_event *ie;
260 if ((flags & ~IE_SOFT) != 0)
262 ie =
malloc(
sizeof(
struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
263 ie->ie_source = source;
264 ie->ie_pre_ithread = pre_ithread;
265 ie->ie_post_ithread = post_ithread;
266 ie->ie_post_filter = post_filter;
267 ie->ie_assign_cpu = assign_cpu;
268 ie->ie_flags = flags;
271 TAILQ_INIT(&ie->ie_handlers);
272 mtx_init(&ie->ie_lock,
"intr event", NULL, MTX_DEF);
275 vsnprintf(ie->ie_name,
sizeof(ie->ie_name), fmt, ap);
277 strlcpy(ie->ie_fullname, ie->ie_name,
sizeof(ie->ie_fullname));
278 mtx_lock(&event_lock);
279 TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
280 mtx_unlock(&event_lock);
283 CTR2(KTR_INTR,
"%s: created %s", __func__, ie->ie_name);
303 if (cpu != NOCPU && CPU_ABSENT(cpu))
306 if (ie->ie_assign_cpu == NULL)
309 error =
priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
317 mtx_lock(&ie->ie_lock);
318 if (ie->ie_thread != NULL) {
324 id = ie->ie_thread->it_thread->td_tid;
325 mtx_unlock(&ie->ie_lock);
330 mtx_unlock(&ie->ie_lock);
331 error = ie->ie_assign_cpu(ie->ie_source, cpu);
333 mtx_lock(&ie->ie_lock);
334 if (ie->ie_thread != NULL) {
336 if (ie->ie_cpu == NOCPU)
340 id = ie->ie_thread->it_thread->td_tid;
341 mtx_unlock(&ie->ie_lock);
344 mtx_unlock(&ie->ie_lock);
348 mtx_lock(&ie->ie_lock);
350 mtx_unlock(&ie->ie_lock);
/*
 * intr_lookup(irq): walk the global event_list under event_lock looking for
 * the hardware (non-IE_SOFT) interrupt event whose ie_irq matches and which
 * has at least one handler attached.  NOTE(review): the function name line,
 * loop break and return statement (original lines 356-357, 359, 365, 367)
 * are elided from this chunk.
 */
355 static struct intr_event *
358 struct intr_event *ie;
360 mtx_lock(&event_lock);
361 TAILQ_FOREACH(ie, &event_list, ie_list)
362 if (ie->ie_irq == irq &&
363 (ie->ie_flags & IE_SOFT) == 0 &&
364 TAILQ_FIRST(&ie->ie_handlers) != NULL)
366 mtx_unlock(&event_lock);
373 struct intr_event *ie;
385 for (n = 0; n < CPU_SETSIZE; n++) {
386 if (!CPU_ISSET(n, mask))
402 struct intr_event *ie;
410 mtx_lock(&ie->ie_lock);
411 if (ie->ie_cpu == NOCPU)
414 CPU_SET(ie->ie_cpu, mask);
415 mtx_unlock(&ie->ie_lock);
423 mtx_lock(&event_lock);
424 mtx_lock(&ie->ie_lock);
425 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
426 mtx_unlock(&ie->ie_lock);
427 mtx_unlock(&event_lock);
430 TAILQ_REMOVE(&event_list, ie, ie_list);
432 if (ie->ie_thread != NULL) {
434 ie->ie_thread = NULL;
437 mtx_unlock(&ie->ie_lock);
438 mtx_unlock(&event_lock);
455 &td, RFSTOPPED | RFHIGHPID,
456 0,
"intr",
"%s", name);
458 panic(
"kproc_create() failed with %d", error);
463 td->td_pflags |= TDP_ITHREAD;
465 CTR2(KTR_INTR,
"%s: created %s", __func__, name);
479 &td, RFSTOPPED | RFHIGHPID,
480 0,
"intr",
"%s", name);
482 panic(
"kproc_create() failed with %d", error);
487 td->td_pflags |= TDP_ITHREAD;
489 CTR2(KTR_INTR,
"%s: created %s", __func__, name);
499 CTR2(KTR_INTR,
"%s: killing %s", __func__, ithread->
it_event->ie_name);
503 if (TD_AWAITING_INTR(td)) {
513 driver_filter_t filter, driver_intr_t handler,
void *arg, u_char pri,
514 enum intr_type flags,
void **cookiep)
516 struct intr_handler *ih, *temp_ih;
519 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
523 ih =
malloc(
sizeof(
struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
524 ih->ih_filter = filter;
525 ih->ih_handler = handler;
526 ih->ih_argument = arg;
527 strlcpy(ih->ih_name, name,
sizeof(ih->ih_name));
530 if (flags & INTR_EXCL)
531 ih->ih_flags = IH_EXCLUSIVE;
532 if (flags & INTR_MPSAFE)
533 ih->ih_flags |= IH_MPSAFE;
534 if (flags & INTR_ENTROPY)
535 ih->ih_flags |= IH_ENTROPY;
538 mtx_lock(&ie->ie_lock);
539 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
540 if ((flags & INTR_EXCL) ||
541 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
542 mtx_unlock(&ie->ie_lock);
549 while (ie->ie_thread == NULL && handler != NULL) {
550 if (ie->ie_flags & IE_ADDING_THREAD)
551 msleep(ie, &ie->ie_lock, 0,
"ithread", 0);
553 ie->ie_flags |= IE_ADDING_THREAD;
554 mtx_unlock(&ie->ie_lock);
556 mtx_lock(&ie->ie_lock);
557 ie->ie_flags &= ~IE_ADDING_THREAD;
566 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
567 if (temp_ih->ih_pri > ih->ih_pri)
571 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
573 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
576 CTR3(KTR_INTR,
"%s: added %s to %s", __func__, ih->ih_name,
578 mtx_unlock(&ie->ie_lock);
587 driver_filter_t filter, driver_intr_t handler,
void *arg, u_char pri,
588 enum intr_type flags,
void **cookiep)
590 struct intr_handler *ih, *temp_ih;
593 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
597 ih =
malloc(
sizeof(
struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
598 ih->ih_filter = filter;
599 ih->ih_handler = handler;
600 ih->ih_argument = arg;
601 strlcpy(ih->ih_name, name,
sizeof(ih->ih_name));
604 if (flags & INTR_EXCL)
605 ih->ih_flags = IH_EXCLUSIVE;
606 if (flags & INTR_MPSAFE)
607 ih->ih_flags |= IH_MPSAFE;
608 if (flags & INTR_ENTROPY)
609 ih->ih_flags |= IH_ENTROPY;
612 mtx_lock(&ie->ie_lock);
613 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
614 if ((flags & INTR_EXCL) ||
615 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
616 mtx_unlock(&ie->ie_lock);
623 if (filter != NULL && handler != NULL) {
624 mtx_unlock(&ie->ie_lock);
626 mtx_lock(&ie->ie_lock);
631 while (ie->ie_thread == NULL && handler != NULL) {
632 if (ie->ie_flags & IE_ADDING_THREAD)
633 msleep(ie, &ie->ie_lock, 0,
"ithread", 0);
635 ie->ie_flags |= IE_ADDING_THREAD;
636 mtx_unlock(&ie->ie_lock);
638 mtx_lock(&ie->ie_lock);
639 ie->ie_flags &= ~IE_ADDING_THREAD;
649 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
650 if (temp_ih->ih_pri > ih->ih_pri)
654 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
656 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
659 CTR3(KTR_INTR,
"%s: added %s to %s", __func__, ih->ih_name,
661 mtx_unlock(&ie->ie_lock);
677 struct intr_handler *ih;
681 mtx_lock(&ie->ie_lock);
683 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
688 mtx_unlock(&ie->ie_lock);
689 panic(
"handler %p not found in interrupt event %p", cookie, ie);
701 start = index(ih->ih_name,
':');
703 start = index(ih->ih_name, 0);
710 space =
sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
711 if (strlen(descr) + 1 > space) {
712 mtx_unlock(&ie->ie_lock);
718 strcpy(start + 1, descr);
720 mtx_unlock(&ie->ie_lock);
/*
 * intr_handler_source(cookie): recover the opaque device source pointer
 * (ie_source) from an interrupt-handler cookie handed out at registration.
 * The KASSERT text visible below guards against a handler whose ih_event
 * back-pointer is NULL.  NOTE(review): several interior lines (original
 * 733, 735-738, 740) are elided from this chunk.
 */
731 struct intr_handler *ih;
732 struct intr_event *ie;
734 ih = (
struct intr_handler *)cookie;
739 (
"interrupt handler \"%s\" has a NULL interrupt event",
741 return (ie->ie_source);
754 struct intr_event *ie;
761 if (ie->ie_thread == NULL)
763 ithd = ie->ie_thread;
771 if (!TD_AWAITING_INTR(td)) {
788 struct intr_handler *handler = (
struct intr_handler *)cookie;
789 struct intr_event *ie;
791 struct intr_handler *ih;
799 ie = handler->ih_event;
801 (
"interrupt handler \"%s\" has a NULL interrupt event",
803 mtx_lock(&ie->ie_lock);
804 CTR3(KTR_INTR,
"%s: removing %s from %s", __func__, handler->ih_name,
807 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
810 mtx_unlock(&ie->ie_lock);
811 panic(
"interrupt handler \"%s\" not found in interrupt event \"%s\"",
812 ih->ih_name, ie->ie_name);
820 if (ie->ie_thread == NULL) {
821 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
822 mtx_unlock(&ie->ie_lock);
823 free(handler, M_ITHREAD);
835 thread_lock(ie->ie_thread->it_thread);
836 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
837 handler->ih_flags |= IH_DEAD;
844 atomic_store_rel_int(&ie->ie_thread->it_need, 1);
846 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
847 thread_unlock(ie->ie_thread->it_thread);
848 while (handler->ih_flags & IH_DEAD)
849 msleep(handler, &ie->ie_lock, 0,
"iev_rmh", 0);
858 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
859 if (!(ih->ih_flags & IH_FAST)) {
866 ie->ie_thread = NULL;
869 mtx_unlock(&ie->ie_lock);
870 free(handler, M_ITHREAD);
886 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
887 ie->ie_thread == NULL)
899 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
900 CTR3(KTR_INTR,
"%s: pid %d (%s) gathering entropy", __func__,
901 p->p_pid, td->td_name);
902 entropy.
event = (uintptr_t)ie;
904 random_harvest(&entropy,
sizeof(entropy), 2, 0,
908 KASSERT(p != NULL, (
"ithread %s has no process", ie->ie_name));
915 atomic_store_rel_int(&it->
it_need, 1);
917 if (TD_AWAITING_INTR(td)) {
918 CTR3(KTR_INTR,
"%s: schedule pid %d (%s)", __func__, p->p_pid,
923 CTR5(KTR_INTR,
"%s: pid %d (%s): it_need %d, state %d",
924 __func__, p->p_pid, td->td_name, it->
it_need, td->td_state);
934 struct intr_handler *handler = (
struct intr_handler *)cookie;
935 struct intr_event *ie;
938 struct intr_handler *ih;
946 ie = handler->ih_event;
948 (
"interrupt handler \"%s\" has a NULL interrupt event",
950 mtx_lock(&ie->ie_lock);
951 CTR3(KTR_INTR,
"%s: removing %s from %s", __func__, handler->ih_name,
954 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
957 mtx_unlock(&ie->ie_lock);
958 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
959 ih->ih_name, ie->ie_name);
967 if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
968 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
969 mtx_unlock(&ie->ie_lock);
970 free(handler, M_ITHREAD);
975 it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
985 if (!TD_AWAITING_INTR(it->
it_thread) && !cold) {
986 handler->ih_flags |= IH_DEAD;
993 atomic_store_rel_int(&it->
it_need, 1);
995 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
997 while (handler->ih_flags & IH_DEAD)
998 msleep(handler, &ie->ie_lock, 0,
"iev_rmh", 0);
1003 if (handler->ih_thread) {
1005 handler->ih_thread = NULL;
1015 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1016 if (handler != NULL) {
1023 ie->ie_thread = NULL;
1026 mtx_unlock(&ie->ie_lock);
1027 free(handler, M_ITHREAD);
1042 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
1053 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
1054 CTR3(KTR_INTR,
"%s: pid %d (%s) gathering entropy", __func__,
1055 p->p_pid, td->td_name);
1056 entropy.event = (uintptr_t)ie;
1058 random_harvest(&entropy,
sizeof(entropy), 2, 0,
1062 KASSERT(p != NULL, (
"ithread %s has no process", ie->ie_name));
1069 atomic_store_rel_int(&it->
it_need, 1);
1071 if (TD_AWAITING_INTR(td)) {
1072 CTR3(KTR_INTR,
"%s: schedule pid %d (%s)", __func__, p->p_pid,
1077 CTR5(KTR_INTR,
"%s: pid %d (%s): it_need %d, state %d",
1078 __func__, p->p_pid, td->td_name, it->
it_need, td->td_state);
1103 swi_add(
struct intr_event **eventp,
const char *name, driver_intr_t handler,
1104 void *arg,
int pri,
enum intr_type flags,
void **cookiep)
1107 struct intr_event *ie;
1110 if (flags & INTR_ENTROPY)
1113 ie = (eventp != NULL) ? *eventp : NULL;
1116 if (!(ie->ie_flags & IE_SOFT))
1127 PI_SWI(pri), flags, cookiep);
1130 if (pri == SWI_CLOCK) {
1131 td = ie->ie_thread->it_thread;
1133 td->td_flags |= TDF_NOLOAD;
1145 struct intr_handler *ih = (
struct intr_handler *)cookie;
1146 struct intr_event *ie = ih->ih_event;
1150 CTR3(KTR_INTR,
"swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1154 CTR2(KTR_INTR,
"swi_sched: pid %d (%s) gathering entropy",
1155 curproc->p_pid, curthread->td_name);
1156 entropy.
event = (uintptr_t)ih;
1157 entropy.
td = curthread;
1158 random_harvest(&entropy,
sizeof(entropy), 1, 0,
1167 atomic_store_rel_int(&ih->ih_need, 1);
1169 if (!(flags & SWI_DELAY)) {
1170 PCPU_INC(cnt.v_soft);
1176 KASSERT(error == 0, (
"stray software interrupt"));
1195 priv_ithread_execute_handler(
struct proc *p,
struct intr_handler *ih)
1197 struct intr_event *ie;
1204 if (ih->ih_flags & IH_DEAD) {
1205 mtx_lock(&ie->ie_lock);
1206 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1207 ih->ih_flags &= ~IH_DEAD;
1209 mtx_unlock(&ie->ie_lock);
1214 CTR6(KTR_INTR,
"%s: pid %d exec %p(%p) for %s flg=%x",
1215 __func__, p->p_pid, (
void *)ih->ih_handler, ih->ih_argument,
1216 ih->ih_name, ih->ih_flags);
1218 if (!(ih->ih_flags & IH_MPSAFE))
1220 ih->ih_handler(ih->ih_argument);
1221 if (!(ih->ih_flags & IH_MPSAFE))
1233 struct intr_handler *ih, *ihn;
1235 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1240 if (ih->ih_flags & IH_DEAD) {
1241 mtx_lock(&ie->ie_lock);
1242 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1243 ih->ih_flags &= ~IH_DEAD;
1245 mtx_unlock(&ie->ie_lock);
1250 if (ih->ih_handler == NULL)
1258 if (ie->ie_flags & IE_SOFT) {
1259 if (atomic_load_acq_int(&ih->ih_need) == 0)
1262 atomic_store_rel_int(&ih->ih_need, 0);
1266 CTR6(KTR_INTR,
"%s: pid %d exec %p(%p) for %s flg=%x",
1267 __func__, p->p_pid, (
void *)ih->ih_handler,
1268 ih->ih_argument, ih->ih_name, ih->ih_flags);
1270 if (!(ih->ih_flags & IH_MPSAFE))
1272 ih->ih_handler(ih->ih_argument);
1273 if (!(ih->ih_flags & IH_MPSAFE))
1283 if (!(ie->ie_flags & IE_SOFT))
1284 THREAD_NO_SLEEPING();
1286 if (!(ie->ie_flags & IE_SOFT))
1287 THREAD_SLEEPING_OK();
1300 !(ie->ie_flags & IE_SOFT)) {
1302 if (
ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1304 "interrupt storm detected on \"%s\"; throttling interrupt source\n",
1315 if (ie->ie_post_ithread != NULL)
1316 ie->ie_post_ithread(ie->ie_source);
1327 struct intr_event *ie;
1336 (
"%s: ithread and proc linkage out of sync", __func__));
1350 CTR3(KTR_INTR,
"%s: pid %d (%s) exiting", __func__,
1351 p->p_pid, td->td_name);
1352 free(ithd, M_ITHREAD);
1361 while (atomic_load_acq_int(&ithd->
it_need) != 0) {
1368 atomic_store_rel_int(&ithd->
it_need, 0);
1371 WITNESS_WARN(WARN_PANIC, NULL,
"suspending ithread");
1372 mtx_assert(&
Giant, MA_NOTOWNED);
1380 if ((atomic_load_acq_int(&ithd->
it_need) == 0) &&
1412 struct intr_handler *ih;
1413 struct trapframe *oldframe;
1415 int error, ret, thread;
1420 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1429 td->td_intr_nesting_level++;
1433 oldframe = td->td_intr_frame;
1434 td->td_intr_frame = frame;
1435 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1436 if (ih->ih_filter == NULL) {
1440 CTR4(KTR_INTR,
"%s: exec %p(%p) for %s", __func__,
1441 ih->ih_filter, ih->ih_argument == NULL ? frame :
1442 ih->ih_argument, ih->ih_name);
1443 if (ih->ih_argument == NULL)
1444 ret = ih->ih_filter(frame);
1446 ret = ih->ih_filter(ih->ih_argument);
1447 KASSERT(ret == FILTER_STRAY ||
1448 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
1449 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
1450 (
"%s: incorrect return value %#x from %s", __func__, ret,
1468 if (ret == FILTER_SCHEDULE_THREAD)
1472 td->td_intr_frame = oldframe;
1475 if (ie->ie_pre_ithread != NULL)
1476 ie->ie_pre_ithread(ie->ie_source);
1478 if (ie->ie_post_filter != NULL)
1479 ie->ie_post_filter(ie->ie_source);
1486 KASSERT(error == 0, (
"bad stray interrupt"));
1489 log(LOG_WARNING,
"bad stray interrupt");
1493 td->td_intr_nesting_level--;
1504 struct intr_handler *ih;
1505 struct intr_event *ie;
1513 ih = (
struct intr_handler *)arg;
1514 priv = (ih->ih_thread != NULL) ? 1 : 0;
1515 ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
1517 (
"%s: ithread and proc linkage out of sync", __func__));
1531 CTR3(KTR_INTR,
"%s: pid %d (%s) exiting", __func__,
1532 p->p_pid, td->td_name);
1533 free(ithd, M_ITHREAD);
1542 while (atomic_load_acq_int(&ithd->
it_need) != 0) {
1549 atomic_store_rel_int(&ithd->
it_need, 0);
1551 priv_ithread_execute_handler(p, ih);
1555 WITNESS_WARN(WARN_PANIC, NULL,
"suspending ithread");
1556 mtx_assert(&
Giant, MA_NOTOWNED);
1564 if ((atomic_load_acq_int(&ithd->
it_need) == 0) &&
1606 intr_filter_loop(
struct intr_event *ie,
struct trapframe *frame,
1609 struct intr_handler *ih;
1611 int ret, thread_only;
1615 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1622 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1624 CTR5(KTR_INTR,
"%s: exec %p/%p(%p) for %s", __func__,
1625 ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1627 if (ih->ih_filter != NULL)
1628 ret = ih->ih_filter(arg);
1633 KASSERT(ret == FILTER_STRAY ||
1634 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
1635 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
1636 (
"%s: incorrect return value %#x from %s", __func__, ret,
1638 if (ret & FILTER_STRAY)
1641 *ithd = ih->ih_thread;
1652 *ithd = ie->ie_thread;
1653 return (FILTER_SCHEDULE_THREAD);
1655 return (FILTER_STRAY);
1673 struct trapframe *oldframe;
1680 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1683 td->td_intr_nesting_level++;
1686 oldframe = td->td_intr_frame;
1687 td->td_intr_frame = frame;
1688 thread = intr_filter_loop(ie, frame, &ithd);
1689 if (thread & FILTER_HANDLED) {
1690 if (ie->ie_post_filter != NULL)
1691 ie->ie_post_filter(ie->ie_source);
1693 if (ie->ie_pre_ithread != NULL)
1694 ie->ie_pre_ithread(ie->ie_source);
1696 td->td_intr_frame = oldframe;
1700 if (thread & FILTER_STRAY) {
1703 printf(
"Interrupt stray detection not present\n");
1707 if (thread & FILTER_SCHEDULE_THREAD) {
1709 panic(
"%s: impossible stray interrupt", __func__);
1711 td->td_intr_nesting_level--;
1721 db_dump_intrhand(
struct intr_handler *ih)
1725 db_printf(
"\t%-10s ", ih->ih_name);
1726 switch (ih->ih_pri) {
1746 if (ih->ih_pri >= PI_SOFT)
1749 db_printf(
"%4u", ih->ih_pri);
1753 if (ih->ih_filter != NULL) {
1755 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1757 if (ih->ih_handler != NULL) {
1758 if (ih->ih_filter != NULL)
1761 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1763 db_printf(
"(%p)", ih->ih_argument);
1765 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1769 if (ih->ih_flags & IH_EXCLUSIVE) {
1775 if (ih->ih_flags & IH_ENTROPY) {
1778 db_printf(
"ENTROPY");
1781 if (ih->ih_flags & IH_DEAD) {
1787 if (ih->ih_flags & IH_MPSAFE) {
1790 db_printf(
"MPSAFE");
1807 db_dump_intr_event(
struct intr_event *ie,
int handlers)
1809 struct intr_handler *ih;
1813 db_printf(
"%s ", ie->ie_fullname);
1816 db_printf(
"(pid %d)", it->
it_thread->td_proc->p_pid);
1818 db_printf(
"(no thread)");
1819 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1820 (it != NULL && it->
it_need)) {
1823 if (ie->ie_flags & IE_SOFT) {
1827 if (ie->ie_flags & IE_ENTROPY) {
1830 db_printf(
"ENTROPY");
1833 if (ie->ie_flags & IE_ADDING_THREAD) {
1836 db_printf(
"ADDING_THREAD");
1839 if (it != NULL && it->
it_need) {
1849 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
1850 db_dump_intrhand(ih);
1856 DB_SHOW_COMMAND(intr, db_show_intr)
1858 struct intr_event *ie;
1861 verbose = index(modif,
'v') != NULL;
1862 all = index(modif,
'a') != NULL;
1863 TAILQ_FOREACH(ie, &event_list, ie_list) {
1864 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1866 db_dump_intr_event(ie, verbose);
1880 if (
swi_add(NULL,
"vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &
vm_ih))
1881 panic(
"died while creating vm swi ithread");
1901 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
1910 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
1917 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
1925 for (i = intrcnt; j < (sintrcnt /
sizeof(u_long)) && !db_pager_quit;
1930 db_printf(
"%s\t%lu\n", cp, *i);
1931 cp += strlen(cp) + 1;
static TAILQ_HEAD(intr_event)
void sched_prio(struct thread *td, u_char prio)
MTX_SYSINIT(et_eventtimers_init,&et_eventtimers_mtx,"et_mtx", MTX_DEF)
int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
int intr_getaffinity(int irq, void *m)
static int swi_assign_cpu(void *arg, u_char cpu)
void * malloc(unsigned long size, struct malloc_type *mtp, int flags)
static int intr_storm_threshold
int kproc_kthread_add(void(*func)(void *), void *arg, struct proc **procptr, struct thread **tdptr, int flags, int pages, const char *procname, const char *fmt,...)
void panic(const char *fmt,...)
static void ithread_update(struct intr_thread *ithd)
static struct intr_thread * ithread_create(const char *name)
void mi_switch(int flags, struct thread *newtd)
static int intr_event_schedule_thread(struct intr_event *ie)
int intr_event_remove_handler(void *cookie)
int vsnprintf(char *str, size_t size, const char *format, va_list ap)
void intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
int priv_check(struct thread *td, int priv)
SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, sysctl_intrnames,"","Interrupt Names")
void sched_add(struct thread *td, int flags)
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
static void ithread_destroy(struct intr_thread *ithread)
int intr_event_describe_handler(struct intr_event *ie, void *cookie, const char *descr)
void sched_class(struct thread *td, int class)
int sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
struct intr_event * tty_intr_event
int intr_event_bind(struct intr_event *ie, u_char cpu)
static void intr_event_update(struct intr_event *ie)
int intr_event_destroy(struct intr_event *ie)
void log(int level, const char *fmt,...)
TUNABLE_INT("hw.intr_storm_threshold",&intr_storm_threshold)
static void ithread_execute_handlers(struct proc *p, struct intr_event *ie)
struct intr_event * it_event
static int sysctl_intrnames(SYSCTL_HANDLER_ARGS)
int intr_setaffinity(int irq, void *m)
static int sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
int swi_remove(void *cookie)
int pause(const char *wmesg, int timo)
void free(void *addr, struct malloc_type *mtp)
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,&intr_storm_threshold, 0,"Number of consecutive interrupts before storm protection is enabled")
int printf(const char *fmt,...)
int swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, void *arg, int pri, enum intr_type flags, void **cookiep)
void _intr_drain(int irq)
int intr_event_create(struct intr_event **event, void *source, int flags, int irq, void(*pre_ithread)(void *), void(*post_ithread)(void *), void(*post_filter)(void *), int(*assign_cpu)(void *, u_char), const char *fmt,...)
void mtx_init(struct mtx *m, const char *name, const char *type, int opts)
int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep)
static void ithread_loop(void *arg)
static MALLOC_DEFINE(M_ITHREAD,"ithread","Interrupt Threads")
static void start_softintr(void *dummy)
int cpuset_setthread(lwpid_t id, cpuset_t *mask)
void mtx_destroy(struct mtx *m)
struct intr_event * clk_intr_event
void swi_sched(void *cookie, int flags)
int intr_event_handle(struct intr_event *ie, struct trapframe *frame)
void critical_enter(void)
void * intr_handler_source(void *cookie)
struct thread * it_thread
static struct intr_event * intr_lookup(int irq)