FreeBSD kernel kern code
subr_taskqueue.c
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}
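/*
 * Illustrative usage sketch (not part of this file): a typical consumer
 * of the API above.  A driver creates a private queue, backs it with one
 * thread via taskqueue_start_threads() and taskqueue_thread_enqueue()
 * (both defined later in this file), and queues deferred work.  The
 * names my_softc, my_task_fn, my_attach and "mydrv" are hypothetical.
 *
 *	static struct taskqueue *my_tq;
 *	static struct task my_task;
 *
 *	static void
 *	my_task_fn(void *context, int pending)
 *	{
 *		struct my_softc *sc = context;
 *
 *		// deferred work; 'pending' counts coalesced enqueues
 *	}
 *
 *	static int
 *	my_attach(struct my_softc *sc)
 *	{
 *		my_tq = taskqueue_create("mydrv", M_WAITOK,
 *		    taskqueue_thread_enqueue, &my_tq);
 *		taskqueue_start_threads(&my_tq, 1, PWAIT, "mydrv taskq");
 *		TASK_INIT(&my_task, 0, my_task_fn, sc);
 *		return (0);
 *	}
 *
 * Later, taskqueue_enqueue(my_tq, &my_task) schedules the work; on
 * detach, taskqueue_drain(my_tq, &my_task) and taskqueue_free(my_tq)
 * tear it down.
 */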
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
	}
	TQ_UNLOCK(queue);
	return (res);
}
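/*
 * Illustrative sketch (not part of this file): deferring work with the
 * timeout machinery above.  TIMEOUT_TASK_INIT() is the <sys/taskqueue.h>
 * wrapper around _timeout_task_init(); my_tt and my_late_fn are
 * hypothetical names, and my_tq is the queue from the earlier sketch.
 *
 *	static struct timeout_task my_tt;
 *
 *	TIMEOUT_TASK_INIT(my_tq, &my_tt, 0, my_late_fn, sc);
 *	taskqueue_enqueue_timeout(my_tq, &my_tt, hz);	// in ~1 second
 *
 * The KASSERT above forbids this on spin-mutex ("fast") queues: the
 * callout was bound to the queue mutex in _timeout_task_init() via
 * callout_init_mtx(), which requires a regular sleep mutex.
 */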
static void
taskqueue_drain_running(struct taskqueue *queue)
{

	while (!TAILQ_EMPTY(&queue->tq_active))
		TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex,
		    PWAIT, "-", 0);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
	if (TAILQ_EMPTY(&queue->tq_active))
		wakeup(&queue->tq_active);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
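/*
 * Illustrative sketch (not part of this file): the usual teardown idiom.
 * taskqueue_cancel() removes a pending task but returns EBUSY while the
 * task function is running, so callers loop and drain:
 *
 *	while (taskqueue_cancel(my_tq, &my_task, NULL) != 0)
 *		taskqueue_drain(my_tq, &my_task);
 *
 * my_tq and my_task are the hypothetical names from the earlier sketch.
 */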
int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{
	struct task *task;

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	task = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (task != NULL)
		while (task->ta_pending != 0)
			TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	taskqueue_drain_running(queue);
	KASSERT(STAILQ_EMPTY(&queue->tq_queue),
	    ("taskqueue queue is not empty after draining"));
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
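/*
 * Illustrative sketch (not part of this file): a queue serviced by four
 * threads.  Passing taskqueue_thread_enqueue (defined below) with &my_tq
 * as context ties taskqueue_enqueue() to the wakeup_one() in that hook;
 * with count > 1 the threads are named "my taskq_0" .. "my taskq_3".
 * my_tq is again a hypothetical name.
 *
 *	my_tq = taskqueue_create("my", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 4, PWAIT, "my taskq");
 */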
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
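/*
 * Illustrative sketch (not part of this file): taskqueue_member() lets
 * code assert that it is executing from one of a queue's threads, e.g.
 *
 *	KASSERT(taskqueue_member(my_tq, curthread),
 *	    ("not running on my_tq"));
 */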