subr_turnstile.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queue of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is allocated from a UMA zone
 * and attached to that thread.  When a thread blocks on a lock, if it is the
 * first thread to block, it lends its turnstile to the lock.  If the lock
 * already has a turnstile, then it gives its turnstile to the free list of
 * the lock's turnstile.  When a thread is woken up, it takes a turnstile from
 * the free list if there are any other waiters.  If it is the only thread
 * blocked on the lock, then it reclaims the turnstile associated with the lock
 * and removes it from the hash table.
 */

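/*
 * Editor's sketch (not part of the original source): how a hypothetical
 * exclusive lock might drive this interface.  "struct toy_lock" and
 * toy_lock_try() are assumptions for illustration only; the sequence of
 * turnstile calls mirrors the way kern_mutex.c uses this KPI, and the
 * TS_* constants come from <sys/turnstile.h>.
 */
#if 0
static void
toy_lock_block(struct toy_lock *tl, struct thread *owner)
{
        struct turnstile *ts;

        /* Find or prepare a turnstile; returns with its chain locked. */
        ts = turnstile_trywait(&tl->lock_object);

        /* Recheck the lock now that the chain is locked to close races. */
        if (toy_lock_try(tl)) {
                turnstile_cancel(ts);   /* Nothing to wait on after all. */
                return;
        }

        /* Lend our priority to the owner and context switch. */
        turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
}

static void
toy_lock_release_slow(struct toy_lock *tl)
{
        struct turnstile *ts;

        turnstile_chain_lock(&tl->lock_object);
        ts = turnstile_lookup(&tl->lock_object);
        if (ts != NULL) {
                /* Move the highest priority waiter to the pending list... */
                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                /* ...then wake it and drop any priority lent to us. */
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        }
        turnstile_chain_unlock(&tl->lock_object);
}
#endif
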
#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_turnstile_profiling.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <sys/lockmgr.h>
#include <sys/sx.h>
#endif

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define TC_TABLESIZE    128                     /* Must be power of 2. */
#define TC_MASK         (TC_TABLESIZE - 1)
#define TC_SHIFT        8
#define TC_HASH(lock)   (((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define TC_LOOKUP(lock) &turnstile_chains[TC_HASH(lock)]

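/*
 * Editor's example (address is made up): with TC_SHIFT == 8 and
 * TC_MASK == 0x7f, a lock at 0x80a13440 hashes to
 * ((0x80a13440 >> 8) & 0x7f) == (0x80a134 & 0x7f) == 0x34 == 52,
 * so it is served by turnstile_chains[52].
 */
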
/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wait() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
        struct mtx ts_lock;                     /* Spin lock for self. */
        struct threadqueue ts_blocked[2];       /* (c + q) Blocked threads. */
        struct threadqueue ts_pending;          /* (c) Pending threads. */
        LIST_ENTRY(turnstile) ts_hash;          /* (c) Chain and free list. */
        LIST_ENTRY(turnstile) ts_link;          /* (q) Contested locks. */
        LIST_HEAD(, turnstile) ts_free;         /* (c) Free turnstiles. */
        struct lock_object *ts_lockobj;         /* (c) Lock we reference. */
        struct thread *ts_owner;                /* (c + q) Who owns the lock. */
};
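
/*
 * Editor's picture of the lists described above (illustrative only):
 *
 *   turnstile_chains[i] --tc_turnstiles--> ts --ts_hash--> ts --> ...
 *                                          |
 *                                          +-- ts_free: spare turnstiles
 *                                          |   donated by later waiters
 *                                          |   (linked by ts_hash)
 *                                          +-- ts_blocked[2], ts_pending:
 *                                              waiting/awakened threads
 *
 *   owning thread: td_contested --ts_link--> each turnstile it owns
 */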

struct turnstile_chain {
        LIST_HEAD(, turnstile) tc_turnstiles;   /* List of turnstiles. */
        struct mtx tc_lock;                     /* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
        u_int tc_depth;                         /* Length of tc_turnstiles. */
        u_int tc_max_depth;                     /* Max length of tc_turnstiles. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0,
    "turnstile profiling");
static SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
static uma_zone_t turnstile_zone;

/*
 * Prototypes for non-exported routines.
 */
static void     init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void     init_turnstile_profiling(void *arg);
#endif
static void     propagate_priority(struct thread *td);
static int      turnstile_adjust_thread(struct turnstile *ts,
                    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void     turnstile_setowner(struct turnstile *ts, struct thread *owner);
#ifdef INVARIANTS
static void     turnstile_dtor(void *mem, int size, void *arg);
#endif
static int      turnstile_init(void *mem, int size, int flags);
static void     turnstile_fini(void *mem, int size);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , sleep);
SDT_PROBE_DEFINE2(sched, , , wakeup, "struct thread *",
    "struct proc *");

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
        struct turnstile *ts;
        int pri;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        pri = td->td_priority;
        ts = td->td_blocked;
        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
        /*
         * Grab a recursive lock on this turnstile chain so it stays locked
         * for the whole operation.  The caller expects us to return with
         * the original lock held.  We only ever lock down the chain so
         * the lock order is constant.
         */
        mtx_lock_spin(&ts->ts_lock);
        for (;;) {
                td = ts->ts_owner;

                if (td == NULL) {
                        /*
                         * This might be a read lock with no owner.  There's
                         * not much we can do, so just bail.
                         */
                        mtx_unlock_spin(&ts->ts_lock);
                        return;
                }

                thread_lock_flags(td, MTX_DUPOK);
                mtx_unlock_spin(&ts->ts_lock);
                MPASS(td->td_proc != NULL);
                MPASS(td->td_proc->p_magic == P_MAGIC);

                /*
                 * If the thread is asleep, then we are probably about
                 * to deadlock.  To make debugging this easier, just
                 * panic and tell the user which thread misbehaved so
                 * they can hopefully get a stack trace from the truly
                 * misbehaving thread.
                 */
                if (TD_IS_SLEEPING(td)) {
                        printf(
                "Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
                            td->td_tid, td->td_proc->p_pid);
                        kdb_backtrace_thread(td);
                        panic("sleeping thread");
                }

                /*
                 * If this thread already has higher priority than the
                 * thread that is being blocked, we are finished.
                 */
                if (td->td_priority <= pri) {
                        thread_unlock(td);
                        return;
                }

                /*
                 * Bump this thread's priority.
                 */
                sched_lend_prio(td, pri);

                /*
                 * If lock holder is actually running or on the run queue
                 * then we are done.
                 */
                if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
                        MPASS(td->td_blocked == NULL);
                        thread_unlock(td);
                        return;
                }

#ifndef SMP
                /*
                 * For UP, we check to see if td is curthread (this should
                 * never happen, however, as it would mean we have a
                 * deadlock).
                 */
                KASSERT(td != curthread, ("Deadlock detected"));
#endif

                /*
                 * If we aren't blocked on a lock, we should be.
                 */
                KASSERT(TD_ON_LOCK(td), (
                    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
                    td->td_tid, td->td_name, td->td_state,
                    ts->ts_lockobj->lo_name));

                /*
                 * Pick up the lock that td is blocked on.
                 */
                ts = td->td_blocked;
                MPASS(ts != NULL);
                THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
                /* Resort td on the list if needed. */
                if (!turnstile_adjust_thread(ts, td)) {
                        mtx_unlock_spin(&ts->ts_lock);
                        return;
                }
                /* The thread lock is released as ts lock above. */
        }
}

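/*
 * Editor's example of the walk above (a lower numeric value means a
 * higher priority): thread A (pri 80) blocks on a lock owned by B
 * (pri 120), and B is itself blocked on a lock owned by C (pri 160).
 * The loop lends pri 80 to B, follows B->td_blocked to the second
 * turnstile, lends pri 80 to C, and stops once it reaches a runnable
 * owner.
 */
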
/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
        struct thread *td1, *td2;
        int queue;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        MPASS(TD_ON_LOCK(td));

        /*
         * This thread may not be blocked on this turnstile anymore
         * but instead might already be woken up on another CPU
         * that is waiting on the thread lock in turnstile_unpend() to
         * finish waking this thread up.  We can detect this case
         * by checking to see if this thread has been given a
         * turnstile by either turnstile_signal() or
         * turnstile_broadcast().  In this case, treat the thread as
         * if it was already running.
         */
        if (td->td_turnstile != NULL)
                return (0);

        /*
         * Check if the thread needs to be moved on the blocked chain.
         * It needs to be moved if either its priority is lower than
         * the previous thread or higher than the next thread.
         */
        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
        td1 = TAILQ_PREV(td, threadqueue, td_lockq);
        td2 = TAILQ_NEXT(td, td_lockq);
        if ((td1 != NULL && td->td_priority < td1->td_priority) ||
            (td2 != NULL && td->td_priority > td2->td_priority)) {

                /*
                 * Remove thread from blocked chain and determine where
                 * it should be moved to.
                 */
                queue = td->td_tsqueue;
                MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
                mtx_lock_spin(&td_contested_lock);
                TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
                TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (td1->td_priority > td->td_priority)
                                break;
                }

                if (td1 == NULL)
                        TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
                else
                        TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                mtx_unlock_spin(&td_contested_lock);
                if (td1 == NULL)
                        CTR3(KTR_LOCK,
                    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
                            td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
                else
                        CTR4(KTR_LOCK,
                    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
                            td->td_tid, td1->td_tid, ts->ts_lockobj,
                            ts->ts_lockobj->lo_name);
        }
        return (1);
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
        int i;

        for (i = 0; i < TC_TABLESIZE; i++) {
                LIST_INIT(&turnstile_chains[i].tc_turnstiles);
                mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
                    NULL, MTX_SPIN);
        }
        mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
        LIST_INIT(&thread0.td_contested);
        thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
        struct sysctl_oid *chain_oid;
        char chain_name[10];
        int i;

        for (i = 0; i < TC_TABLESIZE; i++) {
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
                    NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
                    0, NULL);
        }
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

        turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
            NULL,
#ifdef INVARIANTS
            turnstile_dtor,
#else
            NULL,
#endif
            turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
        thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
        struct turnstile *ts;

        MPASS(TD_ON_LOCK(td));

        /*
         * Pick up the lock that td is blocked on.
         */
        ts = td->td_blocked;
        MPASS(ts != NULL);
        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
        mtx_assert(&ts->ts_lock, MA_OWNED);

        /* Resort the turnstile on the list. */
        if (!turnstile_adjust_thread(ts, td))
                return;
        /*
         * If our priority was lowered and we are at the head of the
         * turnstile, then propagate our new priority up the chain.
         * Note that we currently don't try to revoke lent priorities
         * when our priority goes up.
         */
        MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
            td->td_tsqueue == TS_SHARED_QUEUE);
        if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
            td->td_priority < oldpri) {
                propagate_priority(td);
        }
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

        mtx_assert(&td_contested_lock, MA_OWNED);
        MPASS(ts->ts_owner == NULL);

        /* A shared lock might not have an owner. */
        if (owner == NULL)
                return;

        MPASS(owner->td_proc->p_magic == P_MAGIC);
        ts->ts_owner = owner;
        LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
turnstile_dtor(void *mem, int size, void *arg)
{
        struct turnstile *ts;

        ts = mem;
        MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
        MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
        MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
#endif

/*
 * UMA zone item initializer.
 */
static int
turnstile_init(void *mem, int size, int flags)
{
        struct turnstile *ts;

        bzero(mem, size);
        ts = mem;
        TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
        TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
        TAILQ_INIT(&ts->ts_pending);
        LIST_INIT(&ts->ts_free);
        mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN | MTX_RECURSE);
        return (0);
}

static void
turnstile_fini(void *mem, int size)
{
        struct turnstile *ts;

        ts = mem;
        mtx_destroy(&ts->ts_lock);
}

/*
 * Get a turnstile for a new thread.
 */
struct turnstile *
turnstile_alloc(void)
{

        return (uma_zalloc(turnstile_zone, M_WAITOK));
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

        uma_zfree(turnstile_zone, ts);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_chain_lock(struct lock_object *lock)
{
        struct turnstile_chain *tc;

        tc = TC_LOOKUP(lock);
        mtx_lock_spin(&tc->tc_lock);
}

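/*
 * Editor's note (summarizing the code below): look up the turnstile for
 * the given lock, locking its chain in the process.  If the lock has no
 * turnstile yet, prepare the current thread's spare turnstile instead.
 * Either turnstile_wait() or turnstile_cancel() must follow.
 */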
struct turnstile *
turnstile_trywait(struct lock_object *lock)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;

        tc = TC_LOOKUP(lock);
        mtx_lock_spin(&tc->tc_lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock) {
                        mtx_lock_spin(&ts->ts_lock);
                        return (ts);
                }

        ts = curthread->td_turnstile;
        MPASS(ts != NULL);
        mtx_lock_spin(&ts->ts_lock);
        KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
        ts->ts_lockobj = lock;

        return (ts);
}

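/*
 * Editor's note (summarizing the code below): abort a wait begun with
 * turnstile_trywait(), dropping the turnstile and chain locks and
 * detaching this thread's spare turnstile from the lock if it was never
 * inserted into the chain.
 */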
void
turnstile_cancel(struct turnstile *ts)
{
        struct turnstile_chain *tc;
        struct lock_object *lock;

        mtx_assert(&ts->ts_lock, MA_OWNED);

        mtx_unlock_spin(&ts->ts_lock);
        lock = ts->ts_lockobj;
        if (ts == curthread->td_turnstile)
                ts->ts_lockobj = NULL;
        tc = TC_LOOKUP(lock);
        mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table, locking the associated
 * turnstile chain along the way.  If no turnstile is found in the hash
 * table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;

        tc = TC_LOOKUP(lock);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock) {
                        mtx_lock_spin(&ts->ts_lock);
                        return (ts);
                }
        return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_chain_unlock(struct lock_object *lock)
{
        struct turnstile_chain *tc;

        tc = TC_LOOKUP(lock);
        mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Return a pointer to the thread waiting on this turnstile with the
 * highest priority or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
        struct thread *std, *xtd;

        std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
        xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
        if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
                return (std);
        return (xtd);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
        struct thread *td, *owner;
        struct turnstile_chain *tc;

        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(ts != curthread->td_turnstile);

        owner = curthread;
        mtx_lock_spin(&td_contested_lock);
        turnstile_setowner(ts, owner);
        mtx_unlock_spin(&td_contested_lock);

        td = turnstile_first_waiter(ts);
        MPASS(td != NULL);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);

        /*
         * Update the priority of the new owner if needed.
         */
        thread_lock(owner);
        if (td->td_priority < owner->td_priority)
                sched_lend_prio(owner, td->td_priority);
        thread_unlock(owner);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_unlock_spin(&ts->ts_lock);
        mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
{
        struct turnstile_chain *tc;
        struct thread *td, *td1;
        struct lock_object *lock;

        td = curthread;
        mtx_assert(&ts->ts_lock, MA_OWNED);
        if (owner)
                MPASS(owner->td_proc->p_magic == P_MAGIC);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

        /*
         * If the lock does not already have a turnstile, use this thread's
         * turnstile.  Otherwise insert the current thread into the
         * turnstile already in use by this lock.
         */
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        if (ts == td->td_turnstile) {
#ifdef TURNSTILE_PROFILING
                tc->tc_depth++;
                if (tc->tc_depth > tc->tc_max_depth) {
                        tc->tc_max_depth = tc->tc_depth;
                        if (tc->tc_max_depth > turnstile_max_depth)
                                turnstile_max_depth = tc->tc_max_depth;
                }
#endif
                LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
                KASSERT(TAILQ_EMPTY(&ts->ts_pending),
                    ("thread's turnstile has pending threads"));
                KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
                    ("thread's turnstile has exclusive waiters"));
                KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
                    ("thread's turnstile has shared waiters"));
                KASSERT(LIST_EMPTY(&ts->ts_free),
                    ("thread's turnstile has a non-empty free list"));
                MPASS(ts->ts_lockobj != NULL);
                mtx_lock_spin(&td_contested_lock);
                TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
                turnstile_setowner(ts, owner);
                mtx_unlock_spin(&td_contested_lock);
        } else {
                TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
                        if (td1->td_priority > td->td_priority)
                                break;
                mtx_lock_spin(&td_contested_lock);
                if (td1 != NULL)
                        TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                else
                        TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
                MPASS(owner == ts->ts_owner);
                mtx_unlock_spin(&td_contested_lock);
                MPASS(td->td_turnstile != NULL);
                LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
        }
        thread_lock(td);
        thread_lock_set(td, &ts->ts_lock);
        td->td_turnstile = NULL;

        /* Save who we are blocked on and switch. */
        lock = ts->ts_lockobj;
        td->td_tsqueue = queue;
        td->td_blocked = ts;
        td->td_lockname = lock->lo_name;
        td->td_blktick = ticks;
        TD_SET_LOCK(td);
        mtx_unlock_spin(&tc->tc_lock);
        propagate_priority(td);

        if (LOCK_LOG_TEST(lock, 0))
                CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
                    td->td_tid, lock, lock->lo_name);

        SDT_PROBE0(sched, , , sleep);

        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
        mi_switch(SW_VOL | SWT_TURNSTILE, NULL);

        if (LOCK_LOG_TEST(lock, 0))
                CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
                    __func__, td->td_tid, lock, lock->lo_name);
        thread_unlock(td);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
        struct turnstile_chain *tc;
        struct thread *td;
        int empty;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(curthread->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

        /*
         * Pick the highest priority thread blocked on this lock and
         * move it to the pending list.
         */
        td = TAILQ_FIRST(&ts->ts_blocked[queue]);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        mtx_lock_spin(&td_contested_lock);
        TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
        mtx_unlock_spin(&td_contested_lock);
        TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

        /*
         * If the turnstile is now empty, remove it from its chain and
         * give it to the about-to-be-woken thread.  Otherwise take a
         * turnstile from the free list and give it to the thread.
         */
        empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
            TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
        if (empty) {
                tc = TC_LOOKUP(ts->ts_lockobj);
                mtx_assert(&tc->tc_lock, MA_OWNED);
                MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
                tc->tc_depth--;
#endif
        } else
                ts = LIST_FIRST(&ts->ts_free);
        MPASS(ts != NULL);
        LIST_REMOVE(ts, ts_hash);
        td->td_turnstile = ts;

        return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
        struct turnstile_chain *tc;
        struct turnstile *ts1;
        struct thread *td;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(curthread->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
        /*
         * We must have the chain locked so that we can remove the empty
         * turnstile from the hash queue.
         */
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

        /*
         * Transfer the blocked list to the pending list.
         */
        mtx_lock_spin(&td_contested_lock);
        TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
        mtx_unlock_spin(&td_contested_lock);

        /*
         * Give a turnstile to each thread.  The last thread gets
         * this turnstile if the turnstile is empty.
         */
        TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
                if (LIST_EMPTY(&ts->ts_free)) {
                        MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
                        ts1 = ts;
#ifdef TURNSTILE_PROFILING
                        tc->tc_depth--;
#endif
                } else
                        ts1 = LIST_FIRST(&ts->ts_free);
                MPASS(ts1 != NULL);
                LIST_REMOVE(ts1, ts_hash);
                td->td_turnstile = ts1;
        }
}

/*
 * Wakeup all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts, int owner_type)
{
        TAILQ_HEAD(, thread) pending_threads;
        struct turnstile *nts;
        struct thread *td;
        u_char cp, pri;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
        MPASS(!TAILQ_EMPTY(&ts->ts_pending));

        /*
         * Move the list of pending threads out of the turnstile and
         * into a local variable.
         */
        TAILQ_INIT(&pending_threads);
        TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
        if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
            TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
                ts->ts_lockobj = NULL;
#endif
        /*
         * Adjust the priority of curthread based on other contested
         * locks it owns.  Don't lower the priority below the base
         * priority however.
         */
        td = curthread;
        pri = PRI_MAX;
        thread_lock(td);
        mtx_lock_spin(&td_contested_lock);
        /*
         * Remove the turnstile from this thread's list of contested locks
         * since this thread doesn't own it anymore.  New threads will
         * not be blocking on the turnstile until it is claimed by a new
         * owner.  There might not be a current owner if this is a shared
         * lock.
         */
        if (ts->ts_owner != NULL) {
                ts->ts_owner = NULL;
                LIST_REMOVE(ts, ts_link);
        }
        LIST_FOREACH(nts, &td->td_contested, ts_link) {
                cp = turnstile_first_waiter(nts)->td_priority;
                if (cp < pri)
                        pri = cp;
        }
        mtx_unlock_spin(&td_contested_lock);
        sched_unlend_prio(td, pri);
        thread_unlock(td);
        /*
         * Wake up all the pending threads.  If a thread is not blocked
         * on a lock, then it is currently executing on another CPU in
         * turnstile_wait() or sitting on a run queue waiting to resume
         * in turnstile_wait().  Set a flag to force it to try to acquire
         * the lock again instead of blocking.
         */
        while (!TAILQ_EMPTY(&pending_threads)) {
                td = TAILQ_FIRST(&pending_threads);
                TAILQ_REMOVE(&pending_threads, td, td_lockq);
                SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
                thread_lock(td);
                THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
                MPASS(td->td_proc->p_magic == P_MAGIC);
                MPASS(TD_ON_LOCK(td));
                TD_CLR_LOCK(td);
                MPASS(TD_CAN_RUN(td));
                td->td_blocked = NULL;
                td->td_lockname = NULL;
                td->td_blktick = 0;
#ifdef INVARIANTS
                td->td_tsqueue = 0xff;
#endif
                sched_add(td, SRQ_BORING);
                thread_unlock(td);
        }
        mtx_unlock_spin(&ts->ts_lock);
}

/*
 * Give up ownership of a turnstile.  This must be called with the
 * turnstile chain locked.
 */
void
turnstile_disown(struct turnstile *ts)
{
        struct thread *td;
        u_char cp, pri;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(ts->ts_owner == curthread);
        MPASS(TAILQ_EMPTY(&ts->ts_pending));
        MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
            !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));

        /*
         * Remove the turnstile from this thread's list of contested locks
         * since this thread doesn't own it anymore.  New threads will
         * not be blocking on the turnstile until it is claimed by a new
         * owner.
         */
        mtx_lock_spin(&td_contested_lock);
        ts->ts_owner = NULL;
        LIST_REMOVE(ts, ts_link);
        mtx_unlock_spin(&td_contested_lock);

        /*
         * Adjust the priority of curthread based on other contested
         * locks it owns.  Don't lower the priority below the base
         * priority however.
         */
        td = curthread;
        pri = PRI_MAX;
        thread_lock(td);
        mtx_unlock_spin(&ts->ts_lock);
        mtx_lock_spin(&td_contested_lock);
        LIST_FOREACH(ts, &td->td_contested, ts_link) {
                cp = turnstile_first_waiter(ts)->td_priority;
                if (cp < pri)
                        pri = cp;
        }
        mtx_unlock_spin(&td_contested_lock);
        sched_unlend_prio(td, pri);
        thread_unlock(td);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

        MPASS(ts != NULL);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
        mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
        return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}

/*
 * Returns true if a sub-queue of a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

        MPASS(ts != NULL);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
        mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
        return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
}

#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

        db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
            td->td_proc->p_pid, td->td_name);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
        struct thread *td;

        db_printf("%s:\n", header);
        if (TAILQ_EMPTY(queue)) {
                db_printf("%sempty\n", prefix);
                return;
        }
        TAILQ_FOREACH(td, queue, td_lockq) {
                print_thread(td, prefix);
        }
}

DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;
        struct lock_object *lock;
        int i;

        if (!have_addr)
                return;

        /*
         * First, see if there is an active turnstile for the lock indicated
         * by the address.
         */
        lock = (struct lock_object *)addr;
        tc = TC_LOOKUP(lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock)
                        goto found;

        /*
         * Second, see if there is an active turnstile at the address
         * indicated.
         */
        for (i = 0; i < TC_TABLESIZE; i++)
                LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
                        if (ts == (struct turnstile *)addr)
                                goto found;
                }

        db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
        return;
found:
        lock = ts->ts_lockobj;
        db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
            lock->lo_name);
        if (ts->ts_owner)
                print_thread(ts->ts_owner, "Lock Owner: ");
        else
                db_printf("Lock Owner: none\n");
        print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
        print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
            "\t");
        print_queue(&ts->ts_pending, "Pending Threads", "\t");
}

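/*
 * Editor's note: the DDB commands defined in this file are invoked from
 * the debugger prompt as, e.g., "show turnstile <addr>" (where <addr> is
 * either the lock or the turnstile itself), "show lockchain [<thread>]",
 * "show sleepchain [<thread>]", "show all chains" (aliased below as
 * "show allchains"), and "show locktree <lock addr>".
 */
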
/*
 * Show all the threads a particular thread is waiting on based on
 * non-sleepable and non-spin locks.
 */
static void
print_lockchain(struct thread *td, const char *prefix)
{
        struct lock_object *lock;
        struct lock_class *class;
        struct turnstile *ts;

        /*
         * Follow the chain.  We keep walking as long as the thread is
         * blocked on a turnstile that has an owner.
         */
        while (!db_pager_quit) {
                db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
                    td->td_proc->p_pid, td->td_name);
                switch (td->td_state) {
                case TDS_INACTIVE:
                        db_printf("is inactive\n");
                        return;
                case TDS_CAN_RUN:
                        db_printf("can run\n");
                        return;
                case TDS_RUNQ:
                        db_printf("is on a run queue\n");
                        return;
                case TDS_RUNNING:
                        db_printf("running on CPU %d\n", td->td_oncpu);
                        return;
                case TDS_INHIBITED:
                        if (TD_ON_LOCK(td)) {
                                ts = td->td_blocked;
                                lock = ts->ts_lockobj;
                                class = LOCK_CLASS(lock);
                                db_printf("blocked on lock %p (%s) \"%s\"\n",
                                    lock, class->lc_name, lock->lo_name);
                                if (ts->ts_owner == NULL)
                                        return;
                                td = ts->ts_owner;
                                break;
                        }
                        db_printf("inhibited\n");
                        return;
                default:
                        db_printf("??? (%#x)\n", td->td_state);
                        return;
                }
        }
}

DB_SHOW_COMMAND(lockchain, db_show_lockchain)
{
        struct thread *td;

        /* Figure out which thread to start with. */
        if (have_addr)
                td = db_lookup_thread(addr, TRUE);
        else
                td = kdb_thread;

        print_lockchain(td, "");
}

DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
{
        struct thread *td;
        struct proc *p;
        int i;

        i = 1;
        FOREACH_PROC_IN_SYSTEM(p) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        if (TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested)) {
                                db_printf("chain %d:\n", i++);
                                print_lockchain(td, " ");
                        }
                        if (db_pager_quit)
                                return;
                }
        }
}
DB_SHOW_ALIAS(allchains, db_show_allchains)

/*
 * Show all the threads a particular thread is waiting on based on
 * sleepable locks.
 */
static void
print_sleepchain(struct thread *td, const char *prefix)
{
        struct thread *owner;

        /*
         * Follow the chain.  We keep walking as long as the thread is
         * blocked on a sleep lock that has an owner.
         */
        while (!db_pager_quit) {
                db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
                    td->td_proc->p_pid, td->td_name);
                switch (td->td_state) {
                case TDS_INACTIVE:
                        db_printf("is inactive\n");
                        return;
                case TDS_CAN_RUN:
                        db_printf("can run\n");
                        return;
                case TDS_RUNQ:
                        db_printf("is on a run queue\n");
                        return;
                case TDS_RUNNING:
                        db_printf("running on CPU %d\n", td->td_oncpu);
                        return;
                case TDS_INHIBITED:
                        if (TD_ON_SLEEPQ(td)) {
                                if (lockmgr_chain(td, &owner) ||
                                    sx_chain(td, &owner)) {
                                        if (owner == NULL)
                                                return;
                                        td = owner;
                                        break;
                                }
                                db_printf("sleeping on %p \"%s\"\n",
                                    td->td_wchan, td->td_wmesg);
                                return;
                        }
                        db_printf("inhibited\n");
                        return;
                default:
                        db_printf("??? (%#x)\n", td->td_state);
                        return;
                }
        }
}

DB_SHOW_COMMAND(sleepchain, db_show_sleepchain)
{
        struct thread *td;

        /* Figure out which thread to start with. */
        if (have_addr)
                td = db_lookup_thread(addr, TRUE);
        else
                td = kdb_thread;

        print_sleepchain(td, "");
}

static void print_waiters(struct turnstile *ts, int indent);

static void
print_waiter(struct thread *td, int indent)
{
        struct turnstile *ts;
        int i;

        if (db_pager_quit)
                return;
        for (i = 0; i < indent; i++)
                db_printf(" ");
        print_thread(td, "thread ");
        LIST_FOREACH(ts, &td->td_contested, ts_link)
                print_waiters(ts, indent + 1);
}

static void
print_waiters(struct turnstile *ts, int indent)
{
        struct lock_object *lock;
        struct lock_class *class;
        struct thread *td;
        int i;

        if (db_pager_quit)
                return;
        lock = ts->ts_lockobj;
        class = LOCK_CLASS(lock);
        for (i = 0; i < indent; i++)
                db_printf(" ");
        db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
        TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
                print_waiter(td, indent + 1);
        TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
                print_waiter(td, indent + 1);
        TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
                print_waiter(td, indent + 1);
}

DB_SHOW_COMMAND(locktree, db_show_locktree)
{
        struct lock_object *lock;
        struct lock_class *class;
        struct turnstile_chain *tc;
        struct turnstile *ts;

        if (!have_addr)
                return;
        lock = (struct lock_object *)addr;
        tc = TC_LOOKUP(lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock)
                        break;
        if (ts == NULL) {
                class = LOCK_CLASS(lock);
                db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
                    lock->lo_name);
        } else
                print_waiters(ts, 0);
}
#endif