kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
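
/*
 * Illustrative note (editor's sketch, not part of the original file): the
 * lock word packs the owning thread pointer and the flag bits into a single
 * uintptr_t, which is why mtx_owner() can recover the owner by masking off
 * MTX_FLAGMASK. Roughly:
 *
 *	mtx_lock == MTX_UNOWNED			no owner
 *	mtx_lock == (uintptr_t)td | flags	owned by thread td, where
 *						flags is some subset of
 *						MTX_RECURSED and MTX_CONTESTED
 *
 * This encoding works because struct thread allocations are aligned strongly
 * enough that the low bits of a thread pointer are always zero.
 */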

static void	assert_mtx(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
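
/*
 * Illustrative sketch (editor's assumption about callers elsewhere in the
 * kernel, not code from this file): generic lock consumers such as the
 * sleep/synch code operate on locks through these class methods rather than
 * through mutex-specific calls, e.g.:
 *
 *	struct lock_class *class = LOCK_CLASS(lock);
 *	int how = class->lc_unlock(lock);	// drop the interlock
 *	...
 *	class->lc_lock(lock, how);		// reacquire it later
 *
 * For spin mutexes, lc_lock/lc_unlock panic above because sleeping while
 * holding a spin mutex is only legal via msleep_spin().
 */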

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(struct lock_object *lock, int what)
{

	mtx_assert((struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(struct lock_object *lock, struct thread **owner)
{
	struct mtx *m = (struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros. These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
}
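
/*
 * Example (editor's illustration): callers normally reach the two functions
 * above through the mtx_lock()/mtx_unlock() macros, which in debug kernels
 * (an assumption about sys/mutex.h, not shown here) expand to these _flags
 * variants with the caller's file and line baked in:
 *
 *	mtx_lock(&sc->sc_mtx);		// -> _mtx_lock_flags(&sc->sc_mtx, 0, ...)
 *	sc->sc_count++;			// state protected by sc_mtx
 *	mtx_unlock(&sc->sc_mtx);	// -> _mtx_unlock_flags(&sc->sc_mtx, 0, ...)
 *
 * where sc is a hypothetical driver softc owning a sleep mutex.
 */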

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
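
/*
 * Example (editor's illustration): since trylock never blocks, it is the
 * usual way to take a second mutex while already holding one, backing off
 * instead of risking a lock-order deadlock:
 *
 *	mtx_lock(&a);
 *	if (!mtx_trylock(&b)) {
 *		mtx_unlock(&a);		// back off, retry in a safe order
 *		mtx_lock(&b);
 *		mtx_lock(&a);
 *	}
 *
 * where a and b are hypothetical sleep mutexes with no established order.
 */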

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock. If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
#endif
}
#endif /* SMP */

void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
			    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
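
/*
 * Illustrative sketch (editor's assumption about scheduler callers, not
 * code from this file): the block/unblock pair above lets a thread be
 * parked on blocked_lock while it migrates between run queues:
 *
 *	struct mtx *old = thread_lock_block(td);	// td now "blocked"
 *	...move td to another queue or CPU...
 *	thread_lock_unblock(td, new_lock);		// publish new lock
 *
 * blocked_lock is initialized below so that it can never be acquired, so
 * anyone spinning in thread_lock() keeps spinning until the new lock
 * pointer is stored with release semantics.
 */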

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex. We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
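
/*
 * Example (editor's illustration): code that requires a caller-held mutex
 * typically documents the contract with mtx_assert(), which compiles away
 * in kernels built without INVARIANTS:
 *
 *	static void
 *	foo_intr(struct foo_softc *sc)	// hypothetical driver helper
 *	{
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		...
 *	}
 */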

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
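
/*
 * Example (editor's illustration): MTX_SYSINIT() arranges for mtx_sysinit()
 * to run during boot, so a subsystem can get a ready-to-use global mutex
 * without an explicit init call of its own:
 *
 *	static struct mtx foo_mtx;	// hypothetical subsystem lock
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo", MTX_DEF);
 */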

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.' The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}
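
/*
 * Example (editor's illustration): the usual lifecycle pairs mtx_init()
 * with mtx_destroy() once the lock can no longer be referenced:
 *
 *	struct mtx m;
 *	mtx_init(&m, "example", NULL, MTX_DEF | MTX_RECURSE);
 *	mtx_lock(&m);
 *	mtx_unlock(&m);
 *	mtx_destroy(&m);	// lock word becomes MTX_DESTROYED
 */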

/*
 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes. This is called from the MD
 * startup code prior to mi_startup(). The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif