subr_witness.c

/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier. Originally implemented for
 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes. Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl(). Basically, when a thread sleeps, it must release
 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious. Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock. The second
 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose
 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute. Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */

#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
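
/*
 * The low 16 bits of li_flags hold the recursion count; witness_lock()
 * increments li_flags for a recursive acquire and witness_unlock() reads
 * the current depth as (li_flags & LI_RECURSEMASK).
 */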

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	768

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks. We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048
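
/*
 * Together these bound the tracked state: LOCK_CHILDCOUNT list entries
 * of LOCK_NCHILDREN instances each, i.e. at most 10240 concurrently
 * held lock instances system-wide.
 */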

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
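
/*
 * For example, adopt() records w_rmatrix[pi][ci] |= WITNESS_PARENT and
 * w_rmatrix[ci][pi] |= WITNESS_CHILD for a direct edge. The macros
 * convert between the two directed views of such an edge:
 * WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT and
 * WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD.
 */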

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
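
/* Index 0 is deliberately never handed out; see witness_initialize(). */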

static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances. A lock instance is the data associated with a lock while
 * it is held by witness. For example, a lock instance will hold the
 * recursion count of a lock. Lock instances are held in lists. Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU. We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock. Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically. To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
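
/*
 * For example, with ll_count == 3 the most recent acquisition is
 * ll_children[2] and the oldest is ll_children[0], so traversals such as
 * witness_checkorder() scan from ll_children[ll_count - 1] down to
 * ll_children[0].
 */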

/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;	/* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}
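
/*
 * Note that keys are directional: (from, to) and (to, from) are distinct
 * keys, one for each observed acquisition order.
 */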

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
    "Witness Locking");

/*
 * If set to 0, lock order checking is disabled. If set to -1,
 * witness is completely disabled. Otherwise witness performs full
 * lock order checking for all locks. At runtime, lock order checking
 * may be toggled. However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 * - a lock hierarchy violation occurs
 * - locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 * - a lock hierarchy violation occurs
 * - locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";

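/*
 * Static lock order lists. Each run of entries, terminated by a
 * { NULL, NULL } pair, names witnesses in their required acquisition
 * order; witness_initialize() enrolls each run and links consecutive
 * entries with itismychild(). The table ends with two consecutive
 * NULL entries.
 */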
static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ "time lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_link_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_rw },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "vm map (system)", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * VM
	 */
	{ "vm map (user)", &lock_class_sx },
	{ "vm object", &lock_class_mtx_sleep },
	{ "vm page", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "pmap pv global", &lock_class_rw },
	{ "pmap", &lock_class_mtx_sleep },
	{ "pmap pv list", &lock_class_rw },
	{ "vm page free queue", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
    sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
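
/*
 * For example, a __FILE__ of "../../../kern/subr_witness.c" is reported
 * as "kern/subr_witness.c".
 */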

/*
 * The WITNESS-enabled diagnostic code. Note that the witness code
 * assumes that early boot is single-threaded, at least until after
 * this routine has completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* Witness with index 0 is not used to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list. If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (db_pager_quit)
			return;
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
		if (db_pager_quit)
			return;
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);
	if (db_pager_quit)
		return;

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);
	if (db_pager_quit)
		return;

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
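
/*
 * Illustrative sketch (not a call site in this file): a subsystem with
 * two witnessed mutexes "a" and "b" could record "a before b" up front,
 * instead of waiting for the order to be observed:
 *
 *	if (witness_defineorder(&a->lock_object, &b->lock_object) != 0)
 *		printf("conflicting lock order already recorded\n");
 */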

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed. Pin the thread to avoid problems
		 * with thread migration while checking whether spinlocks
		 * are held. If at least one spinlock is held, the thread
		 * is on a safe path, so it may be unpinned.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own. If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock. If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type. Note that we only
	 * have to check for this on the last lock we just acquired. Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail. We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order. However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction. The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch we can end up with
	 * locks still registered in the td_sleeplocks queue.
	 * Make sure we flush these queues by searching for any such
	 * leftover locks and removing them.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * To reduce contention on w_mtx, we want to always keep a head
	 * object in the lists so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.
	 * To keep the code simple, an emptied head object also means that
	 * there are no further objects in the list, so list ownership is
	 * handed over to another object if the current head needs to be
	 * freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i],
				    printf);
			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
	td->td_sleeplocks = NULL;
}

/*
 * Warn if any locks other than 'lock' are held. Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well. If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks. If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

	/*
	 * Pin the thread to avoid problems with thread migration.
	 * Once all checks on spinlock ownership have passed, the thread
	 * is on a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and, as long as the
		 * flags cannot match for this lock's class, check whether
		 * the first spinlock is the one curthread should hold.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
		    lock1->li_lock == lock && n == 0)
			return (0);

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf(" with the following");
		if (flags & WARN_SLEEPOK)
			printf(" non-sleepable");
		printf(" locks held:\n");
		n += witness_list_locks(&lock_list, printf);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}
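
/*
 * Illustrative sketch (not a call site in this file): a caller about to
 * sleep might check itself with
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping with locks held");
 *
 * which prints the message and the offending locks only when something
 * other than Giant or a sleepable lock is held, and panics instead if
 * WARN_PANIC is included in the flags.
 */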

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}
1733 
1734 static struct witness *
1735 enroll(const char *description, struct lock_class *lock_class)
1736 {
1737  struct witness *w;
1738  struct witness_list *typelist;
1739 
1740  MPASS(description != NULL);
1741 
1742  if (witness_watch == -1 || panicstr != NULL)
1743  return (NULL);
1744  if ((lock_class->lc_flags & LC_SPINLOCK)) {
1745  if (witness_skipspin)
1746  return (NULL);
1747  else
1748  typelist = &w_spin;
1749  } else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1750  typelist = &w_sleep;
1751  else
1752  panic("lock class %s is not sleep or spin",
1753  lock_class->lc_name);
1754 
1755  mtx_lock_spin(&w_mtx);
1756  w = witness_hash_get(description);
1757  if (w)
1758  goto found;
1759  if ((w = witness_get()) == NULL)
1760  return (NULL);
1761  MPASS(strlen(description) < MAX_W_NAME);
1762  strcpy(w->w_name, description);
1763  w->w_class = lock_class;
1764  w->w_refcount = 1;
1765  STAILQ_INSERT_HEAD(&w_all, w, w_list);
1766  if (lock_class->lc_flags & LC_SPINLOCK) {
1767  STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1768  w_spin_cnt++;
1769  } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1770  STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1771  w_sleep_cnt++;
1772  }
1773 
1774  /* Insert new witness into the hash */
1775  witness_hash_put(w);
1776 	witness_increment_graph_generation();
1777  mtx_unlock_spin(&w_mtx);
1778  return (w);
1779 found:
1780  w->w_refcount++;
1781  mtx_unlock_spin(&w_mtx);
1782  if (lock_class != w->w_class)
1783  panic(
1784  "lock (%s) %s does not match earlier (%s) lock",
1785  description, lock_class->lc_name,
1786  w->w_class->lc_name);
1787  return (w);
1788 }
1789 
1790 static void
1791 depart(struct witness *w)
1792 {
1793  struct witness_list *list;
1794 
1795  MPASS(w->w_refcount == 0);
1796  if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1797  list = &w_sleep;
1798  w_sleep_cnt--;
1799  } else {
1800  list = &w_spin;
1801  w_spin_cnt--;
1802  }
1803  /*
1804  * Set file to NULL as it may point into a loadable module.
1805  */
1806  w->w_file = NULL;
1807  w->w_line = 0;
1808 	witness_increment_graph_generation();
1809 }
1810 
1811 
1812 static void
1813 adopt(struct witness *parent, struct witness *child)
1814 {
1815  int pi, ci, i, j;
1816 
1817  if (witness_cold == 0)
1818  mtx_assert(&w_mtx, MA_OWNED);
1819 
1820  /* If the relationship is already known, there's no work to be done. */
1821  if (isitmychild(parent, child))
1822  return;
1823 
1824  /* When the structure of the graph changes, bump up the generation. */
1825 	witness_increment_graph_generation();
1826 
1827  /*
1828  * The hard part ... create the direct relationship, then propagate all
1829  * indirect relationships.
1830  */
1831  pi = parent->w_index;
1832  ci = child->w_index;
1833 	WITNESS_INDEX_ASSERT(pi);
1834 	WITNESS_INDEX_ASSERT(ci);
1835  MPASS(pi != ci);
1836  w_rmatrix[pi][ci] |= WITNESS_PARENT;
1837  w_rmatrix[ci][pi] |= WITNESS_CHILD;
1838 
1839  /*
1840  * If parent was not already an ancestor of child,
1841  * then we increment the descendant and ancestor counters.
1842  */
1843  if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1844  parent->w_num_descendants++;
1845  child->w_num_ancestors++;
1846  }
1847 
1848  /*
1849  * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1850  * an ancestor of 'pi' during this loop.
1851  */
1852  for (i = 1; i <= w_max_used_index; i++) {
1853  if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1854  (i != pi))
1855  continue;
1856 
1857  /* Find each descendant of 'i' and mark it as a descendant. */
1858  for (j = 1; j <= w_max_used_index; j++) {
1859 
1860  /*
1861  * Skip children that are already marked as
1862  * descendants of 'i'.
1863  */
1864  if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1865  continue;
1866 
1867  /*
1868  * We are only interested in descendants of 'ci'. Note
1869  * that 'ci' itself is counted as a descendant of 'ci'.
1870  */
1871  if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1872  (j != ci))
1873  continue;
1874  w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1875  w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1876  w_data[i].w_num_descendants++;
1877  w_data[j].w_num_ancestors++;
1878 
1879  /*
1880  * Make sure we aren't marking a node as both an
1881  * ancestor and descendant. We should have caught
1882  * this as a lock order reversal earlier.
1883  */
1884  if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1885  (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1886  printf("witness rmatrix paradox! [%d][%d]=%d "
1887  "both ancestor and descendant\n",
1888  i, j, w_rmatrix[i][j]);
1889  kdb_backtrace();
1890  printf("Witness disabled.\n");
1891  witness_watch = -1;
1892  }
1893  if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1894  (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1895  printf("witness rmatrix paradox! [%d][%d]=%d "
1896  "both ancestor and descendant\n",
1897  j, i, w_rmatrix[j][i]);
1898  kdb_backtrace();
1899  printf("Witness disabled.\n");
1900  witness_watch = -1;
1901  }
1902  }
1903  }
1904 }
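
The double loop above is a transitive-closure update on the relationship matrix. A self-contained sketch of the same idea (simplified to a single "ancestor" bit and a hypothetical fixed-size matrix) may make the propagation easier to follow:

/*
 * Simplified sketch of the closure update in adopt(): after the direct
 * edge pi -> ci is added, every ancestor of pi (including pi) becomes
 * an ancestor of every descendant of ci (including ci).
 */
#define	SKETCH_N	8			/* hypothetical node count */
static unsigned char anc[SKETCH_N][SKETCH_N];	/* anc[a][d]: a precedes d */

static void
sketch_adopt(int pi, int ci)
{
	int i, j;

	anc[pi][ci] = 1;		/* the new direct edge */
	for (i = 1; i < SKETCH_N; i++) {
		if (anc[i][pi] == 0 && i != pi)
			continue;	/* i is not an ancestor of pi */
		for (j = 1; j < SKETCH_N; j++) {
			if (anc[i][j] != 0)
				continue;	/* already known */
			if (anc[ci][j] == 0 && j != ci)
				continue;	/* j not a descendant of ci */
			anc[i][j] = 1;
		}
	}
}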
1905 
1906 static void
1907 itismychild(struct witness *parent, struct witness *child)
1908 {
1909 
1910  MPASS(child != NULL && parent != NULL);
1911  if (witness_cold == 0)
1912  mtx_assert(&w_mtx, MA_OWNED);
1913 
1914  if (!witness_lock_type_equal(parent, child)) {
1915  if (witness_cold == 0)
1916  mtx_unlock_spin(&w_mtx);
1917  panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1918  "the same lock type", __func__, parent->w_name,
1919  parent->w_class->lc_name, child->w_name,
1920  child->w_class->lc_name);
1921  }
1922  adopt(parent, child);
1923 }
1924 
1925 /*
1926  * Generic code for the isitmy*() functions. The rmask parameter is the
1927  * expected relationship of w1 to w2.
1928  */
1929 static int
1930 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1931 {
1932  unsigned char r1, r2;
1933  int i1, i2;
1934 
1935  i1 = w1->w_index;
1936  i2 = w2->w_index;
1937 	WITNESS_INDEX_ASSERT(i1);
1938 	WITNESS_INDEX_ASSERT(i2);
1939  r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1940  r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1941 
1942  /* The flags on one better be the inverse of the flags on the other */
1943  if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1944  (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1945  printf("%s: rmatrix mismatch between %s (index %d) and %s "
1946  "(index %d): w_rmatrix[%d][%d] == %hhx but "
1947  "w_rmatrix[%d][%d] == %hhx\n",
1948  fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1949  i2, i1, r2);
1950  kdb_backtrace();
1951  printf("Witness disabled.\n");
1952  witness_watch = -1;
1953  }
1954  return (r1 & rmask);
1955 }
1956 
1957 /*
1958  * Checks if @child is a direct child of @parent.
1959  */
1960 static int
1961 isitmychild(struct witness *parent, struct witness *child)
1962 {
1963 
1964  return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1965 }
1966 
1967 /*
1968 	 * Checks if @descendant is a direct or indirect descendant of @ancestor.
1969  */
1970 static int
1971 isitmydescendant(struct witness *ancestor, struct witness *descendant)
1972 {
1973 
1974  return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1975  __func__));
1976 }
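
The two wrappers differ only in the mask they pass to _isitmyx(). For a hypothetical chain of witnesses A -> B -> C:

/*
 * Mask semantics (hypothetical witnesses A -> B -> C):
 *   isitmychild(A, B)      != 0	WITNESS_PARENT matches the direct edge
 *   isitmychild(A, C)      == 0	no direct edge from A to C
 *   isitmydescendant(A, C) != 0	WITNESS_ANCESTOR_MASK also matches
 *					indirect (propagated) relationships
 */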
1977 
1978 #ifdef BLESSING
1979 static int
1980 blessed(struct witness *w1, struct witness *w2)
1981 {
1982  int i;
1983  struct witness_blessed *b;
1984 
1985  for (i = 0; i < blessed_count; i++) {
1986  b = &blessed_list[i];
1987  if (strcmp(w1->w_name, b->b_lock1) == 0) {
1988  if (strcmp(w2->w_name, b->b_lock2) == 0)
1989  return (1);
1990  continue;
1991  }
1992  if (strcmp(w1->w_name, b->b_lock2) == 0)
1993  if (strcmp(w2->w_name, b->b_lock1) == 0)
1994  return (1);
1995  }
1996  return (0);
1997 }
1998 #endif
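
For reference, a blessed pair is declared purely by witness name; the b_lock1/b_lock2 fields compared above come from the blessed_list table defined earlier in this file. A hypothetical sketch of such a table:

#ifdef BLESSING
/*
 * Sketch (hypothetical lock names): the two witnesses below could then
 * be acquired in either order without a reversal being reported.
 */
static struct witness_blessed blessed_list_sketch[] = {
	{ "example_lock_a", "example_lock_b" },
};
#endif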
1999 
2000 static struct witness *
2001 witness_get(void)
2002 {
2003  struct witness *w;
2004  int index;
2005 
2006  if (witness_cold == 0)
2007  mtx_assert(&w_mtx, MA_OWNED);
2008 
2009  if (witness_watch == -1) {
2010  mtx_unlock_spin(&w_mtx);
2011  return (NULL);
2012  }
2013  if (STAILQ_EMPTY(&w_free)) {
2014  witness_watch = -1;
2015  mtx_unlock_spin(&w_mtx);
2016  printf("WITNESS: unable to allocate a new witness object\n");
2017  return (NULL);
2018  }
2019  w = STAILQ_FIRST(&w_free);
2020  STAILQ_REMOVE_HEAD(&w_free, w_list);
2021  w_free_cnt--;
2022  index = w->w_index;
2023  MPASS(index > 0 && index == w_max_used_index+1 &&
2024  index < WITNESS_COUNT);
2025  bzero(w, sizeof(*w));
2026  w->w_index = index;
2027  if (index > w_max_used_index)
2028  w_max_used_index = index;
2029  return (w);
2030 }
2031 
2032 static void
2033 witness_free(struct witness *w)
2034 {
2035 
2036  STAILQ_INSERT_HEAD(&w_free, w, w_list);
2037  w_free_cnt++;
2038 }
2039 
2040 static struct lock_list_entry *
2041 witness_lock_list_get(void)
2042 {
2043  struct lock_list_entry *lle;
2044 
2045  if (witness_watch == -1)
2046  return (NULL);
2047  mtx_lock_spin(&w_mtx);
2048  lle = w_lock_list_free;
2049  if (lle == NULL) {
2050  witness_watch = -1;
2051  mtx_unlock_spin(&w_mtx);
2052  printf("%s: witness exhausted\n", __func__);
2053  return (NULL);
2054  }
2055  w_lock_list_free = lle->ll_next;
2056  mtx_unlock_spin(&w_mtx);
2057  bzero(lle, sizeof(*lle));
2058  return (lle);
2059 }
2060 
2061 static void
2062 witness_lock_list_free(struct lock_list_entry *lle)
2063 {
2064 
2065  mtx_lock_spin(&w_mtx);
2066  lle->ll_next = w_lock_list_free;
2067  w_lock_list_free = lle;
2068  mtx_unlock_spin(&w_mtx);
2069 }
2070 
2071 static struct lock_instance *
2072 find_instance(struct lock_list_entry *list, struct lock_object *lock)
2073 {
2074  struct lock_list_entry *lle;
2075  struct lock_instance *instance;
2076  int i;
2077 
2078  for (lle = list; lle != NULL; lle = lle->ll_next)
2079  for (i = lle->ll_count - 1; i >= 0; i--) {
2080  instance = &lle->ll_children[i];
2081  if (instance->li_lock == lock)
2082  return (instance);
2083  }
2084  return (NULL);
2085 }
2086 
2087 static void
2088 witness_list_lock(struct lock_instance *instance,
2089  int (*prnt)(const char *fmt, ...))
2090 {
2091  struct lock_object *lock;
2092 
2093  lock = instance->li_lock;
2094  prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2095  "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2096  if (lock->lo_witness->w_name != lock->lo_name)
2097  prnt(" (%s)", lock->lo_witness->w_name);
2098  prnt(" r = %d (%p) locked @ %s:%d\n",
2099  instance->li_flags & LI_RECURSEMASK, lock,
2100  fixup_filename(instance->li_file), instance->li_line);
2101 }
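
The printed line has a fixed shape. A representative example with hypothetical values, for an exclusively held, non-recursed sleep mutex whose witness name matches its lock name (so the parenthesized witness name is omitted):

/*
 * Example output (hypothetical values):
 *   exclusive sleep mutex example_mtx r = 0 (0xdeadc0de) locked @ kern/example.c:42
 */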
2102 
2103 #ifdef DDB
2104 static int
2105 witness_thread_has_locks(struct thread *td)
2106 {
2107 
2108  if (td->td_sleeplocks == NULL)
2109  return (0);
2110  return (td->td_sleeplocks->ll_count != 0);
2111 }
2112 
2113 static int
2114 witness_proc_has_locks(struct proc *p)
2115 {
2116  struct thread *td;
2117 
2118  FOREACH_THREAD_IN_PROC(p, td) {
2119  if (witness_thread_has_locks(td))
2120  return (1);
2121  }
2122  return (0);
2123 }
2124 #endif
2125 
2126 int
2127 witness_list_locks(struct lock_list_entry **lock_list,
2128  int (*prnt)(const char *fmt, ...))
2129 {
2130  struct lock_list_entry *lle;
2131  int i, nheld;
2132 
2133  nheld = 0;
2134  for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2135  for (i = lle->ll_count - 1; i >= 0; i--) {
2136  witness_list_lock(&lle->ll_children[i], prnt);
2137  nheld++;
2138  }
2139  return (nheld);
2140 }
2141 
2142 /*
2143  * This is a bit risky at best. We call this function when we have timed
2144  * out acquiring a spin lock, and we assume that the other CPU is stuck
2145  * with this lock held. So, we go groveling around in the other CPU's
2146  * per-cpu data to try to find the lock instance for this spin lock to
2147  * see when it was last acquired.
2148  */
2149 void
2150 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2151  int (*prnt)(const char *fmt, ...))
2152 {
2153  struct lock_instance *instance;
2154  struct pcpu *pc;
2155 
2156  if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2157  return;
2158  pc = pcpu_find(owner->td_oncpu);
2159  instance = find_instance(pc->pc_spinlocks, lock);
2160  if (instance != NULL)
2161  witness_list_lock(instance, prnt);
2162 }
2163 
2164 void
2165 witness_save(struct lock_object *lock, const char **filep, int *linep)
2166 {
2167  struct lock_list_entry *lock_list;
2168  struct lock_instance *instance;
2169  struct lock_class *class;
2170 
2171  /*
2172 	 * This function is used independently in locking code to deal with
2173 	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2174 	 * Giant is gone.
2175  */
2176  if (SCHEDULER_STOPPED())
2177  return;
2178  KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2179  if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2180  return;
2181  class = LOCK_CLASS(lock);
2182  if (class->lc_flags & LC_SLEEPLOCK)
2183  lock_list = curthread->td_sleeplocks;
2184  else {
2185  if (witness_skipspin)
2186  return;
2187  lock_list = PCPU_GET(spinlocks);
2188  }
2189  instance = find_instance(lock_list, lock);
2190  if (instance == NULL)
2191  panic("%s: lock (%s) %s not locked", __func__,
2192  class->lc_name, lock->lo_name);
2193  *filep = instance->li_file;
2194  *linep = instance->li_line;
2195 }
2196 
2197 void
2198 witness_restore(struct lock_object *lock, const char *file, int line)
2199 {
2200  struct lock_list_entry *lock_list;
2201  struct lock_instance *instance;
2202  struct lock_class *class;
2203 
2204  /*
2205 	 * This function is used independently in locking code to deal with
2206 	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2207 	 * Giant is gone.
2208  */
2209  if (SCHEDULER_STOPPED())
2210  return;
2211  KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2212  if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2213  return;
2214  class = LOCK_CLASS(lock);
2215  if (class->lc_flags & LC_SLEEPLOCK)
2216  lock_list = curthread->td_sleeplocks;
2217  else {
2218  if (witness_skipspin)
2219  return;
2220  lock_list = PCPU_GET(spinlocks);
2221  }
2222  instance = find_instance(lock_list, lock);
2223  if (instance == NULL)
2224  panic("%s: lock (%s) %s not locked", __func__,
2225  class->lc_name, lock->lo_name);
2226  lock->lo_witness->w_file = file;
2227  lock->lo_witness->w_line = line;
2228  instance->li_file = file;
2229  instance->li_line = line;
2230 }
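
witness_save() and witness_restore() are used as a pair around code that drops and re-acquires a lock, so that the file and line reported afterwards still name the original acquisition site. A minimal sketch (hypothetical helper; the kernel normally uses the WITNESS_SAVE()/WITNESS_RESTORE() macros), assuming Giant is held on entry:

static void
example_drop_and_reacquire(void)
{
	const char *file;
	int line;

	witness_save(&Giant.lock_object, &file, &line);
	mtx_unlock(&Giant);
	/* ... run without Giant held ... */
	mtx_lock(&Giant);
	witness_restore(&Giant.lock_object, file, line);
}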
2231 
2232 void
2233 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2234 {
2235 #ifdef INVARIANT_SUPPORT
2236  struct lock_instance *instance;
2237  struct lock_class *class;
2238 
2239  if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2240  return;
2241  class = LOCK_CLASS(lock);
2242  if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2243  instance = find_instance(curthread->td_sleeplocks, lock);
2244  else if ((class->lc_flags & LC_SPINLOCK) != 0)
2245  instance = find_instance(PCPU_GET(spinlocks), lock);
2246  else {
2247  panic("Lock (%s) %s is not sleep or spin!",
2248  class->lc_name, lock->lo_name);
2249  }
2250  switch (flags) {
2251  case LA_UNLOCKED:
2252  if (instance != NULL)
2253  panic("Lock (%s) %s locked @ %s:%d.",
2254  class->lc_name, lock->lo_name,
2255  fixup_filename(file), line);
2256  break;
2257  case LA_LOCKED:
2258  case LA_LOCKED | LA_RECURSED:
2259  case LA_LOCKED | LA_NOTRECURSED:
2260  case LA_SLOCKED:
2261  case LA_SLOCKED | LA_RECURSED:
2262  case LA_SLOCKED | LA_NOTRECURSED:
2263  case LA_XLOCKED:
2264  case LA_XLOCKED | LA_RECURSED:
2265  case LA_XLOCKED | LA_NOTRECURSED:
2266  if (instance == NULL) {
2267  panic("Lock (%s) %s not locked @ %s:%d.",
2268  class->lc_name, lock->lo_name,
2269  fixup_filename(file), line);
2270  break;
2271  }
2272  if ((flags & LA_XLOCKED) != 0 &&
2273  (instance->li_flags & LI_EXCLUSIVE) == 0)
2274  panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2275  class->lc_name, lock->lo_name,
2276  fixup_filename(file), line);
2277  if ((flags & LA_SLOCKED) != 0 &&
2278  (instance->li_flags & LI_EXCLUSIVE) != 0)
2279  panic("Lock (%s) %s exclusively locked @ %s:%d.",
2280  class->lc_name, lock->lo_name,
2281  fixup_filename(file), line);
2282  if ((flags & LA_RECURSED) != 0 &&
2283  (instance->li_flags & LI_RECURSEMASK) == 0)
2284  panic("Lock (%s) %s not recursed @ %s:%d.",
2285  class->lc_name, lock->lo_name,
2286  fixup_filename(file), line);
2287  if ((flags & LA_NOTRECURSED) != 0 &&
2288  (instance->li_flags & LI_RECURSEMASK) != 0)
2289  panic("Lock (%s) %s recursed @ %s:%d.",
2290  class->lc_name, lock->lo_name,
2291  fixup_filename(file), line);
2292  break;
2293  default:
2294  panic("Invalid lock assertion at %s:%d.",
2295  fixup_filename(file), line);
2296 
2297  }
2298 #endif /* INVARIANT_SUPPORT */
2299 }
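
Lock classes normally reach this through their own assertion macros (mtx_assert(), sx_assert(), and friends). A direct invocation might look like the following sketch (hypothetical helper):

/* Sketch: panic unless the mutex is exclusively held at this point. */
static void
example_assert_xlocked(struct mtx *m)
{

	witness_assert(&m->lock_object, LA_XLOCKED, __FILE__, __LINE__);
}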
2300 
2301 static void
2302 witness_setflag(struct lock_object *lock, int flag, int set)
2303 {
2304  struct lock_list_entry *lock_list;
2305  struct lock_instance *instance;
2306  struct lock_class *class;
2307 
2308  if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2309  return;
2310  class = LOCK_CLASS(lock);
2311  if (class->lc_flags & LC_SLEEPLOCK)
2312  lock_list = curthread->td_sleeplocks;
2313  else {
2314  if (witness_skipspin)
2315  return;
2316  lock_list = PCPU_GET(spinlocks);
2317  }
2318  instance = find_instance(lock_list, lock);
2319  if (instance == NULL)
2320  panic("%s: lock (%s) %s not locked", __func__,
2321  class->lc_name, lock->lo_name);
2322 
2323  if (set)
2324  instance->li_flags |= flag;
2325  else
2326  instance->li_flags &= ~flag;
2327 }
2328 
2329 void
2330 witness_norelease(struct lock_object *lock)
2331 {
2332 
2333  witness_setflag(lock, LI_NORELEASE, 1);
2334 }
2335 
2336 void
2337 witness_releaseok(struct lock_object *lock)
2338 {
2339 
2340  witness_setflag(lock, LI_NORELEASE, 0);
2341 }
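
The pair above brackets a region in which dropping the lock would be a bug: the unlock path objects to releasing an instance that still carries LI_NORELEASE. A minimal sketch (hypothetical helper and callee):

static void example_callee_that_must_not_unlock(void);	/* hypothetical */

static void
example_call_with_lock_pinned(struct mtx *m)
{

	witness_norelease(&m->lock_object);
	example_callee_that_must_not_unlock();
	witness_releaseok(&m->lock_object);
}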
2342 
2343 #ifdef DDB
2344 static void
2345 witness_ddb_list(struct thread *td)
2346 {
2347 
2348  KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2349  KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2350 
2351  if (witness_watch < 1)
2352  return;
2353 
2354  witness_list_locks(&td->td_sleeplocks, db_printf);
2355 
2356  /*
2357  * We only handle spinlocks if td == curthread. This is somewhat broken
2358 	 * if td is currently executing on some other CPU and holds spin locks,
2359  * as we won't display those locks. If we had a MI way of getting
2360  * the per-cpu data for a given cpu then we could use
2361  * td->td_oncpu to get the list of spinlocks for this thread
2362  * and "fix" this.
2363  *
2364  * That still wouldn't really fix this unless we locked the scheduler
2365  * lock or stopped the other CPU to make sure it wasn't changing the
2366  * list out from under us. It is probably best to just not try to
2367 	 * handle threads on other CPUs for now.
2368  */
2369  if (td == curthread && PCPU_GET(spinlocks) != NULL)
2370  witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2371 }
2372 
2373 DB_SHOW_COMMAND(locks, db_witness_list)
2374 {
2375  struct thread *td;
2376 
2377  if (have_addr)
2378  td = db_lookup_thread(addr, TRUE);
2379  else
2380  td = kdb_thread;
2381  witness_ddb_list(td);
2382 }
2383 
2384 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2385 {
2386  struct thread *td;
2387  struct proc *p;
2388 
2389  /*
2390  * It would be nice to list only threads and processes that actually
2391 	 * hold sleep locks, but that information is currently not exported
2392  * by WITNESS.
2393  */
2394  FOREACH_PROC_IN_SYSTEM(p) {
2395  if (!witness_proc_has_locks(p))
2396  continue;
2397  FOREACH_THREAD_IN_PROC(p, td) {
2398  if (!witness_thread_has_locks(td))
2399  continue;
2400  db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2401  p->p_comm, td, td->td_tid);
2402  witness_ddb_list(td);
2403  if (db_pager_quit)
2404  return;
2405  }
2406  }
2407 }
2408 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2409 
2410 DB_SHOW_COMMAND(witness, db_witness_display)
2411 {
2412 
2413  witness_ddb_display(db_printf);
2414 }
2415 #endif
2416 
2417 static int
2418 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2419 {
2420  struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2421  struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2422  struct sbuf *sb;
2423  u_int w_rmatrix1, w_rmatrix2;
2424  int error, generation, i, j;
2425 
2426  tmp_data1 = NULL;
2427  tmp_data2 = NULL;
2428  tmp_w1 = NULL;
2429  tmp_w2 = NULL;
2430  if (witness_watch < 1) {
2431  error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2432  return (error);
2433  }
2434  if (witness_cold) {
2435  error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2436  return (error);
2437  }
2438  error = 0;
2439  sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2440  if (sb == NULL)
2441  return (ENOMEM);
2442 
2443  /* Allocate and init temporary storage space. */
2444  tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2445  tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2446  tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2447  M_WAITOK | M_ZERO);
2448  tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2449  M_WAITOK | M_ZERO);
2450  stack_zero(&tmp_data1->wlod_stack);
2451  stack_zero(&tmp_data2->wlod_stack);
2452 
2453 restart:
2454  mtx_lock_spin(&w_mtx);
2455  generation = w_generation;
2456  mtx_unlock_spin(&w_mtx);
2457  sbuf_printf(sb, "Number of known direct relationships is %d\n",
2458 	    w_lohash.wloh_count);
2459  for (i = 1; i < w_max_used_index; i++) {
2460  mtx_lock_spin(&w_mtx);
2461  if (generation != w_generation) {
2462  mtx_unlock_spin(&w_mtx);
2463 
2464  /* The graph has changed, try again. */
2465  req->oldidx = 0;
2466  sbuf_clear(sb);
2467  goto restart;
2468  }
2469 
2470  w1 = &w_data[i];
2471  if (w1->w_reversed == 0) {
2472  mtx_unlock_spin(&w_mtx);
2473  continue;
2474  }
2475 
2476  /* Copy w1 locally so we can release the spin lock. */
2477  *tmp_w1 = *w1;
2478  mtx_unlock_spin(&w_mtx);
2479 
2480  if (tmp_w1->w_reversed == 0)
2481  continue;
2482  for (j = 1; j < w_max_used_index; j++) {
2483  if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2484  continue;
2485 
2486  mtx_lock_spin(&w_mtx);
2487  if (generation != w_generation) {
2488  mtx_unlock_spin(&w_mtx);
2489 
2490  /* The graph has changed, try again. */
2491  req->oldidx = 0;
2492  sbuf_clear(sb);
2493  goto restart;
2494  }
2495 
2496  w2 = &w_data[j];
2497  data1 = witness_lock_order_get(w1, w2);
2498  data2 = witness_lock_order_get(w2, w1);
2499 
2500  /*
2501  * Copy information locally so we can release the
2502  * spin lock.
2503  */
2504  *tmp_w2 = *w2;
2505  w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2506  w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2507 
2508  if (data1) {
2509  stack_zero(&tmp_data1->wlod_stack);
2510  stack_copy(&data1->wlod_stack,
2511  &tmp_data1->wlod_stack);
2512  }
2513  if (data2 && data2 != data1) {
2514  stack_zero(&tmp_data2->wlod_stack);
2515  stack_copy(&data2->wlod_stack,
2516  &tmp_data2->wlod_stack);
2517  }
2518  mtx_unlock_spin(&w_mtx);
2519 
2520  sbuf_printf(sb,
2521  "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2522  tmp_w1->w_name, tmp_w1->w_class->lc_name,
2523  tmp_w2->w_name, tmp_w2->w_class->lc_name);
2524 #if 0
2525  sbuf_printf(sb,
2526  "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2527 	    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2528 	    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2529 #endif
2530  if (data1) {
2531  sbuf_printf(sb,
2532  "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2533  tmp_w1->w_name, tmp_w1->w_class->lc_name,
2534  tmp_w2->w_name, tmp_w2->w_class->lc_name);
2535  stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2536  sbuf_printf(sb, "\n");
2537  }
2538  if (data2 && data2 != data1) {
2539  sbuf_printf(sb,
2540  "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2541  tmp_w2->w_name, tmp_w2->w_class->lc_name,
2542  tmp_w1->w_name, tmp_w1->w_class->lc_name);
2543  stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2544  sbuf_printf(sb, "\n");
2545  }
2546  }
2547  }
2548  mtx_lock_spin(&w_mtx);
2549  if (generation != w_generation) {
2550  mtx_unlock_spin(&w_mtx);
2551 
2552  /*
2553  * The graph changed while we were printing stack data,
2554  * try again.
2555  */
2556  req->oldidx = 0;
2557  sbuf_clear(sb);
2558  goto restart;
2559  }
2560  mtx_unlock_spin(&w_mtx);
2561 
2562  /* Free temporary storage space. */
2563  free(tmp_data1, M_TEMP);
2564  free(tmp_data2, M_TEMP);
2565  free(tmp_w1, M_TEMP);
2566  free(tmp_w2, M_TEMP);
2567 
2568  sbuf_finish(sb);
2569  error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2570  sbuf_delete(sb);
2571 
2572  return (error);
2573 }
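
In a running kernel this handler backs a read-only string sysctl, presumably exported as debug.witness.badstacks to match the handler name, so the reversal report above can be pulled from userland with a plain sysctl(8) query of that OID.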
2574 
2575 static int
2576 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2577 {
2578  struct witness *w;
2579  struct sbuf *sb;
2580  int error;
2581 
2582  if (witness_watch < 1) {
2583  error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2584  return (error);
2585  }
2586  if (witness_cold) {
2587  error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2588  return (error);
2589  }
2590  error = 0;
2591 
2592  error = sysctl_wire_old_buffer(req, 0);
2593  if (error != 0)
2594  return (error);
2595  sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2596  if (sb == NULL)
2597  return (ENOMEM);
2598  sbuf_printf(sb, "\n");
2599 
2600  mtx_lock_spin(&w_mtx);
2601  STAILQ_FOREACH(w, &w_all, w_list)
2602  w->w_displayed = 0;
2603  STAILQ_FOREACH(w, &w_all, w_list)
2604  witness_add_fullgraph(sb, w);
2605  mtx_unlock_spin(&w_mtx);
2606 
2607  /*
2608  * Close the sbuf and return to userland.
2609  */
2610  error = sbuf_finish(sb);
2611  sbuf_delete(sb);
2612 
2613  return (error);
2614 }
2615 
2616 static int
2617 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2618 {
2619  int error, value;
2620 
2621  value = witness_watch;
2622  error = sysctl_handle_int(oidp, &value, 0, req);
2623  if (error != 0 || req->newptr == NULL)
2624  return (error);
2625  if (value > 1 || value < -1 ||
2626  (witness_watch == -1 && value != witness_watch))
2627  return (EINVAL);
2628  witness_watch = value;
2629  return (0);
2630 }
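
The handler enforces the knob's contract: values outside [-1, 1] are rejected, and once witness has been disabled with -1 the value can no longer be changed. A userland sketch using sysctlbyname(3), assuming the debug.witness.watch OID this handler is attached to elsewhere in this file:

/* Userland sketch: lower debug.witness.watch to 0 (suspend checking). */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int oldval, newval = 0;
	size_t oldlen = sizeof(oldval);

	if (sysctlbyname("debug.witness.watch", &oldval, &oldlen,
	    &newval, sizeof(newval)) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("debug.witness.watch: %d -> %d\n", oldval, newval);
	return (0);
}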
2631 
2632 static void
2633 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2634 {
2635  int i;
2636 
2637  if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2638  return;
2639  w->w_displayed = 1;
2640 
2641 	WITNESS_INDEX_ASSERT(w->w_index);
2642  for (i = 1; i <= w_max_used_index; i++) {
2643  if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2644  sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2645  w_data[i].w_name);
2646  witness_add_fullgraph(sb, &w_data[i]);
2647  }
2648  }
2649 }
2650 
2651 /*
2652  * A simple hash function. Takes a key pointer and a key size. If size == 0,
2653  * interprets the key as a string and reads until the null
2654  * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2655  * hash value computed from the key.
2656  */
2657 static uint32_t
2658 witness_hash_djb2(const uint8_t *key, uint32_t size)
2659 {
2660  unsigned int hash = 5381;
2661  int i;
2662 
2663  /* hash = hash * 33 + key[i] */
2664  if (size)
2665  for (i = 0; i < size; i++)
2666  hash = ((hash << 5) + hash) + (unsigned int)key[i];
2667  else
2668  for (i = 0; key[i] != 0; i++)
2669  hash = ((hash << 5) + hash) + (unsigned int)key[i];
2670 
2671  return (hash);
2672 }
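
A short worked example of the recurrence (hash = hash * 33 + c) for the two-byte key "ab":

/*
 * hash = 5381
 * hash = 5381   * 33 + 'a' (97) = 177670
 * hash = 177670 * 33 + 'b' (98) = 5863208
 * Callers then reduce the result modulo the table size to pick a bucket.
 */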
2673 
2674 
2675 /*
2676  * Initializes the two witness hash tables. Called exactly once from
2677  * witness_initialize().
2678  */
2679 static void
2680 witness_init_hash_tables(void)
2681 {
2682  int i;
2683 
2684  MPASS(witness_cold);
2685 
2686  /* Initialize the hash tables. */
2687  for (i = 0; i < WITNESS_HASH_SIZE; i++)
2688  w_hash.wh_array[i] = NULL;
2689 
2690  w_hash.wh_size = WITNESS_HASH_SIZE;
2691  w_hash.wh_count = 0;
2692 
2693  /* Initialize the lock order data hash. */
2694  w_lofree = NULL;
2695  for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2696  memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2697 		w_lodata[i].wlod_next = w_lofree;
2698  w_lofree = &w_lodata[i];
2699  }
2700 	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2701  w_lohash.wloh_count = 0;
2702  for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2703  w_lohash.wloh_array[i] = NULL;
2704 }
2705 
2706 static struct witness *
2707 witness_hash_get(const char *key)
2708 {
2709  struct witness *w;
2710  uint32_t hash;
2711 
2712  MPASS(key != NULL);
2713  if (witness_cold == 0)
2714  mtx_assert(&w_mtx, MA_OWNED);
2715  hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2716  w = w_hash.wh_array[hash];
2717  while (w != NULL) {
2718  if (strcmp(w->w_name, key) == 0)
2719  goto out;
2720  w = w->w_hash_next;
2721  }
2722 
2723 out:
2724  return (w);
2725 }
2726 
2727 static void
2728 witness_hash_put(struct witness *w)
2729 {
2730  uint32_t hash;
2731 
2732  MPASS(w != NULL);
2733  MPASS(w->w_name != NULL);
2734  if (witness_cold == 0)
2735  mtx_assert(&w_mtx, MA_OWNED);
2736  KASSERT(witness_hash_get(w->w_name) == NULL,
2737  ("%s: trying to add a hash entry that already exists!", __func__));
2738  KASSERT(w->w_hash_next == NULL,
2739  ("%s: w->w_hash_next != NULL", __func__));
2740 
2741  hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2742  w->w_hash_next = w_hash.wh_array[hash];
2743  w_hash.wh_array[hash] = w;
2744  w_hash.wh_count++;
2745 }
2746 
2747 
2748 static struct witness_lock_order_data *
2749 witness_lock_order_get(struct witness *parent, struct witness *child)
2750 {
2751  struct witness_lock_order_data *data = NULL;
2752  struct witness_lock_order_key key;
2753  unsigned int hash;
2754 
2755  MPASS(parent != NULL && child != NULL);
2756  key.from = parent->w_index;
2757  key.to = child->w_index;
2758 	WITNESS_INDEX_ASSERT(key.from);
2759  WITNESS_INDEX_ASSERT(key.to);
2760  if ((w_rmatrix[parent->w_index][child->w_index]
2761  & WITNESS_LOCK_ORDER_KNOWN) == 0)
2762  goto out;
2763 
2764  hash = witness_hash_djb2((const char*)&key,
2765  sizeof(key)) % w_lohash.wloh_size;
2766  data = w_lohash.wloh_array[hash];
2767  while (data != NULL) {
2768  if (witness_lock_order_key_equal(&data->wlod_key, &key))
2769  break;
2770  data = data->wlod_next;
2771  }
2772 
2773 out:
2774  return (data);
2775 }
2776 
2777 /*
2778  * Verify that parent and child have a known relationship, are not the same,
2779  * and child is actually a child of parent. This is done without w_mtx
2780  * to avoid contention in the common case.
2781  */
2782 static int
2783 witness_lock_order_check(struct witness *parent, struct witness *child)
2784 {
2785 
2786  if (parent != child &&
2787  w_rmatrix[parent->w_index][child->w_index]
2788 	    & WITNESS_LOCK_ORDER_KNOWN &&
2789  isitmychild(parent, child))
2790  return (1);
2791 
2792  return (0);
2793 }
2794 
2795 static int
2796 witness_lock_order_add(struct witness *parent, struct witness *child)
2797 {
2798  struct witness_lock_order_data *data = NULL;
2799  struct witness_lock_order_key key;
2800  unsigned int hash;
2801 
2802  MPASS(parent != NULL && child != NULL);
2803  key.from = parent->w_index;
2804  key.to = child->w_index;
2805 	WITNESS_INDEX_ASSERT(key.from);
2806  WITNESS_INDEX_ASSERT(key.to);
2807  if (w_rmatrix[parent->w_index][child->w_index]
2808 	    & WITNESS_LOCK_ORDER_KNOWN)
2809  return (1);
2810 
2811  hash = witness_hash_djb2((const char*)&key,
2812  sizeof(key)) % w_lohash.wloh_size;
2813  w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2814  data = w_lofree;
2815  if (data == NULL)
2816  return (0);
2817  w_lofree = data->wlod_next;
2818  data->wlod_next = w_lohash.wloh_array[hash];
2819  data->wlod_key = key;
2820  w_lohash.wloh_array[hash] = data;
2821  w_lohash.wloh_count++;
2822  stack_zero(&data->wlod_stack);
2823  stack_save(&data->wlod_stack);
2824  return (1);
2825 }
2826 
2827 /* Call this whenever the structure of the witness graph changes. */
2828 static void
2829 witness_increment_graph_generation(void)
2830 {
2831 
2832  if (witness_cold == 0)
2833  mtx_assert(&w_mtx, MA_OWNED);
2834  w_generation++;
2835 }
2836 
2837 #ifdef KDB
2838 static void
2839 _witness_debugger(int cond, const char *msg)
2840 {
2841 
2842  if (witness_trace && cond)
2843  kdb_backtrace();
2844  if (witness_kdb && cond)
2845  kdb_enter(KDB_WHY_WITNESS, msg);
2846 }
2847 #endif
Definition: subr_witness.c:169