FreeBSD kernel kern code
kern_thread.c
1 /*-
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  * notice(s), this list of conditions and the following disclaimer as
10  * the first lines of this file unmodified other than the possible
11  * addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  * notice(s), this list of conditions and the following disclaimer in the
14  * documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_witness.h"
30 #include "opt_kdtrace.h"
31 #include "opt_hwpmc_hooks.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$BSDSUniX$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/proc.h>
42 #include <sys/rangelock.h>
43 #include <sys/resourcevar.h>
44 #include <sys/sdt.h>
45 #include <sys/smp.h>
46 #include <sys/sched.h>
47 #include <sys/sleepqueue.h>
48 #include <sys/selinfo.h>
49 #include <sys/turnstile.h>
50 #include <sys/ktr.h>
51 #include <sys/rwlock.h>
52 #include <sys/umtx.h>
53 #include <sys/cpuset.h>
54 #ifdef HWPMC_HOOKS
55 #include <sys/pmckern.h>
56 #endif
57 
58 #include <security/audit/audit.h>
59 
60 #include <vm/vm.h>
61 #include <vm/vm_extern.h>
62 #include <vm/uma.h>
63 #include <sys/eventhandler.h>
64 
65 SDT_PROVIDER_DECLARE(proc);
66 SDT_PROBE_DEFINE(proc, , , lwp__exit);
67 
68 
69 /*
70  * thread related storage.
71  */
72 static uma_zone_t thread_zone;
73 
74 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
75 static struct mtx zombie_lock;
76 MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
77 
78 static void thread_zombie(struct thread *);
79 
80 #define TID_BUFFER_SIZE 1024
81 
82 struct mtx tid_lock;
83 static struct unrhdr *tid_unrhdr;
84 static lwpid_t tid_buffer[TID_BUFFER_SIZE];
85 static int tid_head, tid_tail;
86 static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
87 
88 struct tidhashhead *tidhashtbl;
89 u_long tidhash;
90 struct rwlock tidhash_lock;
91 
92 static lwpid_t
93 tid_alloc(void)
94 {
95  lwpid_t tid;
96 
97  tid = alloc_unr(tid_unrhdr);
98  if (tid != -1)
99  return (tid);
100  mtx_lock(&tid_lock);
101  if (tid_head == tid_tail) {
102  mtx_unlock(&tid_lock);
103  return (-1);
104  }
105  tid = tid_buffer[tid_head];
106  tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
107  mtx_unlock(&tid_lock);
108  return (tid);
109 }
110 
111 static void
112 tid_free(lwpid_t tid)
113 {
114  lwpid_t tmp_tid = -1;
115 
116  mtx_lock(&tid_lock);
117  if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
118  tmp_tid = tid_buffer[tid_head];
119  tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
120  }
121  tid_buffer[tid_tail] = tid;
122  tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
123  mtx_unlock(&tid_lock);
124  if (tmp_tid != -1)
125  free_unr(tid_unrhdr, tmp_tid);
126 }
127 
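
The pair above keeps a small ring of recently freed TIDs between the unit-number allocator and reuse, so an ID that was just released is not handed out again immediately. A minimal user-space model of just that ring (the kernel's unr allocator, tid_lock and the lwpid_t type are left out; all names here are illustrative):

#include <stdio.h>

#define RING_SIZE 8                     /* the kernel uses TID_BUFFER_SIZE (1024) */

static int ring[RING_SIZE];
static int ring_head, ring_tail;

/* Queue a freed ID; if the ring is full, return the oldest entry so the
 * caller can hand it back to the backing allocator (free_unr() above). */
static int
ring_put(int id)
{
        int evicted = -1;

        if ((ring_tail + 1) % RING_SIZE == ring_head) {
                evicted = ring[ring_head];
                ring_head = (ring_head + 1) % RING_SIZE;
        }
        ring[ring_tail] = id;
        ring_tail = (ring_tail + 1) % RING_SIZE;
        return (evicted);
}

/* Pop the oldest buffered ID, or -1 if the ring is empty (the kernel then
 * falls back to alloc_unr()). */
static int
ring_get(void)
{
        int id;

        if (ring_head == ring_tail)
                return (-1);
        id = ring[ring_head];
        ring_head = (ring_head + 1) % RING_SIZE;
        return (id);
}

int
main(void)
{
        for (int id = 100; id < 110; id++)
                if (ring_put(id) != -1)
                        printf("ring full, oldest ID returned to backing allocator\n");
        printf("oldest recycled ID: %d\n", ring_get());   /* prints 103 */
        return (0);
}

The one-slot gap between tail and head is what distinguishes the full and empty cases, which is why the ring effectively holds TID_BUFFER_SIZE - 1 entries.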
128 /*
129  * Prepare a thread for use.
130  */
131 static int
132 thread_ctor(void *mem, int size, void *arg, int flags)
133 {
134  struct thread *td;
135 
136  td = (struct thread *)mem;
137  td->td_state = TDS_INACTIVE;
138  td->td_oncpu = NOCPU;
139 
140  td->td_tid = tid_alloc();
141 
142  /*
143  * Note that td_critnest begins life as 1 because the thread is not
144  * running and is thereby implicitly waiting to be on the receiving
145  * end of a context switch.
146  */
147  td->td_critnest = 1;
148  td->td_lend_user_pri = PRI_MAX;
149  EVENTHANDLER_INVOKE(thread_ctor, td);
150 #ifdef AUDIT
151  audit_thread_alloc(td);
152 #endif
153  umtx_thread_alloc(td);
154  return (0);
155 }
156 
157 /*
158  * Reclaim a thread after use.
159  */
160 static void
161 thread_dtor(void *mem, int size, void *arg)
162 {
163  struct thread *td;
164 
165  td = (struct thread *)mem;
166 
167 #ifdef INVARIANTS
168  /* Verify that this thread is in a safe state to free. */
169  switch (td->td_state) {
170  case TDS_INHIBITED:
171  case TDS_RUNNING:
172  case TDS_CAN_RUN:
173  case TDS_RUNQ:
174  /*
175  * We must never unlink a thread that is in one of
176  * these states, because it is currently active.
177  */
178  panic("bad state for thread unlinking");
179  /* NOTREACHED */
180  case TDS_INACTIVE:
181  break;
182  default:
183  panic("bad thread state");
184  /* NOTREACHED */
185  }
186 #endif
187 #ifdef AUDIT
188  audit_thread_free(td);
189 #endif
190  /* Free all OSD associated to this thread. */
191  osd_thread_exit(td);
192 
193  EVENTHANDLER_INVOKE(thread_dtor, td);
194  tid_free(td->td_tid);
195 }
196 
197 /*
198  * Initialize type-stable parts of a thread (when newly created).
199  */
200 static int
201 thread_init(void *mem, int size, int flags)
202 {
203  struct thread *td;
204 
205  td = (struct thread *)mem;
206 
207  td->td_sleepqueue = sleepq_alloc();
208  td->td_turnstile = turnstile_alloc();
209  td->td_rlqe = NULL;
210  td->td_vp_reserv = 0;
211  EVENTHANDLER_INVOKE(thread_init, td);
212  td->td_sched = (struct td_sched *)&td[1];
213  umtx_thread_init(td);
214  td->td_kstack = 0;
215  td->td_sel = NULL;
216  return (0);
217 }
218 
219 /*
220  * Tear down type-stable parts of a thread (just before being discarded).
221  */
222 static void
223 thread_fini(void *mem, int size)
224 {
225  struct thread *td;
226 
227  td = (struct thread *)mem;
228  EVENTHANDLER_INVOKE(thread_fini, td);
229  rlqentry_free(td->td_rlqe);
230  turnstile_free(td->td_turnstile);
231  sleepq_free(td->td_sleepqueue);
232  umtx_thread_fini(td);
233  seltdfini(td);
234 }
235 
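
thread_ctor()/thread_dtor() and thread_init()/thread_fini() above are the two halves of a UMA zone's item life cycle: the first pair runs on every uma_zalloc()/uma_zfree(), the second only when the allocator creates or destroys the backing item, which is what keeps the sleep queue, turnstile and similar pieces "type-stable" across reuse. A minimal sketch of the same wiring for a hypothetical "WIDGET" zone (illustrative only; the real registration for threads is in threadinit() below):

#include <sys/param.h>
#include <sys/kernel.h>
#include <vm/uma.h>

struct widget {
        int     w_busy;                 /* reset on every allocation */
        void    *w_buf;                 /* survives between allocations */
};

static uma_zone_t widget_zone;

static int
widget_ctor(void *mem, int size, void *arg, int flags)
{
        struct widget *w = mem;

        w->w_busy = 0;                  /* per-allocation state, as in thread_ctor() */
        return (0);
}

static void
widget_dtor(void *mem, int size, void *arg)
{
        /* per-free cleanup, as in thread_dtor() */
}

static int
widget_init(void *mem, int size, int flags)
{
        struct widget *w = mem;

        w->w_buf = NULL;                /* long-lived state, as in thread_init() */
        return (0);
}

static void
widget_fini(void *mem, int size)
{
        /* tear down long-lived state, as in thread_fini() */
}

static void
widget_zone_setup(void)
{
        widget_zone = uma_zcreate("WIDGET", sizeof(struct widget),
            widget_ctor, widget_dtor, widget_init, widget_fini,
            UMA_ALIGN_PTR, 0);
}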
236 /*
237  * For a newly created process,
238  * link up all the structures and its initial threads etc.
239  * called from:
240  * {arch}/{arch}/machdep.c ia64_init(), init386() etc.
241  * proc_dtor() (should go away)
242  * proc_init()
243  */
244 void
245 proc_linkup0(struct proc *p, struct thread *td)
246 {
247  TAILQ_INIT(&p->p_threads); /* all threads in proc */
248  proc_linkup(p, td);
249 }
250 
251 void
252 proc_linkup(struct proc *p, struct thread *td)
253 {
254 
255  sigqueue_init(&p->p_sigqueue, p);
256  p->p_ksi = ksiginfo_alloc(1);
257  if (p->p_ksi != NULL) {
258  /* XXX p_ksi may be null if ksiginfo zone is not ready */
259  p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
260  }
261  LIST_INIT(&p->p_mqnotifier);
262  p->p_numthreads = 0;
263  thread_link(td, p);
264 }
265 
266 /*
267  * Initialize global thread allocation resources.
268  */
269 void
270 threadinit(void)
271 {
272 
273  mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
274 
275  /*
276  * pid_max cannot be greater than PID_MAX.
277  * leave one number for thread0.
278  */
279  tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);
280 
281  thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
282  thread_ctor, thread_dtor, thread_init, thread_fini,
283  16 - 1, 0);
284  tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
285  rw_init(&tidhash_lock, "tidhash");
286 }
287 
288 /*
289  * Place an unused thread on the zombie list.
290  * Use the slpq as that must be unused by now.
291  */
292 void
293 thread_zombie(struct thread *td)
294 {
295  mtx_lock_spin(&zombie_lock);
296  TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
297  mtx_unlock_spin(&zombie_lock);
298 }
299 
300 /*
301  * Release a thread that has exited after cpu_throw().
302  */
303 void
304 thread_stash(struct thread *td)
305 {
306  atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
307  thread_zombie(td);
308 }
309 
310 /*
311  * Reap zombie resources.
312  */
313 void
314 thread_reap(void)
315 {
316  struct thread *td_first, *td_next;
317 
318  /*
319  * Don't even bother to lock if none at this instant,
320  * we really don't care about the next instant..
321  */
322  if (!TAILQ_EMPTY(&zombie_threads)) {
323  mtx_lock_spin(&zombie_lock);
324  td_first = TAILQ_FIRST(&zombie_threads);
325  if (td_first)
326  TAILQ_INIT(&zombie_threads);
327  mtx_unlock_spin(&zombie_lock);
328  while (td_first) {
329  td_next = TAILQ_NEXT(td_first, td_slpq);
330  if (td_first->td_ucred)
331  crfree(td_first->td_ucred);
332  thread_free(td_first);
333  td_first = td_next;
334  }
335  }
336 }
337 
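
thread_reap() above uses a common pattern: peek at the list without the lock, then detach the entire list in one step under the spin lock and do the per-item cleanup after dropping it. A minimal user-space model of the same pattern (names are illustrative; a pthread mutex stands in for the kernel spin lock):

#include <pthread.h>
#include <stdlib.h>

struct zombie {
        struct zombie   *next;
};

static struct zombie *zombies;
static pthread_mutex_t zombie_mtx = PTHREAD_MUTEX_INITIALIZER;

void
reap(void)
{
        struct zombie *list, *next;

        if (zombies == NULL)            /* unlocked peek, as in the kernel */
                return;

        pthread_mutex_lock(&zombie_mtx);
        list = zombies;                 /* detach the whole list at once */
        zombies = NULL;
        pthread_mutex_unlock(&zombie_mtx);

        while (list != NULL) {          /* expensive cleanup, lock not held */
                next = list->next;
                free(list);
                list = next;
        }
}

Keeping thread_free() and crfree() outside the spin lock matters because both can be comparatively expensive, and spin locks must only be held briefly.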
338 /*
339  * Allocate a thread.
340  */
341 struct thread *
342 thread_alloc(int pages)
343 {
344  struct thread *td;
345 
346  thread_reap(); /* check if any zombies to get */
347 
348  td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
349  KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
350  if (!vm_thread_new(td, pages)) {
351  uma_zfree(thread_zone, td);
352  return (NULL);
353  }
354  cpu_thread_alloc(td);
355  return (td);
356 }
357 
358 int
359 thread_alloc_stack(struct thread *td, int pages)
360 {
361 
362  KASSERT(td->td_kstack == 0,
363  ("thread_alloc_stack called on a thread with kstack"));
364  if (!vm_thread_new(td, pages))
365  return (0);
366  cpu_thread_alloc(td);
367  return (1);
368 }
369 
370 /*
371  * Deallocate a thread.
372  */
373 void
374 thread_free(struct thread *td)
375 {
376 
377  lock_profile_thread_exit(td);
378  if (td->td_cpuset)
379  cpuset_rel(td->td_cpuset);
380  td->td_cpuset = NULL;
381  cpu_thread_free(td);
382  if (td->td_kstack != 0)
383  vm_thread_dispose(td);
384  uma_zfree(thread_zone, td);
385 }
386 
387 /*
388  * Discard the current thread and exit from its context.
389  * Always called with scheduler locked.
390  *
391  * Because we can't free a thread while we're operating under its context,
392  * push the current thread into our CPU's deadthread holder. This means
393  * we needn't worry about someone else grabbing our context before we
394  * do a cpu_throw().
395  */
396 void
397 thread_exit(void)
398 {
399  uint64_t runtime, new_switchtime;
400  struct thread *td;
401  struct thread *td2;
402  struct proc *p;
403  int wakeup_swapper;
404 
405  td = curthread;
406  p = td->td_proc;
407 
408  PROC_SLOCK_ASSERT(p, MA_OWNED);
409  mtx_assert(&Giant, MA_NOTOWNED);
410 
411  PROC_LOCK_ASSERT(p, MA_OWNED);
412  KASSERT(p != NULL, ("thread exiting without a process"));
413  CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
414  (long)p->p_pid, td->td_name);
415  KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
416 
417 #ifdef AUDIT
418  AUDIT_SYSCALL_EXIT(0, td);
419 #endif
420  umtx_thread_exit(td);
421  /*
422  * drop FPU & debug register state storage, or any other
423  * architecture specific resources that
424  * would not be on a new untouched process.
425  */
426  cpu_thread_exit(td); /* XXXSMP */
427 
428  /*
429  * The last thread is left attached to the process
430  * So that the whole bundle gets recycled. Skip
431  * all this stuff if we never had threads.
432  * EXIT clears all sign of other threads when
433  * it goes to single threading, so the last thread always
434  * takes the short path.
435  */
436  if (p->p_flag & P_HADTHREADS) {
437  if (p->p_numthreads > 1) {
438  thread_unlink(td);
439  td2 = FIRST_THREAD_IN_PROC(p);
440  sched_exit_thread(td2, td);
441 
442  /*
443  * The test below is NOT true if we are the
444  * sole exiting thread. P_STOPPED_SINGLE is unset
445  * in exit1() after it is the only survivor.
446  */
447  if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
448  if (p->p_numthreads == p->p_suspcount) {
449  thread_lock(p->p_singlethread);
450  wakeup_swapper = thread_unsuspend_one(
451  p->p_singlethread);
452  thread_unlock(p->p_singlethread);
453  if (wakeup_swapper)
454  kick_proc0();
455  }
456  }
457 
458  atomic_add_int(&td->td_proc->p_exitthreads, 1);
459  PCPU_SET(deadthread, td);
460  } else {
461  /*
462  * The last thread is exiting.. but not through exit()
463  */
464  panic ("thread_exit: Last thread exiting on its own");
465  }
466  }
467 #ifdef HWPMC_HOOKS
468  /*
469  * If this thread is part of a process that is being tracked by hwpmc(4),
470  * inform the module of the thread's impending exit.
471  */
472  if (PMC_PROC_IS_USING_PMCS(td->td_proc))
473  PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
474 #endif
475  PROC_UNLOCK(p);
476 
477  /* Do the same timestamp bookkeeping that mi_switch() would do. */
478  new_switchtime = cpu_ticks();
479  runtime = new_switchtime - PCPU_GET(switchtime);
480  td->td_runtime += runtime;
481  td->td_incruntime += runtime;
482  PCPU_SET(switchtime, new_switchtime);
483  PCPU_SET(switchticks, ticks);
484  PCPU_INC(cnt.v_swtch);
485 
486  /* Save our resource usage in our process. */
487  td->td_ru.ru_nvcsw++;
488  ruxagg(p, td);
489  rucollect(&p->p_ru, &td->td_ru);
490 
491  thread_lock(td);
492  PROC_SUNLOCK(p);
493  td->td_state = TDS_INACTIVE;
494 #ifdef WITNESS
495  witness_thread_exit(td);
496 #endif
497  CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
498  sched_throw(td);
499  panic("I'm a teapot!");
500  /* NOTREACHED */
501 }
502 
503 /*
504  * Do any thread specific cleanups that may be needed in wait()
505  * called with Giant, proc and schedlock not held.
506  */
507 void
508 thread_wait(struct proc *p)
509 {
510  struct thread *td;
511 
512  mtx_assert(&Giant, MA_NOTOWNED);
513  KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
514  td = FIRST_THREAD_IN_PROC(p);
515  /* Lock the last thread so we spin until it exits cpu_throw(). */
516  thread_lock(td);
517  thread_unlock(td);
518  /* Wait for any remaining threads to exit cpu_throw(). */
519  while (p->p_exitthreads)
520  sched_relinquish(curthread);
521  lock_profile_thread_exit(td);
522  cpuset_rel(td->td_cpuset);
523  td->td_cpuset = NULL;
524  cpu_thread_clean(td);
525  crfree(td->td_ucred);
526  thread_reap(); /* check for zombie threads etc. */
527 }
528 
529 /*
530  * Link a thread to a process.
531  * set up anything that needs to be initialized for it to
532  * be used by the process.
533  */
534 void
535 thread_link(struct thread *td, struct proc *p)
536 {
537 
538  /*
539  * XXX This can't be enabled because it's called for proc0 before
540  * its lock has been created.
541  * PROC_LOCK_ASSERT(p, MA_OWNED);
542  */
543  td->td_state = TDS_INACTIVE;
544  td->td_proc = p;
545  td->td_flags = TDF_INMEM;
546 
547  LIST_INIT(&td->td_contested);
548  LIST_INIT(&td->td_lprof[0]);
549  LIST_INIT(&td->td_lprof[1]);
550  sigqueue_init(&td->td_sigqueue, p);
551  callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
552  TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
553  p->p_numthreads++;
554 }
555 
556 /*
557  * Convert a process with one thread to an unthreaded process.
558  */
559 void
560 thread_unthread(struct thread *td)
561 {
562  struct proc *p = td->td_proc;
563 
564  KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
565  p->p_flag &= ~P_HADTHREADS;
566 }
567 
568 /*
569  * Called from:
570  * thread_exit()
571  */
572 void
573 thread_unlink(struct thread *td)
574 {
575  struct proc *p = td->td_proc;
576 
577  PROC_LOCK_ASSERT(p, MA_OWNED);
578  TAILQ_REMOVE(&p->p_threads, td, td_plist);
579  p->p_numthreads--;
580  /* could clear a few other things here */
581  /* Must NOT clear links to proc! */
582 }
583 
584 static int
585 calc_remaining(struct proc *p, int mode)
586 {
587  int remaining;
588 
589  PROC_LOCK_ASSERT(p, MA_OWNED);
590  PROC_SLOCK_ASSERT(p, MA_OWNED);
591  if (mode == SINGLE_EXIT)
592  remaining = p->p_numthreads;
593  else if (mode == SINGLE_BOUNDARY)
594  remaining = p->p_numthreads - p->p_boundary_count;
595  else if (mode == SINGLE_NO_EXIT)
596  remaining = p->p_numthreads - p->p_suspcount;
597  else
598  panic("calc_remaining: wrong mode %d", mode);
599  return (remaining);
600 }
601 
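
A worked example with hypothetical counts: if p_numthreads is 4, p_suspcount is 2 and p_boundary_count is 1, calc_remaining() reports 4 for SINGLE_EXIT (every other thread must exit), 3 for SINGLE_BOUNDARY (the thread already parked at the user boundary no longer counts), and 2 for SINGLE_NO_EXIT (the two suspended threads no longer count). thread_single() below loops until the value drops to 1, meaning only the calling thread remains.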
602 /*
603  * Enforce single-threading.
604  *
605  * Returns 1 if the caller must abort (another thread is waiting to
606  * exit the process or similar). Process is locked!
607  * Returns 0 when you are successfully the only thread running.
608  * A process has successfully single-threaded in the suspend mode when
609  * there are no threads in user mode. Threads in the kernel must be
610  * allowed to continue until they get to the user boundary. They may even
611  * copy out their return values and data before suspending. They may however be
612  * accelerated in reaching the user boundary as we will wake up
613  * any sleeping threads that are interruptible (PCATCH).
614  */
615 int
616 thread_single(int mode)
617 {
618  struct thread *td;
619  struct thread *td2;
620  struct proc *p;
621  int remaining, wakeup_swapper;
622 
623  td = curthread;
624  p = td->td_proc;
625  mtx_assert(&Giant, MA_NOTOWNED);
626  PROC_LOCK_ASSERT(p, MA_OWNED);
627 
628  if ((p->p_flag & P_HADTHREADS) == 0)
629  return (0);
630 
631  /* Is someone already single threading? */
632  if (p->p_singlethread != NULL && p->p_singlethread != td)
633  return (1);
634 
635  if (mode == SINGLE_EXIT) {
636  p->p_flag |= P_SINGLE_EXIT;
637  p->p_flag &= ~P_SINGLE_BOUNDARY;
638  } else {
639  p->p_flag &= ~P_SINGLE_EXIT;
640  if (mode == SINGLE_BOUNDARY)
641  p->p_flag |= P_SINGLE_BOUNDARY;
642  else
643  p->p_flag &= ~P_SINGLE_BOUNDARY;
644  }
645  p->p_flag |= P_STOPPED_SINGLE;
646  PROC_SLOCK(p);
647  p->p_singlethread = td;
648  remaining = calc_remaining(p, mode);
649  while (remaining != 1) {
650  if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
651  goto stopme;
652  wakeup_swapper = 0;
653  FOREACH_THREAD_IN_PROC(p, td2) {
654  if (td2 == td)
655  continue;
656  thread_lock(td2);
657  td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
658  if (TD_IS_INHIBITED(td2)) {
659  switch (mode) {
660  case SINGLE_EXIT:
661  if (TD_IS_SUSPENDED(td2))
662  wakeup_swapper |=
663  thread_unsuspend_one(td2);
664  if (TD_ON_SLEEPQ(td2) &&
665  (td2->td_flags & TDF_SINTR))
666  wakeup_swapper |=
667  sleepq_abort(td2, EINTR);
668  break;
669  case SINGLE_BOUNDARY:
670  if (TD_IS_SUSPENDED(td2) &&
671  !(td2->td_flags & TDF_BOUNDARY))
672  wakeup_swapper |=
673  thread_unsuspend_one(td2);
674  if (TD_ON_SLEEPQ(td2) &&
675  (td2->td_flags & TDF_SINTR))
676  wakeup_swapper |=
677  sleepq_abort(td2, ERESTART);
678  break;
679  case SINGLE_NO_EXIT:
680  if (TD_IS_SUSPENDED(td2) &&
681  !(td2->td_flags & TDF_BOUNDARY))
682  wakeup_swapper |=
683  thread_unsuspend_one(td2);
684  if (TD_ON_SLEEPQ(td2) &&
685  (td2->td_flags & TDF_SINTR))
686  wakeup_swapper |=
687  sleepq_abort(td2, ERESTART);
688  break;
689  default:
690  break;
691  }
692  }
693 #ifdef SMP
694  else if (TD_IS_RUNNING(td2) && td != td2) {
695  forward_signal(td2);
696  }
697 #endif
698  thread_unlock(td2);
699  }
700  if (wakeup_swapper)
701  kick_proc0();
702  remaining = calc_remaining(p, mode);
703 
704  /*
705  * Maybe we suspended some threads.. was it enough?
706  */
707  if (remaining == 1)
708  break;
709 
710 stopme:
711  /*
712  * Wake us up when everyone else has suspended.
713  * In the mean time we suspend as well.
714  */
715  thread_suspend_switch(td);
716  remaining = calc_remaining(p, mode);
717  }
718  if (mode == SINGLE_EXIT) {
719  /*
720  * We have gotten rid of all the other threads and we
721  * are about to either exit or exec. In either case,
722  * we try our utmost to revert to being a non-threaded
723  * process.
724  */
725  p->p_singlethread = NULL;
726  p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
727  thread_unthread(td);
728  }
729  PROC_SUNLOCK(p);
730  return (0);
731 }
732 
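
A hedged sketch of how a caller is expected to use thread_single(), based only on the contract in the comment above (process lock held on entry, 1 meaning another thread already owns single-threading, 0 meaning the caller is now the sole runnable thread). The helper name and error choice are illustrative, not from the tree:

static int
become_sole_thread(struct proc *p)
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if (thread_single(SINGLE_EXIT) != 0)
                return (ERESTART);      /* another thread is already single-threading */
        /* From here on, no other thread in p is running. */
        return (0);
}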
733 /*
734  * Called in from locations that can safely check to see
735  * whether we have to suspend or at least throttle for a
736  * single-thread event (e.g. fork).
737  *
738  * Such locations include userret().
739  * If the "return_instead" argument is non zero, the thread must be able to
740  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
741  *
742  * The 'return_instead' argument tells the function if it may do a
743  * thread_exit() or suspend, or whether the caller must abort and back
744  * out instead.
745  *
746  * If the thread that set the single_threading request has set the
747  * P_SINGLE_EXIT bit in the process flags then this call will never return
748  * if 'return_instead' is false, but will exit.
749  *
750  * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
751  *---------------+---------------------+---------------------
752  *       0       | returns 0           | returns 0 or 1
753  *               | when ST ends        | immediately
754  *---------------+---------------------+---------------------
755  *       1       | thread exits        | returns 1
756  *               |                     | immediately
757  * 0 = thread_exit() or suspension ok,
758  * other = return error instead of stopping the thread.
759  *
760  * While a full suspension is under effect, even a single threading
761  * thread would be suspended if it made this call (but it shouldn't).
762  * This call should only be made from places where
763  * thread_exit() would be safe as that may be the outcome unless
764  * return_instead is set.
765  */
766 int
767 thread_suspend_check(int return_instead)
768 {
769  struct thread *td;
770  struct proc *p;
771  int wakeup_swapper;
772 
773  td = curthread;
774  p = td->td_proc;
775  mtx_assert(&Giant, MA_NOTOWNED);
776  PROC_LOCK_ASSERT(p, MA_OWNED);
777  while (P_SHOULDSTOP(p) ||
778  ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
779  if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
780  KASSERT(p->p_singlethread != NULL,
781  ("singlethread not set"));
782  /*
783  * The only suspension in action is a
784  * single-threading. Single threader need not stop.
785  * XXX Should be safe to access unlocked
786  * as it can only be set to be true by us.
787  */
788  if (p->p_singlethread == td)
789  return (0); /* Exempt from stopping. */
790  }
791  if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
792  return (EINTR);
793 
794  /* Should we goto user boundary if we didn't come from there? */
795  if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
796  (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
797  return (ERESTART);
798 
799  /*
800  * Ignore suspend requests for stop signals if they
801  * are deferred.
802  */
803  if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
804  td->td_flags & TDF_SBDRY) {
805  KASSERT(return_instead,
806  ("TDF_SBDRY set for unsafe thread_suspend_check"));
807  return (0);
808  }
809 
810  /*
811  * If the process is waiting for us to exit,
812  * this thread should just suicide.
813  * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
814  */
815  if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
816  PROC_UNLOCK(p);
817  tidhash_remove(td);
818  PROC_LOCK(p);
819  tdsigcleanup(td);
820  PROC_SLOCK(p);
821  thread_stopped(p);
822  thread_exit();
823  }
824 
825  PROC_SLOCK(p);
826  thread_stopped(p);
827  if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
828  if (p->p_numthreads == p->p_suspcount + 1) {
829  thread_lock(p->p_singlethread);
830  wakeup_swapper =
831  thread_unsuspend_one(p->p_singlethread);
832  thread_unlock(p->p_singlethread);
833  if (wakeup_swapper)
834  kick_proc0();
835  }
836  }
837  PROC_UNLOCK(p);
838  thread_lock(td);
839  /*
840  * When a thread suspends, it just
841  * gets taken off all queues.
842  */
843  thread_suspend_one(td);
844  if (return_instead == 0) {
845  p->p_boundary_count++;
846  td->td_flags |= TDF_BOUNDARY;
847  }
848  PROC_SUNLOCK(p);
849  mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
850  if (return_instead == 0)
851  td->td_flags &= ~TDF_BOUNDARY;
852  thread_unlock(td);
853  PROC_LOCK(p);
854  if (return_instead == 0) {
855  PROC_SLOCK(p);
856  p->p_boundary_count--;
857  PROC_SUNLOCK(p);
858  }
859  }
860  return (0);
861 }
862 
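
Two hedged call-site sketches matching the table above (the function names are illustrative; the kernel's real call sites are places like userret() and the sleep queue code):

/* AST/userret() style: stopping here, or even thread_exit(), is acceptable. */
static void
check_suspend_ast(struct proc *p)
{

        PROC_LOCK(p);
        thread_suspend_check(0);        /* may suspend; may not return at all */
        PROC_UNLOCK(p);
}

/* "Back out" style: the caller cannot stop here, so it asks for an error. */
static int
check_suspend_restart(struct proc *p)
{
        int error;

        PROC_LOCK(p);
        error = thread_suspend_check(1);        /* 0, EINTR or ERESTART */
        PROC_UNLOCK(p);
        return (error);
}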
863 void
864 thread_suspend_switch(struct thread *td)
865 {
866  struct proc *p;
867 
868  p = td->td_proc;
869  KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
870  PROC_LOCK_ASSERT(p, MA_OWNED);
871  PROC_SLOCK_ASSERT(p, MA_OWNED);
872  /*
873  * We implement thread_suspend_one in stages here to avoid
874  * dropping the proc lock while the thread lock is owned.
875  */
876  thread_stopped(p);
877  p->p_suspcount++;
878  PROC_UNLOCK(p);
879  thread_lock(td);
880  td->td_flags &= ~TDF_NEEDSUSPCHK;
881  TD_SET_SUSPENDED(td);
882  sched_sleep(td, 0);
883  PROC_SUNLOCK(p);
884  DROP_GIANT();
885  mi_switch(SW_VOL | SWT_SUSPEND, NULL);
886  thread_unlock(td);
887  PICKUP_GIANT();
888  PROC_LOCK(p);
889  PROC_SLOCK(p);
890 }
891 
892 void
893 thread_suspend_one(struct thread *td)
894 {
895  struct proc *p = td->td_proc;
896 
897  PROC_SLOCK_ASSERT(p, MA_OWNED);
898  THREAD_LOCK_ASSERT(td, MA_OWNED);
899  KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
900  p->p_suspcount++;
901  td->td_flags &= ~TDF_NEEDSUSPCHK;
902  TD_SET_SUSPENDED(td);
903  sched_sleep(td, 0);
904 }
905 
906 int
907 thread_unsuspend_one(struct thread *td)
908 {
909  struct proc *p = td->td_proc;
910 
911  PROC_SLOCK_ASSERT(p, MA_OWNED);
912  THREAD_LOCK_ASSERT(td, MA_OWNED);
913  KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
914  TD_CLR_SUSPENDED(td);
915  p->p_suspcount--;
916  return (setrunnable(td));
917 }
918 
919 /*
920  * Allow all threads blocked by single threading to continue running.
921  */
922 void
923 thread_unsuspend(struct proc *p)
924 {
925  struct thread *td;
926  int wakeup_swapper;
927 
928  PROC_LOCK_ASSERT(p, MA_OWNED);
929  PROC_SLOCK_ASSERT(p, MA_OWNED);
930  wakeup_swapper = 0;
931  if (!P_SHOULDSTOP(p)) {
932  FOREACH_THREAD_IN_PROC(p, td) {
933  thread_lock(td);
934  if (TD_IS_SUSPENDED(td)) {
935  wakeup_swapper |= thread_unsuspend_one(td);
936  }
937  thread_unlock(td);
938  }
939  } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
940  (p->p_numthreads == p->p_suspcount)) {
941  /*
942  * Stopping everything also did the job for the single
943  * threading request. Now we've downgraded to single-threaded,
944  * let it continue.
945  */
946  thread_lock(p->p_singlethread);
947  wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
948  thread_unlock(p->p_singlethread);
949  }
950  if (wakeup_swapper)
951  kick_proc0();
952 }
953 
954 /*
955  * End the single threading mode..
956  */
957 void
958 thread_single_end(void)
959 {
960  struct thread *td;
961  struct proc *p;
962  int wakeup_swapper;
963 
964  td = curthread;
965  p = td->td_proc;
966  PROC_LOCK_ASSERT(p, MA_OWNED);
967  p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
968  PROC_SLOCK(p);
969  p->p_singlethread = NULL;
970  wakeup_swapper = 0;
971  /*
972  * If there are other threads they may now run,
973  * unless of course there is a blanket 'stop order'
974  * on the process. The single threader must be allowed
975  * to continue however as this is a bad place to stop.
976  */
977  if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
978  FOREACH_THREAD_IN_PROC(p, td) {
979  thread_lock(td);
980  if (TD_IS_SUSPENDED(td)) {
981  wakeup_swapper |= thread_unsuspend_one(td);
982  }
983  thread_unlock(td);
984  }
985  }
986  PROC_SUNLOCK(p);
987  if (wakeup_swapper)
988  kick_proc0();
989 }
990 
991 struct thread *
992 thread_find(struct proc *p, lwpid_t tid)
993 {
994  struct thread *td;
995 
996  PROC_LOCK_ASSERT(p, MA_OWNED);
997  FOREACH_THREAD_IN_PROC(p, td) {
998  if (td->td_tid == tid)
999  break;
1000  }
1001  return (td);
1002 }
1003 
1004 /* Locate a thread by number; return with proc lock held. */
1005 struct thread *
1006 tdfind(lwpid_t tid, pid_t pid)
1007 {
1008 #define RUN_THRESH 16
1009  struct thread *td;
1010  int run = 0;
1011 
1012  rw_rlock(&tidhash_lock);
1013  LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1014  if (td->td_tid == tid) {
1015  if (pid != -1 && td->td_proc->p_pid != pid) {
1016  td = NULL;
1017  break;
1018  }
1019  PROC_LOCK(td->td_proc);
1020  if (td->td_proc->p_state == PRS_NEW) {
1021  PROC_UNLOCK(td->td_proc);
1022  td = NULL;
1023  break;
1024  }
1025  if (run > RUN_THRESH) {
1026  if (rw_try_upgrade(&tidhash_lock)) {
1027  LIST_REMOVE(td, td_hash);
1028  LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1029  td, td_hash);
1030  rw_wunlock(&tidhash_lock);
1031  return (td);
1032  }
1033  }
1034  break;
1035  }
1036  run++;
1037  }
1038  rw_runlock(&tidhash_lock);
1039  return (td);
1040 }
1041 
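
A hedged usage sketch for tdfind(): on success the matching thread's process is returned locked, so the caller must drop it with PROC_UNLOCK(). The helper below is illustrative only:

static int
thread_exists(lwpid_t tid, pid_t pid)
{
        struct thread *td;

        td = tdfind(tid, pid);          /* pid == -1 matches any process */
        if (td == NULL)
                return (0);
        PROC_UNLOCK(td->td_proc);       /* tdfind() returned it locked */
        return (1);
}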
1042 void
1043 tidhash_add(struct thread *td)
1044 {
1045  rw_wlock(&tidhash_lock);
1046  LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1047  rw_wunlock(&tidhash_lock);
1048 }
1049 
1050 void
1051 tidhash_remove(struct thread *td)
1052 {
1053  rw_wlock(&tidhash_lock);
1054  LIST_REMOVE(td, td_hash);
1055  rw_wunlock(&tidhash_lock);
1056 }