FreeBSD kernel kern code
kern_exit.c
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  * The Regents of the University of California. All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  * notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  * notice, this list of conditions and the following disclaimer in the
17  * documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  * may be used to endorse or promote products derived from this software
20  * without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$BSDSUniX$");
39 
40 #include "opt_compat.h"
41 #include "opt_kdtrace.h"
42 #include "opt_ktrace.h"
43 #include "opt_procdesc.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/capability.h>
49 #include <sys/eventhandler.h>
50 #include <sys/kernel.h>
51 #include <sys/malloc.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/procdesc.h>
56 #include <sys/pioctl.h>
57 #include <sys/jail.h>
58 #include <sys/tty.h>
59 #include <sys/wait.h>
60 #include <sys/vmmeter.h>
61 #include <sys/vnode.h>
62 #include <sys/racct.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sbuf.h>
65 #include <sys/signalvar.h>
66 #include <sys/sched.h>
67 #include <sys/sx.h>
68 #include <sys/syscallsubr.h>
69 #include <sys/syslog.h>
70 #include <sys/ptrace.h>
71 #include <sys/acct.h> /* for acct_process() function prototype */
72 #include <sys/filedesc.h>
73 #include <sys/sdt.h>
74 #include <sys/shm.h>
75 #include <sys/sem.h>
76 #ifdef KTRACE
77 #include <sys/ktrace.h>
78 #endif
79 
80 #include <security/audit/audit.h>
81 #include <security/mac/mac_framework.h>
82 
83 #include <vm/vm.h>
84 #include <vm/vm_extern.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/uma.h>
90 
91 #ifdef KDTRACE_HOOKS
92 #include <sys/dtrace_bsd.h>
93 dtrace_execexit_func_t dtrace_fasttrap_exit;
94 #endif
95 
96 SDT_PROVIDER_DECLARE(proc);
97 SDT_PROBE_DEFINE1(proc, kernel, , exit, "int");
98 
99 /* Hook for NFS teardown procedure. */
100 void (*nlminfo_release_p)(struct proc *p);
101 
102 struct proc *
103 proc_realparent(struct proc *child)
104 {
105  struct proc *p, *parent;
106 
107  sx_assert(&proctree_lock, SX_LOCKED);
108  if ((child->p_treeflag & P_TREE_ORPHANED) == 0) {
109  if (child->p_oppid == 0 ||
110  child->p_pptr->p_pid == child->p_oppid)
111  parent = child->p_pptr;
112  else
113  parent = initproc;
114  return (parent);
115  }
116  for (p = child; (p->p_treeflag & P_TREE_FIRST_ORPHAN) == 0;) {
117  /* Cannot use LIST_PREV(), since the list head is not known. */
118  p = __containerof(p->p_orphan.le_prev, struct proc,
119  p_orphan.le_next);
120  KASSERT((p->p_treeflag & P_TREE_ORPHANED) != 0,
121  ("missing P_ORPHAN %p", p));
122  }
123  parent = __containerof(p->p_orphan.le_prev, struct proc,
124  p_orphans.lh_first);
125  return (parent);
126 }
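The le_prev walk in proc_realparent() above is the subtle part: for <sys/queue.h> lists, le_prev points at the previous element's le_next field, or, for the first element, at the head's lh_first, so __containerof() can recover either the previous proc or the holder of the p_orphans list head. A minimal standalone sketch of that pointer arithmetic, using hypothetical toy types in place of struct proc:

#include <stddef.h>

/* Toy stand-ins for LIST_ENTRY/LIST_HEAD and struct proc (assumptions). */
struct toy;
struct toy_entry {
	struct toy *le_next;
	struct toy **le_prev;	/* address of previous le_next, or of lh_first */
};
struct toy_head {
	struct toy *lh_first;
};
struct toy {
	struct toy_entry link;	/* plays the role of p_orphan */
};

#define	toy_containerof(ptr, type, field)				\
	((type *)((char *)(ptr) - offsetof(type, field)))

/* Previous element; valid only when 't' is not first on the list. */
static inline struct toy *
toy_prev(struct toy *t)
{
	return (toy_containerof(t->link.le_prev, struct toy, link.le_next));
}

/* Holder of the list head, reached from the first element's le_prev. */
static inline struct toy_head *
toy_head_from_first(struct toy *first)
{
	return (toy_containerof(first->link.le_prev, struct toy_head, lh_first));
}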
127 
128 static void
129 clear_orphan(struct proc *p)
130 {
131  struct proc *p1;
132 
133  sx_assert(&proctree_lock, SA_XLOCKED);
134  if ((p->p_treeflag & P_TREE_ORPHANED) == 0)
135  return;
136  if ((p->p_treeflag & P_TREE_FIRST_ORPHAN) != 0) {
137  p1 = LIST_NEXT(p, p_orphan);
138  if (p1 != NULL)
139  p1->p_treeflag |= P_TREE_FIRST_ORPHAN;
140  p->p_treeflag &= ~P_TREE_FIRST_ORPHAN;
141  }
142  LIST_REMOVE(p, p_orphan);
143  p->p_treeflag &= ~P_TREE_ORPHANED;
144 }
145 
146 /*
147  * exit -- death of process.
148  */
149 void
150 sys_sys_exit(struct thread *td, struct sys_exit_args *uap)
151 {
152 
153  exit1(td, W_EXITCODE(uap->rval, 0));
154  /* NOTREACHED */
155 }
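The rv value passed to exit1() here is a wait status word: W_EXITCODE() packs the exit value into the high byte and a terminating signal into the low bits, which is what WEXITSTATUS()/WTERMSIG() later unpack in the wait*() paths below. A small hedged userland sketch of that encoding (W_EXITCODE is redefined locally in case the host <sys/wait.h> does not expose it):

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_EXITCODE			/* traditional layout, assumed */
#define	W_EXITCODE(ret, sig)	((ret) << 8 | (sig))
#endif

int
main(void)
{
	/* What sys_sys_exit() stores for exit(2): the value in the high byte. */
	int status = W_EXITCODE(2, 0);

	assert(WIFEXITED(status) && WEXITSTATUS(status) == 2);

	/* What sys_abort2() stores further below: a signal, no exit value. */
	status = W_EXITCODE(0, SIGABRT);
	assert(WIFSIGNALED(status) && WTERMSIG(status) == SIGABRT);
	return (0);
}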
156 
157 /*
158  * Exit: deallocate address space and other resources, change proc state to
159  * zombie, and unlink proc from allproc and parent's lists. Save exit status
160  * and rusage for wait(). Check for child processes and orphan them.
161  */
162 void
163 exit1(struct thread *td, int rv)
164 {
165  struct proc *p, *nq, *q, *t;
166  struct thread *tdt;
167  struct vnode *vtmp;
168  struct vnode *ttyvp = NULL;
169  struct plimit *plim;
170  int locked;
171 
172  mtx_assert(&Giant, MA_NOTOWNED);
173 
174  p = td->td_proc;
175  /*
176  * XXX: in case we're rebooting, we just let init die in order to
177  * work around an unsolved stack overflow seen very late during
178  * shutdown on sparc64 when the gmirror worker process exits.
179  */
180  if (p == initproc && rebooting == 0) {
181  printf("init died (signal %d, exit %d)\n",
182  WTERMSIG(rv), WEXITSTATUS(rv));
183  panic("Going nowhere without my init!");
184  }
185 
186  /*
187  * MUST abort all other threads before proceeding past here.
188  */
189  PROC_LOCK(p);
190  while (p->p_flag & P_HADTHREADS) {
191  /*
192  * First check if some other thread got here before us;
193  * if so, act appropriately (exit or suspend).
194  */
196 
197  /*
198  * Kill off the other threads. This requires
199  * some co-operation from other parts of the kernel
200  * so it may not be instantaneous. With this state set
201  * any thread entering the kernel from userspace will
202  * thread_exit() in trap(). Any thread attempting to
203  * sleep will return immediately with EINTR or EWOULDBLOCK
204  * which will hopefully force them to back out to userland
205  * freeing resources as they go. Any thread attempting
206  * to return to userland will thread_exit() from userret().
207  * thread_exit() will unsuspend us when the last of the
208  * other threads exits.
209  * If another thread is already single-threading the process
210  * after resumption, calling thread_single() will fail; in that
211  * case we just re-check all suspension requests, and the thread
212  * should either be suspended there or exit.
213  */
214  if (! thread_single(SINGLE_EXIT))
215  break;
216 
217  /*
218  * All other activity in this process is now stopped.
219  * Threading support has been turned off.
220  */
221  }
222  KASSERT(p->p_numthreads == 1,
223  ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
224  racct_sub(p, RACCT_NTHR, 1);
225  /*
226  * Wakeup anyone in procfs' PIOCWAIT. They should have a hold
227  * on our vmspace, so we should block below until they have
228  * released their reference to us. Note that if they have
229  * requested S_EXIT stops we will block here until they ack
230  * via PIOCCONT.
231  */
232  _STOPEVENT(p, S_EXIT, rv);
233 
234  /*
235  * Ignore any pending request to stop due to a stop signal.
236  * Once P_WEXIT is set, future requests will be ignored as
237  * well.
238  */
239  p->p_flag &= ~P_STOPPED_SIG;
240  KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped"));
241 
242  /*
243  * Note that we are exiting and do another wakeup of anyone in
244  * PIOCWAIT in case they aren't listening for S_EXIT stops or
245  * decided to wait again after we told them we are exiting.
246  */
247  p->p_flag |= P_WEXIT;
248  wakeup(&p->p_stype);
249 
250  /*
251  * Wait for any processes that have a hold on our vmspace to
252  * release their reference.
253  */
254  while (p->p_lock > 0)
255  msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);
256 
257  p->p_xstat = rv; /* Let event handler change exit status */
258  PROC_UNLOCK(p);
259  /* Drain the limit callout while we don't have the proc locked */
260  callout_drain(&p->p_limco);
261 
262 #ifdef AUDIT
263  /*
264  * The Sun BSM exit token contains two components: an exit status as
265  * passed to exit(), and a return value to indicate what sort of exit
266  * it was. The exit status is WEXITSTATUS(rv), but it's not clear
267  * what the return value is.
268  */
269  AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
270  AUDIT_SYSCALL_EXIT(0, td);
271 #endif
272 
273  /* Are we a task leader? */
274  if (p == p->p_leader) {
275  mtx_lock(&ppeers_lock);
276  q = p->p_peers;
277  while (q != NULL) {
278  PROC_LOCK(q);
279  kern_psignal(q, SIGKILL);
280  PROC_UNLOCK(q);
281  q = q->p_peers;
282  }
283  while (p->p_peers != NULL)
284  msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
285  mtx_unlock(&ppeers_lock);
286  }
287 
288  /*
289  * Check if any loadable modules need anything done at process exit.
290  * E.g. SYSV IPC stuff
291  * XXX what if one of these generates an error?
292  */
293  EVENTHANDLER_INVOKE(process_exit, p);
294 
295  /*
296  * If parent is waiting for us to exit or exec,
297  * P_PPWAIT is set; we will wakeup the parent below.
298  */
299  PROC_LOCK(p);
300  rv = p->p_xstat; /* Event handler could change exit status */
301  stopprofclock(p);
302  p->p_flag &= ~(P_TRACED | P_PPWAIT | P_PPTRACE);
303 
304  /*
305  * Stop the real interval timer. If the handler is currently
306  * executing, prevent it from rearming itself and let it finish.
307  */
308  if (timevalisset(&p->p_realtimer.it_value) &&
309  callout_stop(&p->p_itcallout) == 0) {
310  timevalclear(&p->p_realtimer.it_interval);
311  msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
312  KASSERT(!timevalisset(&p->p_realtimer.it_value),
313  ("realtime timer is still armed"));
314  }
315  PROC_UNLOCK(p);
316 
317  /*
318  * Reset any sigio structures pointing to us as a result of
319  * F_SETOWN with our pid.
320  */
321  funsetownlst(&p->p_sigiolst);
322 
323  /*
324  * If this process has an nlminfo data area (for lockd), release it
325  */
326  if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
327  (*nlminfo_release_p)(p);
328 
329  /*
330  * Close open files and release open-file table.
331  * This may block!
332  */
333  fdfree(td);
334 
335  /*
336  * If this thread tickled GEOM, we need to wait for the giggling to
337  * stop before we return to userland
338  */
339  if (td->td_pflags & TDP_GEOM)
340  g_waitidle();
341 
342  /*
343  * Remove ourself from our leader's peer list and wake our leader.
344  */
345  mtx_lock(&ppeers_lock);
346  if (p->p_leader->p_peers) {
347  q = p->p_leader;
348  while (q->p_peers != p)
349  q = q->p_peers;
350  q->p_peers = p->p_peers;
351  wakeup(p->p_leader);
352  }
353  mtx_unlock(&ppeers_lock);
354 
355  vmspace_exit(td);
356 
357  sx_xlock(&proctree_lock);
358  if (SESS_LEADER(p)) {
359  struct session *sp = p->p_session;
360  struct tty *tp;
361 
362  /*
363  * s_ttyp is not zero'd; we use this to indicate that
364  * the session once had a controlling terminal. (for
365  * logging and informational purposes)
366  */
367  SESS_LOCK(sp);
368  ttyvp = sp->s_ttyvp;
369  tp = sp->s_ttyp;
370  sp->s_ttyvp = NULL;
371  sp->s_ttydp = NULL;
372  sp->s_leader = NULL;
373  SESS_UNLOCK(sp);
374 
375  /*
376  * Signal foreground pgrp and revoke access to
377  * controlling terminal if it has not been revoked
378  * already.
379  *
380  * Because the TTY may have been revoked in the mean
381  * time and could already have a new session associated
382  * with it, make sure we don't send a SIGHUP to a
383  * foreground process group that does not belong to this
384  * session.
385  */
386 
387  if (tp != NULL) {
388  tty_lock(tp);
389  if (tp->t_session == sp)
390  tty_signal_pgrp(tp, SIGHUP);
391  tty_unlock(tp);
392  }
393 
394  if (ttyvp != NULL) {
395  sx_xunlock(&proctree_lock);
396  if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
397  VOP_REVOKE(ttyvp, REVOKEALL);
398  VOP_UNLOCK(ttyvp, 0);
399  }
400  sx_xlock(&proctree_lock);
401  }
402  }
403  fixjobc(p, p->p_pgrp, 0);
404  sx_xunlock(&proctree_lock);
405  (void)acct_process(td);
406 
407  /* Release the TTY now we've unlocked everything. */
408  if (ttyvp != NULL)
409  vrele(ttyvp);
410 #ifdef KTRACE
411  ktrprocexit(td);
412 #endif
413  /*
414  * Release reference to text vnode
415  */
416  if ((vtmp = p->p_textvp) != NULL) {
417  p->p_textvp = NULL;
418  locked = VFS_LOCK_GIANT(vtmp->v_mount);
419  vrele(vtmp);
420  VFS_UNLOCK_GIANT(locked);
421  }
422 
423  /*
424  * Release our limits structure.
425  */
426  PROC_LOCK(p);
427  plim = p->p_limit;
428  p->p_limit = NULL;
429  PROC_UNLOCK(p);
430  lim_free(plim);
431 
432  tidhash_remove(td);
433 
434  /*
435  * Remove proc from allproc queue and pidhash chain.
436  * Place onto zombproc. Unlink from parent's child list.
437  */
438  sx_xlock(&allproc_lock);
439  LIST_REMOVE(p, p_list);
440  LIST_INSERT_HEAD(&zombproc, p, p_list);
441  LIST_REMOVE(p, p_hash);
442  sx_xunlock(&allproc_lock);
443 
444  /*
445  * Call machine-dependent code to release any
446  * machine-dependent resources other than the address space.
447  * The address space is released by "vmspace_exitfree(p)" in
448  * vm_waitproc().
449  */
450  cpu_exit(td);
451 
452  WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);
453 
454  /*
455  * Reparent all children processes:
456  * - traced ones to the original parent (or init if we are that parent)
457  * - the rest to init
458  */
459  sx_xlock(&proctree_lock);
460  q = LIST_FIRST(&p->p_children);
461  if (q != NULL) /* only need this if any child is S_ZOMB */
462  wakeup(initproc);
463  for (; q != NULL; q = nq) {
464  nq = LIST_NEXT(q, p_sibling);
465  PROC_LOCK(q);
466  q->p_sigparent = SIGCHLD;
467 
468  if (!(q->p_flag & P_TRACED)) {
469  proc_reparent(q, initproc);
470  } else {
471  /*
472  * Traced processes are killed since their existence
473  * means someone is screwing up.
474  */
475  t = proc_realparent(q);
476  if (t == p) {
477  proc_reparent(q, initproc);
478  } else {
479  PROC_LOCK(t);
480  proc_reparent(q, t);
481  PROC_UNLOCK(t);
482  }
483  /*
484  * Since q was found on our children list, the
485  * proc_reparent() call moved q to the orphan
486  * list because its P_TRACED flag is set. Clear
487  * orphan link for q now while q is locked.
488  */
489  clear_orphan(q);
490  q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
491  FOREACH_THREAD_IN_PROC(q, tdt)
492  tdt->td_dbgflags &= ~TDB_SUSPEND;
493  kern_psignal(q, SIGKILL);
494  }
495  PROC_UNLOCK(q);
496  }
497 
498  /*
499  * Also get rid of our orphans.
500  */
501  while ((q = LIST_FIRST(&p->p_orphans)) != NULL) {
502  PROC_LOCK(q);
503  CTR2(KTR_PTRACE, "exit: pid %d, clearing orphan %d", p->p_pid,
504  q->p_pid);
505  clear_orphan(q);
506  PROC_UNLOCK(q);
507  }
508 
509  /* Save exit status. */
510  PROC_LOCK(p);
511  p->p_xthread = td;
512 
513  /* Tell the prison that we are gone. */
514  prison_proc_free(p->p_ucred->cr_prison);
515 
516 #ifdef KDTRACE_HOOKS
517  /*
518  * Tell the DTrace fasttrap provider about the exit if it
519  * has declared an interest.
520  */
521  if (dtrace_fasttrap_exit)
522  dtrace_fasttrap_exit(p);
523 #endif
524 
525  /*
526  * Notify interested parties of our demise.
527  */
528  KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);
529 
530 #ifdef KDTRACE_HOOKS
531  int reason = CLD_EXITED;
532  if (WCOREDUMP(rv))
533  reason = CLD_DUMPED;
534  else if (WIFSIGNALED(rv))
535  reason = CLD_KILLED;
536  SDT_PROBE1(proc, kernel, , exit, reason);
537 #endif
538 
539  /*
540  * Just delete all entries in the p_klist. At this point we won't
541  * report any more events, and there are nasty race conditions that
542  * can beat us if we don't.
543  */
544  knlist_clear(&p->p_klist, 1);
545 
546  /*
547  * If this is a process with a descriptor, we may not need to deliver
548  * a signal to the parent. proctree_lock is held over
549  * procdesc_exit() to serialize concurrent calls to close() and
550  * exit().
551  */
552 #ifdef PROCDESC
553  if (p->p_procdesc == NULL || procdesc_exit(p)) {
554 #endif
555  /*
556  * Notify parent that we're gone. If parent has the
557  * PS_NOCLDWAIT flag set, or if the handler is set to SIG_IGN,
558  * notify process 1 instead (and hope it will handle this
559  * situation).
560  */
561  PROC_LOCK(p->p_pptr);
562  mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
563  if (p->p_pptr->p_sigacts->ps_flag &
564  (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
565  struct proc *pp;
566 
567  mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
568  pp = p->p_pptr;
569  PROC_UNLOCK(pp);
570  proc_reparent(p, initproc);
571  p->p_sigparent = SIGCHLD;
572  PROC_LOCK(p->p_pptr);
573 
574  /*
575  * Notify parent, so in case he was wait(2)ing or
576  * executing waitpid(2) with our pid, he will
577  * continue.
578  */
579  wakeup(pp);
580  } else
581  mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
582 
583  if (p->p_pptr == initproc)
584  kern_psignal(p->p_pptr, SIGCHLD);
585  else if (p->p_sigparent != 0) {
586  if (p->p_sigparent == SIGCHLD)
587  childproc_exited(p);
588  else /* LINUX thread */
589  kern_psignal(p->p_pptr, p->p_sigparent);
590  }
591 #ifdef PROCDESC
592  } else
593  PROC_LOCK(p->p_pptr);
594 #endif
595  sx_xunlock(&proctree_lock);
596 
597  /*
598  * The state PRS_ZOMBIE prevents other processes from sending
599  * signals to the process. To avoid a memory leak, we free the
600  * memory for the signal queue at the time the state is set.
601  */
602  sigqueue_flush(&p->p_sigqueue);
603  sigqueue_flush(&td->td_sigqueue);
604 
605  /*
606  * We have to wait until after acquiring all locks before
607  * changing p_state. We need to avoid all possible context
608  * switches (including ones from blocking on a mutex) while
609  * marked as a zombie. We also have to set the zombie state
610  * before we release the parent process' proc lock to avoid
611  * a lost wakeup. So, we first call wakeup, then we grab the
612  * sched lock, update the state, and release the parent process'
613  * proc lock.
614  */
615  wakeup(p->p_pptr);
616  cv_broadcast(&p->p_pwait);
617  sched_exit(p->p_pptr, td);
618  PROC_SLOCK(p);
619  p->p_state = PRS_ZOMBIE;
620  PROC_UNLOCK(p->p_pptr);
621 
622  /*
623  * Hopefully no one will try to deliver a signal to the process this
624  * late in the game.
625  */
626  knlist_destroy(&p->p_klist);
627 
628  /*
629  * Save our children's rusage information in our exit rusage.
630  */
631  ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
632 
633  /*
634  * Make sure the scheduler takes this thread out of its tables etc.
635  * This will also release this thread's reference to the ucred.
636  * Other thread parts to release include pcb bits and such.
637  */
638  thread_exit();
639 }
640 
641 
642 #ifndef _SYS_SYSPROTO_H_
643 struct abort2_args {
644  char *why;
645  int nargs;
646  void **args;
647 };
648 #endif
649 
650 int
651 sys_abort2(struct thread *td, struct abort2_args *uap)
652 {
653  struct proc *p = td->td_proc;
654  struct sbuf *sb;
655  void *uargs[16];
656  int error, i, sig;
657 
658  /*
659  * Do it right now so we can log either a proper call of abort2() or
660  * note that an invalid argument was passed. 512 is big enough to
661  * handle 16 arguments' descriptions with additional comments.
662  */
663  sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN);
664  sbuf_clear(sb);
665  sbuf_printf(sb, "%s(pid %d uid %d) aborted: ",
666  p->p_comm, p->p_pid, td->td_ucred->cr_uid);
667  /*
668  * Since we can't return from abort2(), send SIGKILL in cases where
669  * abort2() was called improperly.
670  */
671  sig = SIGKILL;
672  /* Prevent DoS attacks from user space. */
673  if (uap->nargs < 0 || uap->nargs > 16)
674  goto out;
675  if (uap->nargs > 0) {
676  if (uap->args == NULL)
677  goto out;
678  error = copyin(uap->args, uargs, uap->nargs * sizeof(void *));
679  if (error != 0)
680  goto out;
681  }
682  /*
683  * Limit size of 'reason' string to 128. Will fit even when
684  * maximal number of arguments was chosen to be logged.
685  */
686  if (uap->why != NULL) {
687  error = sbuf_copyin(sb, uap->why, 128);
688  if (error < 0)
689  goto out;
690  } else {
691  sbuf_printf(sb, "(null)");
692  }
693  if (uap->nargs > 0) {
694  sbuf_printf(sb, "(");
695  for (i = 0; i < uap->nargs; i++)
696  sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]);
697  sbuf_printf(sb, ")");
698  }
699  /*
700  * Final stage: the arguments were proper, the string was
701  * successfully copied in from userspace, and copying the
702  * pointers from user space succeeded.
703  */
704  sig = SIGABRT;
705 out:
706  if (sig == SIGKILL) {
707  sbuf_trim(sb);
708  sbuf_printf(sb, " (Reason text inaccessible)");
709  }
710  sbuf_cat(sb, "\n");
711  sbuf_finish(sb);
712  log(LOG_INFO, "%s", sbuf_data(sb));
713  sbuf_delete(sb);
714  exit1(td, W_EXITCODE(0, sig));
715  return (0);
716 }
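From userland, abort2(2) is the caller of the handler above. The sketch below is a hedged usage example (prototype assumed from FreeBSD's <stdlib.h>); the why string and pointer arguments end up in the sbuf-built log line, and a well-formed call terminates the process with SIGABRT:

#include <stdlib.h>

/*
 * Hypothetical caller: pass up to 16 pointers describing the bad state.
 * The kernel logs "prog(pid .. uid ..) aborted: <why>(<args>)"; malformed
 * calls (nargs > 16, unreadable why/args) are logged with
 * "(Reason text inaccessible)" and terminated with SIGKILL instead.
 */
static void
die_with_context(void *obj, void *owner)
{
	void *args[2] = { obj, owner };

	abort2("object state corrupted", 2, args);
}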
717 
718 
719 #ifdef COMPAT_43
720 /*
721  * The dirty work is handled by kern_wait().
722  */
723 int
724 owait(struct thread *td, struct owait_args *uap __unused)
725 {
726  int error, status;
727 
728  error = kern_wait(td, WAIT_ANY, &status, 0, NULL);
729  if (error == 0)
730  td->td_retval[1] = status;
731  return (error);
732 }
733 #endif /* COMPAT_43 */
734 
735 /*
736  * The dirty work is handled by kern_wait().
737  */
738 int
739 sys_wait4(struct thread *td, struct wait4_args *uap)
740 {
741  struct rusage ru, *rup;
742  int error, status;
743 
744  if (uap->rusage != NULL)
745  rup = &ru;
746  else
747  rup = NULL;
748  error = kern_wait(td, uap->pid, &status, uap->options, rup);
749  if (uap->status != NULL && error == 0)
750  error = copyout(&status, uap->status, sizeof(status));
751  if (uap->rusage != NULL && error == 0)
752  error = copyout(&ru, uap->rusage, sizeof(struct rusage));
753  return (error);
754 }
755 
756 int
757 sys_wait6(struct thread *td, struct wait6_args *uap)
758 {
759  struct __wrusage wru, *wrup;
760  siginfo_t si, *sip;
761  idtype_t idtype;
762  id_t id;
763  int error, status;
764 
765  idtype = uap->idtype;
766  id = uap->id;
767 
768  if (uap->wrusage != NULL)
769  wrup = &wru;
770  else
771  wrup = NULL;
772 
773  if (uap->info != NULL) {
774  sip = &si;
775  bzero(sip, sizeof(*sip));
776  } else
777  sip = NULL;
778 
779  /*
780  * We expect all callers of wait6() to know about WEXITED and
781  * WTRAPPED.
782  */
783  error = kern_wait6(td, idtype, id, &status, uap->options, wrup, sip);
784 
785  if (uap->status != NULL && error == 0)
786  error = copyout(&status, uap->status, sizeof(status));
787  if (uap->wrusage != NULL && error == 0)
788  error = copyout(&wru, uap->wrusage, sizeof(wru));
789  if (uap->info != NULL && error == 0)
790  error = copyout(&si, uap->info, sizeof(si));
791  return (error);
792 }
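A hedged userland sketch of the wait6(2) interface implemented above: unlike wait4(), the caller must request events explicitly (WEXITED here) and may collect the zeroed-then-filled siginfo_t in addition to the classic status word:

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	siginfo_t si;
	pid_t child;
	int status;

	if ((child = fork()) == 0)
		_exit(7);		/* child: exit with status 7 */

	/* Wait for that specific pid; exit events must be named explicitly. */
	if (wait6(P_PID, (id_t)child, &status, WEXITED, NULL, &si) == -1) {
		perror("wait6");
		return (1);
	}
	printf("pid %d: si_code=%d si_status=%d WEXITSTATUS=%d\n",
	    (int)si.si_pid, si.si_code, si.si_status, WEXITSTATUS(status));
	return (0);
}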
793 
794 /*
795  * Reap the remains of a zombie process and optionally return status and
796  * rusage. Asserts and will release both the proctree_lock and the process
797  * lock as part of its work.
798  */
799 void
800 proc_reap(struct thread *td, struct proc *p, int *status, int options)
801 {
802  struct proc *q, *t;
803 
804  sx_assert(&proctree_lock, SA_XLOCKED);
805  PROC_LOCK_ASSERT(p, MA_OWNED);
806  PROC_SLOCK_ASSERT(p, MA_OWNED);
807  KASSERT(p->p_state == PRS_ZOMBIE, ("proc_reap: !PRS_ZOMBIE"));
808 
809  q = td->td_proc;
810 
811  PROC_SUNLOCK(p);
812  td->td_retval[0] = p->p_pid;
813  if (status)
814  *status = p->p_xstat; /* convert to int */
815  if (options & WNOWAIT) {
816  /*
817  * Only poll, returning the status. Caller does not wish to
818  * release the proc struct just yet.
819  */
820  PROC_UNLOCK(p);
821  sx_xunlock(&proctree_lock);
822  return;
823  }
824 
825  PROC_LOCK(q);
826  sigqueue_take(p->p_ksi);
827  PROC_UNLOCK(q);
828 
829  /*
830  * If we got the child via a ptrace 'attach', we need to give it back
831  * to the old parent.
832  */
833  if (p->p_oppid != 0 && p->p_oppid != p->p_pptr->p_pid) {
834  PROC_UNLOCK(p);
835  t = proc_realparent(p);
836  PROC_LOCK(t);
837  PROC_LOCK(p);
838  CTR2(KTR_PTRACE,
839  "wait: traced child %d moved back to parent %d", p->p_pid,
840  t->p_pid);
841  proc_reparent(p, t);
842  p->p_oppid = 0;
843  PROC_UNLOCK(p);
844  pksignal(t, SIGCHLD, p->p_ksi);
845  wakeup(t);
846  cv_broadcast(&p->p_pwait);
847  PROC_UNLOCK(t);
848  sx_xunlock(&proctree_lock);
849  return;
850  }
851  p->p_oppid = 0;
852  PROC_UNLOCK(p);
853 
854  /*
855  * Remove other references to this process to ensure we have an
856  * exclusive reference.
857  */
858  sx_xlock(&allproc_lock);
859  LIST_REMOVE(p, p_list); /* off zombproc */
860  sx_xunlock(&allproc_lock);
861  LIST_REMOVE(p, p_sibling);
862  PROC_LOCK(p);
863  clear_orphan(p);
864  PROC_UNLOCK(p);
865  leavepgrp(p);
866 #ifdef PROCDESC
867  if (p->p_procdesc != NULL)
868  procdesc_reap(p);
869 #endif
870  sx_xunlock(&proctree_lock);
871 
872  /*
873  * As a side effect of this lock, we know that all other writes to
874  * this proc are visible now, so no more locking is needed for p.
875  */
876  PROC_LOCK(p);
877  p->p_xstat = 0; /* XXX: why? */
878  PROC_UNLOCK(p);
879  PROC_LOCK(q);
880  ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru, &p->p_rux);
881  PROC_UNLOCK(q);
882 
883  /*
884  * Decrement the count of procs running with this uid.
885  */
886  (void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
887 
888  /*
889  * Destroy resource accounting information associated with the process.
890  */
891 #ifdef RACCT
892  PROC_LOCK(p);
893  racct_sub(p, RACCT_NPROC, 1);
894  PROC_UNLOCK(p);
895 #endif
896  racct_proc_exit(p);
897 
898  /*
899  * Free credentials, arguments, and sigacts.
900  */
901  crfree(p->p_ucred);
902  p->p_ucred = NULL;
903  pargs_drop(p->p_args);
904  p->p_args = NULL;
905  sigacts_free(p->p_sigacts);
906  p->p_sigacts = NULL;
907 
908  /*
909  * Do any thread-system specific cleanups.
910  */
911  thread_wait(p);
912 
913  /*
914  * Give vm and machine-dependent layer a chance to free anything that
915  * cpu_exit couldn't release while still running in process context.
916  */
917  vm_waitproc(p);
918 #ifdef MAC
919  mac_proc_destroy(p);
920 #endif
921  KASSERT(FIRST_THREAD_IN_PROC(p),
922  ("proc_reap: no residual thread!"));
923  uma_zfree(proc_zone, p);
924  sx_xlock(&allproc_lock);
925  nprocs--;
926  sx_xunlock(&allproc_lock);
927 }
928 
929 static int
930 proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
931  int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo,
932  int check_only)
933 {
934  struct proc *q;
935  struct rusage *rup;
936 
937  sx_assert(&proctree_lock, SA_XLOCKED);
938 
939  q = td->td_proc;
940  PROC_LOCK(p);
941 
942  switch (idtype) {
943  case P_ALL:
944  break;
945  case P_PID:
946  if (p->p_pid != (pid_t)id) {
947  PROC_UNLOCK(p);
948  return (0);
949  }
950  break;
951  case P_PGID:
952  if (p->p_pgid != (pid_t)id) {
953  PROC_UNLOCK(p);
954  return (0);
955  }
956  break;
957  case P_SID:
958  if (p->p_session->s_sid != (pid_t)id) {
959  PROC_UNLOCK(p);
960  return (0);
961  }
962  break;
963  case P_UID:
964  if (p->p_ucred->cr_uid != (uid_t)id) {
965  PROC_UNLOCK(p);
966  return (0);
967  }
968  break;
969  case P_GID:
970  if (p->p_ucred->cr_gid != (gid_t)id) {
971  PROC_UNLOCK(p);
972  return (0);
973  }
974  break;
975  case P_JAILID:
976  if (p->p_ucred->cr_prison == NULL ||
977  (p->p_ucred->cr_prison->pr_id != (int)id)) {
978  PROC_UNLOCK(p);
979  return (0);
980  }
981  break;
982  /*
983  * It seems that the thread structures get zeroed out
984  * at process exit. This makes it impossible to
985  * support P_SETID, P_CID or P_CPUID.
986  */
987  default:
988  PROC_UNLOCK(p);
989  return (0);
990  }
991 
992  if (p_canwait(td, p)) {
993  PROC_UNLOCK(p);
994  return (0);
995  }
996 
997  if (((options & WEXITED) == 0) && (p->p_state == PRS_ZOMBIE)) {
998  PROC_UNLOCK(p);
999  return (0);
1000  }
1001 
1002  /*
1003  * This special case handles a kthread spawned by linux_clone
1004  * (see linux_misc.c). The linux_wait4 and linux_waitpid
1005  * functions need to be able to distinguish between waiting
1006  * on a process and waiting on a thread. It is a thread if
1007  * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
1008  * signifies we want to wait for threads and not processes.
1009  */
1010  if ((p->p_sigparent != SIGCHLD) ^
1011  ((options & WLINUXCLONE) != 0)) {
1012  PROC_UNLOCK(p);
1013  return (0);
1014  }
1015 
1016  PROC_SLOCK(p);
1017 
1018  if (siginfo != NULL) {
1019  bzero(siginfo, sizeof(*siginfo));
1020  siginfo->si_errno = 0;
1021 
1022  /*
1023  * SUSv4 requires that the si_signo value is always
1024  * SIGCHLD. Obey it even though the rfork(2) interface
1025  * allows requesting another signal for child exit
1026  * notification.
1027  */
1028  siginfo->si_signo = SIGCHLD;
1029 
1030  /*
1031  * This is still a rough estimate. We will fix the
1032  * cases TRAPPED, STOPPED, and CONTINUED later.
1033  */
1034  if (WCOREDUMP(p->p_xstat)) {
1035  siginfo->si_code = CLD_DUMPED;
1036  siginfo->si_status = WTERMSIG(p->p_xstat);
1037  } else if (WIFSIGNALED(p->p_xstat)) {
1038  siginfo->si_code = CLD_KILLED;
1039  siginfo->si_status = WTERMSIG(p->p_xstat);
1040  } else {
1041  siginfo->si_code = CLD_EXITED;
1042  siginfo->si_status = WEXITSTATUS(p->p_xstat);
1043  }
1044 
1045  siginfo->si_pid = p->p_pid;
1046  siginfo->si_uid = p->p_ucred->cr_uid;
1047 
1048  /*
1049  * The si_addr field would be useful additional
1050  * detail, but apparently the PC value may be lost
1051  * when we reach this point. bzero() above sets
1052  * siginfo->si_addr to NULL.
1053  */
1054  }
1055 
1056  /*
1057  * There should be no reason to limit resource usage info to
1058  * exited processes only. A snapshot about any resources used
1059  * by a stopped process may be exactly what is needed.
1060  */
1061  if (wrusage != NULL) {
1062  rup = &wrusage->wru_self;
1063  *rup = p->p_ru;
1064  calcru(p, &rup->ru_utime, &rup->ru_stime);
1065 
1066  rup = &wrusage->wru_children;
1067  *rup = p->p_stats->p_cru;
1068  calccru(p, &rup->ru_utime, &rup->ru_stime);
1069  }
1070 
1071  if (p->p_state == PRS_ZOMBIE && !check_only) {
1072  proc_reap(td, p, status, options);
1073  return (-1);
1074  }
1075  PROC_SUNLOCK(p);
1076  PROC_UNLOCK(p);
1077  return (1);
1078 }
1079 
1080 int
1081 kern_wait(struct thread *td, pid_t pid, int *status, int options,
1082  struct rusage *rusage)
1083 {
1084  struct __wrusage wru, *wrup;
1085  idtype_t idtype;
1086  id_t id;
1087  int ret;
1088 
1089  /*
1090  * Translate the special pid values into the (idtype, pid)
1091  * pair for kern_wait6. The WAIT_MYPGRP case is handled by
1092  * kern_wait6() on its own.
1093  */
1094  if (pid == WAIT_ANY) {
1095  idtype = P_ALL;
1096  id = 0;
1097  } else if (pid < 0) {
1098  idtype = P_PGID;
1099  id = (id_t)-pid;
1100  } else {
1101  idtype = P_PID;
1102  id = (id_t)pid;
1103  }
1104 
1105  if (rusage != NULL)
1106  wrup = &wru;
1107  else
1108  wrup = NULL;
1109 
1110  /*
1111  * For backward compatibility we implicitly add flags WEXITED
1112  * and WTRAPPED here.
1113  */
1114  options |= WEXITED | WTRAPPED;
1115  ret = kern_wait6(td, idtype, id, status, options, wrup, NULL);
1116  if (rusage != NULL)
1117  *rusage = wru.wru_self;
1118  return (ret);
1119 }
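The translation above amounts to the following mapping from the historical wait4(2) pid argument to the (idtype, id) pair handed to kern_wait6(); shown as an illustrative comment, not code from this file:

/*
 *	wait4(-1, ...)      ->  kern_wait6(P_ALL,  0,    ...)   (WAIT_ANY)
 *	wait4(-pgid, ...)   ->  kern_wait6(P_PGID, pgid, ...)
 *	wait4(pid, ...)     ->  kern_wait6(P_PID,  pid,  ...)   (pid > 0)
 *	wait4(0, ...)       ->  P_PID with id 0 (WAIT_MYPGRP); kern_wait6()
 *	                        rewrites it to P_PGID with the caller's pgid.
 *
 * WEXITED | WTRAPPED are OR-ed in unconditionally for compatibility.
 */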
1120 
1121 int
1122 kern_wait6(struct thread *td, idtype_t idtype, id_t id, int *status,
1123  int options, struct __wrusage *wrusage, siginfo_t *siginfo)
1124 {
1125  struct proc *p, *q;
1126  int error, nfound, ret;
1127 
1128  AUDIT_ARG_VALUE((int)idtype); /* XXX - This is likely wrong! */
1129  AUDIT_ARG_PID((pid_t)id); /* XXX - This may be wrong! */
1130  AUDIT_ARG_VALUE(options);
1131 
1132  q = td->td_proc;
1133 
1134  if ((pid_t)id == WAIT_MYPGRP && (idtype == P_PID || idtype == P_PGID)) {
1135  PROC_LOCK(q);
1136  id = (id_t)q->p_pgid;
1137  PROC_UNLOCK(q);
1138  idtype = P_PGID;
1139  }
1140 
1141  /* If we don't know the option, just return. */
1142  if ((options & ~(WUNTRACED | WNOHANG | WCONTINUED | WNOWAIT |
1143  WEXITED | WTRAPPED | WLINUXCLONE)) != 0)
1144  return (EINVAL);
1145  if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
1146  /*
1147  * We will be unable to find any matching processes,
1148  * because there are no known events to look for.
1149  * Prefer to return error instead of blocking
1150  * indefinitely.
1151  */
1152  return (EINVAL);
1153  }
1154 
1155 loop:
1156  if (q->p_flag & P_STATCHILD) {
1157  PROC_LOCK(q);
1158  q->p_flag &= ~P_STATCHILD;
1159  PROC_UNLOCK(q);
1160  }
1161  nfound = 0;
1162  sx_xlock(&proctree_lock);
1163  LIST_FOREACH(p, &q->p_children, p_sibling) {
1164  ret = proc_to_reap(td, p, idtype, id, status, options,
1165  wrusage, siginfo, 0);
1166  if (ret == 0)
1167  continue;
1168  else if (ret == 1)
1169  nfound++;
1170  else
1171  return (0);
1172 
1173  PROC_LOCK(p);
1174  PROC_SLOCK(p);
1175 
1176  if ((options & WTRAPPED) != 0 &&
1177  (p->p_flag & P_TRACED) != 0 &&
1178  (p->p_flag & (P_STOPPED_TRACE | P_STOPPED_SIG)) != 0 &&
1179  (p->p_suspcount == p->p_numthreads) &&
1180  ((p->p_flag & P_WAITED) == 0)) {
1181  PROC_SUNLOCK(p);
1182  if ((options & WNOWAIT) == 0)
1183  p->p_flag |= P_WAITED;
1184  sx_xunlock(&proctree_lock);
1185  td->td_retval[0] = p->p_pid;
1186 
1187  if (status != NULL)
1188  *status = W_STOPCODE(p->p_xstat);
1189  if (siginfo != NULL) {
1190  siginfo->si_status = p->p_xstat;
1191  siginfo->si_code = CLD_TRAPPED;
1192  }
1193  if ((options & WNOWAIT) == 0) {
1194  PROC_LOCK(q);
1195  sigqueue_take(p->p_ksi);
1196  PROC_UNLOCK(q);
1197  }
1198 
1199  CTR4(KTR_PTRACE,
1200  "wait: returning trapped pid %d status %#x (xstat %d) xthread %d",
1201  p->p_pid, W_STOPCODE(p->p_xstat), p->p_xstat,
1202  p->p_xthread != NULL ? p->p_xthread->td_tid : -1);
1203  PROC_UNLOCK(p);
1204  return (0);
1205  }
1206  if ((options & WUNTRACED) != 0 &&
1207  (p->p_flag & P_STOPPED_SIG) != 0 &&
1208  (p->p_suspcount == p->p_numthreads) &&
1209  ((p->p_flag & P_WAITED) == 0)) {
1210  PROC_SUNLOCK(p);
1211  if ((options & WNOWAIT) == 0)
1212  p->p_flag |= P_WAITED;
1213  sx_xunlock(&proctree_lock);
1214  td->td_retval[0] = p->p_pid;
1215 
1216  if (status != NULL)
1217  *status = W_STOPCODE(p->p_xstat);
1218  if (siginfo != NULL) {
1219  siginfo->si_status = p->p_xstat;
1220  siginfo->si_code = CLD_STOPPED;
1221  }
1222  if ((options & WNOWAIT) == 0) {
1223  PROC_LOCK(q);
1224  sigqueue_take(p->p_ksi);
1225  PROC_UNLOCK(q);
1226  }
1227 
1228  PROC_UNLOCK(p);
1229  return (0);
1230  }
1231  PROC_SUNLOCK(p);
1232  if ((options & WCONTINUED) != 0 &&
1233  (p->p_flag & P_CONTINUED) != 0) {
1234  sx_xunlock(&proctree_lock);
1235  td->td_retval[0] = p->p_pid;
1236  if ((options & WNOWAIT) == 0) {
1237  p->p_flag &= ~P_CONTINUED;
1238  PROC_LOCK(q);
1239  sigqueue_take(p->p_ksi);
1240  PROC_UNLOCK(q);
1241  }
1242  PROC_UNLOCK(p);
1243 
1244  if (status != NULL)
1245  *status = SIGCONT;
1246  if (siginfo != NULL) {
1247  siginfo->si_status = SIGCONT;
1248  siginfo->si_code = CLD_CONTINUED;
1249  }
1250  return (0);
1251  }
1252  PROC_UNLOCK(p);
1253  }
1254 
1255  /*
1256  * Look in the orphans list too, to allow the parent to
1257  * collect its child's exit status even if the child is being
1258  * debugged.
1259  *
1260  * Debugger detaches from the parent upon successful
1261  * switch-over from parent to child. At this point due to
1262  * re-parenting the parent loses the child to debugger and a
1263  * wait4(2) call would report that it has no children to wait
1264  * for. By maintaining a list of orphans we allow the parent
1265  * to successfully wait until the child becomes a zombie.
1266  */
1267  if (nfound == 0) {
1268  LIST_FOREACH(p, &q->p_orphans, p_orphan) {
1269  ret = proc_to_reap(td, p, idtype, id, NULL, options,
1270  NULL, NULL, 1);
1271  if (ret != 0) {
1272  KASSERT(ret != -1, ("reaped an orphan (pid %d)",
1273  (int)td->td_retval[0]));
1274  nfound++;
1275  break;
1276  }
1277  }
1278  }
1279  if (nfound == 0) {
1280  sx_xunlock(&proctree_lock);
1281  return (ECHILD);
1282  }
1283  if (options & WNOHANG) {
1284  sx_xunlock(&proctree_lock);
1285  td->td_retval[0] = 0;
1286  return (0);
1287  }
1288  PROC_LOCK(q);
1289  sx_xunlock(&proctree_lock);
1290  if (q->p_flag & P_STATCHILD) {
1291  q->p_flag &= ~P_STATCHILD;
1292  error = 0;
1293  } else
1294  error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
1295  PROC_UNLOCK(q);
1296  if (error)
1297  return (error);
1298  goto loop;
1299 }
1300 
1301 /*
1302  * Make process 'parent' the new parent of process 'child'.
1303  * Must be called with an exclusive hold of proctree lock.
1304  */
1305 void
1306 proc_reparent(struct proc *child, struct proc *parent)
1307 {
1308 
1309  sx_assert(&proctree_lock, SX_XLOCKED);
1310  PROC_LOCK_ASSERT(child, MA_OWNED);
1311  if (child->p_pptr == parent)
1312  return;
1313 
1314  PROC_LOCK(child->p_pptr);
1315  sigqueue_take(child->p_ksi);
1316  PROC_UNLOCK(child->p_pptr);
1317  LIST_REMOVE(child, p_sibling);
1318  LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
1319 
1320  clear_orphan(child);
1321  if (child->p_flag & P_TRACED) {
1322  if (LIST_EMPTY(&child->p_pptr->p_orphans)) {
1323  child->p_treeflag |= P_TREE_FIRST_ORPHAN;
1324  LIST_INSERT_HEAD(&child->p_pptr->p_orphans, child,
1325  p_orphan);
1326  } else {
1327  LIST_INSERT_AFTER(LIST_FIRST(&child->p_pptr->p_orphans),
1328  child, p_orphan);
1329  }
1330  child->p_treeflag |= P_TREE_ORPHANED;
1331  }
1332 
1333  child->p_pptr = parent;
1334 }