#include <sys/cdefs.h>

#include "opt_compat.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/loginclass.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/sysent.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>

#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <compat/compat32bit/compat32bit.h>
#include <compat/compat32bit/compat32bit_util.h>
    "void *", "struct thread *");
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
    "Kernel stack size in pages");

SYSCTL_INT(_kern, OID_AUTO, proc_vmmap_skip_resident_count, CTLFLAG_RW,
    "Skip calculation of the pages resident count in kern.proc.vmmap");
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE);

    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
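/*
 * proc_ctor: UMA constructor for the proc zone; fires the DTrace ctor
 * probes and the process_ctor event handlers around zone item setup.
 */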
        p = (struct proc *)mem;
        SDT_PROBE4(proc, kernel, ctor, entry, p, size, arg, flags);
        EVENTHANDLER_INVOKE(process_ctor, p);
        SDT_PROBE4(proc, kernel, ctor, return, p, size, arg, flags);
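/*
 * proc_dtor: UMA destructor for the proc zone; asserts that the exiting
 * process has exactly one remaining thread, an empty ktrace request
 * queue and no queued SIGCHLD ksiginfo before running the process_dtor
 * event handlers.
 */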
        p = (struct proc *)mem;
        td = FIRST_THREAD_IN_PROC(p);
        SDT_PROBE4(proc, kernel, dtor, entry, p, size, arg, td);
        KASSERT((p->p_numthreads == 1),
            ("bad number of threads in exiting process"));
        KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
        EVENTHANDLER_INVOKE(process_dtor, p);
        if (p->p_ksi != NULL)
                KASSERT(!KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
        SDT_PROBE3(proc, kernel, dtor, return, p, size, arg);
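/*
 * proc_init: UMA zone initializer; sets up the per-process locks,
 * condition variables and thread list of a freshly allocated proc.
 */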
        p = (struct proc *)mem;
        SDT_PROBE3(proc, kernel, init, entry, p, size, flags);
        p->p_sched = (struct p_sched *)&p[1];
        bzero(&p->p_mtx, sizeof(struct mtx));
        mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
        cv_init(&p->p_pwait, "ppwait");
        cv_init(&p->p_dbgwait, "dbgwait");
        TAILQ_INIT(&p->p_threads);
        EVENTHANDLER_INVOKE(process_init, p);
        SDT_PROBE3(proc, kernel, init, return, p, size, flags);
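/*
 * proc_fini: UMA zone finalizer.  The proc zone is created with
 * UMA_ZONE_NOFREE, so reclaiming an item is unexpected and panics.
 */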
        p = (struct proc *)mem;
        EVENTHANDLER_INVOKE(process_fini, p);
        if (p->p_ksi != NULL)
                ksiginfo_free(p->p_ksi);
        panic("proc reclaimed");
        PROC_LOCK_ASSERT(p, MA_OWNED);
        LIST_FOREACH(p, PIDHASH(pid), p_hash) {
                if (p->p_pid == pid) {
                        if (p->p_state == PRS_NEW) {

        FOREACH_PROC_IN_SYSTEM(p) {
                if (p->p_state == PRS_NEW) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        if (td->td_tid == tid)
        register struct pgrp *pgrp;

        LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
                if (pgrp->pg_id == pgid) {
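/*
 * pget: locate a process by pid and apply the checks requested in
 * "flags" (PGET_CANSEE, PGET_CANDEBUG, PGET_ISCURRENT, PGET_NOTWEXIT,
 * PGET_NOTINEXEC, PGET_HOLD, ...).  A typical sysctl handler call is
 * roughly:
 *
 *      error = pget((pid_t)name[0], PGET_CANSEE, &p);
 *      if (error != 0)
 *              return (error);
 *      ... inspect *p ...
 *      PROC_UNLOCK(p);
 *
 * (Illustrative sketch only; on success the process is normally
 * returned locked, or held instead when PGET_HOLD is passed.)
 */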
pget(pid_t pid, int flags, struct proc **pp)

        if (pid <= PID_MAX) {
                if (p == NULL && (flags & PGET_NOTWEXIT) == 0)
        } else if ((flags & PGET_NOTID) == 0) {

        if ((flags & PGET_CANSEE) != 0) {
        if ((flags & PGET_CANDEBUG) != 0) {
        if ((flags & PGET_ISCURRENT) != 0 && curproc != p) {
        if ((flags & PGET_NOTWEXIT) != 0 && (p->p_flag & P_WEXIT) != 0) {
        if ((flags & PGET_NOTINEXEC) != 0 && (p->p_flag & P_INEXEC) != 0) {
        if ((flags & PGET_HOLD) != 0) {
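/*
 * enterpgrp: move process p into the newly allocated process group
 * pgrp and, when a session is supplied, make p the leader of a new
 * session; the group is then hashed and its SIGIO list initialized.
 */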
        register struct proc *p;
        struct session *sess;

        KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
        KASSERT(p->p_pid == pgid,
            ("enterpgrp: new pgrp and pid != pgid"));
        KASSERT(pgfind(pgid) == NULL,
            ("enterpgrp: pgrp with pgid exists"));
        KASSERT(!SESS_LEADER(p),
            ("enterpgrp: session leader attempted setpgrp"));

        mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

        mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);

        p->p_flag &= ~P_CONTROLT;

        sess->s_sid = p->p_pid;
        refcount_init(&sess->s_count, 1);
        sess->s_ttyvp = NULL;
        sess->s_ttydp = NULL;
        bcopy(p->p_session->s_login, sess->s_login,
            sizeof(sess->s_login));
        pgrp->pg_session = sess;
        KASSERT(p == curproc,
            ("enterpgrp: mksession and p != curproc"));

        pgrp->pg_session = p->p_session;

        LIST_INIT(&pgrp->pg_members);

        LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);

        SLIST_INIT(&pgrp->pg_sigiolst);
        register struct proc *p;

        PROC_LOCK_ASSERT(p, MA_NOTOWNED);
        PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
        PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
        SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
        KASSERT(pgrp->pg_session == p->p_session,
            ("%s: pgrp's session %p, p->p_session %p.\n",
        KASSERT(pgrp != p->p_pgrp,
            ("%s: p belongs to pgrp.", __func__));
        struct pgrp *savepgrp;

        PROC_LOCK_ASSERT(p, MA_NOTOWNED);
        PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
        PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
        SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

        savepgrp = p->p_pgrp;

        LIST_REMOVE(p, p_pglist);
        LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
        PGRP_UNLOCK(savepgrp);
        if (LIST_EMPTY(&savepgrp->pg_members))
        register struct proc *p;
        struct pgrp *savepgrp;

        savepgrp = p->p_pgrp;
        LIST_REMOVE(p, p_pglist);
        PGRP_UNLOCK(savepgrp);
        if (LIST_EMPTY(&savepgrp->pg_members))
        register struct pgrp *pgrp;
        struct session *savesess;

        PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
        SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

        tp = pgrp->pg_session->s_ttyp;
        LIST_REMOVE(pgrp, pg_hash);
        savesess = pgrp->pg_session;

        if (pgrp->pg_jobc == 0)
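/*
 * fixjobc: adjust the job-control count (pg_jobc) of the process groups
 * affected when a process changes group or exits; pgadjustjobc orphans
 * a group whose count drops to zero.
 */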
        register struct proc *p;
        register struct pgrp *pgrp;
        register struct pgrp *hispgrp;
        register struct session *mysession;

        PROC_LOCK_ASSERT(p, MA_NOTOWNED);
        PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
        SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

        mysession = pgrp->pg_session;
        if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
            hispgrp->pg_session == mysession)

        LIST_FOREACH(p, &p->p_children, p_sibling) {
                if (hispgrp == pgrp ||
                    hispgrp->pg_session != mysession)
                if (p->p_state == PRS_ZOMBIE) {
        register struct proc *p;

        PGRP_LOCK_ASSERT(pg, MA_OWNED);

        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SIG) {
                        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
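/*
 * sess_hold/sess_release: manage the session reference count; the last
 * release also drops the reference on the session's tty, if any.
 */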
        refcount_acquire(&s->s_count);

        if (refcount_release(&s->s_count)) {
                if (s->s_ttyp != NULL) {
DB_SHOW_COMMAND(pgrpdump, pgrpdump)

        register struct pgrp *pgrp;
        register struct proc *p;

                    "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
                    (void *)pgrp, (long)pgrp->pg_id,
                    (void *)pgrp->pg_session,
                    pgrp->pg_session->s_count,
                    (void *)LIST_FIRST(&pgrp->pg_members));
                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                        printf("\t\tpid %ld addr %p pgrp %p\n",
                            (long)p->p_pid, (void *)p,
        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                kp->ki_estcpu += td->td_estcpu;
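/*
 * fill_kinfo_proc_only: fill in the process-wide portion of a
 * kinfo_proc (credentials, signal state, VM usage, session/tty and
 * accounting fields) without touching any per-thread fields.
 */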
        PROC_LOCK_ASSERT(p, MA_OWNED);
        bzero(kp, sizeof(*kp));

        kp->ki_structsize = sizeof(*kp);
        kp->ki_args = p->p_args;
        kp->ki_textvp = p->p_textvp;
        kp->ki_tracep = p->p_tracevp;
        kp->ki_traceflag = p->p_traceflag;
        kp->ki_vmspace = p->p_vmspace;
        kp->ki_flag = p->p_flag;
        kp->ki_flag2 = p->p_flag2;
        kp->ki_uid = cred->cr_uid;
        kp->ki_ruid = cred->cr_ruid;
        kp->ki_svuid = cred->cr_svuid;
        if (cred->cr_flags & CRED_FLAG_CAPMODE)
                kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE;
        if (cred->cr_ngroups > KI_NGROUPS) {
                kp->ki_ngroups = KI_NGROUPS;
                kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
        kp->ki_ngroups = cred->cr_ngroups;
        bcopy(cred->cr_groups, kp->ki_groups,
            kp->ki_ngroups * sizeof(gid_t));
        kp->ki_rgid = cred->cr_rgid;
        kp->ki_svgid = cred->cr_svgid;
        kp->ki_flag |= P_JAILED;
        if (cred->cr_prison != curthread->td_ucred->cr_prison)
                kp->ki_jid = cred->cr_prison->pr_id;
        strlcpy(kp->ki_loginclass, cred->cr_loginclass->lc_name,
            sizeof(kp->ki_loginclass));
        mtx_lock(&ps->ps_mtx);
        kp->ki_sigignore = ps->ps_sigignore;
        kp->ki_sigcatch = ps->ps_sigcatch;
        mtx_unlock(&ps->ps_mtx);
        if (p->p_state != PRS_NEW &&
            p->p_state != PRS_ZOMBIE &&
            p->p_vmspace != NULL) {
                struct vmspace *vm = p->p_vmspace;

                kp->ki_size = vm->vm_map.size;
                kp->ki_rssize = vmspace_resident_count(vm);
                FOREACH_THREAD_IN_PROC(p, td0) {
                        if (!TD_IS_SWAPPED(td0))
                                kp->ki_rssize += td0->td_kstack_pages;
                kp->ki_swrss = vm->vm_swrss;
                kp->ki_tsize = vm->vm_tsize;
                kp->ki_dsize = vm->vm_dsize;
                kp->ki_ssize = vm->vm_ssize;
        } else if (p->p_state == PRS_ZOMBIE)
        if (kp->ki_flag & P_INMEM)
                kp->ki_sflag = PS_INMEM;
        kp->ki_swtime = (ticks - p->p_swtick) / hz;
        kp->ki_pid = p->p_pid;
        kp->ki_nice = p->p_nice;
        kp->ki_fibnum = p->p_fibnum;
        kp->ki_start = p->p_stats->p_start;
        calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
        calccru(p, &kp->ki_childutime, &kp->ki_childstime);
        kp->ki_childtime = kp->ki_childstime;
        timevaladd(&kp->ki_childtime, &kp->ki_childutime);
        FOREACH_THREAD_IN_PROC(p, td0)
                kp->ki_cow += td0->td_cow;
        kp->ki_pgid = p->p_pgrp->pg_id;
        kp->ki_jobc = p->p_pgrp->pg_jobc;
        sp = p->p_pgrp->pg_session;
        kp->ki_sid = sp->s_sid;
        strlcpy(kp->ki_login, sp->s_login, sizeof(kp->ki_login));
        kp->ki_kiflag |= KI_CTTY;
        kp->ki_kiflag |= KI_SLEADER;
        if ((p->p_flag & P_CONTROLT) && tp != NULL) {
                kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
                kp->ki_tsid = tp->t_session->s_sid;
        if (p->p_comm[0] != '\0')
                strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
        if (p->p_sysent && p->p_sysent->sv_name != NULL &&
            p->p_sysent->sv_name[0] != '\0')
                strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
        kp->ki_siglist = p->p_siglist;
        kp->ki_xstat = p->p_xstat;
        kp->ki_acflag = p->p_acflag;
        kp->ki_lock = p->p_lock;
        kp->ki_ppid = p->p_pptr->p_pid;
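/*
 * fill_kinfo_thread: fill in the per-thread portion of a kinfo_proc
 * (wait message, lock name, run state, priorities, CPU and signal
 * state) for the given thread.
 */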
        PROC_LOCK_ASSERT(p, MA_OWNED);

        if (td->td_wmesg != NULL)
                strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
        bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
        strlcpy(kp->ki_tdname, td->td_name, sizeof(kp->ki_tdname));
        if (TD_ON_LOCK(td)) {
                kp->ki_kiflag |= KI_LOCKBLOCK;
                strlcpy(kp->ki_lockname, td->td_lockname,
                    sizeof(kp->ki_lockname));
        kp->ki_kiflag &= ~KI_LOCKBLOCK;
        bzero(kp->ki_lockname, sizeof(kp->ki_lockname));

        if (p->p_state == PRS_NORMAL) {
                if (TD_ON_RUNQ(td) ||
        } else if (P_SHOULDSTOP(p)) {
        } else if (TD_IS_SLEEPING(td)) {
                kp->ki_stat = SSLEEP;
        } else if (TD_ON_LOCK(td)) {
        } else if (p->p_state == PRS_ZOMBIE) {

        kp->ki_wchan = td->td_wchan;
        kp->ki_pri.pri_level = td->td_priority;
        kp->ki_pri.pri_native = td->td_base_pri;
        kp->ki_lastcpu = td->td_lastcpu;
        kp->ki_oncpu = td->td_oncpu;
        kp->ki_tdflags = td->td_flags;
        kp->ki_tid = td->td_tid;
        kp->ki_numthreads = p->p_numthreads;
        kp->ki_pcb = td->td_pcb;
        kp->ki_kstack = (void *)td->td_kstack;
        kp->ki_slptime = (ticks - td->td_slptick) / hz;
        kp->ki_pri.pri_class = td->td_pri_class;
        kp->ki_pri.pri_user = td->td_user_pri;

        kp->ki_estcpu = td->td_estcpu;
        kp->ki_cow = td->td_cow;

        kp->ki_siglist = td->td_siglist;
        kp->ki_sigmask = td->td_sigmask;
        MPASS(FIRST_THREAD_IN_PROC(p) != NULL);

        return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO | M_WAITOK));

        bzero(&dst->pstat_startzero,
            __rangeof(struct pstats, pstat_startzero, pstat_endzero));
        bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
            __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));

        free(ps, M_SUBPROC);
static struct proc *
zpfind_locked(pid_t pid)

        LIST_FOREACH(p, &zombproc, p_list) {
                if (p->p_pid == pid) {
static inline uint32_t
ptr32_trim(void *ptr)

        uptr = (uintptr_t)ptr;
        return ((uptr > UINT_MAX) ? 0 : uptr);

#define PTRTRIM_CP(src,dst,fld) \
        do { (dst).fld = ptr32_trim((src).fld); } while (0)
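/*
 * compat32bit_kinfo_proc_out: convert a native kinfo_proc into the
 * 32-bit kinfo_proc32 layout, copying scalar fields with CP() and
 * truncating kernel pointers with PTRTRIM_CP(); ptr32_trim() reports
 * pointers above UINT_MAX as 0.
 */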
compat32bit_kinfo_proc_out(const struct kinfo_proc *ki,
    struct kinfo_proc32 *ki32)

        bzero(ki32, sizeof(struct kinfo_proc32));
        ki32->ki_structsize = sizeof(struct kinfo_proc32);
        CP(*ki, *ki32, ki_layout);
        PTRTRIM_CP(*ki, *ki32, ki_args);
        PTRTRIM_CP(*ki, *ki32, ki_paddr);
        PTRTRIM_CP(*ki, *ki32, ki_addr);
        PTRTRIM_CP(*ki, *ki32, ki_tracep);
        PTRTRIM_CP(*ki, *ki32, ki_textvp);
        PTRTRIM_CP(*ki, *ki32, ki_fd);
        PTRTRIM_CP(*ki, *ki32, ki_vmspace);
        PTRTRIM_CP(*ki, *ki32, ki_wchan);
        CP(*ki, *ki32, ki_pid);
        CP(*ki, *ki32, ki_ppid);
        CP(*ki, *ki32, ki_pgid);
        CP(*ki, *ki32, ki_tpgid);
        CP(*ki, *ki32, ki_sid);
        CP(*ki, *ki32, ki_tsid);
        CP(*ki, *ki32, ki_jobc);
        CP(*ki, *ki32, ki_tdev);
        CP(*ki, *ki32, ki_siglist);
        CP(*ki, *ki32, ki_sigmask);
        CP(*ki, *ki32, ki_sigignore);
        CP(*ki, *ki32, ki_sigcatch);
        CP(*ki, *ki32, ki_uid);
        CP(*ki, *ki32, ki_ruid);
        CP(*ki, *ki32, ki_svuid);
        CP(*ki, *ki32, ki_rgid);
        CP(*ki, *ki32, ki_svgid);
        CP(*ki, *ki32, ki_ngroups);
        for (i = 0; i < KI_NGROUPS; i++)
                CP(*ki, *ki32, ki_groups[i]);
        CP(*ki, *ki32, ki_size);
        CP(*ki, *ki32, ki_rssize);
        CP(*ki, *ki32, ki_swrss);
        CP(*ki, *ki32, ki_tsize);
        CP(*ki, *ki32, ki_dsize);
        CP(*ki, *ki32, ki_ssize);
        CP(*ki, *ki32, ki_xstat);
        CP(*ki, *ki32, ki_acflag);
        CP(*ki, *ki32, ki_pctcpu);
        CP(*ki, *ki32, ki_estcpu);
        CP(*ki, *ki32, ki_slptime);
        CP(*ki, *ki32, ki_swtime);
        CP(*ki, *ki32, ki_cow);
        CP(*ki, *ki32, ki_runtime);
        TV_CP(*ki, *ki32, ki_start);
        TV_CP(*ki, *ki32, ki_childtime);
        CP(*ki, *ki32, ki_flag);
        CP(*ki, *ki32, ki_kiflag);
        CP(*ki, *ki32, ki_traceflag);
        CP(*ki, *ki32, ki_stat);
        CP(*ki, *ki32, ki_nice);
        CP(*ki, *ki32, ki_lock);
        CP(*ki, *ki32, ki_rqindex);
        CP(*ki, *ki32, ki_oncpu);
        CP(*ki, *ki32, ki_lastcpu);
        bcopy(ki->ki_tdname, ki32->ki_tdname, TDNAMLEN + 1);
        bcopy(ki->ki_wmesg, ki32->ki_wmesg, WMESGLEN + 1);
        bcopy(ki->ki_login, ki32->ki_login, LOGNAMELEN + 1);
        bcopy(ki->ki_lockname, ki32->ki_lockname, LOCKNAMELEN + 1);
        bcopy(ki->ki_comm, ki32->ki_comm, COMMLEN + 1);
        bcopy(ki->ki_emul, ki32->ki_emul, KI_EMULNAMELEN + 1);
        bcopy(ki->ki_loginclass, ki32->ki_loginclass, LOGINCLASSLEN + 1);
        CP(*ki, *ki32, ki_flag2);
        CP(*ki, *ki32, ki_fibnum);
        CP(*ki, *ki32, ki_cr_flags);
        CP(*ki, *ki32, ki_jid);
        CP(*ki, *ki32, ki_numthreads);
        CP(*ki, *ki32, ki_tid);
        CP(*ki, *ki32, ki_pri);
        compat32bit_rusage_out(&ki->ki_rusage, &ki32->ki_rusage);
        compat32bit_rusage_out(&ki->ki_rusage_ch, &ki32->ki_rusage_ch);
        PTRTRIM_CP(*ki, *ki32, ki_pcb);
        PTRTRIM_CP(*ki, *ki32, ki_kstack);
        PTRTRIM_CP(*ki, *ki32, ki_udata);
        CP(*ki, *ki32, ki_sflag);
        CP(*ki, *ki32, ki_tdflags);
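/*
 * kern_proc_out: emit one kinfo_proc record for the process (or one per
 * thread, unless KERN_PROC_NOTHREADS is set) into the sbuf, converting
 * to the 32-bit layout when KERN_PROC_MASK32 is requested.
 */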
        struct kinfo_proc ki;
        struct kinfo_proc32 ki32;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        MPASS(FIRST_THREAD_IN_PROC(p) != NULL);

        if ((flags & KERN_PROC_NOTHREADS) != 0) {
                if ((flags & KERN_PROC_MASK32) != 0) {
                        compat32bit_kinfo_proc_out(&ki, &ki32);
                        error = sbuf_bcat(sb, &ki32, sizeof(ki32));

        FOREACH_THREAD_IN_PROC(p, td) {
                if ((flags & KERN_PROC_MASK32) != 0) {
                        compat32bit_kinfo_proc_out(&ki, &ki32);
                        error = sbuf_bcat(sb, &ki32, sizeof(ki32));
        struct kinfo_proc ki;

        else if (error2 != 0)
        int *name = (int *)arg1;
        u_int namelen = arg2;
        int flags, doingzomb, oid_number;

        oid_number = oidp->oid_number;
        if (oid_number != KERN_PROC_ALL &&
            (oid_number & KERN_PROC_INC_THREAD) == 0)
                flags = KERN_PROC_NOTHREADS;
        oid_number &= ~KERN_PROC_INC_THREAD;

        if (req->flags & SCTL_MASK32)
                flags |= KERN_PROC_MASK32;

        if (oid_number == KERN_PROC_PID) {
                error = pget((pid_t)name[0], PGET_CANSEE, &p);

        switch (oid_number) {
        case KERN_PROC_PROC:
                if (namelen != 0 && namelen != 1)

                error = SYSCTL_OUT(req, 0, sizeof(struct kinfo_proc) * 5);

        for (doingzomb = 0; doingzomb < 2; doingzomb++) {
                for (; p != 0; p = LIST_NEXT(p, p_list)) {
                        if (p->p_state == PRS_NEW) {
                        KASSERT(p->p_ucred != NULL,
                            ("process credential is NULL for non-NEW proc"));
                        switch (oid_number) {

                                if (p->p_ucred->cr_gid != (gid_t)name[0]) {

                        case KERN_PROC_PGRP:
                                if (p->p_pgrp == NULL ||
                                    p->p_pgrp->pg_id != (pid_t)name[0]) {

                        case KERN_PROC_RGID:
                                if (p->p_ucred->cr_rgid != (gid_t)name[0]) {

                        case KERN_PROC_SESSION:
                                if (p->p_session == NULL ||
                                    p->p_session->s_sid != (pid_t)name[0]) {

                                if ((p->p_flag & P_CONTROLT) == 0 ||
                                    p->p_session == NULL) {
                                SESS_LOCK(p->p_session);
                                if (p->p_session->s_ttyp == NULL ||
                                        SESS_UNLOCK(p->p_session);
                                SESS_UNLOCK(p->p_session);

                                if (p->p_ucred->cr_uid != (uid_t)name[0]) {

                        case KERN_PROC_RUID:
                                if (p->p_ucred->cr_ruid != (uid_t)name[0]) {

                        case KERN_PROC_PROC:
        pa = malloc(sizeof(struct pargs) + len, M_PARGS,
        refcount_init(&pa->ar_ref, 1);
        pa->ar_length = len;

        refcount_acquire(&pa->ar_ref);

        if (refcount_release(&pa->ar_ref))
        iov.iov_base = (caddr_t)buf;

        uio.uio_offset = offset;
        uio.uio_resid = (ssize_t)len;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;

        if (error == EFAULT) {
                for (i = 0; i < len; i++, buf++, sptr++) {
#define PROC_AUXV_MAX   256
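/*
 * get_proc_vector32: 32-bit variant of get_proc_vector; reads the
 * target's ps_strings to locate its argv, environment or ELF auxiliary
 * vector, copies the 32-bit pointer array in, and widens it into a
 * native char ** vector for the caller.
 */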
get_proc_vector32(struct thread *td, struct proc *p, char ***proc_vectorp,
    size_t *vsizep, enum proc_vector_type type)
        struct compat32bit_ps_strings pss;
        vm_offset_t vptr, ptr;
        uint32_t *proc_vector32;

        error = proc_read_mem(td, p, (vm_offset_t)(p->p_sysent->sv_psstrings),

        vptr = (vm_offset_t)PTRIN(pss.ps_argvstr);
        vsize = pss.ps_nargvstr;
        if (vsize > ARG_MAX)
        size = vsize * sizeof(int32_t);

        vptr = (vm_offset_t)PTRIN(pss.ps_envstr);
        vsize = pss.ps_nenvstr;
        if (vsize > ARG_MAX)
        size = vsize * sizeof(int32_t);

        vptr = (vm_offset_t)PTRIN(pss.ps_envstr) +
            (pss.ps_nenvstr + 1) * sizeof(int32_t);

        if (aux.a_type == AT_NULL)
        if (aux.a_type != AT_NULL)
        size = vsize * sizeof(aux);

        KASSERT(0, ("Wrong proc vector type: %d", type));

        proc_vector32 = malloc(size, M_TEMP, M_WAITOK);

        *proc_vectorp = (char **)proc_vector32;

        proc_vector = malloc(vsize * sizeof(char *), M_TEMP, M_WAITOK);
        for (i = 0; i < (int)vsize; i++)
                proc_vector[i] = PTRIN(proc_vector32[i]);
        *proc_vectorp = proc_vector;

        free(proc_vector32, M_TEMP);
        struct ps_strings pss;
        vm_offset_t vptr, ptr;

        if (SV_PROC_FLAG(p, SV_ILP32) != 0)
                return (get_proc_vector32(td, p, proc_vectorp, vsizep, type));

        error = proc_read_mem(td, p, (vm_offset_t)(p->p_sysent->sv_psstrings),

        vptr = (vm_offset_t)pss.ps_argvstr;
        vsize = pss.ps_nargvstr;
        if (vsize > ARG_MAX)
        size = vsize * sizeof(char *);

        vptr = (vm_offset_t)pss.ps_envstr;
        vsize = pss.ps_nenvstr;
        if (vsize > ARG_MAX)
        size = vsize * sizeof(char *);

        vptr = (vm_offset_t)pss.ps_envstr + (pss.ps_nenvstr + 1)
#if __ELF_WORD_SIZE == 64
        if (vptr % sizeof(uint64_t) != 0)
        if (vptr % sizeof(uint32_t) != 0)

        if (aux.a_type == AT_NULL)
        if (aux.a_type != AT_NULL)
        size = vsize * sizeof(aux);

        KASSERT(0, ("Wrong proc vector type: %d", type));

        proc_vector = malloc(size, M_TEMP, M_WAITOK);
        if (proc_vector == NULL)

        free(proc_vector, M_TEMP);

        *proc_vectorp = proc_vector;
#define GET_PS_STRINGS_CHUNK_SZ 256
        size_t done, len, nchr, vsize;
        char **proc_vector, *sptr;

        PROC_ASSERT_HELD(p);

        nchr = 2 * (PATH_MAX + ARG_MAX);

        for (done = 0, i = 0; i < (int)vsize && done < nchr; i++) {
                if (proc_vector[i] == NULL)
                    sizeof(pss_string));
                if (done + len >= nchr)
                        len = nchr - done - 1;

        free(proc_vector, M_TEMP);
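/*
 * proc_getauxv: the auxiliary vector entries are sized according to the
 * target ABI (Elf32_Auxinfo for 32-bit processes, Elf_Auxinfo
 * otherwise) before being copied out.
 */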
        if (SV_PROC_FLAG(p, SV_ILP32) != 0)
                size = vsize * sizeof(Elf32_Auxinfo);
        size = vsize * sizeof(Elf_Auxinfo);
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct pargs *newpa, *pa;
        int flags, error = 0, error2;

        flags = PGET_CANSEE;
        if (req->newptr != NULL)
                flags |= PGET_ISCURRENT;
        error = pget((pid_t)name[0], flags, &p);

        error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
        } else if ((p->p_flag & (P_WEXIT | P_SYSTEM)) == 0) {

        if (error == 0 && error2 != 0)

        if (error != 0 || req->newptr == NULL)

        error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
        int *name = (int *)arg1;
        u_int namelen = arg2;

        error = pget((pid_t)name[0], PGET_WANTREAD, &p);

        if ((p->p_flag & P_SYSTEM) != 0) {

        return (error != 0 ? error : error2);
        int *name = (int *)arg1;
        u_int namelen = arg2;

        error = pget((pid_t)name[0], PGET_WANTREAD, &p);

        if ((p->p_flag & P_SYSTEM) != 0) {

        return (error != 0 ? error : error2);
        pid_t *pidp = (pid_t *)arg1;
        unsigned int arglen = arg2;
        char *retbuf, *freebuf;
        int error, vfslocked;

        p = req->td->td_proc;

        error = pget(*pidp, PGET_CANSEE, &p);

        error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        VFS_UNLOCK_GIANT(vfslocked);

        error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
        free(freebuf, M_TEMP);
        error = pget((pid_t)name[0], PGET_CANSEE, &p);
        sv_name = p->p_sysent->sv_name;
#ifdef KINFO_OVMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
#endif
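/*
 * sysctl_kern_proc_ovmmap: legacy (COMPAT_FREEBSD7) handler that walks
 * the target's vm_map and reports each entry as a struct kinfo_ovmentry.
 */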
#ifdef COMPAT_FREEBSD7

sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)

        vm_map_entry_t entry, tmp_entry;
        unsigned int last_timestamp;
        char *fullpath, *freepath;
        struct kinfo_ovmentry *kve;

        error = pget((pid_t)name[0], PGET_WANTREAD, &p);

        vm = vmspace_acquire_ref(p);

        kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

        vm_map_lock_read(map);
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                vm_object_t obj, tobj, lobj;

                if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)

                bzero(kve, sizeof(*kve));
                kve->kve_structsize = sizeof(*kve);

                kve->kve_private_resident = 0;
                obj = entry->object.vm_object;
                VM_OBJECT_LOCK(obj);
                if (obj->shadow_count == 1)
                        kve->kve_private_resident =
                            obj->resident_page_count;
                kve->kve_resident = 0;
                addr = entry->start;
                while (addr < entry->end) {
                        if (pmap_extract(map->pmap, addr))
                                kve->kve_resident++;

                for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
                        VM_OBJECT_LOCK(tobj);
                        VM_OBJECT_UNLOCK(lobj);

                kve->kve_start = (void *)entry->start;
                kve->kve_end = (void *)entry->end;
                kve->kve_offset = (off_t)entry->offset;

                if (entry->protection & VM_PROT_READ)
                        kve->kve_protection |= KVME_PROT_READ;
                if (entry->protection & VM_PROT_WRITE)
                        kve->kve_protection |= KVME_PROT_WRITE;
                if (entry->protection & VM_PROT_EXECUTE)
                        kve->kve_protection |= KVME_PROT_EXEC;

                if (entry->eflags & MAP_ENTRY_COW)
                        kve->kve_flags |= KVME_FLAG_COW;
                if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
                        kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
                if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
                        kve->kve_flags |= KVME_FLAG_NOCOREDUMP;

                last_timestamp = map->timestamp;
                vm_map_unlock_read(map);

                kve->kve_fileid = 0;

                switch (lobj->type) {
                        kve->kve_type = KVME_TYPE_DEFAULT;
                        kve->kve_type = KVME_TYPE_VNODE;
                        kve->kve_type = KVME_TYPE_SWAP;
                        kve->kve_type = KVME_TYPE_DEVICE;
                        kve->kve_type = KVME_TYPE_PHYS;
                        kve->kve_type = KVME_TYPE_DEAD;
                        kve->kve_type = KVME_TYPE_SG;
                        kve->kve_type = KVME_TYPE_UNKNOWN;

                VM_OBJECT_UNLOCK(lobj);

                kve->kve_ref_count = obj->ref_count;
                kve->kve_shadow_count = obj->shadow_count;
                VM_OBJECT_UNLOCK(obj);

                cred = curthread->td_ucred;
                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                vn_lock(vp, LK_SHARED | LK_RETRY);
                if (VOP_GETATTR(vp, &va, cred) == 0) {
                        kve->kve_fileid = va.va_fileid;
                        kve->kve_fsid = va.va_fsid;
                VFS_UNLOCK_GIANT(vfslocked);

                kve->kve_type = KVME_TYPE_NONE;
                kve->kve_ref_count = 0;
                kve->kve_shadow_count = 0;

                strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
                if (freepath != NULL)
                        free(freepath, M_TEMP);

                error = SYSCTL_OUT(req, kve, sizeof(*kve));
                vm_map_lock_read(map);
                if (last_timestamp != map->timestamp) {
                        vm_map_lookup_entry(map, addr - 1, &tmp_entry);
        vm_map_unlock_read(map);
#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif
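/*
 * kern_proc_vmmap_out: walk the target's vm_map and append one
 * struct kinfo_vmentry per map entry to the sbuf; the resident page
 * scan can be skipped via the proc_vmmap_skip_resident_count sysctl.
 */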
        vm_map_entry_t entry, tmp_entry;
        unsigned int last_timestamp;
        char *fullpath, *freepath;
        struct kinfo_vmentry *kve;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        vm = vmspace_acquire_ref(p);

        kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);

        vm_map_lock_read(map);
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                vm_object_t obj, tobj, lobj;
                vm_paddr_t locked_pa;
                int vfslocked, mincoreinfo;

                if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)

                bzero(kve, sizeof(*kve));

                kve->kve_private_resident = 0;
                obj = entry->object.vm_object;
                VM_OBJECT_LOCK(obj);
                if (obj->shadow_count == 1)
                        kve->kve_private_resident =
                            obj->resident_page_count;
                kve->kve_resident = 0;
                addr = entry->start;
                        goto skip_resident_count;
                while (addr < entry->end) {
                        mincoreinfo = pmap_mincore(map->pmap, addr, &locked_pa);
                        vm_page_unlock(PHYS_TO_VM_PAGE(locked_pa));
                        if (mincoreinfo & MINCORE_INCORE)
                                kve->kve_resident++;
                        if (mincoreinfo & MINCORE_SUPER)
                                kve->kve_flags |= KVME_FLAG_SUPER;

skip_resident_count:
                for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
                        VM_OBJECT_LOCK(tobj);
                        VM_OBJECT_UNLOCK(lobj);

                kve->kve_start = entry->start;
                kve->kve_end = entry->end;
                kve->kve_offset = entry->offset;

                if (entry->protection & VM_PROT_READ)
                        kve->kve_protection |= KVME_PROT_READ;
                if (entry->protection & VM_PROT_WRITE)
                        kve->kve_protection |= KVME_PROT_WRITE;
                if (entry->protection & VM_PROT_EXECUTE)
                        kve->kve_protection |= KVME_PROT_EXEC;

                if (entry->eflags & MAP_ENTRY_COW)
                        kve->kve_flags |= KVME_FLAG_COW;
                if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
                        kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
                if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
                        kve->kve_flags |= KVME_FLAG_NOCOREDUMP;
                if (entry->eflags & MAP_ENTRY_GROWS_UP)
                        kve->kve_flags |= KVME_FLAG_GROWS_UP;
                if (entry->eflags & MAP_ENTRY_GROWS_DOWN)
                        kve->kve_flags |= KVME_FLAG_GROWS_DOWN;

                last_timestamp = map->timestamp;
                vm_map_unlock_read(map);

                switch (lobj->type) {
                        kve->kve_type = KVME_TYPE_DEFAULT;
                        kve->kve_type = KVME_TYPE_VNODE;
                        kve->kve_type = KVME_TYPE_SWAP;
                        kve->kve_type = KVME_TYPE_DEVICE;
                        kve->kve_type = KVME_TYPE_PHYS;
                        kve->kve_type = KVME_TYPE_DEAD;
                        kve->kve_type = KVME_TYPE_SG;
                case OBJT_MGTDEVICE:
                        kve->kve_type = KVME_TYPE_MGTDEVICE;
                        kve->kve_type = KVME_TYPE_UNKNOWN;

                VM_OBJECT_UNLOCK(lobj);

                kve->kve_ref_count = obj->ref_count;
                kve->kve_shadow_count = obj->shadow_count;
                VM_OBJECT_UNLOCK(obj);

                cred = curthread->td_ucred;
                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                vn_lock(vp, LK_SHARED | LK_RETRY);
                if (VOP_GETATTR(vp, &va, cred) == 0) {
                        kve->kve_vn_fileid = va.va_fileid;
                        kve->kve_vn_fsid = va.va_fsid;
                        MAKEIMODE(va.va_type, va.va_mode);
                        kve->kve_vn_size = va.va_size;
                        kve->kve_vn_rdev = va.va_rdev;
                        kve->kve_status = KF_ATTR_VALID;
                VFS_UNLOCK_GIANT(vfslocked);

                kve->kve_type = KVME_TYPE_NONE;
                kve->kve_ref_count = 0;
                kve->kve_shadow_count = 0;

                strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
                if (freepath != NULL)
                        free(freepath, M_TEMP);

                kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
                    strlen(kve->kve_path) + 1;
                kve->kve_structsize = roundup(kve->kve_structsize,
                error = sbuf_bcat(sb, kve, kve->kve_structsize);
                vm_map_lock_read(map);
                if (last_timestamp != map->timestamp) {
                        vm_map_lookup_entry(map, addr - 1, &tmp_entry);
        vm_map_unlock_read(map);
        int error, error2, *name;

        error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);

        return (error != 0 ? error : error2);
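/*
 * sysctl_kern_proc_kstack: capture a kernel stack trace for every
 * thread in the target process (STACK/DDB kernels only); swapped-out
 * and currently running threads are reported without a trace.
 */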
#if defined(STACK) || defined(DDB)

sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)

        struct kinfo_kstack *kkstp;
        int error, i, *name, numthreads;
        lwpid_t *lwpidarray;

        error = pget((pid_t)name[0], PGET_NOTINEXEC | PGET_WANTREAD, &p);

        kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);

        if (numthreads < p->p_numthreads) {
                if (lwpidarray != NULL) {
                        free(lwpidarray, M_TEMP);
                numthreads = p->p_numthreads;
                lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,

        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(i < numthreads,
                    ("sysctl_kern_proc_kstack: numthreads"));
                lwpidarray[i] = td->td_tid;

        for (i = 0; i < numthreads; i++) {
                bzero(kkstp, sizeof(*kkstp));
                (void)sbuf_new(&sb, kkstp->kkst_trace,
                    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
                kkstp->kkst_tid = td->td_tid;
                if (TD_IS_SWAPPED(td))
                        kkstp->kkst_state = KKST_STATE_SWAPPED;
                else if (TD_IS_RUNNING(td))
                        kkstp->kkst_state = KKST_STATE_RUNNING;
                kkstp->kkst_state = KKST_STATE_STACKOK;
                stack_save_td(st, td);

                error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));

        if (lwpidarray != NULL)
                free(lwpidarray, M_TEMP);
        free(kkstp, M_TEMP);
        pid_t *pidp = (pid_t *)arg1;
        unsigned int arglen = arg2;

        p = req->td->td_proc;

        error = pget(*pidp, PGET_CANSEE, &p);

        cred = crhold(p->p_ucred);

        error = SYSCTL_OUT(req, cred->cr_groups,
            cred->cr_ngroups * sizeof(gid_t));
        int *name = (int *)arg1;
        u_int namelen = arg2;

        which = (u_int)name[1];
        if (which >= RLIM_NLIMITS)

        if (req->newptr != NULL && req->newlen != sizeof(rlim))

        flags = PGET_HOLD | PGET_NOTWEXIT;
        if (req->newptr != NULL)
                flags |= PGET_CANDEBUG;
        flags |= PGET_CANSEE;
        error = pget((pid_t)name[0], flags, &p);

        if (req->oldptr != NULL) {
                error = SYSCTL_OUT(req, &rlim, sizeof(rlim));

        if (req->newptr != NULL) {
                error = SYSCTL_IN(req, &rlim, sizeof(rlim));
        int *name = (int *)arg1;
        u_int namelen = arg2;
        vm_offset_t ps_strings;
        uint32_t ps_strings32;

        error = pget((pid_t)name[0], PGET_CANDEBUG, &p);

        if ((req->flags & SCTL_MASK32) != 0) {
                ps_strings32 = SV_PROC_FLAG(p, SV_ILP32) != 0 ?
                    PTROUT(p->p_sysent->sv_psstrings) : 0;
                error = SYSCTL_OUT(req, &ps_strings32, sizeof(ps_strings32));

        ps_strings = p->p_sysent->sv_psstrings;
        error = SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings));
        int *name = (int *)arg1;
        u_int namelen = arg2;

        error = pget((pid_t)name[0], PGET_WANTREAD, &p);

        FILEDESC_SLOCK(p->p_fd);
        fd_cmask = p->p_fd->fd_cmask;
        FILEDESC_SUNLOCK(p->p_fd);

        error = SYSCTL_OUT(req, &fd_cmask, sizeof(fd_cmask));
        int *name = (int *)arg1;
        u_int namelen = arg2;
        int flags, error, osrel;

        if (req->newptr != NULL && req->newlen != sizeof(osrel))

        flags = PGET_HOLD | PGET_NOTWEXIT;
        if (req->newptr != NULL)
                flags |= PGET_CANDEBUG;
        flags |= PGET_CANSEE;
        error = pget((pid_t)name[0], flags, &p);

        error = SYSCTL_OUT(req, &p->p_osrel, sizeof(p->p_osrel));

        if (req->newptr != NULL) {
                error = SYSCTL_IN(req, &osrel, sizeof(osrel));
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct kinfo_sigtramp kst;
        const struct sysentvec *sv;
        struct kinfo_sigtramp32 kst32;

        error = pget((pid_t)name[0], PGET_CANDEBUG, &p);

        if ((req->flags & SCTL_MASK32) != 0) {
                bzero(&kst32, sizeof(kst32));
                if (SV_PROC_FLAG(p, SV_ILP32)) {
                        if (sv->sv_sigcode_base != 0) {
                                kst32.ksigtramp_start = sv->sv_sigcode_base;
                                kst32.ksigtramp_end = sv->sv_sigcode_base +
                                kst32.ksigtramp_start = sv->sv_psstrings -
                                kst32.ksigtramp_end = sv->sv_psstrings;
                error = SYSCTL_OUT(req, &kst32, sizeof(kst32));

        bzero(&kst, sizeof(kst));
        if (sv->sv_sigcode_base != 0) {
                kst.ksigtramp_start = (char *)sv->sv_sigcode_base;
                kst.ksigtramp_end = (char *)sv->sv_sigcode_base +
                kst.ksigtramp_start = (char *)sv->sv_psstrings -
                kst.ksigtramp_end = (char *)sv->sv_psstrings;
        error = SYSCTL_OUT(req, &kst, sizeof(kst));
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT|
    CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc",
    "Return entire process table");
static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD |

static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_ENV, env, CTLFLAG_RD | CTLFLAG_MPSAFE,

static SYSCTL_NODE(_kern_proc, KERN_PROC_AUXV, auxv, CTLFLAG_RD |

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD |

static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD |
    "Process syscall vector name (ABI type)");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),

static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
    "Return process table, no threads");

#ifdef COMPAT_FREEBSD7
static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD |
    CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries");

static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD |

#if defined(STACK) || defined(DDB)
static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD |
    CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks");

static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD |

static SYSCTL_NODE(_kern_proc, KERN_PROC_RLIMIT, rlimit, CTLFLAG_RW |
    "Process resource limits");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PS_STRINGS, ps_strings, CTLFLAG_RD |
    "Process ps_strings location");

static SYSCTL_NODE(_kern_proc, KERN_PROC_UMASK, umask, CTLFLAG_RD |

static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW |
    "Process binary osreldate");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD |
    "Process signal trampoline location");
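/*
 * Userland reaches the handlers above through sysctl(3).  As an
 * illustrative sketch only (error handling omitted), fetching one
 * process entry would look roughly like:
 *
 *      int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *      struct kinfo_proc kp;
 *      size_t len = sizeof(kp);
 *
 *      sysctl(mib, 4, &kp, &len, NULL, 0);
 */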