FreeBSD kernel kern code
kern_thr.c
/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

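/*
 * The two knobs below are exported under kern.threads.*: the per-process
 * thread cap is writable at run time (for example,
 * `sysctl kern.threads.max_threads_per_proc=4096`), while max_threads_hits
 * is a read-only counter of how often that cap has been reached.
 */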
static SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0,
    "thread allocation");

static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

#ifdef COMPAT_32BIT

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
        int error;

        if (SV_CURPROC_FLAG(SV_LP64))
                error = suword(addr, lwpid);
        else
                error = suword32(addr, lwpid);
        return (error);
}

#else
#define suword_lwpid suword
#endif
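
/*
 * suword_lwpid() stores a thread ID at a user-space address using the word
 * size of the calling process's ABI: a full long for native (LP64) processes
 * and a 32-bit store for 32-bit processes on COMPAT_32BIT kernels.  Without
 * compat support it is simply suword().
 */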

static int create_thread(struct thread *td, mcontext_t *ctx,
                        void (*start_func)(void *), void *arg,
                        char *stack_base, size_t stack_size,
                        char *tls_base,
                        long *child_tid, long *parent_tid,
                        int flags, struct rtprio *rtp);

/*
 * System call interface.
 */
int
sys_thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
        ucontext_t ctx;
        int error;

        if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
                return (error);

        error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
            NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
        return (error);
}

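/*
 * thr_new(2) takes the size of the caller's struct thr_param so that
 * binaries built against a smaller layout of the structure keep working:
 * the structure is zeroed first and only param_size bytes are copied in,
 * so any fields the caller does not supply read as zero.
 */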
int
sys_thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
        struct thr_param param;
        int error;

        if (uap->param_size < 0 || uap->param_size > sizeof(param))
                return (EINVAL);
        bzero(&param, sizeof(param));
        if ((error = copyin(uap->param, &param, uap->param_size)))
                return (error);
        return (kern_thr_new(td, &param));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
        struct rtprio rtp, *rtpp;
        int error;

        rtpp = NULL;
        if (param->rtp != 0) {
                error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
                if (error)
                        return (error);
                rtpp = &rtp;
        }
        error = create_thread(td, NULL, param->start_func, param->arg,
            param->stack_base, param->stack_size, param->tls_base,
            param->child_tid, param->parent_tid, param->flags,
            rtpp);
        return (error);
}

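/*
 * Common back end for thr_create() and thr_new(): allocate a new thread in
 * the current process, copy its tid out to the parent and/or child, set up
 * its user register state (either from a caller-supplied mcontext or from
 * start_func/stack/tls), link it into the process, and hand it to the
 * scheduler.
 */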
static int
create_thread(struct thread *td, mcontext_t *ctx,
            void (*start_func)(void *), void *arg,
            char *stack_base, size_t stack_size,
            char *tls_base,
            long *child_tid, long *parent_tid,
            int flags, struct rtprio *rtp)
{
        stack_t stack;
        struct thread *newtd;
        struct proc *p;
        int error;

        p = td->td_proc;

        /* Have race condition but it is cheap. */
        if (p->p_numthreads >= max_threads_per_proc) {
                ++max_threads_hits;
                return (EPROCLIM);
        }

        if (rtp != NULL) {
                switch(rtp->type) {
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_FIFO:
                        /* Only root can set scheduler policy */
                        if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
                                return (EPERM);
                        if (rtp->prio > RTP_PRIO_MAX)
                                return (EINVAL);
                        break;
                case RTP_PRIO_NORMAL:
                        rtp->prio = 0;
                        break;
                default:
                        return (EINVAL);
                }
        }

#ifdef RACCT
        PROC_LOCK(td->td_proc);
        error = racct_add(p, RACCT_NTHR, 1);
        PROC_UNLOCK(td->td_proc);
        if (error != 0)
                return (EPROCLIM);
#endif
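
        /*
         * From here on any failure must drop the RACCT_NTHR charge taken
         * above; every error path below jumps to the fail label, which
         * does exactly that.
         */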

        /* Initialize our td */
        newtd = thread_alloc(0);
        if (newtd == NULL) {
                error = ENOMEM;
                goto fail;
        }

        cpu_set_upcall(newtd, td);

        /*
         * Try the copyout as soon as we allocate the td so we don't
         * have to tear things down in a failure case below.
         * Here we copy out tid to two places, one for child and one
         * for parent, because pthread can create a detached thread,
         * if parent wants to safely access child tid, it has to provide
         * its storage, because child thread may exit quickly and
         * memory is freed before parent thread can access it.
         */
        if ((child_tid != NULL &&
            suword_lwpid(child_tid, newtd->td_tid)) ||
            (parent_tid != NULL &&
            suword_lwpid(parent_tid, newtd->td_tid))) {
                thread_free(newtd);
                error = EFAULT;
                goto fail;
        }

        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        bcopy(&td->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));
        newtd->td_proc = td->td_proc;
        newtd->td_ucred = crhold(td->td_ucred);
        newtd->td_dbg_sc_code = td->td_dbg_sc_code;
        newtd->td_dbg_sc_narg = td->td_dbg_sc_narg;

        if (ctx != NULL) { /* old way to set user context */
                error = set_mcontext(newtd, ctx);
                if (error != 0) {
                        thread_free(newtd);
                        crfree(td->td_ucred);
                        goto fail;
                }
        } else {
                /* Set up our machine context. */
                stack.ss_sp = stack_base;
                stack.ss_size = stack_size;
                /* Set upcall address to user thread entry function. */
                cpu_set_upcall_kse(newtd, start_func, arg, &stack);
                /* Setup user TLS address and TLS pointer register. */
                error = cpu_set_user_tls(newtd, tls_base);
                if (error != 0) {
                        thread_free(newtd);
                        crfree(td->td_ucred);
                        goto fail;
                }
        }

        PROC_LOCK(td->td_proc);
        td->td_proc->p_flag |= P_HADTHREADS;
        newtd->td_sigmask = td->td_sigmask;
        thread_link(newtd, p);
        bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
        thread_lock(td);
        /* let the scheduler know about these things. */
        sched_fork_thread(td, newtd);
        thread_unlock(td);
        if (P_SHOULDSTOP(p))
                newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
        PROC_UNLOCK(p);

        tidhash_add(newtd);

        thread_lock(newtd);
        if (rtp != NULL) {
                if (!(td->td_pri_class == PRI_TIMESHARE &&
                    rtp->type == RTP_PRIO_NORMAL)) {
                        rtp_to_pri(rtp, newtd);
                        sched_prio(newtd, newtd->td_user_pri);
                } /* ignore timesharing class */
        }
        TD_SET_CAN_RUN(newtd);
        sched_add(newtd, SRQ_BORING);
        thread_unlock(newtd);

        return (0);

fail:
#ifdef RACCT
        PROC_LOCK(p);
        racct_sub(p, RACCT_NTHR, 1);
        PROC_UNLOCK(p);
#endif
        return (error);
}

int
sys_thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
        int error;

        error = suword_lwpid(uap->id, (unsigned)td->td_tid);
        if (error == -1)
                return (EFAULT);
        return (0);
}

int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
        struct proc *p;

        p = td->td_proc;

        /* Signal userland that it can free the stack. */
        if ((void *)uap->state != NULL) {
                suword_lwpid(uap->state, 1);
                kern_umtx_wake(td, uap->state, INT_MAX, 0);
        }

        rw_wlock(&tidhash_lock);

        PROC_LOCK(p);

        /*
         * Shutting down last thread in the proc.  This will actually
         * call exit() in the trampoline when it returns.
         */
        if (p->p_numthreads != 1) {
                racct_sub(p, RACCT_NTHR, 1);
                LIST_REMOVE(td, td_hash);
                rw_wunlock(&tidhash_lock);
                tdsigcleanup(td);
                PROC_SLOCK(p);
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        PROC_UNLOCK(p);
        rw_wunlock(&tidhash_lock);
        return (0);
}

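/*
 * Notes for the signal-delivery paths below: an id of -1 delivers the
 * signal to every thread in the target process other than the caller,
 * and tdfind() (like pfind()) returns with the target process locked,
 * which is why PROC_UNLOCK() appears without a matching PROC_LOCK().
 */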
int
sys_thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
        ksiginfo_t ksi;
        struct thread *ttd;
        struct proc *p;
        int error;

        p = td->td_proc;
        ksiginfo_init(&ksi);
        ksi.ksi_signo = uap->sig;
        ksi.ksi_code = SI_LWP;
        ksi.ksi_pid = p->p_pid;
        ksi.ksi_uid = td->td_ucred->cr_ruid;
        if (uap->id == -1) {
                if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
                        error = EINVAL;
                } else {
                        error = ESRCH;
                        PROC_LOCK(p);
                        FOREACH_THREAD_IN_PROC(p, ttd) {
                                if (ttd != td) {
                                        error = 0;
                                        if (uap->sig == 0)
                                                break;
                                        tdksignal(ttd, uap->sig, &ksi);
                                }
                        }
                        PROC_UNLOCK(p);
                }
        } else {
                error = 0;
                ttd = tdfind((lwpid_t)uap->id, p->p_pid);
                if (ttd == NULL)
                        return (ESRCH);
                if (uap->sig == 0)
                        ;
                else if (!_SIG_VALID(uap->sig))
                        error = EINVAL;
                else
                        tdksignal(ttd, uap->sig, &ksi);
                PROC_UNLOCK(ttd->td_proc);
        }
        return (error);
}

int
sys_thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
        ksiginfo_t ksi;
        struct thread *ttd;
        struct proc *p;
        int error;

        AUDIT_ARG_SIGNUM(uap->sig);

        ksiginfo_init(&ksi);
        ksi.ksi_signo = uap->sig;
        ksi.ksi_code = SI_LWP;
        ksi.ksi_pid = td->td_proc->p_pid;
        ksi.ksi_uid = td->td_ucred->cr_ruid;
        if (uap->id == -1) {
                if ((p = pfind(uap->pid)) == NULL)
                        return (ESRCH);
                AUDIT_ARG_PROCESS(p);
                error = p_cansignal(td, p, uap->sig);
                if (error) {
                        PROC_UNLOCK(p);
                        return (error);
                }
                if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
                        error = EINVAL;
                } else {
                        error = ESRCH;
                        FOREACH_THREAD_IN_PROC(p, ttd) {
                                if (ttd != td) {
                                        error = 0;
                                        if (uap->sig == 0)
                                                break;
                                        tdksignal(ttd, uap->sig, &ksi);
                                }
                        }
                }
                PROC_UNLOCK(p);
        } else {
                ttd = tdfind((lwpid_t)uap->id, uap->pid);
                if (ttd == NULL)
                        return (ESRCH);
                p = ttd->td_proc;
                AUDIT_ARG_PROCESS(p);
                error = p_cansignal(td, p, uap->sig);
                if (uap->sig == 0)
                        ;
                else if (!_SIG_VALID(uap->sig))
                        error = EINVAL;
                else
                        tdksignal(ttd, uap->sig, &ksi);
                PROC_UNLOCK(p);
        }
        return (error);
}

int
sys_thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
        struct timespec ts, *tsp;
        int error;

        tsp = NULL;
        if (uap->timeout != NULL) {
                error = umtx_copyin_timeout(uap->timeout, &ts);
                if (error != 0)
                        return (error);
                tsp = &ts;
        }

        return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
        struct proc *p = td->td_proc;
        struct timeval tv;
        int error = 0;
        int timo = 0;

        if (td->td_pflags & TDP_WAKEUP) {
                td->td_pflags &= ~TDP_WAKEUP;
                return (0);
        }

        if (tsp != NULL) {
                if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
                        error = EWOULDBLOCK;
                else {
                        TIMESPEC_TO_TIMEVAL(&tv, tsp);
                        timo = tvtohz(&tv);
                }
        }

        PROC_LOCK(p);
        if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0)
                error = msleep((void *)td, &p->p_mtx,
                    PCATCH, "lthr", timo);

        if (td->td_flags & TDF_THRWAKEUP) {
                thread_lock(td);
                td->td_flags &= ~TDF_THRWAKEUP;
                thread_unlock(td);
                PROC_UNLOCK(p);
                return (0);
        }
        PROC_UNLOCK(p);
        if (error == EWOULDBLOCK)
                error = ETIMEDOUT;
        else if (error == ERESTART) {
                if (timo != 0)
                        error = EINTR;
        }
        return (error);
}
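
/*
 * thr_suspend() and thr_wake() pair up as follows: waking your own tid sets
 * TDP_WAKEUP, so the next thr_suspend() returns immediately without
 * sleeping; waking another thread sets TDF_THRWAKEUP on it and wakes it
 * from the msleep() above, and kern_thr_suspend() then consumes that flag
 * and returns 0.
 */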

int
sys_thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
        struct proc *p;
        struct thread *ttd;

        if (uap->id == td->td_tid) {
                td->td_pflags |= TDP_WAKEUP;
                return (0);
        }

        p = td->td_proc;
        ttd = tdfind((lwpid_t)uap->id, p->p_pid);
        if (ttd == NULL)
                return (ESRCH);
        thread_lock(ttd);
        ttd->td_flags |= TDF_THRWAKEUP;
        thread_unlock(ttd);
        wakeup((void *)ttd);
        PROC_UNLOCK(p);
        return (0);
}

int
sys_thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
        struct proc *p;
        char name[MAXCOMLEN + 1];
        struct thread *ttd;
        int error;

        error = 0;
        name[0] = '\0';
        if (uap->name != NULL) {
                error = copyinstr(uap->name, name, sizeof(name),
                    NULL);
                if (error)
                        return (error);
        }
        p = td->td_proc;
        ttd = tdfind((lwpid_t)uap->id, p->p_pid);
        if (ttd == NULL)
                return (ESRCH);
        strcpy(ttd->td_name, name);
#ifdef KTR
        sched_clear_tdname(ttd);
#endif
        PROC_UNLOCK(p);
        return (error);
}
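
A closing note on how this interface is reached from user space: libthr's
pthread_create() packs its arguments into a struct thr_param and issues
thr_new(2), and the other syscalls above have thin wrappers declared in
<sys/thr.h>. The fragment below is a minimal, illustrative sketch of those
wrappers (it is not part of kern_thr.c), assuming a stock FreeBSD-derived
libc that exports thr_self() and thr_kill(); real programs should normally
use the pthread_*() interfaces instead.

/* Illustrative userland sketch -- not part of kern_thr.c. */
#include <sys/thr.h>
#include <stdio.h>

int
main(void)
{
        long tid;

        if (thr_self(&tid) != 0) {      /* handled by sys_thr_self() above */
                perror("thr_self");
                return (1);
        }
        printf("my thread id: %ld\n", tid);

        /* sig == 0 only probes for existence, as in sys_thr_kill(). */
        if (thr_kill(tid, 0) != 0)
                perror("thr_kill");
        return (0);
}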