FreeBSD kernel kern code
kern_descrip.c
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  * The Regents of the University of California. All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  * notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  * notice, this list of conditions and the following disclaimer in the
17  * documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  * may be used to endorse or promote products derived from this software
20  * without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$BSDSUniX$");
39 
40 #include "opt_capsicum.h"
41 #include "opt_compat.h"
42 #include "opt_ddb.h"
43 #include "opt_ktrace.h"
44 #include "opt_procdesc.h"
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 
49 #include <sys/capability.h>
50 #include <sys/conf.h>
51 #include <sys/domain.h>
52 #include <sys/fcntl.h>
53 #include <sys/file.h>
54 #include <sys/filedesc.h>
55 #include <sys/filio.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/ksem.h>
59 #include <sys/limits.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mman.h>
63 #include <sys/mount.h>
64 #include <sys/mqueue.h>
65 #include <sys/mutex.h>
66 #include <sys/namei.h>
67 #include <sys/selinfo.h>
68 #include <sys/pipe.h>
69 #include <sys/priv.h>
70 #include <sys/proc.h>
71 #include <sys/procdesc.h>
72 #include <sys/protosw.h>
73 #include <sys/racct.h>
74 #include <sys/resourcevar.h>
75 #include <sys/sbuf.h>
76 #include <sys/signalvar.h>
77 #include <sys/socketvar.h>
78 #include <sys/stat.h>
79 #include <sys/sx.h>
80 #include <sys/syscallsubr.h>
81 #include <sys/sysctl.h>
82 #include <sys/sysproto.h>
83 #include <sys/tty.h>
84 #include <sys/unistd.h>
85 #include <sys/un.h>
86 #include <sys/unpcb.h>
87 #include <sys/user.h>
88 #include <sys/vnode.h>
89 #ifdef KTRACE
90 #include <sys/ktrace.h>
91 #endif
92 
93 #include <net/vnet.h>
94 
95 #include <netinet/in.h>
96 #include <netinet/in_pcb.h>
97 
98 #include <security/audit/audit.h>
99 
100 #include <vm/uma.h>
101 #include <vm/vm.h>
102 
103 #include <ddb/ddb.h>
104 
105 static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
106 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
107  "file desc to leader structures");
108 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
109 
110 MALLOC_DECLARE(M_FADVISE);
111 
112 static uma_zone_t file_zone;
113 
114 void (*ksem_info)(struct ksem *ks, char *path, size_t size, uint32_t *value);
115 
116 /* Flags for do_dup() */
117 #define DUP_FIXED 0x1 /* Force fixed allocation */
118 #define DUP_FCNTL 0x2 /* fcntl()-style errors */
119 #define DUP_CLOEXEC 0x4 /* Atomically set FD_CLOEXEC. */
120 
121 static int do_dup(struct thread *td, int flags, int old, int new,
122  register_t *retval);
123 static int fd_first_free(struct filedesc *, int, int);
124 static int fd_last_used(struct filedesc *, int, int);
125 static void fdgrowtable(struct filedesc *, int);
126 static void fdunused(struct filedesc *fdp, int fd);
127 static void fdused(struct filedesc *fdp, int fd);
128 static int fill_vnode_info(struct vnode *vp, struct kinfo_file *kif);
129 static int fill_socket_info(struct socket *so, struct kinfo_file *kif);
130 static int fill_pts_info(struct tty *tp, struct kinfo_file *kif);
131 static int fill_pipe_info(struct pipe *pi, struct kinfo_file *kif);
132 static int fill_procdesc_info(struct procdesc *pdp,
133  struct kinfo_file *kif);
134 static int fill_sem_info(struct file *fp, struct kinfo_file *kif);
135 static int fill_shm_info(struct file *fp, struct kinfo_file *kif);
136 static int getmaxfd(struct proc *p);
137 
138 /*
139  * A process is initially started out with NDFILE descriptors stored within
140  * this structure, selected to be enough for typical applications based on
141  * the historical limit of 20 open files (and the usage of descriptors by
142  * shells). If these descriptors are exhausted, a larger descriptor table
143  * may be allocated, up to a process' resource limit; the internal arrays
144  * are then unused.
145  */
146 #define NDFILE 20
147 #define NDSLOTSIZE sizeof(NDSLOTTYPE)
148 #define NDENTRIES (NDSLOTSIZE * __CHAR_BIT)
149 #define NDSLOT(x) ((x) / NDENTRIES)
150 #define NDBIT(x) ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
151 #define NDSLOTS(x) (((x) + NDENTRIES - 1) / NDENTRIES)
152 
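A minimal sketch of the bit arithmetic these macros perform, assuming the common 64-bit NDSLOTTYPE (so NDENTRIES is 64); the helper name below is invented for illustration:

static int
example_fd_is_used(unsigned long *map, int fd)
{
	/*
	 * Same test as fdisused(): word NDSLOT(fd) == fd / 64, bit fd % 64.
	 * E.g. fd 70 lives in map[1] at bit 6, and NDSLOTS(70) == 2 bitmap
	 * words cover descriptors 0..127.
	 */
	return ((map[fd / 64] & (1UL << (fd % 64))) != 0);
}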
153 /*
154  * Storage required per open file descriptor.
155  */
156 #define OFILESIZE (sizeof(struct file *) + sizeof(char))
157 
158 /*
159  * Storage to hold unused ofiles that need to be reclaimed.
160  */
161 struct freetable {
162  struct file **ft_table;
163  SLIST_ENTRY(freetable) ft_next;
164 };
165 
166 /*
167  * Basic allocation of descriptors:
168  * one of the above, plus arrays for NDFILE descriptors.
169  */
170 struct filedesc0 {
171  struct filedesc fd_fd;
172  /*
173  * ofiles which need to be reclaimed on free.
174  */
175  SLIST_HEAD(,freetable) fd_free;
176  /*
177  * These arrays are used when the number of open files is
178  * <= NDFILE, and are then pointed to by the pointers above.
179  */
180  struct file *fd_dfiles[NDFILE];
181  char fd_dfileflags[NDFILE];
182  NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
183 };
184 
185 /*
186  * Descriptor management.
187  */
188 volatile int openfiles; /* actual number of open files */
189 struct mtx sigio_lock; /* mtx to protect pointers to sigio */
190 void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
191 
192 /* A mutex to protect the association between a proc and filedesc. */
193 static struct mtx fdesc_mtx;
194 
195 /*
196  * Find the first zero bit in the given bitmap, starting at low and not
197  * exceeding size - 1.
198  */
199 static int
200 fd_first_free(struct filedesc *fdp, int low, int size)
201 {
202  NDSLOTTYPE *map = fdp->fd_map;
203  NDSLOTTYPE mask;
204  int off, maxoff;
205 
206  if (low >= size)
207  return (low);
208 
209  off = NDSLOT(low);
210  if (low % NDENTRIES) {
211  mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
212  if ((mask &= ~map[off]) != 0UL)
213  return (off * NDENTRIES + ffsl(mask) - 1);
214  ++off;
215  }
216  for (maxoff = NDSLOTS(size); off < maxoff; ++off)
217  if (map[off] != ~0UL)
218  return (off * NDENTRIES + ffsl(~map[off]) - 1);
219  return (size);
220 }
221 
222 /*
223  * Find the highest non-zero bit in the given bitmap, starting at low and
224  * not exceeding size - 1.
225  */
226 static int
227 fd_last_used(struct filedesc *fdp, int low, int size)
228 {
229  NDSLOTTYPE *map = fdp->fd_map;
230  NDSLOTTYPE mask;
231  int off, minoff;
232 
233  if (low >= size)
234  return (-1);
235 
236  off = NDSLOT(size);
237  if (size % NDENTRIES) {
238  mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
239  if ((mask &= map[off]) != 0)
240  return (off * NDENTRIES + flsl(mask) - 1);
241  --off;
242  }
243  for (minoff = NDSLOT(low); off >= minoff; --off)
244  if (map[off] != 0)
245  return (off * NDENTRIES + flsl(map[off]) - 1);
246  return (low - 1);
247 }
248 
249 static int
250 fdisused(struct filedesc *fdp, int fd)
251 {
252  KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
253  ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
254  return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
255 }
256 
257 /*
258  * Mark a file descriptor as used.
259  */
260 static void
261 fdused(struct filedesc *fdp, int fd)
262 {
263 
264  FILEDESC_XLOCK_ASSERT(fdp);
265  KASSERT(!fdisused(fdp, fd),
266  ("fd already used"));
267 
268  fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
269  if (fd > fdp->fd_lastfile)
270  fdp->fd_lastfile = fd;
271  if (fd == fdp->fd_freefile)
272  fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
273 }
274 
275 /*
276  * Mark a file descriptor as unused.
277  */
278 static void
279 fdunused(struct filedesc *fdp, int fd)
280 {
281 
282  FILEDESC_XLOCK_ASSERT(fdp);
283  KASSERT(fdisused(fdp, fd),
284  ("fd is already unused"));
285  KASSERT(fdp->fd_ofiles[fd] == NULL,
286  ("fd is still in use"));
287 
288  fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
289  if (fd < fdp->fd_freefile)
290  fdp->fd_freefile = fd;
291  if (fd == fdp->fd_lastfile)
292  fdp->fd_lastfile = fd_last_used(fdp, 0, fd);
293 }
294 
295 /*
296  * System calls on descriptors.
297  */
298 #ifndef _SYS_SYSPROTO_H_
299 struct getdtablesize_args {
300  int dummy;
301 };
302 #endif
303 /* ARGSUSED */
304 int
305 sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
306 {
307  struct proc *p = td->td_proc;
308  uint64_t lim;
309 
310  PROC_LOCK(p);
311  td->td_retval[0] =
312  min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
313  lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
314  PROC_UNLOCK(p);
315  if (lim < td->td_retval[0])
316  td->td_retval[0] = lim;
317  return (0);
318 }
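From userland the value computed here is what getdtablesize(3) reports; a small hedged example relating it to the RLIMIT_NOFILE soft limit:

#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rlimit rl;

	/* getdtablesize() reflects min(RLIMIT_NOFILE, maxfilesperproc). */
	if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
		return (1);
	printf("getdtablesize: %d, RLIMIT_NOFILE soft: %ju\n",
	    getdtablesize(), (uintmax_t)rl.rlim_cur);
	return (0);
}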
319 
320 /*
321  * Duplicate a file descriptor to a particular value.
322  *
323  * Note: keep in mind that a potential race condition exists when closing
324  * descriptors from a shared descriptor table (via rfork).
325  */
326 #ifndef _SYS_SYSPROTO_H_
327 struct dup2_args {
328  u_int from;
329  u_int to;
330 };
331 #endif
332 /* ARGSUSED */
333 int
334 sys_dup2(struct thread *td, struct dup2_args *uap)
335 {
336 
337  return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
338  td->td_retval));
339 }
340 
341 /*
342  * Duplicate a file descriptor.
343  */
344 #ifndef _SYS_SYSPROTO_H_
345 struct dup_args {
346  u_int fd;
347 };
348 #endif
349 /* ARGSUSED */
350 int
351 sys_dup(struct thread *td, struct dup_args *uap)
352 {
353 
354  return (do_dup(td, 0, (int)uap->fd, 0, td->td_retval));
355 }
356 
357 /*
358  * The file control system call.
359  */
360 #ifndef _SYS_SYSPROTO_H_
361 struct fcntl_args {
362  int fd;
363  int cmd;
364  long arg;
365 };
366 #endif
367 /* ARGSUSED */
368 int
369 sys_fcntl(struct thread *td, struct fcntl_args *uap)
370 {
371  struct flock fl;
372  struct __oflock ofl;
373  intptr_t arg;
374  int error;
375  int cmd;
376 
377  error = 0;
378  cmd = uap->cmd;
379  switch (uap->cmd) {
380  case F_OGETLK:
381  case F_OSETLK:
382  case F_OSETLKW:
383  /*
384  * Convert old flock structure to new.
385  */
386  error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl));
387  fl.l_start = ofl.l_start;
388  fl.l_len = ofl.l_len;
389  fl.l_pid = ofl.l_pid;
390  fl.l_type = ofl.l_type;
391  fl.l_whence = ofl.l_whence;
392  fl.l_sysid = 0;
393 
394  switch (uap->cmd) {
395  case F_OGETLK:
396  cmd = F_GETLK;
397  break;
398  case F_OSETLK:
399  cmd = F_SETLK;
400  break;
401  case F_OSETLKW:
402  cmd = F_SETLKW;
403  break;
404  }
405  arg = (intptr_t)&fl;
406  break;
407  case F_GETLK:
408  case F_SETLK:
409  case F_SETLKW:
410  case F_SETLK_REMOTE:
411  error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
412  arg = (intptr_t)&fl;
413  break;
414  default:
415  arg = uap->arg;
416  break;
417  }
418  if (error)
419  return (error);
420  error = kern_fcntl(td, uap->fd, cmd, arg);
421  if (error)
422  return (error);
423  if (uap->cmd == F_OGETLK) {
424  ofl.l_start = fl.l_start;
425  ofl.l_len = fl.l_len;
426  ofl.l_pid = fl.l_pid;
427  ofl.l_type = fl.l_type;
428  ofl.l_whence = fl.l_whence;
429  error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl));
430  } else if (uap->cmd == F_GETLK) {
431  error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
432  }
433  return (error);
434 }
435 
436 static inline struct file *
437 fdtofp(int fd, struct filedesc *fdp)
438 {
439  struct file *fp;
440 
441  FILEDESC_LOCK_ASSERT(fdp);
442  if ((unsigned)fd >= fdp->fd_nfiles ||
443  (fp = fdp->fd_ofiles[fd]) == NULL)
444  return (NULL);
445  return (fp);
446 }
447 
448 static inline int
449 fdunwrap(int fd, cap_rights_t rights, struct filedesc *fdp, struct file **fpp)
450 {
451 
452  *fpp = fdtofp(fd, fdp);
453  if (*fpp == NULL)
454  return (EBADF);
455 
456 #ifdef CAPABILITIES
457  if ((*fpp)->f_type == DTYPE_CAPABILITY) {
458  int err = cap_funwrap(*fpp, rights, fpp);
459  if (err != 0) {
460  *fpp = NULL;
461  return (err);
462  }
463  }
464 #endif /* CAPABILITIES */
465  return (0);
466 }
467 
468 int
469 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
470 {
471  struct filedesc *fdp;
472  struct flock *flp;
473  struct file *fp;
474  struct proc *p;
475  char *pop;
476  struct vnode *vp;
477  int error, flg, tmp;
478  int vfslocked;
479  u_int old, new;
480  uint64_t bsize;
481  off_t foffset;
482 
483  vfslocked = 0;
484  error = 0;
485  flg = F_POSIX;
486  p = td->td_proc;
487  fdp = p->p_fd;
488 
489  switch (cmd) {
490  case F_DUPFD:
491  tmp = arg;
492  error = do_dup(td, DUP_FCNTL, fd, tmp, td->td_retval);
493  break;
494 
495  case F_DUPFD_CLOEXEC:
496  tmp = arg;
497  error = do_dup(td, DUP_FCNTL | DUP_CLOEXEC, fd, tmp,
498  td->td_retval);
499  break;
500 
501  case F_DUP2FD:
502  tmp = arg;
503  error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval);
504  break;
505 
506  case F_DUP2FD_CLOEXEC:
507  tmp = arg;
508  error = do_dup(td, DUP_FIXED | DUP_CLOEXEC, fd, tmp,
509  td->td_retval);
510  break;
511 
512  case F_GETFD:
513  FILEDESC_SLOCK(fdp);
514  if ((fp = fdtofp(fd, fdp)) == NULL) {
515  FILEDESC_SUNLOCK(fdp);
516  error = EBADF;
517  break;
518  }
519  pop = &fdp->fd_ofileflags[fd];
520  td->td_retval[0] = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
521  FILEDESC_SUNLOCK(fdp);
522  break;
523 
524  case F_SETFD:
525  FILEDESC_XLOCK(fdp);
526  if ((fp = fdtofp(fd, fdp)) == NULL) {
527  FILEDESC_XUNLOCK(fdp);
528  error = EBADF;
529  break;
530  }
531  pop = &fdp->fd_ofileflags[fd];
532  *pop = (*pop &~ UF_EXCLOSE) |
533  (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
534  FILEDESC_XUNLOCK(fdp);
535  break;
536 
537  case F_GETFL:
538  FILEDESC_SLOCK(fdp);
539  error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
540  if (error != 0) {
541  FILEDESC_SUNLOCK(fdp);
542  break;
543  }
544  td->td_retval[0] = OFLAGS(fp->f_flag);
545  FILEDESC_SUNLOCK(fdp);
546  break;
547 
548  case F_SETFL:
549  FILEDESC_SLOCK(fdp);
550  error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
551  if (error != 0) {
552  FILEDESC_SUNLOCK(fdp);
553  break;
554  }
555  fhold(fp);
556  FILEDESC_SUNLOCK(fdp);
557  do {
558  tmp = flg = fp->f_flag;
559  tmp &= ~FCNTLFLAGS;
560  tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
561  } while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
562  tmp = fp->f_flag & FNONBLOCK;
563  error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
564  if (error) {
565  fdrop(fp, td);
566  break;
567  }
568  tmp = fp->f_flag & FASYNC;
569  error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
570  if (error == 0) {
571  fdrop(fp, td);
572  break;
573  }
574  atomic_clear_int(&fp->f_flag, FNONBLOCK);
575  tmp = 0;
576  (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
577  fdrop(fp, td);
578  break;
579 
580  case F_GETOWN:
581  FILEDESC_SLOCK(fdp);
582  error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
583  if (error != 0) {
584  FILEDESC_SUNLOCK(fdp);
585  break;
586  }
587  fhold(fp);
588  FILEDESC_SUNLOCK(fdp);
589  error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
590  if (error == 0)
591  td->td_retval[0] = tmp;
592  fdrop(fp, td);
593  break;
594 
595  case F_SETOWN:
596  FILEDESC_SLOCK(fdp);
597  error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
598  if (error != 0) {
599  FILEDESC_SUNLOCK(fdp);
600  break;
601  }
602  fhold(fp);
603  FILEDESC_SUNLOCK(fdp);
604  tmp = arg;
605  error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
606  fdrop(fp, td);
607  break;
608 
609  case F_SETLK_REMOTE:
610  error = priv_check(td, PRIV_NFS_LOCKD);
611  if (error)
612  return (error);
613  flg = F_REMOTE;
614  goto do_setlk;
615 
616  case F_SETLKW:
617  flg |= F_WAIT;
618  /* FALLTHROUGH F_SETLK */
619 
620  case F_SETLK:
621  do_setlk:
622  FILEDESC_SLOCK(fdp);
623  error = fdunwrap(fd, CAP_FLOCK, fdp, &fp);
624  if (error != 0) {
625  FILEDESC_SUNLOCK(fdp);
626  break;
627  }
628  if (fp->f_type != DTYPE_VNODE) {
629  FILEDESC_SUNLOCK(fdp);
630  error = EBADF;
631  break;
632  }
633  flp = (struct flock *)arg;
634  if (flp->l_whence == SEEK_CUR) {
635  foffset = foffset_get(fp);
636  if (foffset < 0 ||
637  (flp->l_start > 0 &&
638  foffset > OFF_MAX - flp->l_start)) {
639  FILEDESC_SUNLOCK(fdp);
640  error = EOVERFLOW;
641  break;
642  }
643  flp->l_start += foffset;
644  }
645 
646  /*
647  * VOP_ADVLOCK() may block.
648  */
649  fhold(fp);
650  FILEDESC_SUNLOCK(fdp);
651  vp = fp->f_vnode;
652  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
653  switch (flp->l_type) {
654  case F_RDLCK:
655  if ((fp->f_flag & FREAD) == 0) {
656  error = EBADF;
657  break;
658  }
659  PROC_LOCK(p->p_leader);
660  p->p_leader->p_flag |= P_ADVLOCK;
661  PROC_UNLOCK(p->p_leader);
662  error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
663  flp, flg);
664  break;
665  case F_WRLCK:
666  if ((fp->f_flag & FWRITE) == 0) {
667  error = EBADF;
668  break;
669  }
670  PROC_LOCK(p->p_leader);
671  p->p_leader->p_flag |= P_ADVLOCK;
672  PROC_UNLOCK(p->p_leader);
673  error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
674  flp, flg);
675  break;
676  case F_UNLCK:
677  error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
678  flp, flg);
679  break;
680  case F_UNLCKSYS:
681  /*
682  * Temporary api for testing remote lock
683  * infrastructure.
684  */
685  if (flg != F_REMOTE) {
686  error = EINVAL;
687  break;
688  }
689  error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
690  F_UNLCKSYS, flp, flg);
691  break;
692  default:
693  error = EINVAL;
694  break;
695  }
696  VFS_UNLOCK_GIANT(vfslocked);
697  vfslocked = 0;
698  /* Check for race with close */
699  FILEDESC_SLOCK(fdp);
700  if ((unsigned) fd >= fdp->fd_nfiles ||
701  fp != fdp->fd_ofiles[fd]) {
702  FILEDESC_SUNLOCK(fdp);
703  flp->l_whence = SEEK_SET;
704  flp->l_start = 0;
705  flp->l_len = 0;
706  flp->l_type = F_UNLCK;
707  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
708  (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
709  F_UNLCK, flp, F_POSIX);
710  VFS_UNLOCK_GIANT(vfslocked);
711  vfslocked = 0;
712  } else
713  FILEDESC_SUNLOCK(fdp);
714  fdrop(fp, td);
715  break;
716 
717  case F_GETLK:
718  FILEDESC_SLOCK(fdp);
719  error = fdunwrap(fd, CAP_FLOCK, fdp, &fp);
720  if (error != 0) {
721  FILEDESC_SUNLOCK(fdp);
722  break;
723  }
724  if (fp->f_type != DTYPE_VNODE) {
725  FILEDESC_SUNLOCK(fdp);
726  error = EBADF;
727  break;
728  }
729  flp = (struct flock *)arg;
730  if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
731  flp->l_type != F_UNLCK) {
732  FILEDESC_SUNLOCK(fdp);
733  error = EINVAL;
734  break;
735  }
736  if (flp->l_whence == SEEK_CUR) {
737  foffset = foffset_get(fp);
738  if ((flp->l_start > 0 &&
739  foffset > OFF_MAX - flp->l_start) ||
740  (flp->l_start < 0 &&
741  foffset < OFF_MIN - flp->l_start)) {
742  FILEDESC_SUNLOCK(fdp);
743  error = EOVERFLOW;
744  break;
745  }
746  flp->l_start += foffset;
747  }
748  /*
749  * VOP_ADVLOCK() may block.
750  */
751  fhold(fp);
752  FILEDESC_SUNLOCK(fdp);
753  vp = fp->f_vnode;
754  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
755  error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
756  F_POSIX);
757  VFS_UNLOCK_GIANT(vfslocked);
758  vfslocked = 0;
759  fdrop(fp, td);
760  break;
761 
762  case F_RDAHEAD:
763  arg = arg ? 128 * 1024: 0;
764  /* FALLTHROUGH */
765  case F_READAHEAD:
766  FILEDESC_SLOCK(fdp);
767  if ((fp = fdtofp(fd, fdp)) == NULL) {
768  FILEDESC_SUNLOCK(fdp);
769  error = EBADF;
770  break;
771  }
772  if (fp->f_type != DTYPE_VNODE) {
773  FILEDESC_SUNLOCK(fdp);
774  error = EBADF;
775  break;
776  }
777  fhold(fp);
778  FILEDESC_SUNLOCK(fdp);
779  if (arg != 0) {
780  vp = fp->f_vnode;
781  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
782  error = vn_lock(vp, LK_SHARED);
783  if (error != 0)
784  goto readahead_vnlock_fail;
785  bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
786  VOP_UNLOCK(vp, 0);
787  fp->f_seqcount = (arg + bsize - 1) / bsize;
788  do {
789  new = old = fp->f_flag;
790  new |= FRDAHEAD;
791  } while (!atomic_cmpset_rel_int(&fp->f_flag, old, new));
792 readahead_vnlock_fail:
793  VFS_UNLOCK_GIANT(vfslocked);
794  vfslocked = 0;
795  } else {
796  do {
797  new = old = fp->f_flag;
798  new &= ~FRDAHEAD;
799  } while (!atomic_cmpset_rel_int(&fp->f_flag, old, new));
800  }
801  fdrop(fp, td);
802  break;
803 
804  default:
805  error = EINVAL;
806  break;
807  }
808  VFS_UNLOCK_GIANT(vfslocked);
809  return (error);
810 }
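The F_SETFD and F_SETFL branches above back the usual userland idioms; an illustrative sketch (helper names invented):

#include <fcntl.h>

/* Mark a descriptor close-on-exec (F_GETFD/F_SETFD cases above). */
static int
set_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);

	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFD, flags | FD_CLOEXEC));
}

/* Switch a descriptor to non-blocking mode (F_SETFL case above). */
static int
set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFL, flags | O_NONBLOCK));
}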
811 
812 static int
813 getmaxfd(struct proc *p)
814 {
815  int maxfd;
816 
817  PROC_LOCK(p);
818  maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
819  PROC_UNLOCK(p);
820 
821  return (maxfd);
822 }
823 
824 /*
825  * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
826  */
827 static int
828 do_dup(struct thread *td, int flags, int old, int new,
829  register_t *retval)
830 {
831  struct filedesc *fdp;
832  struct proc *p;
833  struct file *fp;
834  struct file *delfp;
835  int error, holdleaders, maxfd;
836 
837  p = td->td_proc;
838  fdp = p->p_fd;
839 
840  /*
841  * Verify we have a valid descriptor to dup from and possibly to
842  * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
843  * return EINVAL when the new descriptor is out of bounds.
844  */
845  if (old < 0)
846  return (EBADF);
847  if (new < 0)
848  return (flags & DUP_FCNTL ? EINVAL : EBADF);
849  maxfd = getmaxfd(p);
850  if (new >= maxfd)
851  return (flags & DUP_FCNTL ? EINVAL : EBADF);
852 
853  FILEDESC_XLOCK(fdp);
854  if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
855  FILEDESC_XUNLOCK(fdp);
856  return (EBADF);
857  }
858  if (flags & DUP_FIXED && old == new) {
859  *retval = new;
860  if (flags & DUP_CLOEXEC)
861  fdp->fd_ofileflags[new] |= UF_EXCLOSE;
862  FILEDESC_XUNLOCK(fdp);
863  return (0);
864  }
865  fp = fdp->fd_ofiles[old];
866  fhold(fp);
867 
868  /*
869  * If the caller specified a file descriptor, make sure the file
870  * table is large enough to hold it, and grab it. Otherwise, just
871  * allocate a new descriptor the usual way. Since the filedesc
872  * lock may be temporarily dropped in the process, we have to look
873  * out for a race.
874  */
875  if (flags & DUP_FIXED) {
876  if (new >= fdp->fd_nfiles) {
877  /*
878  * The resource limits are here instead of e.g.
879  * fdalloc(), because the file descriptor table may be
880  * shared between processes, so we can't really use
881  * racct_add()/racct_sub(). Instead of counting the
882  * number of actually allocated descriptors, just put
883  * the limit on the size of the file descriptor table.
884  */
885 #ifdef RACCT
886  PROC_LOCK(p);
887  error = racct_set(p, RACCT_NOFILE, new + 1);
888  PROC_UNLOCK(p);
889  if (error != 0) {
890  FILEDESC_XUNLOCK(fdp);
891  fdrop(fp, td);
892  return (EMFILE);
893  }
894 #endif
895  fdgrowtable(fdp, new + 1);
896  }
897  if (fdp->fd_ofiles[new] == NULL)
898  fdused(fdp, new);
899  } else {
900  if ((error = fdalloc(td, new, &new)) != 0) {
901  FILEDESC_XUNLOCK(fdp);
902  fdrop(fp, td);
903  return (error);
904  }
905  }
906 
907  /*
908  * If the old file changed out from under us then treat it as a
909  * bad file descriptor. Userland should do its own locking to
910  * avoid this case.
911  */
912  if (fdp->fd_ofiles[old] != fp) {
913  /* we've allocated a descriptor which we won't use */
914  if (fdp->fd_ofiles[new] == NULL)
915  fdunused(fdp, new);
916  FILEDESC_XUNLOCK(fdp);
917  fdrop(fp, td);
918  return (EBADF);
919  }
920  KASSERT(old != new,
921  ("new fd is same as old"));
922 
923  /*
924  * Save info on the descriptor being overwritten. We cannot close
925  * it without introducing an ownership race for the slot, since we
926  * need to drop the filedesc lock to call closef().
927  *
928  * XXX this duplicates parts of close().
929  */
930  delfp = fdp->fd_ofiles[new];
931  holdleaders = 0;
932  if (delfp != NULL) {
933  if (td->td_proc->p_fdtol != NULL) {
934  /*
935  * Ask fdfree() to sleep to ensure that all relevant
936  * process leaders can be traversed in closef().
937  */
938  fdp->fd_holdleaderscount++;
939  holdleaders = 1;
940  }
941  }
942 
943  /*
944  * Duplicate the source descriptor
945  */
946  fdp->fd_ofiles[new] = fp;
947  if ((flags & DUP_CLOEXEC) != 0)
948  fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] | UF_EXCLOSE;
949  else
950  fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] & ~UF_EXCLOSE;
951  if (new > fdp->fd_lastfile)
952  fdp->fd_lastfile = new;
953  *retval = new;
954 
955  /*
956  * If we dup'd over a valid file, we now own the reference to it
957  * and must dispose of it using closef() semantics (as if a
958  * close() were performed on it).
959  *
960  * XXX this duplicates parts of close().
961  */
962  if (delfp != NULL) {
963  knote_fdclose(td, new);
964  if (delfp->f_type == DTYPE_MQUEUE)
965  mq_fdclose(td, new, delfp);
966  FILEDESC_XUNLOCK(fdp);
967  (void) closef(delfp, td);
968  if (holdleaders) {
969  FILEDESC_XLOCK(fdp);
970  fdp->fd_holdleaderscount--;
971  if (fdp->fd_holdleaderscount == 0 &&
972  fdp->fd_holdleaderswakeup != 0) {
973  fdp->fd_holdleaderswakeup = 0;
974  wakeup(&fdp->fd_holdleaderscount);
975  }
976  FILEDESC_XUNLOCK(fdp);
977  }
978  } else {
979  FILEDESC_XUNLOCK(fdp);
980  }
981  return (0);
982 }
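The DUP_FCNTL/DUP_FIXED distinction above is what userland observes as different errno values for an out-of-range target descriptor; a hedged sketch (assumes RLIMIT_NOFILE is well below the target value used here):

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/null", O_RDONLY);

	/* DUP_FIXED path (dup2): an out-of-range target fails with EBADF. */
	assert(dup2(fd, 1000000) == -1 && errno == EBADF);
	/* DUP_FCNTL path (F_DUPFD): the same condition fails with EINVAL. */
	assert(fcntl(fd, F_DUPFD, 1000000) == -1 && errno == EINVAL);
	/* dup2() onto the same descriptor succeeds and returns it unchanged. */
	assert(dup2(fd, fd) == fd);
	return (0);
}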
983 
984 /*
985  * If sigio is on the list associated with a process or process group,
986  * disable signalling from the device, remove sigio from the list and
987  * free sigio.
988  */
989 void
990 funsetown(struct sigio **sigiop)
991 {
992  struct sigio *sigio;
993 
994  SIGIO_LOCK();
995  sigio = *sigiop;
996  if (sigio == NULL) {
997  SIGIO_UNLOCK();
998  return;
999  }
1000  *(sigio->sio_myref) = NULL;
1001  if ((sigio)->sio_pgid < 0) {
1002  struct pgrp *pg = (sigio)->sio_pgrp;
1003  PGRP_LOCK(pg);
1004  SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
1005  sigio, sio_pgsigio);
1006  PGRP_UNLOCK(pg);
1007  } else {
1008  struct proc *p = (sigio)->sio_proc;
1009  PROC_LOCK(p);
1010  SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
1011  sigio, sio_pgsigio);
1012  PROC_UNLOCK(p);
1013  }
1014  SIGIO_UNLOCK();
1015  crfree(sigio->sio_ucred);
1016  free(sigio, M_SIGIO);
1017 }
1018 
1019 /*
1020  * Free a list of sigio structures.
1021  * We only need to lock the SIGIO_LOCK because we have made ourselves
1022  * inaccessible to callers of fsetown and therefore do not need to lock
1023  * the proc or pgrp struct for the list manipulation.
1024  */
1025 void
1026 funsetownlst(struct sigiolst *sigiolst)
1027 {
1028  struct proc *p;
1029  struct pgrp *pg;
1030  struct sigio *sigio;
1031 
1032  sigio = SLIST_FIRST(sigiolst);
1033  if (sigio == NULL)
1034  return;
1035  p = NULL;
1036  pg = NULL;
1037 
1038  /*
1039  * Every entry of the list should belong
1040  * to a single proc or pgrp.
1041  */
1042  if (sigio->sio_pgid < 0) {
1043  pg = sigio->sio_pgrp;
1044  PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
1045  } else /* if (sigio->sio_pgid > 0) */ {
1046  p = sigio->sio_proc;
1047  PROC_LOCK_ASSERT(p, MA_NOTOWNED);
1048  }
1049 
1050  SIGIO_LOCK();
1051  while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
1052  *(sigio->sio_myref) = NULL;
1053  if (pg != NULL) {
1054  KASSERT(sigio->sio_pgid < 0,
1055  ("Proc sigio in pgrp sigio list"));
1056  KASSERT(sigio->sio_pgrp == pg,
1057  ("Bogus pgrp in sigio list"));
1058  PGRP_LOCK(pg);
1059  SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
1060  sio_pgsigio);
1061  PGRP_UNLOCK(pg);
1062  } else /* if (p != NULL) */ {
1063  KASSERT(sigio->sio_pgid > 0,
1064  ("Pgrp sigio in proc sigio list"));
1065  KASSERT(sigio->sio_proc == p,
1066  ("Bogus proc in sigio list"));
1067  PROC_LOCK(p);
1068  SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
1069  sio_pgsigio);
1070  PROC_UNLOCK(p);
1071  }
1072  SIGIO_UNLOCK();
1073  crfree(sigio->sio_ucred);
1074  free(sigio, M_SIGIO);
1075  SIGIO_LOCK();
1076  }
1077  SIGIO_UNLOCK();
1078 }
1079 
1080 /*
1081  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
1082  *
1083  * After permission checking, add a sigio structure to the sigio list for
1084  * the process or process group.
1085  */
1086 int
1087 fsetown(pid_t pgid, struct sigio **sigiop)
1088 {
1089  struct proc *proc;
1090  struct pgrp *pgrp;
1091  struct sigio *sigio;
1092  int ret;
1093 
1094  if (pgid == 0) {
1095  funsetown(sigiop);
1096  return (0);
1097  }
1098 
1099  ret = 0;
1100 
1101  /* Allocate and fill in the new sigio out of locks. */
1102  sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
1103  sigio->sio_pgid = pgid;
1104  sigio->sio_ucred = crhold(curthread->td_ucred);
1105  sigio->sio_myref = sigiop;
1106 
1107  sx_slock(&proctree_lock);
1108  if (pgid > 0) {
1109  proc = pfind(pgid);
1110  if (proc == NULL) {
1111  ret = ESRCH;
1112  goto fail;
1113  }
1114 
1115  /*
1116  * Policy - Don't allow a process to FSETOWN a process
1117  * in another session.
1118  *
1119  * Remove this test to allow maximum flexibility or
1120  * restrict FSETOWN to the current process or process
1121  * group for maximum safety.
1122  */
1123  PROC_UNLOCK(proc);
1124  if (proc->p_session != curthread->td_proc->p_session) {
1125  ret = EPERM;
1126  goto fail;
1127  }
1128 
1129  pgrp = NULL;
1130  } else /* if (pgid < 0) */ {
1131  pgrp = pgfind(-pgid);
1132  if (pgrp == NULL) {
1133  ret = ESRCH;
1134  goto fail;
1135  }
1136  PGRP_UNLOCK(pgrp);
1137 
1138  /*
1139  * Policy - Don't allow a process to FSETOWN a process
1140  * in another session.
1141  *
1142  * Remove this test to allow maximum flexibility or
1143  * restrict FSETOWN to the current process or process
1144  * group for maximum safety.
1145  */
1146  if (pgrp->pg_session != curthread->td_proc->p_session) {
1147  ret = EPERM;
1148  goto fail;
1149  }
1150 
1151  proc = NULL;
1152  }
1153  funsetown(sigiop);
1154  if (pgid > 0) {
1155  PROC_LOCK(proc);
1156  /*
1157  * Since funsetownlst() is called without the proctree
1158  * locked, we need to check for P_WEXIT.
1159  * XXX: is ESRCH correct?
1160  */
1161  if ((proc->p_flag & P_WEXIT) != 0) {
1162  PROC_UNLOCK(proc);
1163  ret = ESRCH;
1164  goto fail;
1165  }
1166  SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
1167  sigio->sio_proc = proc;
1168  PROC_UNLOCK(proc);
1169  } else {
1170  PGRP_LOCK(pgrp);
1171  SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
1172  sigio->sio_pgrp = pgrp;
1173  PGRP_UNLOCK(pgrp);
1174  }
1175  sx_sunlock(&proctree_lock);
1176  SIGIO_LOCK();
1177  *sigiop = sigio;
1178  SIGIO_UNLOCK();
1179  return (0);
1180 
1181 fail:
1182  sx_sunlock(&proctree_lock);
1183  crfree(sigio->sio_ucred);
1184  free(sigio, M_SIGIO);
1185  return (ret);
1186 }
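Userland reaches fsetown() via fcntl(F_SETOWN), which issues the FIOSETOWN ioctl, typically paired with O_ASYNC so SIGIO is delivered; a hedged sketch (helper name invented):

#include <fcntl.h>
#include <unistd.h>

/* Ask for SIGIO on fd, delivered to the calling process. */
static int
request_sigio(int fd)
{
	int flags;

	if (fcntl(fd, F_SETOWN, getpid()) == -1)
		return (-1);
	flags = fcntl(fd, F_GETFL);
	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFL, flags | O_ASYNC));
}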
1187 
1188 /*
1189  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1190  */
1191 pid_t
1192 fgetown(sigiop)
1193  struct sigio **sigiop;
1194 {
1195  pid_t pgid;
1196 
1197  SIGIO_LOCK();
1198  pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1199  SIGIO_UNLOCK();
1200  return (pgid);
1201 }
1202 
1203 /*
1204  * Close a file descriptor.
1205  */
1206 #ifndef _SYS_SYSPROTO_H_
1207 struct close_args {
1208  int fd;
1209 };
1210 #endif
1211 /* ARGSUSED */
1212 int
1213 sys_close(td, uap)
1214  struct thread *td;
1215  struct close_args *uap;
1216 {
1217 
1218  return (kern_close(td, uap->fd));
1219 }
1220 
1221 int
1222 kern_close(td, fd)
1223  struct thread *td;
1224  int fd;
1225 {
1226  struct filedesc *fdp;
1227  struct file *fp, *fp_object;
1228  int error;
1229  int holdleaders;
1230 
1231  error = 0;
1232  holdleaders = 0;
1233  fdp = td->td_proc->p_fd;
1234 
1235  AUDIT_SYSCLOSE(td, fd);
1236 
1237  FILEDESC_XLOCK(fdp);
1238  if ((unsigned)fd >= fdp->fd_nfiles ||
1239  (fp = fdp->fd_ofiles[fd]) == NULL) {
1240  FILEDESC_XUNLOCK(fdp);
1241  return (EBADF);
1242  }
1243  fdp->fd_ofiles[fd] = NULL;
1244  fdp->fd_ofileflags[fd] = 0;
1245  fdunused(fdp, fd);
1246  if (td->td_proc->p_fdtol != NULL) {
1247  /*
1248  * Ask fdfree() to sleep to ensure that all relevant
1249  * process leaders can be traversed in closef().
1250  */
1251  fdp->fd_holdleaderscount++;
1252  holdleaders = 1;
1253  }
1254 
1255  /*
1256  * We now hold the fp reference that used to be owned by the
1257  * descriptor array. We have to unlock the FILEDESC *AFTER*
1258  * knote_fdclose to prevent a race of the fd getting opened, a knote
1259  * added, and deleting a knote for the new fd.
1260  */
1261  knote_fdclose(td, fd);
1262 
1263  /*
1264  * When we're closing an fd with a capability, we need to notify
1265  * mqueue if the underlying object is of type mqueue.
1266  */
1267  (void)cap_funwrap(fp, 0, &fp_object);
1268  if (fp_object->f_type == DTYPE_MQUEUE)
1269  mq_fdclose(td, fd, fp_object);
1270  FILEDESC_XUNLOCK(fdp);
1271 
1272  error = closef(fp, td);
1273  if (holdleaders) {
1274  FILEDESC_XLOCK(fdp);
1275  fdp->fd_holdleaderscount--;
1276  if (fdp->fd_holdleaderscount == 0 &&
1277  fdp->fd_holdleaderswakeup != 0) {
1278  fdp->fd_holdleaderswakeup = 0;
1279  wakeup(&fdp->fd_holdleaderscount);
1280  }
1281  FILEDESC_XUNLOCK(fdp);
1282  }
1283  return (error);
1284 }
1285 
1286 /*
1287  * Close open file descriptors.
1288  */
1289 #ifndef _SYS_SYSPROTO_H_
1290 struct closefrom_args {
1291  int lowfd;
1292 };
1293 #endif
1294 /* ARGSUSED */
1295 int
1296 sys_closefrom(struct thread *td, struct closefrom_args *uap)
1297 {
1298  struct filedesc *fdp;
1299  int fd;
1300 
1301  fdp = td->td_proc->p_fd;
1302  AUDIT_ARG_FD(uap->lowfd);
1303 
1304  /*
1305  * Treat negative starting file descriptor values identical to
1306  * closefrom(0) which closes all files.
1307  */
1308  if (uap->lowfd < 0)
1309  uap->lowfd = 0;
1310  FILEDESC_SLOCK(fdp);
1311  for (fd = uap->lowfd; fd < fdp->fd_nfiles; fd++) {
1312  if (fdp->fd_ofiles[fd] != NULL) {
1313  FILEDESC_SUNLOCK(fdp);
1314  (void)kern_close(td, fd);
1315  FILEDESC_SLOCK(fdp);
1316  }
1317  }
1318  FILEDESC_SUNLOCK(fdp);
1319  return (0);
1320 }
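closefrom(2) is the usual way for a daemon to discard inherited descriptors while keeping the standard three; a minimal example:

#include <unistd.h>

int
main(void)
{
	/* Keep stdin/stdout/stderr; close every other inherited descriptor. */
	closefrom(3);
	/* ... daemon continues with only fds 0, 1 and 2 open ... */
	return (0);
}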
1321 
1322 #if defined(COMPAT_43)
1323 /*
1324  * Return status information about a file descriptor.
1325  */
1326 #ifndef _SYS_SYSPROTO_H_
1327 struct ofstat_args {
1328  int fd;
1329  struct ostat *sb;
1330 };
1331 #endif
1332 /* ARGSUSED */
1333 int
1334 ofstat(struct thread *td, struct ofstat_args *uap)
1335 {
1336  struct ostat oub;
1337  struct stat ub;
1338  int error;
1339 
1340  error = kern_fstat(td, uap->fd, &ub);
1341  if (error == 0) {
1342  cvtstat(&ub, &oub);
1343  error = copyout(&oub, uap->sb, sizeof(oub));
1344  }
1345  return (error);
1346 }
1347 #endif /* COMPAT_43 */
1348 
1349 /*
1350  * Return status information about a file descriptor.
1351  */
1352 #ifndef _SYS_SYSPROTO_H_
1353 struct fstat_args {
1354  int fd;
1355  struct stat *sb;
1356 };
1357 #endif
1358 /* ARGSUSED */
1359 int
1360 sys_fstat(struct thread *td, struct fstat_args *uap)
1361 {
1362  struct stat ub;
1363  int error;
1364 
1365  error = kern_fstat(td, uap->fd, &ub);
1366  if (error == 0)
1367  error = copyout(&ub, uap->sb, sizeof(ub));
1368  return (error);
1369 }
1370 
1371 int
1372 kern_fstat(struct thread *td, int fd, struct stat *sbp)
1373 {
1374  struct file *fp;
1375  int error;
1376 
1377  AUDIT_ARG_FD(fd);
1378 
1379  if ((error = fget(td, fd, CAP_FSTAT, &fp)) != 0)
1380  return (error);
1381 
1382  AUDIT_ARG_FILE(td->td_proc, fp);
1383 
1384  error = fo_stat(fp, sbp, td->td_ucred, td);
1385  fdrop(fp, td);
1386 #ifdef KTRACE
1387  if (error == 0 && KTRPOINT(td, KTR_STRUCT))
1388  ktrstat(sbp);
1389 #endif
1390  return (error);
1391 }
1392 
1393 /*
1394  * Return status information about a file descriptor.
1395  */
1396 #ifndef _SYS_SYSPROTO_H_
1397 struct nfstat_args {
1398  int fd;
1399  struct nstat *sb;
1400 };
1401 #endif
1402 /* ARGSUSED */
1403 int
1404 sys_nfstat(struct thread *td, struct nfstat_args *uap)
1405 {
1406  struct nstat nub;
1407  struct stat ub;
1408  int error;
1409 
1410  error = kern_fstat(td, uap->fd, &ub);
1411  if (error == 0) {
1412  cvtnstat(&ub, &nub);
1413  error = copyout(&nub, uap->sb, sizeof(nub));
1414  }
1415  return (error);
1416 }
1417 
1418 /*
1419  * Return pathconf information about a file descriptor.
1420  */
1421 #ifndef _SYS_SYSPROTO_H_
1422 struct fpathconf_args {
1423  int fd;
1424  int name;
1425 };
1426 #endif
1427 /* ARGSUSED */
1428 int
1429 sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
1430 {
1431  struct file *fp;
1432  struct vnode *vp;
1433  int error;
1434 
1435  if ((error = fget(td, uap->fd, CAP_FPATHCONF, &fp)) != 0)
1436  return (error);
1437 
1438  /* If asynchronous I/O is available, it works for all descriptors. */
1439  if (uap->name == _PC_ASYNC_IO) {
1440  td->td_retval[0] = async_io_version;
1441  goto out;
1442  }
1443  vp = fp->f_vnode;
1444  if (vp != NULL) {
1445  int vfslocked;
1446  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1447  vn_lock(vp, LK_SHARED | LK_RETRY);
1448  error = VOP_PATHCONF(vp, uap->name, td->td_retval);
1449  VOP_UNLOCK(vp, 0);
1450  VFS_UNLOCK_GIANT(vfslocked);
1451  } else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1452  if (uap->name != _PC_PIPE_BUF) {
1453  error = EINVAL;
1454  } else {
1455  td->td_retval[0] = PIPE_BUF;
1456  error = 0;
1457  }
1458  } else {
1459  error = EOPNOTSUPP;
1460  }
1461 out:
1462  fdrop(fp, td);
1463  return (error);
1464 }
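As the pipe/socket branch above shows, _PC_PIPE_BUF is the only name such descriptors answer; a short usage example:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];

	if (pipe(fds) == -1)
		return (1);
	/* For a pipe descriptor only _PC_PIPE_BUF is meaningful here. */
	printf("PIPE_BUF: %ld\n", fpathconf(fds[0], _PC_PIPE_BUF));
	return (0);
}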
1465 
1466 /*
1467  * Grow the file table to accommodate (at least) nfd descriptors. This may
1468  * block and drop the filedesc lock, but it will reacquire it before
1469  * returning.
1470  */
1471 static void
1472 fdgrowtable(struct filedesc *fdp, int nfd)
1473 {
1474  struct filedesc0 *fdp0;
1475  struct freetable *fo;
1476  struct file **ntable;
1477  struct file **otable;
1478  char *nfileflags;
1479  int nnfiles, onfiles;
1480  NDSLOTTYPE *nmap;
1481 
1482  FILEDESC_XLOCK_ASSERT(fdp);
1483 
1484  KASSERT(fdp->fd_nfiles > 0,
1485  ("zero-length file table"));
1486 
1487  /* compute the size of the new table */
1488  onfiles = fdp->fd_nfiles;
1489  nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
1490  if (nnfiles <= onfiles)
1491  /* the table is already large enough */
1492  return;
1493 
1494  /* allocate a new table and (if required) new bitmaps */
1495  FILEDESC_XUNLOCK(fdp);
1496  ntable = malloc((nnfiles * OFILESIZE) + sizeof(struct freetable),
1497  M_FILEDESC, M_ZERO | M_WAITOK);
1498  nfileflags = (char *)&ntable[nnfiles];
1499  if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
1500  nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE,
1501  M_FILEDESC, M_ZERO | M_WAITOK);
1502  else
1503  nmap = NULL;
1504  FILEDESC_XLOCK(fdp);
1505 
1506  /*
1507  * We now have new tables ready to go. Since we dropped the
1508  * filedesc lock to call malloc(), watch out for a race.
1509  */
1510  onfiles = fdp->fd_nfiles;
1511  if (onfiles >= nnfiles) {
1512  /* we lost the race, but that's OK */
1513  free(ntable, M_FILEDESC);
1514  if (nmap != NULL)
1515  free(nmap, M_FILEDESC);
1516  return;
1517  }
1518  bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
1519  bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
1520  otable = fdp->fd_ofiles;
1521  fdp->fd_ofileflags = nfileflags;
1522  fdp->fd_ofiles = ntable;
1523  /*
1524  * We must preserve ofiles until the process exits because we can't
1525  * be certain that no threads have references to the old table via
1526  * _fget().
1527  */
1528  if (onfiles > NDFILE) {
1529  fo = (struct freetable *)&otable[onfiles];
1530  fdp0 = (struct filedesc0 *)fdp;
1531  fo->ft_table = otable;
1532  SLIST_INSERT_HEAD(&fdp0->fd_free, fo, ft_next);
1533  }
1534  if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
1535  bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
1536  if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
1537  free(fdp->fd_map, M_FILEDESC);
1538  fdp->fd_map = nmap;
1539  }
1540  fdp->fd_nfiles = nnfiles;
1541 }
1542 
1543 /*
1544  * Allocate a file descriptor for the process.
1545  */
1546 int
1547 fdalloc(struct thread *td, int minfd, int *result)
1548 {
1549  struct proc *p = td->td_proc;
1550  struct filedesc *fdp = p->p_fd;
1551  int fd = -1, maxfd;
1552 #ifdef RACCT
1553  int error;
1554 #endif
1555 
1556  FILEDESC_XLOCK_ASSERT(fdp);
1557 
1558  if (fdp->fd_freefile > minfd)
1559  minfd = fdp->fd_freefile;
1560 
1561  maxfd = getmaxfd(p);
1562 
1563  /*
1564  * Search the bitmap for a free descriptor. If none is found, try
1565  * to grow the file table. Keep at it until we either get a file
1566  * descriptor or run into process or system limits; fdgrowtable()
1567  * may drop the filedesc lock, so we're in a race.
1568  */
1569  for (;;) {
1570  fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
1571  if (fd >= maxfd)
1572  return (EMFILE);
1573  if (fd < fdp->fd_nfiles)
1574  break;
1575 #ifdef RACCT
1576  PROC_LOCK(p);
1577  error = racct_set(p, RACCT_NOFILE, min(fdp->fd_nfiles * 2, maxfd));
1578  PROC_UNLOCK(p);
1579  if (error != 0)
1580  return (EMFILE);
1581 #endif
1582  fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
1583  }
1584 
1585  /*
1586  * Perform some sanity checks, then mark the file descriptor as
1587  * used and return it to the caller.
1588  */
1589  KASSERT(!fdisused(fdp, fd),
1590  ("fd_first_free() returned non-free descriptor"));
1591  KASSERT(fdp->fd_ofiles[fd] == NULL,
1592  ("free descriptor isn't"));
1593  fdp->fd_ofileflags[fd] = 0; /* XXX needed? */
1594  fdused(fdp, fd);
1595  *result = fd;
1596  return (0);
1597 }
1598 
1599 /*
1600  * Allocate n file descriptors for the process.
1601  */
1602 int
1603 fdallocn(struct thread *td, int minfd, int *fds, int n)
1604 {
1605  struct proc *p = td->td_proc;
1606  struct filedesc *fdp = p->p_fd;
1607  int i;
1608 
1609  FILEDESC_XLOCK_ASSERT(fdp);
1610 
1611  if (!fdavail(td, n))
1612  return (EMFILE);
1613 
1614  for (i = 0; i < n; i++)
1615  if (fdalloc(td, 0, &fds[i]) != 0)
1616  break;
1617 
1618  if (i < n) {
1619  for (i--; i >= 0; i--)
1620  fdunused(fdp, fds[i]);
1621  return (EMFILE);
1622  }
1623 
1624  return (0);
1625 }
1626 
1627 /*
1628  * Check to see whether n user file descriptors are available to the process
1629  * p.
1630  */
1631 int
1632 fdavail(struct thread *td, int n)
1633 {
1634  struct proc *p = td->td_proc;
1635  struct filedesc *fdp = td->td_proc->p_fd;
1636  struct file **fpp;
1637  int i, lim, last;
1638 
1639  FILEDESC_LOCK_ASSERT(fdp);
1640 
1641  /*
1642  * XXX: This is only called from uipc_usrreq.c:unp_externalize();
1643  * call racct_add() from there instead of dealing with containers
1644  * here.
1645  */
1646  lim = getmaxfd(p);
1647  if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
1648  return (1);
1649  last = min(fdp->fd_nfiles, lim);
1650  fpp = &fdp->fd_ofiles[fdp->fd_freefile];
1651  for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
1652  if (*fpp == NULL && --n <= 0)
1653  return (1);
1654  }
1655  return (0);
1656 }
1657 
1658 /*
1659  * Create a new open file structure and allocate a file descriptor for the
1660  * process that refers to it. We add one reference to the file for the
1661  * descriptor table and one reference for resultfp. This is to prevent us
1662  * being preempted and the entry in the descriptor table closed after we
1663  * release the FILEDESC lock.
1664  */
1665 int
1666 falloc(struct thread *td, struct file **resultfp, int *resultfd, int flags)
1667 {
1668  struct file *fp;
1669  int error, fd;
1670 
1671  error = falloc_noinstall(td, &fp);
1672  if (error)
1673  return (error); /* no reference held on error */
1674 
1675  error = finstall(td, fp, &fd, flags);
1676  if (error) {
1677  fdrop(fp, td); /* one reference (fp only) */
1678  return (error);
1679  }
1680 
1681  if (resultfp != NULL)
1682  *resultfp = fp; /* copy out result */
1683  else
1684  fdrop(fp, td); /* release local reference */
1685 
1686  if (resultfd != NULL)
1687  *resultfd = fd;
1688 
1689  return (0);
1690 }
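A hedged sketch of how an in-kernel consumer typically pairs falloc() with finit() and fdrop(); "example_fileops" and "softc" are invented names, not part of this file:

static int
example_create_fd(struct thread *td, void *softc,
    struct fileops *example_fileops, int *fdp)
{
	struct file *fp;
	int error;

	/* Allocate the file object and a descriptor referencing it. */
	error = falloc(td, &fp, fdp, 0);
	if (error != 0)
		return (error);
	/* Initialize the object behind the new descriptor. */
	finit(fp, FREAD | FWRITE, DTYPE_NONE, softc, example_fileops);
	/* Release the extra reference falloc() held for us. */
	fdrop(fp, td);
	return (0);
}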
1691 
1692 /*
1693  * Create a new open file structure without allocating a file descriptor.
1694  */
1695 int
1696 falloc_noinstall(struct thread *td, struct file **resultfp)
1697 {
1698  struct file *fp;
1699  int maxuserfiles = maxfiles - (maxfiles / 20);
1700  static struct timeval lastfail;
1701  static int curfail;
1702 
1703  KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
1704 
1705  if ((openfiles >= maxuserfiles &&
1706  priv_check(td, PRIV_MAXFILES) != 0) ||
1707  openfiles >= maxfiles) {
1708  if (ppsratecheck(&lastfail, &curfail, 1)) {
1709  printf("kern.maxfiles limit exceeded by uid %i, "
1710  "please see tuning(7).\n", td->td_ucred->cr_ruid);
1711  }
1712  return (ENFILE);
1713  }
1714  atomic_add_int(&openfiles, 1);
1715  fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
1716  refcount_init(&fp->f_count, 1);
1717  fp->f_cred = crhold(td->td_ucred);
1718  fp->f_ops = &badfileops;
1719  fp->f_data = NULL;
1720  fp->f_vnode = NULL;
1721  *resultfp = fp;
1722  return (0);
1723 }
1724 
1725 /*
1726  * Install a file in a file descriptor table.
1727  */
1728 int
1729 finstall(struct thread *td, struct file *fp, int *fd, int flags)
1730 {
1731  struct filedesc *fdp = td->td_proc->p_fd;
1732  int error;
1733 
1734  KASSERT(fd != NULL, ("%s: fd == NULL", __func__));
1735  KASSERT(fp != NULL, ("%s: fp == NULL", __func__));
1736 
1737  FILEDESC_XLOCK(fdp);
1738  if ((error = fdalloc(td, 0, fd))) {
1739  FILEDESC_XUNLOCK(fdp);
1740  return (error);
1741  }
1742  fhold(fp);
1743  fdp->fd_ofiles[*fd] = fp;
1744  if ((flags & O_CLOEXEC) != 0)
1745  fdp->fd_ofileflags[*fd] |= UF_EXCLOSE;
1746  FILEDESC_XUNLOCK(fdp);
1747  return (0);
1748 }
1749 
1750 /*
1751  * Build a new filedesc structure from another.
1752  * Copy the current, root, and jail root vnode references.
1753  */
1754 struct filedesc *
1755 fdinit(struct filedesc *fdp)
1756 {
1757  struct filedesc0 *newfdp;
1758 
1759  newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
1760  FILEDESC_LOCK_INIT(&newfdp->fd_fd);
1761  if (fdp != NULL) {
1762  FILEDESC_XLOCK(fdp);
1763  newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
1764  if (newfdp->fd_fd.fd_cdir)
1765  VREF(newfdp->fd_fd.fd_cdir);
1766  newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
1767  if (newfdp->fd_fd.fd_rdir)
1768  VREF(newfdp->fd_fd.fd_rdir);
1769  newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
1770  if (newfdp->fd_fd.fd_jdir)
1771  VREF(newfdp->fd_fd.fd_jdir);
1772  FILEDESC_XUNLOCK(fdp);
1773  }
1774 
1775  /* Create the file descriptor table. */
1776  newfdp->fd_fd.fd_refcnt = 1;
1777  newfdp->fd_fd.fd_holdcnt = 1;
1778  newfdp->fd_fd.fd_cmask = CMASK;
1779  newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
1780  newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
1781  newfdp->fd_fd.fd_nfiles = NDFILE;
1782  newfdp->fd_fd.fd_map = newfdp->fd_dmap;
1783  newfdp->fd_fd.fd_lastfile = -1;
1784  return (&newfdp->fd_fd);
1785 }
1786 
1787 static struct filedesc *
1788 fdhold(struct proc *p)
1789 {
1790  struct filedesc *fdp;
1791 
1792  mtx_lock(&fdesc_mtx);
1793  fdp = p->p_fd;
1794  if (fdp != NULL)
1795  fdp->fd_holdcnt++;
1796  mtx_unlock(&fdesc_mtx);
1797  return (fdp);
1798 }
1799 
1800 static void
1801 fddrop(struct filedesc *fdp)
1802 {
1803  struct filedesc0 *fdp0;
1804  struct freetable *ft;
1805  int i;
1806 
1807  mtx_lock(&fdesc_mtx);
1808  i = --fdp->fd_holdcnt;
1809  mtx_unlock(&fdesc_mtx);
1810  if (i > 0)
1811  return;
1812 
1813  FILEDESC_LOCK_DESTROY(fdp);
1814  fdp0 = (struct filedesc0 *)fdp;
1815  while ((ft = SLIST_FIRST(&fdp0->fd_free)) != NULL) {
1816  SLIST_REMOVE_HEAD(&fdp0->fd_free, ft_next);
1817  free(ft->ft_table, M_FILEDESC);
1818  }
1819  free(fdp, M_FILEDESC);
1820 }
1821 
1822 /*
1823  * Share a filedesc structure.
1824  */
1825 struct filedesc *
1826 fdshare(struct filedesc *fdp)
1827 {
1828 
1829  FILEDESC_XLOCK(fdp);
1830  fdp->fd_refcnt++;
1831  FILEDESC_XUNLOCK(fdp);
1832  return (fdp);
1833 }
1834 
1835 /*
1836  * Unshare a filedesc structure, if necessary by making a copy
1837  */
1838 void
1839 fdunshare(struct proc *p, struct thread *td)
1840 {
1841 
1842  FILEDESC_XLOCK(p->p_fd);
1843  if (p->p_fd->fd_refcnt > 1) {
1844  struct filedesc *tmp;
1845 
1846  FILEDESC_XUNLOCK(p->p_fd);
1847  tmp = fdcopy(p->p_fd);
1848  fdfree(td);
1849  p->p_fd = tmp;
1850  } else
1851  FILEDESC_XUNLOCK(p->p_fd);
1852 }
1853 
1854 /*
1855  * Copy a filedesc structure. A NULL pointer argument returns a NULL
1856  * reference; this is to ease callers, not catch errors.
1857  */
1858 struct filedesc *
1859 fdcopy(struct filedesc *fdp)
1860 {
1861  struct filedesc *newfdp;
1862  int i;
1863 
1864  /* Certain daemons might not have file descriptors. */
1865  if (fdp == NULL)
1866  return (NULL);
1867 
1868  newfdp = fdinit(fdp);
1869  FILEDESC_SLOCK(fdp);
1870  while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
1871  FILEDESC_SUNLOCK(fdp);
1872  FILEDESC_XLOCK(newfdp);
1873  fdgrowtable(newfdp, fdp->fd_lastfile + 1);
1874  FILEDESC_XUNLOCK(newfdp);
1875  FILEDESC_SLOCK(fdp);
1876  }
1877  /* copy all passable descriptors (i.e. not kqueue) */
1878  newfdp->fd_freefile = -1;
1879  for (i = 0; i <= fdp->fd_lastfile; ++i) {
1880  if (fdisused(fdp, i) &&
1881  (fdp->fd_ofiles[i]->f_ops->fo_flags & DFLAG_PASSABLE) &&
1882  fdp->fd_ofiles[i]->f_ops != &badfileops) {
1883  newfdp->fd_ofiles[i] = fdp->fd_ofiles[i];
1884  newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i];
1885  fhold(newfdp->fd_ofiles[i]);
1886  newfdp->fd_lastfile = i;
1887  } else {
1888  if (newfdp->fd_freefile == -1)
1889  newfdp->fd_freefile = i;
1890  }
1891  }
1892  newfdp->fd_cmask = fdp->fd_cmask;
1893  FILEDESC_SUNLOCK(fdp);
1894  FILEDESC_XLOCK(newfdp);
1895  for (i = 0; i <= newfdp->fd_lastfile; ++i)
1896  if (newfdp->fd_ofiles[i] != NULL)
1897  fdused(newfdp, i);
1898  if (newfdp->fd_freefile == -1)
1899  newfdp->fd_freefile = i;
1900  FILEDESC_XUNLOCK(newfdp);
1901  return (newfdp);
1902 }
1903 
1904 /*
1905  * Release a filedesc structure.
1906  */
1907 void
1908 fdfree(struct thread *td)
1909 {
1910  struct filedesc *fdp;
1911  struct file **fpp;
1912  int i, locked;
1913  struct filedesc_to_leader *fdtol;
1914  struct file *fp;
1915  struct vnode *cdir, *jdir, *rdir, *vp;
1916  struct flock lf;
1917 
1918  /* Certain daemons might not have file descriptors. */
1919  fdp = td->td_proc->p_fd;
1920  if (fdp == NULL)
1921  return;
1922 
1923 #ifdef RACCT
1924  PROC_LOCK(td->td_proc);
1925  racct_set(td->td_proc, RACCT_NOFILE, 0);
1926  PROC_UNLOCK(td->td_proc);
1927 #endif
1928 
1929  /* Check for special need to clear POSIX style locks */
1930  fdtol = td->td_proc->p_fdtol;
1931  if (fdtol != NULL) {
1932  FILEDESC_XLOCK(fdp);
1933  KASSERT(fdtol->fdl_refcount > 0,
1934  ("filedesc_to_refcount botch: fdl_refcount=%d",
1935  fdtol->fdl_refcount));
1936  if (fdtol->fdl_refcount == 1 &&
1937  (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
1938  for (i = 0, fpp = fdp->fd_ofiles;
1939  i <= fdp->fd_lastfile;
1940  i++, fpp++) {
1941  if (*fpp == NULL ||
1942  (*fpp)->f_type != DTYPE_VNODE)
1943  continue;
1944  fp = *fpp;
1945  fhold(fp);
1946  FILEDESC_XUNLOCK(fdp);
1947  lf.l_whence = SEEK_SET;
1948  lf.l_start = 0;
1949  lf.l_len = 0;
1950  lf.l_type = F_UNLCK;
1951  vp = fp->f_vnode;
1952  locked = VFS_LOCK_GIANT(vp->v_mount);
1953  (void) VOP_ADVLOCK(vp,
1954  (caddr_t)td->td_proc->
1955  p_leader,
1956  F_UNLCK,
1957  &lf,
1958  F_POSIX);
1959  VFS_UNLOCK_GIANT(locked);
1960  FILEDESC_XLOCK(fdp);
1961  fdrop(fp, td);
1962  fpp = fdp->fd_ofiles + i;
1963  }
1964  }
1965  retry:
1966  if (fdtol->fdl_refcount == 1) {
1967  if (fdp->fd_holdleaderscount > 0 &&
1968  (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
1969  /*
1970  * close() or do_dup() has cleared a reference
1971  * in a shared file descriptor table.
1972  */
1973  fdp->fd_holdleaderswakeup = 1;
1974  sx_sleep(&fdp->fd_holdleaderscount,
1975  FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
1976  goto retry;
1977  }
1978  if (fdtol->fdl_holdcount > 0) {
1979  /*
1980  * Ensure that fdtol->fdl_leader remains
1981  * valid in closef().
1982  */
1983  fdtol->fdl_wakeup = 1;
1984  sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
1985  "fdlhold", 0);
1986  goto retry;
1987  }
1988  }
1989  fdtol->fdl_refcount--;
1990  if (fdtol->fdl_refcount == 0 &&
1991  fdtol->fdl_holdcount == 0) {
1992  fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1993  fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1994  } else
1995  fdtol = NULL;
1996  td->td_proc->p_fdtol = NULL;
1997  FILEDESC_XUNLOCK(fdp);
1998  if (fdtol != NULL)
1999  free(fdtol, M_FILEDESC_TO_LEADER);
2000  }
2001  FILEDESC_XLOCK(fdp);
2002  i = --fdp->fd_refcnt;
2003  FILEDESC_XUNLOCK(fdp);
2004  if (i > 0)
2005  return;
2006 
2007  fpp = fdp->fd_ofiles;
2008  for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
2009  if (*fpp) {
2010  FILEDESC_XLOCK(fdp);
2011  fp = *fpp;
2012  *fpp = NULL;
2013  FILEDESC_XUNLOCK(fdp);
2014  (void) closef(fp, td);
2015  }
2016  }
2017  FILEDESC_XLOCK(fdp);
2018 
2019  /* XXX This should happen earlier. */
2020  mtx_lock(&fdesc_mtx);
2021  td->td_proc->p_fd = NULL;
2022  mtx_unlock(&fdesc_mtx);
2023 
2024  if (fdp->fd_nfiles > NDFILE)
2025  free(fdp->fd_ofiles, M_FILEDESC);
2026  if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
2027  free(fdp->fd_map, M_FILEDESC);
2028 
2029  fdp->fd_nfiles = 0;
2030 
2031  cdir = fdp->fd_cdir;
2032  fdp->fd_cdir = NULL;
2033  rdir = fdp->fd_rdir;
2034  fdp->fd_rdir = NULL;
2035  jdir = fdp->fd_jdir;
2036  fdp->fd_jdir = NULL;
2037  FILEDESC_XUNLOCK(fdp);
2038 
2039  if (cdir) {
2040  locked = VFS_LOCK_GIANT(cdir->v_mount);
2041  vrele(cdir);
2042  VFS_UNLOCK_GIANT(locked);
2043  }
2044  if (rdir) {
2045  locked = VFS_LOCK_GIANT(rdir->v_mount);
2046  vrele(rdir);
2047  VFS_UNLOCK_GIANT(locked);
2048  }
2049  if (jdir) {
2050  locked = VFS_LOCK_GIANT(jdir->v_mount);
2051  vrele(jdir);
2052  VFS_UNLOCK_GIANT(locked);
2053  }
2054 
2055  fddrop(fdp);
2056 }
2057 
2058 /*
2059  * For setugid programs, we don't want people to use that setugidness
2060  * to generate error messages which write to a file which would
2061  * otherwise be off-limits to the process. We check for filesystems where
2062  * the vnode can change out from under us after execve (like [lin]procfs).
2063  *
2064  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2065  * sufficient. We also don't check for setugidness since we know we are.
2066  */
2067 static int
2068 is_unsafe(struct file *fp)
2069 {
2070  if (fp->f_type == DTYPE_VNODE) {
2071  struct vnode *vp = fp->f_vnode;
2072 
2073  if ((vp->v_vflag & VV_PROCDEP) != 0)
2074  return (1);
2075  }
2076  return (0);
2077 }
2078 
2079 /*
2080  * Make this setguid thing safe, if at all possible.
2081  */
2082 void
2083 setugidsafety(struct thread *td)
2084 {
2085  struct filedesc *fdp;
2086  int i;
2087 
2088  /* Certain daemons might not have file descriptors. */
2089  fdp = td->td_proc->p_fd;
2090  if (fdp == NULL)
2091  return;
2092 
2093  /*
2094  * Note: fdp->fd_ofiles may be reallocated out from under us while
2095  * we are blocked in a close. Be careful!
2096  */
2097  FILEDESC_XLOCK(fdp);
2098  for (i = 0; i <= fdp->fd_lastfile; i++) {
2099  if (i > 2)
2100  break;
2101  if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
2102  struct file *fp;
2103 
2104  knote_fdclose(td, i);
2105  /*
2106  * NULL-out descriptor prior to close to avoid
2107  * a race while close blocks.
2108  */
2109  fp = fdp->fd_ofiles[i];
2110  fdp->fd_ofiles[i] = NULL;
2111  fdp->fd_ofileflags[i] = 0;
2112  fdunused(fdp, i);
2113  FILEDESC_XUNLOCK(fdp);
2114  (void) closef(fp, td);
2115  FILEDESC_XLOCK(fdp);
2116  }
2117  }
2118  FILEDESC_XUNLOCK(fdp);
2119 }
2120 
2121 /*
2122  * If a specific file object occupies a specific file descriptor, close the
2123  * file descriptor entry and drop a reference on the file object. This is a
2124  * convenience function for handling a subsequent error in a function that
2125  * calls falloc(); it copes with the race in which another thread has closed
2126  * the file descriptor out from under the thread creating the file object.
2127  */
2128 void
2129 fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
2130 {
2131 
2132  FILEDESC_XLOCK(fdp);
2133  if (fdp->fd_ofiles[idx] == fp) {
2134  fdp->fd_ofiles[idx] = NULL;
2135  fdunused(fdp, idx);
2136  FILEDESC_XUNLOCK(fdp);
2137  fdrop(fp, td);
2138  } else
2139  FILEDESC_XUNLOCK(fdp);
2140 }
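
A minimal sketch of the error-recovery pattern fdclose() supports, assuming the falloc() signature used elsewhere in this file; example_setup_backend() is a hypothetical setup step, not part of kern_descrip.c.

static int
example_create_fd(struct thread *td)
{
	struct file *fp;
	int error, fd;

	/* falloc() installs fd and returns fp held for the caller. */
	error = falloc(td, &fp, &fd, 0);
	if (error != 0)
		return (error);
	error = example_setup_backend(fp);	/* hypothetical setup step */
	if (error != 0) {
		/* Undo the install only if the slot still refers to fp. */
		fdclose(td->td_proc->p_fd, fp, fd, td);
		fdrop(fp, td);
		return (error);
	}
	td->td_retval[0] = fd;
	fdrop(fp, td);
	return (0);
}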
2141 
2142 /*
2143  * Close any files on exec?
2144  */
2145 void
2146 fdcloseexec(struct thread *td)
2147 {
2148  struct filedesc *fdp;
2149  int i;
2150 
2151  /* Certain daemons might not have file descriptors. */
2152  fdp = td->td_proc->p_fd;
2153  if (fdp == NULL)
2154  return;
2155 
2156  FILEDESC_XLOCK(fdp);
2157 
2158  /*
2159  * We cannot cache fd_ofiles or fd_ofileflags since operations
2160  * may block and rip them out from under us.
2161  */
2162  for (i = 0; i <= fdp->fd_lastfile; i++) {
2163  if (fdp->fd_ofiles[i] != NULL &&
2164  (fdp->fd_ofiles[i]->f_type == DTYPE_MQUEUE ||
2165  (fdp->fd_ofileflags[i] & UF_EXCLOSE))) {
2166  struct file *fp;
2167 
2168  knote_fdclose(td, i);
2169  /*
2170  * NULL-out descriptor prior to close to avoid
2171  * a race while close blocks.
2172  */
2173  fp = fdp->fd_ofiles[i];
2174  fdp->fd_ofiles[i] = NULL;
2175  fdp->fd_ofileflags[i] = 0;
2176  fdunused(fdp, i);
2177  if (fp->f_type == DTYPE_MQUEUE)
2178  mq_fdclose(td, i, fp);
2179  FILEDESC_XUNLOCK(fdp);
2180  (void) closef(fp, td);
2181  FILEDESC_XLOCK(fdp);
2182  }
2183  }
2184  FILEDESC_XUNLOCK(fdp);
2185 }
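
For reference, a userland sketch of how a descriptor acquires the UF_EXCLOSE flag that fdcloseexec() honours: fcntl(F_SETFD, FD_CLOEXEC) marks it close-on-exec.

#include <fcntl.h>

/* Mark fd close-on-exec so exec-time cleanup (fdcloseexec) closes it. */
static int
example_set_cloexec(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFD);
	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFD, flags | FD_CLOEXEC));
}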
2186 
2187 /*
2188  * It is unsafe for set[ug]id processes to be started with file
2189  * descriptors 0..2 closed, as these descriptors are given implicit
2190  * significance in the Standard C library. fdcheckstd() will create a
2191  * descriptor referencing /dev/null for each of stdin, stdout, and
2192  * stderr that is not already open.
2193  */
2194 int
2195 fdcheckstd(struct thread *td)
2196 {
2197  struct filedesc *fdp;
2198  register_t retval, save;
2199  int i, error, devnull;
2200 
2201  fdp = td->td_proc->p_fd;
2202  if (fdp == NULL)
2203  return (0);
2204  KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
2205  devnull = -1;
2206  error = 0;
2207  for (i = 0; i < 3; i++) {
2208  if (fdp->fd_ofiles[i] != NULL)
2209  continue;
2210  if (devnull < 0) {
2211  save = td->td_retval[0];
2212  error = kern_open(td, "/dev/null", UIO_SYSSPACE,
2213  O_RDWR, 0);
2214  devnull = td->td_retval[0];
2215  td->td_retval[0] = save;
2216  if (error)
2217  break;
2218  KASSERT(devnull == i, ("oof, we didn't get our fd"));
2219  } else {
2220  error = do_dup(td, DUP_FIXED, devnull, i, &retval);
2221  if (error != 0)
2222  break;
2223  }
2224  }
2225  return (error);
2226 }
2227 
2228 /*
2229  * Internal form of close. Decrement reference count on file structure.
2230  * Note: td may be NULL when closing a file that was being passed in a
2231  * message.
2232  *
2233  * XXXRW: Giant is not required for the caller, but often will be held; this
2234  * makes it moderately likely the Giant will be recursed in the VFS case.
2235  */
2236 int
2237 closef(struct file *fp, struct thread *td)
2238 {
2239  struct vnode *vp;
2240  struct flock lf;
2241  struct filedesc_to_leader *fdtol;
2242  struct filedesc *fdp;
2243  struct file *fp_object;
2244 
2245  /*
2246  * POSIX record locking dictates that any close releases ALL
2247  * locks owned by this process. This is handled by setting
2248  * a flag in the unlock to free ONLY locks obeying POSIX
2249  * semantics, and not to free BSD-style file locks.
2250  * If the descriptor was in a message, POSIX-style locks
2251  * aren't passed with the descriptor, and the thread pointer
2252  * will be NULL. Callers should be careful only to pass a
2253  * NULL thread pointer when there really is no owning
2254  * context that might have locks, or the locks will be
2255  * leaked.
2256  *
2257  * If this is a capability, we do lock processing under the underlying
2258  * node, not the capability itself.
2259  */
2260  (void)cap_funwrap(fp, 0, &fp_object);
2261  if ((fp_object->f_type == DTYPE_VNODE) && (td != NULL)) {
2262  int vfslocked;
2263 
2264  vp = fp_object->f_vnode;
2265  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2266  if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2267  lf.l_whence = SEEK_SET;
2268  lf.l_start = 0;
2269  lf.l_len = 0;
2270  lf.l_type = F_UNLCK;
2271  (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
2272  F_UNLCK, &lf, F_POSIX);
2273  }
2274  fdtol = td->td_proc->p_fdtol;
2275  if (fdtol != NULL) {
2276  /*
2277  * Handle special case where file descriptor table is
2278  * shared between multiple process leaders.
2279  */
2280  fdp = td->td_proc->p_fd;
2281  FILEDESC_XLOCK(fdp);
2282  for (fdtol = fdtol->fdl_next;
2283  fdtol != td->td_proc->p_fdtol;
2284  fdtol = fdtol->fdl_next) {
2285  if ((fdtol->fdl_leader->p_flag &
2286  P_ADVLOCK) == 0)
2287  continue;
2288  fdtol->fdl_holdcount++;
2289  FILEDESC_XUNLOCK(fdp);
2290  lf.l_whence = SEEK_SET;
2291  lf.l_start = 0;
2292  lf.l_len = 0;
2293  lf.l_type = F_UNLCK;
2294  vp = fp_object->f_vnode;
2295  (void) VOP_ADVLOCK(vp,
2296  (caddr_t)fdtol->fdl_leader,
2297  F_UNLCK, &lf, F_POSIX);
2298  FILEDESC_XLOCK(fdp);
2299  fdtol->fdl_holdcount--;
2300  if (fdtol->fdl_holdcount == 0 &&
2301  fdtol->fdl_wakeup != 0) {
2302  fdtol->fdl_wakeup = 0;
2303  wakeup(fdtol);
2304  }
2305  }
2306  FILEDESC_XUNLOCK(fdp);
2307  }
2308  VFS_UNLOCK_GIANT(vfslocked);
2309  }
2310  return (fdrop(fp, td));
2311 }
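
A userland sketch of the POSIX record-lock semantics described above: closing any descriptor for a file releases all of the process's fcntl() locks on it, even locks taken through a different descriptor.

#include <fcntl.h>
#include <unistd.h>

static void
example_posix_lock_pitfall(const char *path)
{
	struct flock lf = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd1, fd2;

	fd1 = open(path, O_RDWR);
	fd2 = open(path, O_RDONLY);
	(void)fcntl(fd1, F_SETLK, &lf);	/* lock the whole file via fd1 */
	close(fd2);			/* drops the lock acquired via fd1 */
	close(fd1);
}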
2312 
2313 /*
2314  * Initialize the file pointer with the specified properties.
2315  *
2316  * The ops are set with release semantics to be certain that the flags, type,
2317  * and data are visible when ops is. This is to prevent ops methods from being
2318  * called with bad data.
2319  */
2320 void
2321 finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
2322 {
2323  fp->f_data = data;
2324  fp->f_flag = flag;
2325  fp->f_type = type;
2326  atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
2327 }
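
A sketch of the construction order finit() is designed for: the backing object is fully built before finit() publishes f_ops, so no consumer observes partially initialized fields. DTYPE_EXAMPLE, example_new() and example_ops are hypothetical placeholders.

static int
example_open_object(struct thread *td)
{
	struct file *fp;
	void *data;
	int error, fd;

	error = falloc(td, &fp, &fd, 0);
	if (error != 0)
		return (error);
	data = example_new();			/* hypothetical backing object */
	/* Set flags, type, and data; ops become visible last. */
	finit(fp, FREAD | FWRITE, DTYPE_EXAMPLE, data, &example_ops);
	td->td_retval[0] = fd;
	fdrop(fp, td);
	return (0);
}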
2328 
2329 struct file *
2330 fget_unlocked(struct filedesc *fdp, int fd)
2331 {
2332  struct file *fp;
2333  u_int count;
2334 
2335  if (fd < 0 || fd >= fdp->fd_nfiles)
2336  return (NULL);
2337  /*
2338  * Fetch the descriptor locklessly. We avoid fdrop() races by
2339  * never raising a refcount that has already dropped to 0. To accomplish this we have
2340  * to use a cmpset loop rather than an atomic_add. The descriptor
2341  * must be re-verified once we acquire a reference to be certain
2342  * that the identity is still correct and we did not lose a race
2343  * due to preemption.
2344  */
2345  for (;;) {
2346  fp = fdp->fd_ofiles[fd];
2347  if (fp == NULL)
2348  break;
2349  count = fp->f_count;
2350  if (count == 0)
2351  continue;
2352  /*
2353  * Use an acquire barrier to prevent caching of fd_ofiles
2354  * so it is refreshed for verification.
2355  */
2356  if (atomic_cmpset_acq_int(&fp->f_count, count, count + 1) != 1)
2357  continue;
2358  if (fp == fdp->fd_ofiles[fd])
2359  break;
2360  fdrop(fp, curthread);
2361  }
2362 
2363  return (fp);
2364 }
2365 
2366 /*
2367  * Extract the file pointer associated with the specified descriptor for the
2368  * current user process.
2369  *
2370  * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
2371  * returned.
2372  *
2373  * If the FGET_GETCAP flag is set, the capability itself will be returned.
2374  * Calling _fget() with FGET_GETCAP on a non-capability will return EINVAL.
2375  * Otherwise, if the file is a capability, its rights will be checked against
2376  * the capability rights mask, and if successful, the object will be unwrapped.
2377  *
2378  * If an error occurred, the non-zero error is returned and *fpp is set to
2379  * NULL. Otherwise *fpp is held and set and zero is returned. Caller is
2380  * responsible for fdrop().
2381  */
2382 #define FGET_GETCAP 0x00000001
2383 static __inline int
2384 _fget(struct thread *td, int fd, struct file **fpp, int flags,
2385  cap_rights_t needrights, cap_rights_t *haverightsp, u_char *maxprotp,
2386  int fget_flags)
2387 {
2388  struct filedesc *fdp;
2389  struct file *fp;
2390 #ifdef CAPABILITIES
2391  struct file *fp_fromcap;
2392 #endif
2393  int error;
2394 
2395  *fpp = NULL;
2396  if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
2397  return (EBADF);
2398  if ((fp = fget_unlocked(fdp, fd)) == NULL)
2399  return (EBADF);
2400  if (fp->f_ops == &badfileops) {
2401  fdrop(fp, td);
2402  return (EBADF);
2403  }
2404 
2405 #ifdef CAPABILITIES
2406  /*
2407  * If this is a capability, what rights does it have?
2408  */
2409  if (haverightsp != NULL) {
2410  if (fp->f_type == DTYPE_CAPABILITY)
2411  *haverightsp = cap_rights(fp);
2412  else
2413  *haverightsp = CAP_MASK_VALID;
2414  }
2415 
2416  /*
2417  * If a capability has been requested, return the capability directly.
2418  * Otherwise, check capability rights, extract the underlying object,
2419  * and check its access flags.
2420  */
2421  if (fget_flags & FGET_GETCAP) {
2422  if (fp->f_type != DTYPE_CAPABILITY) {
2423  fdrop(fp, td);
2424  return (EINVAL);
2425  }
2426  } else {
2427  if (maxprotp == NULL)
2428  error = cap_funwrap(fp, needrights, &fp_fromcap);
2429  else
2430  error = cap_funwrap_mmap(fp, needrights, maxprotp,
2431  &fp_fromcap);
2432  if (error != 0) {
2433  fdrop(fp, td);
2434  return (error);
2435  }
2436 
2437  /*
2438  * If we've unwrapped a file, drop the original capability
2439  * and hold the new descriptor. fp after this point refers to
2440  * the actual (unwrapped) object, not the capability.
2441  */
2442  if (fp != fp_fromcap) {
2443  fhold(fp_fromcap);
2444  fdrop(fp, td);
2445  fp = fp_fromcap;
2446  }
2447  }
2448 #else /* !CAPABILITIES */
2449  KASSERT(fp->f_type != DTYPE_CAPABILITY,
2450  ("%s: saw capability", __func__));
2451  if (maxprotp != NULL)
2452  *maxprotp = VM_PROT_ALL;
2453 #endif /* CAPABILITIES */
2454 
2455  /*
2456  * FREAD and FWRITE failure return EBADF as per POSIX.
2457  */
2458  error = 0;
2459  switch (flags) {
2460  case FREAD:
2461  case FWRITE:
2462  if ((fp->f_flag & flags) == 0)
2463  error = EBADF;
2464  break;
2465  case FEXEC:
2466  if ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
2467  ((fp->f_flag & FWRITE) != 0))
2468  error = EBADF;
2469  break;
2470  case 0:
2471  break;
2472  default:
2473  KASSERT(0, ("wrong flags"));
2474  }
2475 
2476  if (error != 0) {
2477  fdrop(fp, td);
2478  return (error);
2479  }
2480 
2481  *fpp = fp;
2482  return (0);
2483 }
2484 
2485 int
2486 fget(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
2487 {
2488 
2489  return(_fget(td, fd, fpp, 0, rights, NULL, NULL, 0));
2490 }
2491 
2492 int
2493 fget_mmap(struct thread *td, int fd, cap_rights_t rights, u_char *maxprotp,
2494  struct file **fpp)
2495 {
2496 
2497  return (_fget(td, fd, fpp, 0, rights, NULL, maxprotp, 0));
2498 }
2499 
2500 int
2501 fget_read(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
2502 {
2503 
2504  return(_fget(td, fd, fpp, FREAD, rights, NULL, NULL, 0));
2505 }
2506 
2507 int
2508 fget_write(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
2509 {
2510 
2511  return (_fget(td, fd, fpp, FWRITE, rights, NULL, NULL, 0));
2512 }
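
A sketch of the typical consumer pattern for the fget() family: hold the file, use it, then fdrop() it. CAP_FSTAT is used here only as an illustrative rights mask.

static int
example_query_flags(struct thread *td, int fd, u_int *flagsp)
{
	struct file *fp;
	int error;

	error = fget(td, fd, CAP_FSTAT, &fp);
	if (error != 0)
		return (error);
	*flagsp = fp->f_flag;		/* use the held file */
	fdrop(fp, td);
	return (0);
}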
2513 
2514 /*
2515  * Unlike the other fget() calls, which accept and check capability rights
2516  * but never return capabilities, fgetcap() returns the capability but doesn't
2517  * check capability rights.
2518  */
2519 int
2520 fgetcap(struct thread *td, int fd, struct file **fpp)
2521 {
2522 
2523  return (_fget(td, fd, fpp, 0, 0, NULL, NULL, FGET_GETCAP));
2524 }
2525 
2526 
2527 /*
2528  * Like fget() but loads the underlying vnode, or returns an error if the
2529  * descriptor does not represent a vnode. Note that pipes use vnodes but
2530  * never have VM objects. The returned vnode will be vref()'d.
2531  *
2532  * XXX: what about the unused flags?
2533  */
2534 static __inline int
2535 _fgetvp(struct thread *td, int fd, int flags, cap_rights_t needrights,
2536  cap_rights_t *haverightsp, struct vnode **vpp)
2537 {
2538  struct file *fp;
2539  int error;
2540 
2541  *vpp = NULL;
2542  if ((error = _fget(td, fd, &fp, flags, needrights, haverightsp,
2543  NULL, 0)) != 0)
2544  return (error);
2545  if (fp->f_vnode == NULL) {
2546  error = EINVAL;
2547  } else {
2548  *vpp = fp->f_vnode;
2549  vref(*vpp);
2550  }
2551  fdrop(fp, td);
2552 
2553  return (error);
2554 }
2555 
2556 int
2557 fgetvp(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
2558 {
2559 
2560  return (_fgetvp(td, fd, 0, rights, NULL, vpp));
2561 }
2562 
2563 int
2564 fgetvp_rights(struct thread *td, int fd, cap_rights_t need, cap_rights_t *have,
2565  struct vnode **vpp)
2566 {
2567  return (_fgetvp(td, fd, 0, need, have, vpp));
2568 }
2569 
2570 int
2571 fgetvp_read(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
2572 {
2573 
2574  return (_fgetvp(td, fd, FREAD, rights, NULL, vpp));
2575 }
2576 
2577 int
2578 fgetvp_exec(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
2579 {
2580 
2581  return (_fgetvp(td, fd, FEXEC, rights, NULL, vpp));
2582 }
2583 
2584 #ifdef notyet
2585 int
2586 fgetvp_write(struct thread *td, int fd, cap_rights_t rights,
2587  struct vnode **vpp)
2588 {
2589 
2590  return (_fgetvp(td, fd, FWRITE, rights, NULL, vpp));
2591 }
2592 #endif
2593 
2594 /*
2595  * Like fget() but loads the underlying socket, or returns an error if the
2596  * descriptor does not represent a socket.
2597  *
2598  * We bump the ref count on the returned socket. XXX Also obtain the SX lock
2599  * in the future.
2600  *
2601  * Note: fgetsock() and fputsock() are deprecated, as consumers should rely
2602  * on their file descriptor reference to prevent the socket from being free'd
2603  * during use.
2604  */
2605 int
2606 fgetsock(struct thread *td, int fd, cap_rights_t rights, struct socket **spp,
2607  u_int *fflagp)
2608 {
2609  struct file *fp;
2610  int error;
2611 
2612  *spp = NULL;
2613  if (fflagp != NULL)
2614  *fflagp = 0;
2615  if ((error = _fget(td, fd, &fp, 0, rights, NULL, NULL, 0)) != 0)
2616  return (error);
2617  if (fp->f_type != DTYPE_SOCKET) {
2618  error = ENOTSOCK;
2619  } else {
2620  *spp = fp->f_data;
2621  if (fflagp)
2622  *fflagp = fp->f_flag;
2623  SOCK_LOCK(*spp);
2624  soref(*spp);
2625  SOCK_UNLOCK(*spp);
2626  }
2627  fdrop(fp, td);
2628 
2629  return (error);
2630 }
2631 
2632 /*
2633  * Drop the reference count on the socket and XXX release the SX lock in the
2634  * future. The last reference closes the socket.
2635  *
2636  * Note: fputsock() is deprecated, see comment for fgetsock().
2637  */
2638 void
2639 fputsock(struct socket *so)
2640 {
2641 
2642  ACCEPT_LOCK();
2643  SOCK_LOCK(so);
2644  CURVNET_SET(so->so_vnet);
2645  sorele(so);
2646  CURVNET_RESTORE();
2647 }
2648 
2649 /*
2650  * Handle the last reference to a file being closed.
2651  *
2652  * No special capability handling here, as the capability's fo_close will run
2653  * instead of the object here, and perform any necessary drop on the object.
2654  */
2655 int
2656 _fdrop(struct file *fp, struct thread *td)
2657 {
2658  int error;
2659 
2660  error = 0;
2661  if (fp->f_count != 0)
2662  panic("fdrop: count %d", fp->f_count);
2663  if (fp->f_ops != &badfileops)
2664  error = fo_close(fp, td);
2665  atomic_subtract_int(&openfiles, 1);
2666  crfree(fp->f_cred);
2667  free(fp->f_advice, M_FADVISE);
2668  uma_zfree(file_zone, fp);
2669 
2670  return (error);
2671 }
2672 
2673 /*
2674  * Apply an advisory lock on a file descriptor.
2675  *
2676  * Just attempt to get a record lock of the requested type on the entire file
2677  * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2678  */
2679 #ifndef _SYS_SYSPROTO_H_
2680 struct flock_args {
2681  int fd;
2682  int how;
2683 };
2684 #endif
2685 /* ARGSUSED */
2686 int
2687 sys_flock(struct thread *td, struct flock_args *uap)
2688 {
2689  struct file *fp;
2690  struct vnode *vp;
2691  struct flock lf;
2692  int vfslocked;
2693  int error;
2694 
2695  if ((error = fget(td, uap->fd, CAP_FLOCK, &fp)) != 0)
2696  return (error);
2697  if (fp->f_type != DTYPE_VNODE) {
2698  fdrop(fp, td);
2699  return (EOPNOTSUPP);
2700  }
2701 
2702  vp = fp->f_vnode;
2703  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2704  lf.l_whence = SEEK_SET;
2705  lf.l_start = 0;
2706  lf.l_len = 0;
2707  if (uap->how & LOCK_UN) {
2708  lf.l_type = F_UNLCK;
2709  atomic_clear_int(&fp->f_flag, FHASLOCK);
2710  error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
2711  goto done2;
2712  }
2713  if (uap->how & LOCK_EX)
2714  lf.l_type = F_WRLCK;
2715  else if (uap->how & LOCK_SH)
2716  lf.l_type = F_RDLCK;
2717  else {
2718  error = EBADF;
2719  goto done2;
2720  }
2721  atomic_set_int(&fp->f_flag, FHASLOCK);
2722  error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
2723  (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
2724 done2:
2725  fdrop(fp, td);
2726  VFS_UNLOCK_GIANT(vfslocked);
2727  return (error);
2728 }
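
A userland sketch of the semantics implemented by sys_flock(): the whole file is locked, LOCK_NB makes the request non-blocking, and LOCK_UN releases it.

#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_flock_file(const char *path)
{
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return (-1);
	if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
		close(fd);		/* EWOULDBLOCK if already locked */
		return (-1);
	}
	/* ... exclusive access ... */
	(void)flock(fd, LOCK_UN);
	close(fd);
	return (0);
}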
2729 /*
2730  * Duplicate the specified descriptor to a free descriptor.
2731  */
2732 int
2733 dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd, int mode, int error)
2734 {
2735  struct file *wfp;
2736  struct file *fp;
2737 
2738  /*
2739  * If the to-be-dup'd fd number is greater than the allowed number
2740  * of file descriptors, or the fd to be dup'd has already been
2741  * closed, then reject.
2742  */
2743  FILEDESC_XLOCK(fdp);
2744  if (dfd < 0 || dfd >= fdp->fd_nfiles ||
2745  (wfp = fdp->fd_ofiles[dfd]) == NULL) {
2746  FILEDESC_XUNLOCK(fdp);
2747  return (EBADF);
2748  }
2749 
2750  /*
2751  * There are two cases of interest here.
2752  *
2753  * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
2754  *
2755  * For ENXIO steal away the file structure from (dfd) and store it in
2756  * (indx). (dfd) is effectively closed by this operation.
2757  *
2758  * Any other error code is just returned.
2759  */
2760  switch (error) {
2761  case ENODEV:
2762  /*
2763  * Check that the mode the file is being opened for is a
2764  * subset of the mode of the existing descriptor.
2765  */
2766  if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
2767  FILEDESC_XUNLOCK(fdp);
2768  return (EACCES);
2769  }
2770  fp = fdp->fd_ofiles[indx];
2771  fdp->fd_ofiles[indx] = wfp;
2772  fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
2773  if (fp == NULL)
2774  fdused(fdp, indx);
2775  fhold(wfp);
2776  FILEDESC_XUNLOCK(fdp);
2777  if (fp != NULL)
2778  /*
2779  * We now own the reference to fp that the ofiles[]
2780  * array used to own. Release it.
2781  */
2782  fdrop(fp, td);
2783  return (0);
2784 
2785  case ENXIO:
2786  /*
2787  * Steal away the file pointer from dfd and stuff it into indx.
2788  */
2789  fp = fdp->fd_ofiles[indx];
2790  fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
2791  fdp->fd_ofiles[dfd] = NULL;
2792  fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
2793  fdp->fd_ofileflags[dfd] = 0;
2794  fdunused(fdp, dfd);
2795  if (fp == NULL)
2796  fdused(fdp, indx);
2797  FILEDESC_XUNLOCK(fdp);
2798 
2799  /*
2800  * We now own the reference to fp that the ofiles[] array
2801  * used to own. Release it.
2802  */
2803  if (fp != NULL)
2804  fdrop(fp, td);
2805  return (0);
2806 
2807  default:
2808  FILEDESC_XUNLOCK(fdp);
2809  return (error);
2810  }
2811  /* NOTREACHED */
2812 }
2813 
2814 /*
2815  * Scan all active processes and prisons to see if any of them have a current
2816  * or root directory of `olddp'. If so, replace them with the new mount point.
2817  */
2818 void
2819 mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
2820 {
2821  struct filedesc *fdp;
2822  struct prison *pr;
2823  struct proc *p;
2824  int nrele;
2825 
2826  if (vrefcnt(olddp) == 1)
2827  return;
2828  nrele = 0;
2829  sx_slock(&allproc_lock);
2830  FOREACH_PROC_IN_SYSTEM(p) {
2831  fdp = fdhold(p);
2832  if (fdp == NULL)
2833  continue;
2834  FILEDESC_XLOCK(fdp);
2835  if (fdp->fd_cdir == olddp) {
2836  vref(newdp);
2837  fdp->fd_cdir = newdp;
2838  nrele++;
2839  }
2840  if (fdp->fd_rdir == olddp) {
2841  vref(newdp);
2842  fdp->fd_rdir = newdp;
2843  nrele++;
2844  }
2845  if (fdp->fd_jdir == olddp) {
2846  vref(newdp);
2847  fdp->fd_jdir = newdp;
2848  nrele++;
2849  }
2850  FILEDESC_XUNLOCK(fdp);
2851  fddrop(fdp);
2852  }
2853  sx_sunlock(&allproc_lock);
2854  if (rootvnode == olddp) {
2855  vref(newdp);
2856  rootvnode = newdp;
2857  nrele++;
2858  }
2859  mtx_lock(&prison0.pr_mtx);
2860  if (prison0.pr_root == olddp) {
2861  vref(newdp);
2862  prison0.pr_root = newdp;
2863  nrele++;
2864  }
2865  mtx_unlock(&prison0.pr_mtx);
2866  sx_slock(&allprison_lock);
2867  TAILQ_FOREACH(pr, &allprison, pr_list) {
2868  mtx_lock(&pr->pr_mtx);
2869  if (pr->pr_root == olddp) {
2870  vref(newdp);
2871  pr->pr_root = newdp;
2872  nrele++;
2873  }
2874  mtx_unlock(&pr->pr_mtx);
2875  }
2876  sx_sunlock(&allprison_lock);
2877  while (nrele--)
2878  vrele(olddp);
2879 }
2880 
2881 struct filedesc_to_leader *
2882 filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
2883 {
2884  struct filedesc_to_leader *fdtol;
2885 
2886  fdtol = malloc(sizeof(struct filedesc_to_leader),
2887  M_FILEDESC_TO_LEADER,
2888  M_WAITOK);
2889  fdtol->fdl_refcount = 1;
2890  fdtol->fdl_holdcount = 0;
2891  fdtol->fdl_wakeup = 0;
2892  fdtol->fdl_leader = leader;
2893  if (old != NULL) {
2894  FILEDESC_XLOCK(fdp);
2895  fdtol->fdl_next = old->fdl_next;
2896  fdtol->fdl_prev = old;
2897  old->fdl_next = fdtol;
2898  fdtol->fdl_next->fdl_prev = fdtol;
2899  FILEDESC_XUNLOCK(fdp);
2900  } else {
2901  fdtol->fdl_next = fdtol;
2902  fdtol->fdl_prev = fdtol;
2903  }
2904  return (fdtol);
2905 }
2906 
2907 /*
2908  * Get file structures globally.
2909  */
2910 static int
2911 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2912 {
2913  struct xfile xf;
2914  struct filedesc *fdp;
2915  struct file *fp;
2916  struct proc *p;
2917  int error, n;
2918 
2919  error = sysctl_wire_old_buffer(req, 0);
2920  if (error != 0)
2921  return (error);
2922  if (req->oldptr == NULL) {
2923  n = 0;
2924  sx_slock(&allproc_lock);
2925  FOREACH_PROC_IN_SYSTEM(p) {
2926  if (p->p_state == PRS_NEW)
2927  continue;
2928  fdp = fdhold(p);
2929  if (fdp == NULL)
2930  continue;
2931  /* overestimates sparse tables. */
2932  if (fdp->fd_lastfile > 0)
2933  n += fdp->fd_lastfile;
2934  fddrop(fdp);
2935  }
2936  sx_sunlock(&allproc_lock);
2937  return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
2938  }
2939  error = 0;
2940  bzero(&xf, sizeof(xf));
2941  xf.xf_size = sizeof(xf);
2942  sx_slock(&allproc_lock);
2943  FOREACH_PROC_IN_SYSTEM(p) {
2944  PROC_LOCK(p);
2945  if (p->p_state == PRS_NEW) {
2946  PROC_UNLOCK(p);
2947  continue;
2948  }
2949  if (p_cansee(req->td, p) != 0) {
2950  PROC_UNLOCK(p);
2951  continue;
2952  }
2953  xf.xf_pid = p->p_pid;
2954  xf.xf_uid = p->p_ucred->cr_uid;
2955  PROC_UNLOCK(p);
2956  fdp = fdhold(p);
2957  if (fdp == NULL)
2958  continue;
2959  FILEDESC_SLOCK(fdp);
2960  for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) {
2961  if ((fp = fdp->fd_ofiles[n]) == NULL)
2962  continue;
2963  xf.xf_fd = n;
2964  xf.xf_file = fp;
2965  xf.xf_data = fp->f_data;
2966  xf.xf_vnode = fp->f_vnode;
2967  xf.xf_type = fp->f_type;
2968  xf.xf_count = fp->f_count;
2969  xf.xf_msgcount = 0;
2970  xf.xf_offset = foffset_get(fp);
2971  xf.xf_flag = fp->f_flag;
2972  error = SYSCTL_OUT(req, &xf, sizeof(xf));
2973  if (error)
2974  break;
2975  }
2976  FILEDESC_SUNLOCK(fdp);
2977  fddrop(fdp);
2978  if (error)
2979  break;
2980  }
2981  sx_sunlock(&allproc_lock);
2982  return (error);
2983 }
2984 
2985 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
2986  0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
2987 
2988 #ifdef KINFO_OFILE_SIZE
2989 CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
2990 #endif
2991 
2992 #ifdef COMPAT_FREEBSD7
2993 static int
2994 export_vnode_for_osysctl(struct vnode *vp, int type,
2995  struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req)
2996 {
2997  int error;
2998  char *fullpath, *freepath;
2999  int vfslocked;
3000 
3001  bzero(kif, sizeof(*kif));
3002  kif->kf_structsize = sizeof(*kif);
3003 
3004  vref(vp);
3005  kif->kf_fd = type;
3006  kif->kf_type = KF_TYPE_VNODE;
3007  /* This function only handles directories. */
3008  if (vp->v_type != VDIR) {
3009  vrele(vp);
3010  return (ENOTDIR);
3011  }
3012  kif->kf_vnode_type = KF_VTYPE_VDIR;
3013 
3014  /*
3015  * This is not a true file descriptor, so we set a bogus refcount
3016  * and offset to indicate these fields should be ignored.
3017  */
3018  kif->kf_ref_count = -1;
3019  kif->kf_offset = -1;
3020 
3021  freepath = NULL;
3022  fullpath = "-";
3023  FILEDESC_SUNLOCK(fdp);
3024  vn_fullpath(curthread, vp, &fullpath, &freepath);
3025  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
3026  vrele(vp);
3027  VFS_UNLOCK_GIANT(vfslocked);
3028  strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
3029  if (freepath != NULL)
3030  free(freepath, M_TEMP);
3031  error = SYSCTL_OUT(req, kif, sizeof(*kif));
3032  FILEDESC_SLOCK(fdp);
3033  return (error);
3034 }
3035 
3036 /*
3037  * Get per-process file descriptors for use by procstat(1), et al.
3038  */
3039 static int
3040 sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
3041 {
3042  char *fullpath, *freepath;
3043  struct kinfo_ofile *kif;
3044  struct filedesc *fdp;
3045  int error, i, *name;
3046  struct shmfd *shmfd;
3047  struct socket *so;
3048  struct vnode *vp;
3049  struct ksem *ks;
3050  struct file *fp;
3051  struct proc *p;
3052  struct tty *tp;
3053  int vfslocked;
3054 
3055  name = (int *)arg1;
3056  error = pget((pid_t)name[0], PGET_CANDEBUG, &p);
3057  if (error != 0)
3058  return (error);
3059  fdp = fdhold(p);
3060  PROC_UNLOCK(p);
3061  if (fdp == NULL)
3062  return (ENOENT);
3063  kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
3064  FILEDESC_SLOCK(fdp);
3065  if (fdp->fd_cdir != NULL)
3066  export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif,
3067  fdp, req);
3068  if (fdp->fd_rdir != NULL)
3069  export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif,
3070  fdp, req);
3071  if (fdp->fd_jdir != NULL)
3072  export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif,
3073  fdp, req);
3074  for (i = 0; i < fdp->fd_nfiles; i++) {
3075  if ((fp = fdp->fd_ofiles[i]) == NULL)
3076  continue;
3077  bzero(kif, sizeof(*kif));
3078  kif->kf_structsize = sizeof(*kif);
3079  ks = NULL;
3080  vp = NULL;
3081  so = NULL;
3082  tp = NULL;
3083  shmfd = NULL;
3084  kif->kf_fd = i;
3085 
3086 #ifdef CAPABILITIES
3087  /*
3088  * When reporting a capability, most fields will be from the
3089  * underlying object, but do mark as a capability. With
3090  * ofiledesc, we don't have a field to export the cap_rights_t,
3091  * but we do with the new filedesc.
3092  */
3093  if (fp->f_type == DTYPE_CAPABILITY) {
3094  kif->kf_flags |= KF_FLAG_CAPABILITY;
3095  (void)cap_funwrap(fp, 0, &fp);
3096  }
3097 #else
3098  KASSERT(fp->f_type != DTYPE_CAPABILITY,
3099  ("sysctl_kern_proc_ofiledesc: saw capability"));
3100 #endif
3101  switch (fp->f_type) {
3102  case DTYPE_VNODE:
3103  kif->kf_type = KF_TYPE_VNODE;
3104  vp = fp->f_vnode;
3105  break;
3106 
3107  case DTYPE_SOCKET:
3108  kif->kf_type = KF_TYPE_SOCKET;
3109  so = fp->f_data;
3110  break;
3111 
3112  case DTYPE_PIPE:
3113  kif->kf_type = KF_TYPE_PIPE;
3114  break;
3115 
3116  case DTYPE_FIFO:
3117  kif->kf_type = KF_TYPE_FIFO;
3118  vp = fp->f_vnode;
3119  break;
3120 
3121  case DTYPE_KQUEUE:
3122  kif->kf_type = KF_TYPE_KQUEUE;
3123  break;
3124 
3125  case DTYPE_CRYPTO:
3126  kif->kf_type = KF_TYPE_CRYPTO;
3127  break;
3128 
3129  case DTYPE_MQUEUE:
3130  kif->kf_type = KF_TYPE_MQUEUE;
3131  break;
3132 
3133  case DTYPE_SHM:
3134  kif->kf_type = KF_TYPE_SHM;
3135  shmfd = fp->f_data;
3136  break;
3137 
3138  case DTYPE_SEM:
3139  kif->kf_type = KF_TYPE_SEM;
3140  ks = fp->f_data;
3141  break;
3142 
3143  case DTYPE_PTS:
3144  kif->kf_type = KF_TYPE_PTS;
3145  tp = fp->f_data;
3146  break;
3147 
3148 #ifdef PROCDESC
3149  case DTYPE_PROCDESC:
3150  kif->kf_type = KF_TYPE_PROCDESC;
3151  break;
3152 #endif
3153 
3154  default:
3155  kif->kf_type = KF_TYPE_UNKNOWN;
3156  break;
3157  }
3158  kif->kf_ref_count = fp->f_count;
3159  if (fp->f_flag & FREAD)
3160  kif->kf_flags |= KF_FLAG_READ;
3161  if (fp->f_flag & FWRITE)
3162  kif->kf_flags |= KF_FLAG_WRITE;
3163  if (fp->f_flag & FAPPEND)
3164  kif->kf_flags |= KF_FLAG_APPEND;
3165  if (fp->f_flag & FASYNC)
3166  kif->kf_flags |= KF_FLAG_ASYNC;
3167  if (fp->f_flag & FFSYNC)
3168  kif->kf_flags |= KF_FLAG_FSYNC;
3169  if (fp->f_flag & FNONBLOCK)
3170  kif->kf_flags |= KF_FLAG_NONBLOCK;
3171  if (fp->f_flag & O_DIRECT)
3172  kif->kf_flags |= KF_FLAG_DIRECT;
3173  if (fp->f_flag & FHASLOCK)
3174  kif->kf_flags |= KF_FLAG_HASLOCK;
3175  kif->kf_offset = foffset_get(fp);
3176  if (vp != NULL) {
3177  vref(vp);
3178  switch (vp->v_type) {
3179  case VNON:
3180  kif->kf_vnode_type = KF_VTYPE_VNON;
3181  break;
3182  case VREG:
3183  kif->kf_vnode_type = KF_VTYPE_VREG;
3184  break;
3185  case VDIR:
3186  kif->kf_vnode_type = KF_VTYPE_VDIR;
3187  break;
3188  case VBLK:
3189  kif->kf_vnode_type = KF_VTYPE_VBLK;
3190  break;
3191  case VCHR:
3192  kif->kf_vnode_type = KF_VTYPE_VCHR;
3193  break;
3194  case VLNK:
3195  kif->kf_vnode_type = KF_VTYPE_VLNK;
3196  break;
3197  case VSOCK:
3198  kif->kf_vnode_type = KF_VTYPE_VSOCK;
3199  break;
3200  case VFIFO:
3201  kif->kf_vnode_type = KF_VTYPE_VFIFO;
3202  break;
3203  case VBAD:
3204  kif->kf_vnode_type = KF_VTYPE_VBAD;
3205  break;
3206  default:
3207  kif->kf_vnode_type = KF_VTYPE_UNKNOWN;
3208  break;
3209  }
3210  /*
3211  * It is OK to drop the filedesc lock here as we will
3212  * re-validate and re-evaluate its properties when
3213  * the loop continues.
3214  */
3215  freepath = NULL;
3216  fullpath = "-";
3217  FILEDESC_SUNLOCK(fdp);
3218  vn_fullpath(curthread, vp, &fullpath, &freepath);
3219  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
3220  vrele(vp);
3221  VFS_UNLOCK_GIANT(vfslocked);
3222  strlcpy(kif->kf_path, fullpath,
3223  sizeof(kif->kf_path));
3224  if (freepath != NULL)
3225  free(freepath, M_TEMP);
3226  FILEDESC_SLOCK(fdp);
3227  }
3228  if (so != NULL) {
3229  struct sockaddr *sa;
3230 
3231  if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa)
3232  == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
3233  bcopy(sa, &kif->kf_sa_local, sa->sa_len);
3234  free(sa, M_SONAME);
3235  }
3236  if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa)
3237  == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
3238  bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
3239  free(sa, M_SONAME);
3240  }
3241  kif->kf_sock_domain =
3242  so->so_proto->pr_domain->dom_family;
3243  kif->kf_sock_type = so->so_type;
3244  kif->kf_sock_protocol = so->so_proto->pr_protocol;
3245  }
3246  if (tp != NULL) {
3247  strlcpy(kif->kf_path, tty_devname(tp),
3248  sizeof(kif->kf_path));
3249  }
3250  if (shmfd != NULL)
3251  shm_path(shmfd, kif->kf_path, sizeof(kif->kf_path));
3252  if (ks != NULL && ksem_info != NULL)
3253  ksem_info(ks, kif->kf_path, sizeof(kif->kf_path), NULL);
3254  error = SYSCTL_OUT(req, kif, sizeof(*kif));
3255  if (error)
3256  break;
3257  }
3258  FILEDESC_SUNLOCK(fdp);
3259  fddrop(fdp);
3260  free(kif, M_TEMP);
3261  return (0);
3262 }
3263 
3264 static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc, CTLFLAG_RD,
3265  sysctl_kern_proc_ofiledesc, "Process ofiledesc entries");
3266 #endif /* COMPAT_FREEBSD7 */
3267 
3268 #ifdef KINFO_FILE_SIZE
3269 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
3270 #endif
3271 
3272 struct export_fd_buf {
3273  struct filedesc *fdp;
3274  struct sbuf *sb;
3275  ssize_t remainder;
3276  struct kinfo_file kif;
3277 };
3278 
3279 static int
3280 export_fd_to_sb(void *data, int type, int fd, int fflags, int refcnt,
3281  int64_t offset, int fd_is_cap, cap_rights_t fd_cap_rights,
3282  struct export_fd_buf *efbuf)
3283 {
3284  struct {
3285  int fflag;
3286  int kf_fflag;
3287  } fflags_table[] = {
3288  { FAPPEND, KF_FLAG_APPEND },
3289  { FASYNC, KF_FLAG_ASYNC },
3290  { FFSYNC, KF_FLAG_FSYNC },
3291  { FHASLOCK, KF_FLAG_HASLOCK },
3292  { FNONBLOCK, KF_FLAG_NONBLOCK },
3293  { FREAD, KF_FLAG_READ },
3294  { FWRITE, KF_FLAG_WRITE },
3295  { O_CREAT, KF_FLAG_CREAT },
3296  { O_DIRECT, KF_FLAG_DIRECT },
3297  { O_EXCL, KF_FLAG_EXCL },
3298  { O_EXEC, KF_FLAG_EXEC },
3299  { O_EXLOCK, KF_FLAG_EXLOCK },
3300  { O_NOFOLLOW, KF_FLAG_NOFOLLOW },
3301  { O_SHLOCK, KF_FLAG_SHLOCK },
3302  { O_TRUNC, KF_FLAG_TRUNC }
3303  };
3304 #define NFFLAGS (sizeof(fflags_table) / sizeof(*fflags_table))
3305  struct kinfo_file *kif;
3306  struct vnode *vp;
3307  int error, locked, vfslocked;
3308  unsigned int i;
3309 
3310  if (efbuf->remainder == 0)
3311  return (0);
3312  kif = &efbuf->kif;
3313  bzero(kif, sizeof(*kif));
3314  locked = efbuf->fdp != NULL;
3315  switch (type) {
3316  case KF_TYPE_FIFO:
3317  case KF_TYPE_VNODE:
3318  if (locked) {
3319  FILEDESC_SUNLOCK(efbuf->fdp);
3320  locked = 0;
3321  }
3322  vp = (struct vnode *)data;
3323  error = fill_vnode_info(vp, kif);
3324  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
3325  vrele(vp);
3326  VFS_UNLOCK_GIANT(vfslocked);
3327  break;
3328  case KF_TYPE_SOCKET:
3329  error = fill_socket_info((struct socket *)data, kif);
3330  break;
3331  case KF_TYPE_PIPE:
3332  error = fill_pipe_info((struct pipe *)data, kif);
3333  break;
3334  case KF_TYPE_PTS:
3335  error = fill_pts_info((struct tty *)data, kif);
3336  break;
3337  case KF_TYPE_PROCDESC:
3338  error = fill_procdesc_info((struct procdesc *)data, kif);
3339  break;
3340  case KF_TYPE_SEM:
3341  error = fill_sem_info((struct file *)data, kif);
3342  break;
3343  case KF_TYPE_SHM:
3344  error = fill_shm_info((struct file *)data, kif);
3345  break;
3346  default:
3347  error = 0;
3348  }
3349  if (error == 0)
3350  kif->kf_status |= KF_ATTR_VALID;
3351 
3352  /*
3353  * Translate file access flags.
3354  */
3355  for (i = 0; i < NFFLAGS; i++)
3356  if (fflags & fflags_table[i].fflag)
3357  kif->kf_flags |= fflags_table[i].kf_fflag;
3358  if (fd_is_cap)
3359  kif->kf_flags |= KF_FLAG_CAPABILITY;
3360  if (fd_is_cap)
3361  kif->kf_cap_rights = fd_cap_rights;
3362  kif->kf_fd = fd;
3363  kif->kf_type = type;
3364  kif->kf_ref_count = refcnt;
3365  kif->kf_offset = offset;
3366  /* Pack record size down */
3367  kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
3368  strlen(kif->kf_path) + 1;
3369  kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
3370  if (efbuf->remainder != -1) {
3371  if (efbuf->remainder < kif->kf_structsize) {
3372  /* Terminate export. */
3373  efbuf->remainder = 0;
3374  if (efbuf->fdp != NULL && !locked)
3375  FILEDESC_SLOCK(efbuf->fdp);
3376  return (0);
3377  }
3378  efbuf->remainder -= kif->kf_structsize;
3379  }
3380  if (locked)
3381  FILEDESC_SUNLOCK(efbuf->fdp);
3382  error = sbuf_bcat(efbuf->sb, kif, kif->kf_structsize);
3383  if (efbuf->fdp != NULL)
3384  FILEDESC_SLOCK(efbuf->fdp);
3385  return (error);
3386 }
3387 
3388 /*
3389  * Store a process's file descriptor information into an sbuf.
3390  *
3391  * Takes a locked proc as argument, and returns with the proc unlocked.
3392  */
3393 int
3394 kern_proc_filedesc_out(struct proc *p, struct sbuf *sb, ssize_t maxlen)
3395 {
3396  struct file *fp;
3397  struct filedesc *fdp;
3398  struct export_fd_buf *efbuf;
3399  struct vnode *cttyvp, *textvp, *tracevp;
3400  int64_t offset;
3401  void *data;
3402  int error, i;
3403  int fd_is_cap, type, refcnt, fflags;
3404  cap_rights_t fd_cap_rights;
3405 
3406  PROC_LOCK_ASSERT(p, MA_OWNED);
3407 
3408  /* ktrace vnode */
3409  tracevp = p->p_tracevp;
3410  if (tracevp != NULL)
3411  vref(tracevp);
3412  /* text vnode */
3413  textvp = p->p_textvp;
3414  if (textvp != NULL)
3415  vref(textvp);
3416  /* Controlling tty. */
3417  cttyvp = NULL;
3418  if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
3419  cttyvp = p->p_pgrp->pg_session->s_ttyvp;
3420  if (cttyvp != NULL)
3421  vref(cttyvp);
3422  }
3423  fdp = fdhold(p);
3424  PROC_UNLOCK(p);
3425  efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
3426  efbuf->fdp = NULL;
3427  efbuf->sb = sb;
3428  efbuf->remainder = maxlen;
3429  if (tracevp != NULL)
3430  export_fd_to_sb(tracevp, KF_TYPE_VNODE, KF_FD_TYPE_TRACE,
3431  FREAD | FWRITE, -1, -1, 0, 0, efbuf);
3432  if (textvp != NULL)
3433  export_fd_to_sb(textvp, KF_TYPE_VNODE, KF_FD_TYPE_TEXT,
3434  FREAD, -1, -1, 0, 0, efbuf);
3435  if (cttyvp != NULL)
3436  export_fd_to_sb(cttyvp, KF_TYPE_VNODE, KF_FD_TYPE_CTTY,
3437  FREAD | FWRITE, -1, -1, 0, 0, efbuf);
3438  error = 0;
3439  if (fdp == NULL)
3440  goto fail;
3441  efbuf->fdp = fdp;
3442  FILEDESC_SLOCK(fdp);
3443  /* working directory */
3444  if (fdp->fd_cdir != NULL) {
3445  vref(fdp->fd_cdir);
3446  data = fdp->fd_cdir;
3447  export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_CWD,
3448  FREAD, -1, -1, 0, 0, efbuf);
3449  }
3450  /* root directory */
3451  if (fdp->fd_rdir != NULL) {
3452  vref(fdp->fd_rdir);
3453  data = fdp->fd_rdir;
3454  export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_ROOT,
3455  FREAD, -1, -1, 0, 0, efbuf);
3456  }
3457  /* jail directory */
3458  if (fdp->fd_jdir != NULL) {
3459  vref(fdp->fd_jdir);
3460  data = fdp->fd_jdir;
3461  export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_JAIL,
3462  FREAD, -1, -1, 0, 0, efbuf);
3463  }
3464  for (i = 0; i < fdp->fd_nfiles; i++) {
3465  if ((fp = fdp->fd_ofiles[i]) == NULL)
3466  continue;
3467  data = NULL;
3468  fd_is_cap = 0;
3469  fd_cap_rights = 0;
3470 
3471 #ifdef CAPABILITIES
3472  /*
3473  * When reporting a capability, most fields will be from the
3474  * underlying object, but do mark as a capability and export
3475  * the capability rights mask.
3476  */
3477  if (fp->f_type == DTYPE_CAPABILITY) {
3478  fd_is_cap = 1;
3479  fd_cap_rights = cap_rights(fp);
3480  (void)cap_funwrap(fp, 0, &fp);
3481  }
3482 #else /* !CAPABILITIES */
3483  KASSERT(fp->f_type != DTYPE_CAPABILITY,
3484  ("sysctl_kern_proc_filedesc: saw capability"));
3485 #endif
3486  switch (fp->f_type) {
3487  case DTYPE_VNODE:
3488  type = KF_TYPE_VNODE;
3489  vref(fp->f_vnode);
3490  data = fp->f_vnode;
3491  break;
3492 
3493  case DTYPE_SOCKET:
3494  type = KF_TYPE_SOCKET;
3495  data = fp->f_data;
3496  break;
3497 
3498  case DTYPE_PIPE:
3499  type = KF_TYPE_PIPE;
3500  data = fp->f_data;
3501  break;
3502 
3503  case DTYPE_FIFO:
3504  type = KF_TYPE_FIFO;
3505  vref(fp->f_vnode);
3506  data = fp->f_vnode;
3507  break;
3508 
3509  case DTYPE_KQUEUE:
3510  type = KF_TYPE_KQUEUE;
3511  break;
3512 
3513  case DTYPE_CRYPTO:
3514  type = KF_TYPE_CRYPTO;
3515  break;
3516 
3517  case DTYPE_MQUEUE:
3518  type = KF_TYPE_MQUEUE;
3519  break;
3520 
3521  case DTYPE_SHM:
3522  type = KF_TYPE_SHM;
3523  data = fp;
3524  break;
3525 
3526  case DTYPE_SEM:
3527  type = KF_TYPE_SEM;
3528  data = fp;
3529  break;
3530 
3531  case DTYPE_PTS:
3532  type = KF_TYPE_PTS;
3533  data = fp->f_data;
3534  break;
3535 
3536 #ifdef PROCDESC
3537  case DTYPE_PROCDESC:
3538  type = KF_TYPE_PROCDESC;
3539  data = fp->f_data;
3540  break;
3541 #endif
3542 
3543  default:
3544  type = KF_TYPE_UNKNOWN;
3545  break;
3546  }
3547  refcnt = fp->f_count;
3548  fflags = fp->f_flag;
3549  offset = foffset_get(fp);
3550 
3551  /*
3552  * Create sysctl entry.
3553  * It is OK to drop the filedesc lock here as we will
3554  * re-validate and re-evaluate its properties when
3555  * the loop continues.
3556  */
3557  error = export_fd_to_sb(data, type, i, fflags, refcnt,
3558  offset, fd_is_cap, fd_cap_rights, efbuf);
3559  if (error)
3560  break;
3561  }
3562  FILEDESC_SUNLOCK(fdp);
3563  fddrop(fdp);
3564 fail:
3565  free(efbuf, M_TEMP);
3566  return (error);
3567 }
3568 
3569 #define FILEDESC_SBUF_SIZE (sizeof(struct kinfo_file) * 5)
3570 
3571 /*
3572  * Get per-process file descriptors for use by procstat(1), et al.
3573  */
3574 static int
3575 sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
3576 {
3577  struct sbuf sb;
3578  struct proc *p;
3579  ssize_t maxlen;
3580  int error, error2, *name;
3581 
3582  name = (int *)arg1;
3583 
3584  sbuf_new_for_sysctl(&sb, NULL, FILEDESC_SBUF_SIZE, req);
3585  error = pget((pid_t)name[0], PGET_CANDEBUG, &p);
3586  if (error != 0) {
3587  sbuf_delete(&sb);
3588  return (error);
3589  }
3590  maxlen = req->oldptr != NULL ? req->oldlen : -1;
3591  error = kern_proc_filedesc_out(p, &sb, maxlen);
3592  error2 = sbuf_finish(&sb);
3593  sbuf_delete(&sb);
3594  return (error != 0 ? error : error2);
3595 }
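
A userland sketch of consuming the records emitted above via the kern.proc.filedesc sysctl: each struct kinfo_file begins with kf_structsize, so the buffer is walked one variable-length record at a time. The buffer sizing headroom is illustrative.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdlib.h>

static int
example_list_fds(pid_t pid)
{
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid };
	struct kinfo_file *kif;
	size_t len;
	char *buf, *p;

	len = 0;
	if (sysctl(mib, 4, NULL, &len, NULL, 0) != 0)
		return (-1);
	len = len * 4 / 3;			/* headroom for table growth */
	if ((buf = malloc(len)) == NULL)
		return (-1);
	if (sysctl(mib, 4, buf, &len, NULL, 0) != 0) {
		free(buf);
		return (-1);
	}
	for (p = buf; p < buf + len; p += kif->kf_structsize) {
		kif = (struct kinfo_file *)(void *)p;
		if (kif->kf_structsize == 0)
			break;
		printf("fd %d type %d %s\n", kif->kf_fd, kif->kf_type,
		    kif->kf_path);
	}
	free(buf);
	return (0);
}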
3596 
3597 int
3598 vntype_to_kinfo(int vtype)
3599 {
3600  struct {
3601  int vtype;
3602  int kf_vtype;
3603  } vtypes_table[] = {
3604  { VBAD, KF_VTYPE_VBAD },
3605  { VBLK, KF_VTYPE_VBLK },
3606  { VCHR, KF_VTYPE_VCHR },
3607  { VDIR, KF_VTYPE_VDIR },
3608  { VFIFO, KF_VTYPE_VFIFO },
3609  { VLNK, KF_VTYPE_VLNK },
3610  { VNON, KF_VTYPE_VNON },
3611  { VREG, KF_VTYPE_VREG },
3612  { VSOCK, KF_VTYPE_VSOCK }
3613  };
3614 #define NVTYPES (sizeof(vtypes_table) / sizeof(*vtypes_table))
3615  unsigned int i;
3616 
3617  /*
3618  * Perform vtype translation.
3619  */
3620  for (i = 0; i < NVTYPES; i++)
3621  if (vtypes_table[i].vtype == vtype)
3622  break;
3623  if (i < NVTYPES)
3624  return (vtypes_table[i].kf_vtype);
3625 
3626  return (KF_VTYPE_UNKNOWN);
3627 }
3628 
3629 static int
3630 fill_vnode_info(struct vnode *vp, struct kinfo_file *kif)
3631 {
3632  struct vattr va;
3633  char *fullpath, *freepath;
3634  int error, vfslocked;
3635 
3636  if (vp == NULL)
3637  return (1);
3638  kif->kf_vnode_type = vntype_to_kinfo(vp->v_type);
3639  freepath = NULL;
3640  fullpath = "-";
3641  error = vn_fullpath(curthread, vp, &fullpath, &freepath);
3642  if (error == 0) {
3643  strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
3644  }
3645  if (freepath != NULL)
3646  free(freepath, M_TEMP);
3647 
3648  /*
3649  * Retrieve vnode attributes.
3650  */
3651  va.va_fsid = VNOVAL;
3652  va.va_rdev = NODEV;
3653  vfslocked = VFS_LOCK_GIANT(vp->v_mount);
3654  vn_lock(vp, LK_SHARED | LK_RETRY);
3655  error = VOP_GETATTR(vp, &va, curthread->td_ucred);
3656  VOP_UNLOCK(vp, 0);
3657  VFS_UNLOCK_GIANT(vfslocked);
3658  if (error != 0)
3659  return (error);
3660  if (va.va_fsid != VNOVAL)
3661  kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
3662  else
3663  kif->kf_un.kf_file.kf_file_fsid =
3664  vp->v_mount->mnt_stat.f_fsid.val[0];
3665  kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
3666  kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
3667  kif->kf_un.kf_file.kf_file_size = va.va_size;
3668  kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
3669  return (0);
3670 }
3671 
3672 static int
3673 fill_socket_info(struct socket *so, struct kinfo_file *kif)
3674 {
3675  struct sockaddr *sa;
3676  struct inpcb *inpcb;
3677  struct unpcb *unpcb;
3678  int error;
3679 
3680  if (so == NULL)
3681  return (1);
3682  kif->kf_sock_domain = so->so_proto->pr_domain->dom_family;
3683  kif->kf_sock_type = so->so_type;
3684  kif->kf_sock_protocol = so->so_proto->pr_protocol;
3685  kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
3686  switch(kif->kf_sock_domain) {
3687  case AF_INET:
3688  case AF_INET6:
3689  if (kif->kf_sock_protocol == IPPROTO_TCP) {
3690  if (so->so_pcb != NULL) {
3691  inpcb = (struct inpcb *)(so->so_pcb);
3692  kif->kf_un.kf_sock.kf_sock_inpcb =
3693  (uintptr_t)inpcb->inp_ppcb;
3694  }
3695  }
3696  break;
3697  case AF_UNIX:
3698  if (so->so_pcb != NULL) {
3699  unpcb = (struct unpcb *)(so->so_pcb);
3700  if (unpcb->unp_conn) {
3701  kif->kf_un.kf_sock.kf_sock_unpconn =
3702  (uintptr_t)unpcb->unp_conn;
3703  kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
3704  so->so_rcv.sb_state;
3705  kif->kf_un.kf_sock.kf_sock_snd_sb_state =
3706  so->so_snd.sb_state;
3707  }
3708  }
3709  break;
3710  }
3711  error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
3712  if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
3713  bcopy(sa, &kif->kf_sa_local, sa->sa_len);
3714  free(sa, M_SONAME);
3715  }
3716  error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
3717  if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
3718  bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
3719  free(sa, M_SONAME);
3720  }
3721  strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
3722  sizeof(kif->kf_path));
3723  return (0);
3724 }
3725 
3726 static int
3727 fill_pts_info(struct tty *tp, struct kinfo_file *kif)
3728 {
3729 
3730  if (tp == NULL)
3731  return (1);
3732  kif->kf_un.kf_pts.kf_pts_dev = tty_udev(tp);
3733  strlcpy(kif->kf_path, tty_devname(tp), sizeof(kif->kf_path));
3734  return (0);
3735 }
3736 
3737 static int
3738 fill_pipe_info(struct pipe *pi, struct kinfo_file *kif)
3739 {
3740 
3741  if (pi == NULL)
3742  return (1);
3743  kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
3744  kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
3745  kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
3746  return (0);
3747 }
3748 
3749 static int
3750 fill_procdesc_info(struct procdesc *pdp, struct kinfo_file *kif)
3751 {
3752 
3753  if (pdp == NULL)
3754  return (1);
3755  kif->kf_un.kf_proc.kf_pid = pdp->pd_pid;
3756  return (0);
3757 }
3758 
3759 static int
3760 fill_sem_info(struct file *fp, struct kinfo_file *kif)
3761 {
3762  struct thread *td;
3763  struct stat sb;
3764 
3765  td = curthread;
3766  if (fp->f_data == NULL)
3767  return (1);
3768  if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
3769  return (1);
3770  if (ksem_info == NULL)
3771  return (1);
3772  ksem_info(fp->f_data, kif->kf_path, sizeof(kif->kf_path),
3773  &kif->kf_un.kf_sem.kf_sem_value);
3774  kif->kf_un.kf_sem.kf_sem_mode = sb.st_mode;
3775  return (0);
3776 }
3777 
3778 static int
3779 fill_shm_info(struct file *fp, struct kinfo_file *kif)
3780 {
3781  struct thread *td;
3782  struct stat sb;
3783 
3784  td = curthread;
3785  if (fp->f_data == NULL)
3786  return (1);
3787  if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
3788  return (1);
3789  shm_path(fp->f_data, kif->kf_path, sizeof(kif->kf_path));
3790  kif->kf_un.kf_file.kf_file_mode = sb.st_mode;
3791  kif->kf_un.kf_file.kf_file_size = sb.st_size;
3792  return (0);
3793 }
3794 
3795 static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc, CTLFLAG_RD,
3796  sysctl_kern_proc_filedesc, "Process filedesc entries");
3797 
3798 #ifdef DDB
3799 /*
3800  * For the purposes of debugging, generate a human-readable string for the
3801  * file type.
3802  */
3803 static const char *
3804 file_type_to_name(short type)
3805 {
3806 
3807  switch (type) {
3808  case 0:
3809  return ("zero");
3810  case DTYPE_VNODE:
3811  return ("vnod");
3812  case DTYPE_SOCKET:
3813  return ("sock");
3814  case DTYPE_PIPE:
3815  return ("pipe");
3816  case DTYPE_FIFO:
3817  return ("fifo");
3818  case DTYPE_KQUEUE:
3819  return ("kque");
3820  case DTYPE_CRYPTO:
3821  return ("crpt");
3822  case DTYPE_MQUEUE:
3823  return ("mque");
3824  case DTYPE_SHM:
3825  return ("shm");
3826  case DTYPE_SEM:
3827  return ("ksem");
3828  default:
3829  return ("unkn");
3830  }
3831 }
3832 
3833 /*
3834  * For the purposes of debugging, identify a process (if any, perhaps one of
3835  * many) that references the passed file in its file descriptor array. Return
3836  * NULL if none.
3837  */
3838 static struct proc *
3839 file_to_first_proc(struct file *fp)
3840 {
3841  struct filedesc *fdp;
3842  struct proc *p;
3843  int n;
3844 
3845  FOREACH_PROC_IN_SYSTEM(p) {
3846  if (p->p_state == PRS_NEW)
3847  continue;
3848  fdp = p->p_fd;
3849  if (fdp == NULL)
3850  continue;
3851  for (n = 0; n < fdp->fd_nfiles; n++) {
3852  if (fp == fdp->fd_ofiles[n])
3853  return (p);
3854  }
3855  }
3856  return (NULL);
3857 }
3858 
3859 static void
3860 db_print_file(struct file *fp, int header)
3861 {
3862  struct proc *p;
3863 
3864  if (header)
3865  db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n",
3866  "File", "Type", "Data", "Flag", "GCFl", "Count",
3867  "MCount", "Vnode", "FPID", "FCmd");
3868  p = file_to_first_proc(fp);
3869  db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp,
3870  file_type_to_name(fp->f_type), fp->f_data, fp->f_flag,
3871  0, fp->f_count, 0, fp->f_vnode,
3872  p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
3873 }
3874 
3875 DB_SHOW_COMMAND(file, db_show_file)
3876 {
3877  struct file *fp;
3878 
3879  if (!have_addr) {
3880  db_printf("usage: show file <addr>\n");
3881  return;
3882  }
3883  fp = (struct file *)addr;
3884  db_print_file(fp, 1);
3885 }
3886 
3887 DB_SHOW_COMMAND(files, db_show_files)
3888 {
3889  struct filedesc *fdp;
3890  struct file *fp;
3891  struct proc *p;
3892  int header;
3893  int n;
3894 
3895  header = 1;
3896  FOREACH_PROC_IN_SYSTEM(p) {
3897  if (p->p_state == PRS_NEW)
3898  continue;
3899  if ((fdp = p->p_fd) == NULL)
3900  continue;
3901  for (n = 0; n < fdp->fd_nfiles; ++n) {
3902  if ((fp = fdp->fd_ofiles[n]) == NULL)
3903  continue;
3904  db_print_file(fp, header);
3905  header = 0;
3906  }
3907  }
3908 }
3909 #endif
3910 
3911 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
3912  &maxfilesperproc, 0, "Maximum files allowed open per process");
3913 
3914 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
3915  &maxfiles, 0, "Maximum number of files");
3916 
3917 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
3918  __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files");
3919 
3920 /* ARGSUSED*/
3921 static void
3922 filelistinit(void *dummy)
3923 {
3924 
3925  file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
3926  NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
3927  mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
3928  mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF);
3929 }
3930 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
3931 
3932 /*-------------------------------------------------------------------*/
3933 
3934 static int
3935 badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
3936  int flags, struct thread *td)
3937 {
3938 
3939  return (EBADF);
3940 }
3941 
3942 static int
3943 badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
3944  struct thread *td)
3945 {
3946 
3947  return (EINVAL);
3948 }
3949 
3950 static int
3951 badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
3952  struct thread *td)
3953 {
3954 
3955  return (EBADF);
3956 }
3957 
3958 static int
3959 badfo_poll(struct file *fp, int events, struct ucred *active_cred,
3960  struct thread *td)
3961 {
3962 
3963  return (0);
3964 }
3965 
3966 static int
3967 badfo_kqfilter(struct file *fp, struct knote *kn)
3968 {
3969 
3970  return (EBADF);
3971 }
3972 
3973 static int
3974 badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
3975  struct thread *td)
3976 {
3977 
3978  return (EBADF);
3979 }
3980 
3981 static int
3982 badfo_close(struct file *fp, struct thread *td)
3983 {
3984 
3985  return (EBADF);
3986 }
3987 
3988 static int
3989 badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
3990  struct thread *td)
3991 {
3992 
3993  return (EBADF);
3994 }
3995 
3996 static int
3997 badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
3998  struct thread *td)
3999 {
4000 
4001  return (EBADF);
4002 }
4003 
4004 struct fileops badfileops = {
4005  .fo_read = badfo_readwrite,
4006  .fo_write = badfo_readwrite,
4007  .fo_truncate = badfo_truncate,
4008  .fo_ioctl = badfo_ioctl,
4009  .fo_poll = badfo_poll,
4010  .fo_kqfilter = badfo_kqfilter,
4011  .fo_stat = badfo_stat,
4012  .fo_close = badfo_close,
4013  .fo_chmod = badfo_chmod,
4014  .fo_chown = badfo_chown,
4015 };
4016 
4017 int
4018 invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
4019  struct thread *td)
4020 {
4021 
4022  return (EINVAL);
4023 }
4024 
4025 int
4026 invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
4027  struct thread *td)
4028 {
4029 
4030  return (EINVAL);
4031 }
4032 
4033 /*-------------------------------------------------------------------*/
4034 
4035 /*
4036  * File Descriptor pseudo-device driver (/dev/fd/).
4037  *
4038  * Opening minor device N dup()s the file (if any) connected to file
4039  * descriptor N belonging to the calling process. Note that this driver
4040  * consists of only the ``open()'' routine, because all subsequent
4041  * references to this file will be direct to the other driver.
4042  *
4043  * XXX: we could give this one a cloning event handler if necessary.
4044  */
4045 
4046 /* ARGSUSED */
4047 static int
4048 fdopen(struct cdev *dev, int mode, int type, struct thread *td)
4049 {
4050 
4051  /*
4052  * XXX Kludge: set curthread->td_dupfd to contain the value of the
4053  * the file descriptor being sought for duplication. The error
4054  * return ensures that the vnode for this device will be released
4055  * by vn_open. Open will detect this special error and take the
4056  * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
4057  * will simply report the error.
4058  */
4059  td->td_dupfd = dev2unit(dev);
4060  return (ENODEV);
4061 }
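
A userland sketch of the effect of the ENODEV/dupfdopen() path above: opening /dev/fd/N yields a new descriptor referring to the caller's descriptor N.

#include <fcntl.h>

/* Roughly equivalent to dup(0) when stdin is open for reading. */
static int
example_reopen_stdin(void)
{
	return (open("/dev/fd/0", O_RDONLY));
}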
4062 
4063 static struct cdevsw fildesc_cdevsw = {
4064  .d_version = D_VERSION,
4065  .d_open = fdopen,
4066  .d_name = "FD",
4067 };
4068 
4069 static void
4070 fildesc_drvinit(void *unused)
4071 {
4072  struct cdev *dev;
4073 
4074  dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
4075  UID_ROOT, GID_WHEEL, 0666, "fd/0");
4076  make_dev_alias(dev, "stdin");
4077  dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
4078  UID_ROOT, GID_WHEEL, 0666, "fd/1");
4079  make_dev_alias(dev, "stdout");
4080  dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
4081  UID_ROOT, GID_WHEEL, 0666, "fd/2");
4082  make_dev_alias(dev, "stderr");
4083 }
4084 
4085 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
int kern_open(struct thread *td, char *path, enum uio_seg pathseg, int flags, int mode)
static int fdisused(struct filedesc *fdp, int fd)
Definition: kern_descrip.c:250
int closef(struct file *fp, struct thread *td)
static int fill_pipe_info(struct pipe *pi, struct kinfo_file *kif)
int falloc_noinstall(struct thread *td, struct file **resultfp)
struct file ** ft_table
Definition: kern_descrip.c:162
struct prison prison0
Definition: kern_jail.c:99
void crfree(struct ucred *cr)
Definition: kern_prot.c:1835
static void filelistinit(void *dummy)
int mask
Definition: subr_acl_nfs4.c:67
int sys_fcntl(struct thread *td, struct fcntl_args *uap)
Definition: kern_descrip.c:369
static int badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td)
#define NDBIT(x)
Definition: kern_descrip.c:150
#define DUP_FCNTL
Definition: kern_descrip.c:118
struct nstat * sb
struct pgrp * pgfind(pid_t pgid)
Definition: kern_proc.c:342
static int badfo_kqfilter(struct file *fp, struct knote *kn)
int fget_mmap(struct thread *td, int fd, cap_rights_t rights, u_char *maxprotp, struct file **fpp)
u_int fd
Definition: kern_descrip.c:346
void fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
int fdcheckstd(struct thread *td)
static int export_fd_to_sb(void *data, int type, int fd, int fflags, int refcnt, int64_t offset, int fd_is_cap, cap_rights_t fd_cap_rights, struct export_fd_buf *efbuf)
static int fill_vnode_info(struct vnode *vp, struct kinfo_file *kif)
static int fdopen(struct cdev *dev, int mode, int type, struct thread *td)
struct ucred * crhold(struct ucred *cr)
Definition: kern_prot.c:1824
static void fddrop(struct filedesc *fdp)
int cap_funwrap_mmap(struct file *fp_cap, cap_rights_t rights, u_char *maxprotp, struct file **fpp)
struct sx allprison_lock
Definition: kern_jail.c:120
static void fdused(struct filedesc *fdp, int fd)
Definition: kern_descrip.c:261
void free(void *addr, struct malloc_type *mtp)
Definition: kern_malloc.c:554
static int fill_pts_info(struct tty *tp, struct kinfo_file *kif)
int printf(const char *fmt,...)
Definition: subr_prf.c:367
void fdfree(struct thread *td)
int sys_close(struct thread *td, struct close_args *uap)
void sbuf_delete(struct sbuf *s)
Definition: subr_sbuf.c:753
int fgetsock(struct thread *td, int fd, cap_rights_t rights, struct socket **spp, u_int *fflagp)
static struct file * fdtofp(int fd, struct filedesc *fdp)
Definition: kern_descrip.c:437
linker_file_t * result
Definition: linker_if.m:136
void cvtnstat(struct stat *sb, struct nstat *nsb)
static int fill_shm_info(struct file *fp, struct kinfo_file *kif)
#define DUP_FIXED
Definition: kern_descrip.c:117
int sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
Definition: kern_sysctl.c:1364
void finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
struct filedesc * fdcopy(struct filedesc *fdp)
void mtx_init(struct mtx *m, const char *name, const char *type, int opts)
Definition: kern_mutex.c:837
void shm_path(struct shmfd *shmfd, char *path, size_t size)
Definition: uipc_shm.c:864
void wakeup(void *ident)
Definition: kern_synch.c:378
struct stat * sb
void vrele(struct vnode *vp)
Definition: vfs_subr.c:2416
int sys_nfstat(struct thread *td, struct nfstat_args *uap)
int sbuf_bcat(struct sbuf *s, const void *buf, size_t len)
Definition: subr_sbuf.c:389
int fget_read(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
int sbuf_finish(struct sbuf *s)
Definition: subr_sbuf.c:694
int pget(pid_t pid, int flags, struct proc **pp)
Definition: kern_proc.c:362
static int badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, struct thread *td)
#define NFFLAGS
#define NDSLOTSIZE
Definition: kern_descrip.c:147
int fgetvp_read(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
struct filedesc fd_fd
Definition: kern_descrip.c:171
static int sysctl_kern_file(SYSCTL_HANDLER_ARGS)
struct kinfo_file kif
#define NDFILE
Definition: kern_descrip.c:146
SLIST_HEAD(et_eventtimers_list, eventtimer)
static __inline int _fgetvp(struct thread *td, int fd, int flags, cap_rights_t needrights, cap_rights_t *haverightsp, struct vnode **vpp)
int sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
Definition: kern_descrip.c:305
struct sx proctree_lock
Definition: kern_proc.c:137
int fget_write(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
int fgetvp(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
static int fill_socket_info(struct socket *so, struct kinfo_file *kif)
static int fdunwrap(int fd, cap_rights_t rights, struct filedesc *fdp, struct file **fpp)
Definition: kern_descrip.c:449
static struct cdevsw fildesc_cdevsw
static int badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td)
uint64_t racct_get_limit(struct proc *p, int resource)
Definition: kern_racct.c:1249
int fdallocn(struct thread *td, int minfd, int *fds, int n)
static struct pollrec pr[POLL_LIST_LEN]
Definition: kern_poll.c:254
int p_cansee(struct thread *td, struct proc *p)
Definition: kern_prot.c:1426
static struct mtx fdesc_mtx
Definition: kern_descrip.c:193
int dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd, int mode, int error)
struct vnode * rootvnode
Definition: vfs_mountroot.c:96
int flag
static int fd_first_free(struct filedesc *, int, int)
Definition: kern_descrip.c:200
struct filedesc * fdshare(struct filedesc *fdp)
struct sbuf * sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length, struct sysctl_req *req)
Definition: kern_sysctl.c:1676
static struct filedesc * fdhold(struct proc *p)
int * count
Definition: cpufreq_if.m:63
static int getmaxfd(struct proc *p)
Definition: kern_descrip.c:813
u_int from
Definition: kern_descrip.c:328
struct fileops badfileops
__FBSDID("$BSDSUniX$")