#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
struct fileops vnops = {
        .fo_read = vn_io_fault,
        .fo_write = vn_io_fault,
        .fo_truncate = vn_truncate,
        .fo_ioctl = vn_ioctl,
        .fo_poll = vn_poll,
        .fo_kqfilter = vn_kqfilter,
        .fo_stat = vn_statfile,
        .fo_close = vn_closefile,
        .fo_chmod = vn_chmod,
        .fo_chown = vn_chown,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};
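/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */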
int
vn_open(ndp, flagp, cmode, fp)
        struct nameidata *ndp;
        int *flagp, cmode;
        struct file *fp;
{
        struct thread *td = ndp->ni_cnd.cn_thread;

        return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
        struct vnode *vp;
        struct mount *mp;
        struct thread *td = ndp->ni_cnd.cn_thread;
        struct vattr vat;
        struct vattr *vap = &vat;
        int fmode, error;
        accmode_t accmode;
        int vfslocked, mpsafe;

        mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
restart:
        vfslocked = 0;
        fmode = *flagp;
        if (fmode & O_CREAT) {
                ndp->ni_cnd.cn_nameiop = CREATE;
                ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
                    MPSAFE;
                if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
                        ndp->ni_cnd.cn_flags |= FOLLOW;
                if (!(vn_open_flags & VN_OPEN_NOAUDIT))
                        ndp->ni_cnd.cn_flags |= AUDITVNODE1;
                if ((error = namei(ndp)) != 0)
                        return (error);
                vfslocked = NDHASGIANT(ndp);
                ndp->ni_cnd.cn_flags &= ~MPSAFE;
                if (ndp->ni_vp == NULL) {
                        VATTR_NULL(vap);
                        vap->va_type = VREG;
                        vap->va_mode = cmode;
                        if (fmode & O_EXCL)
                                vap->va_vaflags |= VA_EXCLUSIVE;
                        if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
                                NDFREE(ndp, NDF_ONLY_PNBUF);
                                vput(ndp->ni_dvp);
                                VFS_UNLOCK_GIANT(vfslocked);
                                if ((error = vn_start_write(NULL, &mp,
                                    V_XSLEEP | PCATCH)) != 0)
                                        return (error);
                                goto restart;
                        }
                        error = mac_vnode_check_create(cred, ndp->ni_dvp,
                            &ndp->ni_cnd, vap);
                        if (error == 0)
                                error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
                                    &ndp->ni_cnd, vap);
                        vput(ndp->ni_dvp);
                        vn_finished_write(mp);
                        if (error) {
                                VFS_UNLOCK_GIANT(vfslocked);
                                NDFREE(ndp, NDF_ONLY_PNBUF);
                                return (error);
                        }
                        fmode &= ~O_TRUNC;
                        vp = ndp->ni_vp;
                } else {
                        if (ndp->ni_dvp == ndp->ni_vp)
                                vrele(ndp->ni_dvp);
                        else
                                vput(ndp->ni_dvp);
                        ndp->ni_dvp = NULL;
                        vp = ndp->ni_vp;
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                                goto bad;
                        }
                        fmode &= ~O_CREAT;
                }
        } else {
                ndp->ni_cnd.cn_nameiop = LOOKUP;
                ndp->ni_cnd.cn_flags = ISOPEN |
                    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
                    LOCKLEAF | MPSAFE;
                if (!(fmode & FWRITE))
                        ndp->ni_cnd.cn_flags |= LOCKSHARED;
                if (!(vn_open_flags & VN_OPEN_NOAUDIT))
                        ndp->ni_cnd.cn_flags |= AUDITVNODE1;
                if ((error = namei(ndp)) != 0)
                        return (error);
                ndp->ni_cnd.cn_flags &= ~MPSAFE;
                vfslocked = NDHASGIANT(ndp);
                vp = ndp->ni_vp;
        }
        if (vp->v_type == VLNK) {
                error = EMLINK;
                goto bad;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad;
        }
        if (vp->v_type != VDIR && fmode & O_DIRECTORY) {
                error = ENOTDIR;
                goto bad;
        }
        accmode = 0;
        if (fmode & (FWRITE | O_TRUNC)) {
                if (vp->v_type == VDIR) {
                        error = EISDIR;
                        goto bad;
                }
                accmode |= VWRITE;
        }
        if ((fmode & O_APPEND) && (fmode & FWRITE))
                accmode |= VAPPEND;
        error = mac_vnode_check_open(cred, vp, accmode);
        if (error)
                goto bad;
        if ((fmode & O_CREAT) == 0) {
                if (accmode & VWRITE) {
                        error = vn_writechk(vp);
                        if (error)
                                goto bad;
                }
                if (accmode) {
                        error = VOP_ACCESS(vp, accmode, cred, td);
                        if (error)
                                goto bad;
                }
        }
        if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
                vn_lock(vp, LK_UPGRADE | LK_RETRY);
        if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
                goto bad;

        if (fmode & FWRITE)
                VOP_ADD_WRITECOUNT(vp, 1);
        *flagp = fmode;
        ASSERT_VOP_LOCKED(vp, "vn_open_cred");
        if (!mpsafe)
                VFS_UNLOCK_GIANT(vfslocked);
        return (0);
bad:
        NDFREE(ndp, NDF_ONLY_PNBUF);
        vput(vp);
        VFS_UNLOCK_GIANT(vfslocked);
        *flagp = fmode;
        ndp->ni_vp = NULL;
        return (error);
}
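/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */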
int
vn_writechk(vp)
        register struct vnode *vp;
{

        ASSERT_VOP_LOCKED(vp, "vn_writechk");
int
vn_close(vp, flags, file_cred, td)
        register struct vnode *vp;
        int flags;
        struct ucred *file_cred;
        struct thread *td;
{
        struct mount *mp;
        int error, lock_flags;

        if (vp->v_type != VFIFO && !(flags & FWRITE) && vp->v_mount != NULL &&
            vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
                lock_flags = LK_SHARED;
        else
                lock_flags = LK_EXCLUSIVE;

        VFS_ASSERT_GIANT(vp->v_mount);

        vn_start_write(vp, &mp, V_WAIT);
        vn_lock(vp, lock_flags | LK_RETRY);
        if (flags & FWRITE) {
                VNASSERT(vp->v_writecount > 0, vp,
                    ("vn_close: negative writecount"));
                VOP_ADD_WRITECOUNT(vp, -1);
        }
        error = VOP_CLOSE(vp, flags, file_cred, td);
        vput(vp);
        vn_finished_write(mp);
        return (error);
}
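/*
 * Heuristic to detect sequential operation.  The return value is a
 * sequentiality hint, shifted into the IO_SEQSHIFT field of the ioflag
 * argument, that filesystems may use to size read-ahead.
 */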
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

        if (atomic_load_acq_int(&(fp->f_flag)) & FRDAHEAD)
                return (fp->f_seqcount << IO_SEQSHIFT);

        if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
            uio->uio_offset == fp->f_nextoff) {
                fp->f_seqcount += howmany(uio->uio_resid, 16384);
                if (fp->f_seqcount > IO_SEQMAX)
                        fp->f_seqcount = IO_SEQMAX;
                return (fp->f_seqcount << IO_SEQSHIFT);
        }

        /* Not sequential.  Quickly draw-down sequentiality. */
        if (fp->f_seqcount > 1)
                fp->f_seqcount = 1;
        else
                fp->f_seqcount = 0;
        return (0);
}
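/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * Illustrative call (a sketch only, not taken from this file): read the
 * first 512 bytes of an already-locked vnode into a kernel buffer:
 *
 *	char buf[512];
 *	ssize_t resid;
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *	    IO_NODELOCKED, td->td_ucred, NOCRED, &resid, td);
 */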
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
        struct uio auio;
        struct iovec aiov;
        struct mount *mp;
        struct ucred *cred;
        void *rl_cookie;
        int error, lock_flags;

        VFS_ASSERT_GIANT(vp->v_mount);

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_segflg = segflg;
        auio.uio_rw = rw;
        auio.uio_td = td;
        error = 0;
        if ((ioflg & IO_NODELOCKED) == 0) {
                if (rw == UIO_READ) {
                        rl_cookie = vn_rangelock_rlock(vp, offset,
                            offset + len);
                } else {
                        rl_cookie = vn_rangelock_wlock(vp, offset,
                            offset + len);
                }
                mp = NULL;
                if (rw == UIO_WRITE) {
                        if (vp->v_type != VCHR &&
                            (error = vn_start_write(vp, &mp, V_WAIT |
                            PCATCH)) != 0)
                                goto out;
                        if (MNT_SHARED_WRITES(mp) ||
                            ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
                                lock_flags = LK_SHARED;
                        else
                                lock_flags = LK_EXCLUSIVE;
                } else
                        lock_flags = LK_SHARED;
                vn_lock(vp, lock_flags | LK_RETRY);
        } else
                rl_cookie = NULL;
        ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
        if ((ioflg & IO_NOMACCHECK) == 0) {
                if (rw == UIO_READ)
                        error = mac_vnode_check_read(active_cred, file_cred,
                            vp);
                else
                        error = mac_vnode_check_write(active_cred, file_cred,
                            vp);
        }
        if (error == 0) {
                if (file_cred != NULL)
                        cred = file_cred;
                else
                        cred = active_cred;
                if (rw == UIO_READ)
                        error = VOP_READ(vp, &auio, ioflg, cred);
                else
                        error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        if (aresid)
                *aresid = auio.uio_resid;
        else if (auio.uio_resid && error == 0)
                error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0) {
                VOP_UNLOCK(vp, 0);
                if (mp != NULL)
                        vn_finished_write(mp);
        }
out:
        if (rl_cookie != NULL)
                vn_rangelock_unlock(vp, rl_cookie);
        return (error);
}
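/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr(), and yield the CPU
 * between chunks when the request is large.
 */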
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
        enum uio_rw rw;
        struct vnode *vp;
        void *base;
        size_t len;
        off_t offset;
        enum uio_seg segflg;
        int ioflg;
        struct ucred *active_cred;
        struct ucred *file_cred;
        size_t *aresid;
        struct thread *td;
{
        int error = 0;
        ssize_t iaresid;

        VFS_ASSERT_GIANT(vp->v_mount);
        do {
                int chunk;

                /*
                 * Force `offset' to a multiple of MAXBSIZE except possibly
                 * for the first chunk, so that filesystems only need to
                 * write full blocks except possibly for the first and last
                 * chunks.
                 */
                chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

                if (chunk > len)
                        chunk = len;
                if (rw != UIO_READ && vp->v_type == VREG)
                        bwillwrite();
                iaresid = 0;
                error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
                    ioflg, active_cred, file_cred, &iaresid, td);
                len -= chunk;
                if (error)
                        break;
                offset += chunk;
                base = (char *)base + chunk;
                kern_yield(PRI_USER);
        } while (len > 0);

        if (aresid)
                *aresid = len + iaresid;
        return (error);
}
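/*
 * The implicit file offset f_offset is protected by the FOFFSET_LOCKED
 * flag in f_vnread_flags rather than by the vnode lock.  foffset_lock()
 * serializes implicit-offset I/O on the same file until foffset_unlock()
 * is called; on platforms where off_t loads are atomic, FOF_NOLOCK
 * callers take a lockless fast path.
 */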
off_t
foffset_lock(struct file *fp, int flags)
{
        struct mtx *mtxp;
        off_t res;

        KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
        /*
         * Caller only wants the current f_offset value.  Assume that
         * the long and shorter integer types reads are atomic.
         */
        if ((flags & FOF_NOLOCK) != 0)
                return (fp->f_offset);
#endif

        mtxp = mtx_pool_find(mtxpool_sleep, fp);
        mtx_lock(mtxp);
        if ((flags & FOF_NOLOCK) == 0) {
                while (fp->f_vnread_flags & FOFFSET_LOCKED) {
                        fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
                        msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
                            "vofflock", 0);
                }
                fp->f_vnread_flags |= FOFFSET_LOCKED;
        }
        res = fp->f_offset;
        mtx_unlock(mtxp);
        return (res);
}
void
foffset_unlock(struct file *fp, off_t val, int flags)
{
        struct mtx *mtxp;

        KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
        if ((flags & FOF_NOLOCK) != 0) {
                if ((flags & FOF_NOUPDATE) == 0)
                        fp->f_offset = val;
                if ((flags & FOF_NEXTOFF) != 0)
                        fp->f_nextoff = val;
                return;
        }
#endif

        mtxp = mtx_pool_find(mtxpool_sleep, fp);
        mtx_lock(mtxp);
        if ((flags & FOF_NOUPDATE) == 0)
                fp->f_offset = val;
        if ((flags & FOF_NEXTOFF) != 0)
                fp->f_nextoff = val;
        if ((flags & FOF_NOLOCK) == 0) {
                KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
                    ("Lost FOFFSET_LOCKED"));
                if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
                        wakeup(&fp->f_vnread_flags);
                fp->f_vnread_flags = 0;
        }
        mtx_unlock(mtxp);
}
void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

        if ((flags & FOF_OFFSET) == 0)
                uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

        if ((flags & FOF_OFFSET) == 0)
                foffset_unlock(fp, uio->uio_offset, flags);
}
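/*
 * Return the posix_fadvise(2) advice that applies to the given request:
 * the recorded region's advice if the whole request falls inside that
 * region, POSIX_FADV_NORMAL otherwise.
 */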
static int
get_advice(struct file *fp, struct uio *uio)
{
        struct mtx *mtxp;
        int ret;

        ret = POSIX_FADV_NORMAL;
        if (fp->f_advice == NULL)
                return (ret);

        mtxp = mtx_pool_find(mtxpool_sleep, fp);
        mtx_lock(mtxp);
        if (uio->uio_offset >= fp->f_advice->fa_start &&
            uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
                ret = fp->f_advice->fa_advice;
        mtx_unlock(mtxp);
        return (ret);
}
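/*
 * File table vnode read routine.
 */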
static int
vn_read(fp, uio, active_cred, flags, td)
        struct file *fp;
        struct uio *uio;
        struct ucred *active_cred;
        int flags;
        struct thread *td;
{
        struct vnode *vp;
        struct mtx *mtxp;
        int error, ioflag;
        int advice, vfslocked;
        off_t offset, start, end;

        KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
            uio->uio_td, td));
        KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
        vp = fp->f_vnode;
        ioflag = 0;
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if (fp->f_flag & O_DIRECT)
                ioflag |= IO_DIRECT;
        advice = get_advice(fp, uio);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        vn_lock(vp, LK_SHARED | LK_RETRY);
        switch (advice) {
        case POSIX_FADV_NORMAL:
        case POSIX_FADV_SEQUENTIAL:
        case POSIX_FADV_NOREUSE:
                ioflag |= sequential_heuristic(uio, fp);
                break;
        case POSIX_FADV_RANDOM:
                /* Disable read-ahead for random I/O. */
                break;
        }
        offset = uio->uio_offset;
        error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
        if (error == 0)
                error = VOP_READ(vp, uio, ioflag, fp->f_cred);
        fp->f_nextoff = uio->uio_offset;
        VOP_UNLOCK(vp, 0);
        if (error == 0 && advice == POSIX_FADV_NOREUSE &&
            offset != uio->uio_offset) {
                /*
                 * Use POSIX_FADV_DONTNEED to flush clean pages and
                 * buffers for the backing of the range we just read,
                 * coalescing with any adjacent NOREUSE range.
                 */
                start = offset;
                end = uio->uio_offset - 1;
                mtxp = mtx_pool_find(mtxpool_sleep, fp);
                mtx_lock(mtxp);
                if (fp->f_advice != NULL &&
                    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
                        if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
                                start = fp->f_advice->fa_prevstart;
                        else if (fp->f_advice->fa_prevstart != 0 &&
                            fp->f_advice->fa_prevstart == end + 1)
                                end = fp->f_advice->fa_prevend;
                        fp->f_advice->fa_prevstart = start;
                        fp->f_advice->fa_prevend = end;
                }
                mtx_unlock(mtxp);
                error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
        }
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
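/*
 * File table vnode write routine.
 */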
static int
vn_write(fp, uio, active_cred, flags, td)
        struct file *fp;
        struct uio *uio;
        struct ucred *active_cred;
        int flags;
        struct thread *td;
{
        struct vnode *vp;
        struct mount *mp;
        struct mtx *mtxp;
        int error, ioflag, lock_flags;
        int advice, vfslocked;
        off_t offset, start, end;

        KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
            uio->uio_td, td));
        KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
        vp = fp->f_vnode;
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        if (vp->v_type == VREG)
                bwillwrite();
        ioflag = IO_UNIT;
        if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
                ioflag |= IO_APPEND;
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if (fp->f_flag & O_DIRECT)
                ioflag |= IO_DIRECT;
        if ((fp->f_flag & O_FSYNC) ||
            (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
                ioflag |= IO_SYNC;
        mp = NULL;
        if (vp->v_type != VCHR &&
            (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
                goto unlock;

        advice = get_advice(fp, uio);

        if ((MNT_SHARED_WRITES(mp) ||
            ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) &&
            (flags & FOF_OFFSET) != 0) {
                lock_flags = LK_SHARED;
        } else {
                lock_flags = LK_EXCLUSIVE;
        }

        vn_lock(vp, lock_flags | LK_RETRY);
        switch (advice) {
        case POSIX_FADV_NORMAL:
        case POSIX_FADV_SEQUENTIAL:
        case POSIX_FADV_NOREUSE:
                ioflag |= sequential_heuristic(uio, fp);
                break;
        case POSIX_FADV_RANDOM:
                break;
        }
        offset = uio->uio_offset;
        error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
        if (error == 0)
                error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
        fp->f_nextoff = uio->uio_offset;
        VOP_UNLOCK(vp, 0);
        if (vp->v_type != VCHR)
                vn_finished_write(mp);
        if (error == 0 && advice == POSIX_FADV_NOREUSE &&
            offset != uio->uio_offset) {
                /*
                 * Use POSIX_FADV_DONTNEED to flush clean pages and
                 * buffers for the backing of the range we just wrote,
                 * coalescing with any adjacent NOREUSE range.
                 */
                start = offset;
                end = uio->uio_offset - 1;
                mtxp = mtx_pool_find(mtxpool_sleep, fp);
                mtx_lock(mtxp);
                if (fp->f_advice != NULL &&
                    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
                        if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
                                start = fp->f_advice->fa_prevstart;
                        else if (fp->f_advice->fa_prevstart != 0 &&
                            fp->f_advice->fa_prevstart == end + 1)
                                end = fp->f_advice->fa_prevend;
                        fp->f_advice->fa_prevstart = start;
                        fp->f_advice->fa_prevend = end;
                }
                mtx_unlock(mtxp);
                error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
        }
unlock:
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
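/*
 * The vn_io_fault() machinery below breaks a potential deadlock between
 * page faults taken during an I/O copyin/copyout and the vnode lock
 * held for that I/O: the transfer is first attempted with page faults
 * disabled, and if it fails with EFAULT, the user pages are wired with
 * vm_fault_quick_hold_pages() and the I/O is retried in chunks of at
 * most io_hold_cnt pages, with uiomove() redirected to the held pages
 * via vn_io_fault_uiomove().
 */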
static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");
static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        vm_page_t ma[io_hold_cnt + 2];
        struct uio *uio_clone, short_uio;
        struct iovec short_iovec[1];
        fo_rdwr_t *doio;
        struct vnode *vp;
        void *rl_cookie;
        struct mount *mp;
        vm_page_t *prev_td_ma;
        int error, cnt, save, saveheld, prev_td_ma_cnt;
        vm_offset_t addr, end;
        vm_prot_t prot;
        size_t len, resid;
        ssize_t adv;
        if (uio->uio_rw == UIO_READ)
                doio = vn_read;
        else
                doio = vn_write;
        vp = fp->f_vnode;
        foffset_lock_uio(fp, uio, flags);

        if (uio->uio_segflg != UIO_USERSPACE || vp->v_type != VREG ||
            ((mp = vp->v_mount) != NULL &&
            (mp->mnt_kern_flag & MNTK_NO_IOPF) == 0) ||
            !vn_io_fault_enable) {
                error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
                goto out_last;
        }
        uio_clone = cloneuio(uio);
        resid = uio->uio_resid;

        short_uio.uio_segflg = UIO_USERSPACE;
        short_uio.uio_rw = uio->uio_rw;
        short_uio.uio_td = uio->uio_td;
        if (uio->uio_rw == UIO_READ) {
                prot = VM_PROT_WRITE;
                rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
                    uio->uio_offset + uio->uio_resid);
        } else {
                prot = VM_PROT_READ;
                if ((fp->f_flag & O_APPEND) != 0 || (flags & FOF_OFFSET) == 0)
                        /* For appenders, punt and lock the whole range. */
                        rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
                else
                        rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
                            uio->uio_offset + uio->uio_resid);
        }
        save = vm_fault_disable_pagefaults();
        error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
        if (error != EFAULT)
                goto out;

        atomic_add_long(&vn_io_faults_cnt, 1);
        uio_clone->uio_segflg = UIO_NOCOPY;
        uiomove(NULL, resid - uio->uio_resid, uio_clone);
        uio_clone->uio_segflg = uio->uio_segflg;
        saveheld = curthread_pflags_set(TDP_UIOHELD);
        prev_td_ma = td->td_ma;
        prev_td_ma_cnt = td->td_ma_cnt;
        while (uio_clone->uio_resid != 0) {
                len = uio_clone->uio_iov->iov_len;
                if (len == 0) {
                        KASSERT(uio_clone->uio_iovcnt >= 1,
                            ("iovcnt underflow"));
                        uio_clone->uio_iov++;
                        uio_clone->uio_iovcnt--;
                        continue;
                }
                if (len > io_hold_cnt * PAGE_SIZE)
                        len = io_hold_cnt * PAGE_SIZE;
                addr = (uintptr_t)uio_clone->uio_iov->iov_base;
                end = round_page(addr + len);
                if (end < addr) {
                        error = EFAULT;
                        break;
                }
                /*
                 * A perfectly misaligned address and length could cause
                 * both the start and the end of the chunk to use partial
                 * page.  +2 accounts for such a situation.
                 */
                cnt = atop(end - trunc_page(addr));
                cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
                    addr, len, prot, ma, io_hold_cnt + 2);
                if (cnt == -1) {
                        error = EFAULT;
                        break;
                }
                short_uio.uio_iov = &short_iovec[0];
                short_iovec[0].iov_base = (void *)addr;
                short_uio.uio_iovcnt = 1;
                short_uio.uio_resid = short_iovec[0].iov_len = len;
                short_uio.uio_offset = uio_clone->uio_offset;
                td->td_ma = ma;
                td->td_ma_cnt = cnt;
                error = doio(fp, &short_uio, active_cred, flags | FOF_OFFSET,
                    td);
                vm_page_unhold_pages(ma, cnt);
                adv = len - short_uio.uio_resid;

                uio_clone->uio_iov->iov_base =
                    (char *)uio_clone->uio_iov->iov_base + adv;
                uio_clone->uio_iov->iov_len -= adv;
                uio_clone->uio_resid -= adv;
                uio_clone->uio_offset += adv;

                uio->uio_resid -= adv;
                uio->uio_offset += adv;

                if (error != 0 || adv == 0)
                        break;
        }
        td->td_ma = prev_td_ma;
        td->td_ma_cnt = prev_td_ma_cnt;
        curthread_pflags_restore(saveheld);
out:
        vm_fault_enable_pagefaults(save);
        vn_rangelock_unlock(vp, rl_cookie);
        free(uio_clone, M_IOV);
out_last:
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}
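/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for uio->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * cannot page fault.
 */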
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
        struct uio transp_uio;
        struct iovec transp_iov[1];
        struct thread *td;
        size_t adv;
        int error, pgadv;

        td = curthread;
        if ((td->td_pflags & TDP_UIOHELD) == 0 ||
            uio->uio_segflg != UIO_USERSPACE)
                return (uiomove(data, xfersize, uio));

        KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
        transp_iov[0].iov_base = data;
        transp_uio.uio_iov = &transp_iov[0];
        transp_uio.uio_iovcnt = 1;
        if (xfersize > uio->uio_resid)
                xfersize = uio->uio_resid;
        transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
        transp_uio.uio_offset = 0;
        transp_uio.uio_segflg = UIO_SYSSPACE;
        /*
         * Since transp_iov points to data, and transp_uio is directed
         * at the held pages, the transfer direction must be reversed.
         */
        switch (uio->uio_rw) {
        case UIO_WRITE:
                transp_uio.uio_rw = UIO_READ;
                break;
        case UIO_READ:
                transp_uio.uio_rw = UIO_WRITE;
                break;
        }
        transp_uio.uio_td = uio->uio_td;
        error = uiomove_fromphys(td->td_ma,
            ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
            xfersize, &transp_uio);
        adv = xfersize - transp_uio.uio_resid;
        pgadv =
            (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
            (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
        td->td_ma += pgadv;
        KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
            pgadv));
        td->td_ma_cnt -= pgadv;
        uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
        uio->uio_iov->iov_len -= adv;
        uio->uio_resid -= adv;
        uio->uio_offset += adv;
        return (error);
}
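/*
 * Like vn_io_fault_uiomove(), but the transfer is between the
 * caller-supplied physical pages and the held user pages, using
 * pmap_copy_pages().
 */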
int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
        struct thread *td;
        vm_offset_t iov_base;
        int cnt, pgadv;

        td = curthread;
        if ((td->td_pflags & TDP_UIOHELD) == 0 ||
            uio->uio_segflg != UIO_USERSPACE)
                return (uiomove_fromphys(ma, offset, xfersize, uio));

        KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
        cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
        iov_base = (vm_offset_t)uio->uio_iov->iov_base;
        switch (uio->uio_rw) {
        case UIO_WRITE:
                pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
                    offset, cnt);
                break;
        case UIO_READ:
                pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
                    cnt);
                break;
        }
        pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
        td->td_ma += pgadv;
        KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
            pgadv));
        td->td_ma_cnt -= pgadv;
        uio->uio_iov->iov_base = (char *)(iov_base + cnt);
        uio->uio_iov->iov_len -= cnt;
        uio->uio_resid -= cnt;
        uio->uio_offset += cnt;
        return (0);
}
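/*
 * File table truncate routine.
 */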
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct vattr vattr;
        struct mount *mp;
        struct vnode *vp;
        void *rl_cookie;
        int vfslocked;
        int error;

        vp = fp->f_vnode;

        /*
         * Lock the whole range for truncation.  Otherwise split i/o
         * might happen partly before and partly after the truncation.
         */
        rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
        if (error)
                goto out1;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if (vp->v_type == VDIR) {
                error = EISDIR;
                goto out;
        }
        error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
        if (error == 0) {
                VATTR_NULL(&vattr);
                vattr.va_size = length;
                error = VOP_SETATTR(vp, &vattr, fp->f_cred);
        }
out:
        VOP_UNLOCK(vp, 0);
        vn_finished_write(mp);
out1:
        VFS_UNLOCK_GIANT(vfslocked);
        vn_rangelock_unlock(vp, rl_cookie);
        return (error);
}
static int
vn_statfile(fp, sb, active_cred, td)
        struct file *fp;
        struct stat *sb;
        struct ucred *active_cred;
        struct thread *td;
{
        struct vnode *vp = fp->f_vnode;
        int vfslocked;
        int error;

        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        vn_lock(vp, LK_SHARED | LK_RETRY);
        error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
        VOP_UNLOCK(vp, 0);
        VFS_UNLOCK_GIANT(vfslocked);

        return (error);
}
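/*
 * Stat a vnode; implementation for the stat syscall.
 */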
int
vn_stat(vp, sb, active_cred, file_cred, td)
        struct vnode *vp;
        register struct stat *sb;
        struct ucred *active_cred;
        struct ucred *file_cred;
        struct thread *td;
{
        struct vattr vattr;
        register struct vattr *vap;
        int error;
        u_short mode;

        error = mac_vnode_check_stat(active_cred, file_cred, vp);
        if (error)
                return (error);

        vap = &vattr;

        /*
         * Initialize defaults for new and unusual fields, so that file
         * systems which don't support these fields don't need to know
         * about them.
         */
        vap->va_birthtime.tv_sec = -1;
        vap->va_birthtime.tv_nsec = 0;
        vap->va_fsid = VNOVAL;
        vap->va_rdev = NODEV;

        error = VOP_GETATTR(vp, vap, active_cred);
        if (error)
                return (error);

        /*
         * Zero the spare stat fields.
         */
        bzero(sb, sizeof *sb);
        /*
         * Copy from vattr table.
         */
        if (vap->va_fsid != VNOVAL)
                sb->st_dev = vap->va_fsid;
        else
                sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
        sb->st_ino = vap->va_fileid;
        mode = vap->va_mode;
        switch (vap->va_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        sb->st_nlink = vap->va_nlink;
        sb->st_uid = vap->va_uid;
        sb->st_gid = vap->va_gid;
        sb->st_rdev = vap->va_rdev;
        if (vap->va_size > OFF_MAX)
                return (EOVERFLOW);
        sb->st_size = vap->va_size;
        sb->st_atim = vap->va_atime;
        sb->st_mtim = vap->va_mtime;
        sb->st_ctim = vap->va_ctime;
        sb->st_birthtim = vap->va_birthtime;

        sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
        sb->st_flags = vap->va_flags;
        sb->st_gen = vap->va_gen;
        sb->st_blocks = vap->va_bytes / S_BLKSIZE;
        return (0);
}
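/*
 * File table vnode ioctl routine.
 */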
static int
vn_ioctl(fp, com, data, active_cred, td)
        struct file *fp;
        u_long com;
        void *data;
        struct ucred *active_cred;
        struct thread *td;
{
        struct vnode *vp = fp->f_vnode;
        struct vattr vattr;
        int vfslocked;
        int error;

        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        error = ENOTTY;
        switch (vp->v_type) {
        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        vn_lock(vp, LK_SHARED | LK_RETRY);
                        error = VOP_GETATTR(vp, &vattr, active_cred);
                        VOP_UNLOCK(vp, 0);
                        if (!error)
                                *(int *)data = vattr.va_size - fp->f_offset;
                } else if (com == FIONBIO || com == FIOASYNC)
                        error = 0;
                else
                        error = VOP_IOCTL(vp, com, data, fp->f_flag,
                            active_cred, td);
                break;
        default:
                break;
        }
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
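/*
 * File table vnode poll routine.
 */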
static int
vn_poll(fp, events, active_cred, td)
        struct file *fp;
        int events;
        struct ucred *active_cred;
        struct thread *td;
{
        struct vnode *vp;
        int vfslocked;
        int error;

        vp = fp->f_vnode;
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
        VOP_UNLOCK(vp, 0);
        if (!error)
                error = VOP_POLL(vp, events, fp->f_cred, td);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
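/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */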
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
        int error;

        VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
            ("vn_lock called with no locktype."));
        do {
#ifdef DEBUG_VFS_LOCKS
                KASSERT(vp->v_holdcnt != 0,
                    ("vn_lock %p: zero hold count", vp));
#endif
                error = VOP_LOCK1(vp, flags, file, line);
                flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
                KASSERT((flags & LK_RETRY) == 0 || error == 0,
                    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
                    flags, error));
                /*
                 * Callers specify LK_RETRY if they wish to get dead vnodes.
                 * If RETRY is not set, we return ENOENT instead.
                 */
                if (error == 0 && vp->v_iflag & VI_DOOMED &&
                    (flags & LK_RETRY) == 0) {
                        VOP_UNLOCK(vp, 0);
                        error = ENOENT;
                        break;
                }
        } while (flags & LK_RETRY && error != 0);
        return (error);
}
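/*
 * File table vnode close routine: drop any lock acquired through
 * flock(2) or O_EXLOCK/O_SHLOCK and close the vnode.
 */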
static int
vn_closefile(fp, td)
        struct file *fp;
        struct thread *td;
{
        struct vnode *vp;
        struct flock lf;
        int vfslocked;
        int error;

        vp = fp->f_vnode;
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
                lf.l_whence = SEEK_SET;
                lf.l_start = 0;
                lf.l_len = 0;
                lf.l_type = F_UNLCK;
                (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
        }

        fp->f_ops = &badfileops;

        error = vn_close(vp, fp->f_flag, fp->f_cred, td);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
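/*
 * Filesystem write operations are accounted in mnt_writeopcount so that
 * a filesystem can be suspended (e.g. for snapshots): new write
 * operations block while MNTK_SUSPEND is set.  vn_start_write_locked()
 * is the primitive that expects the mount interlock to be held.
 */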
static int
vn_start_write_locked(struct mount *mp, int flags)
{
        int error;

        mtx_assert(MNT_MTX(mp), MA_OWNED);
        error = 0;

        /*
         * Check on status of suspension.
         */
        if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
            mp->mnt_susp_owner != curthread) {
                while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
                        if (flags & V_NOWAIT) {
                                error = EWOULDBLOCK;
                                goto unlock;
                        }
                        error = msleep(&mp->mnt_flag, MNT_MTX(mp),
                            (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
                        if (error)
                                goto unlock;
                }
        }
        if (flags & V_XSLEEP)
                goto unlock;
        mp->mnt_writeopcount++;
unlock:
        if (error != 0 || (flags & V_XSLEEP) != 0)
                MNT_REL(mp);
        MNT_IUNLOCK(mp);
        return (error);
}
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
        struct mount *mp;
        int error;

        if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
                *mpp = NULL;
                if (error != EOPNOTSUPP)
                        return (error);
                return (0);
        }
        if ((mp = *mpp) == NULL)
                return (0);
        MNT_ILOCK(mp);
        return (vn_start_write_locked(mp, flags));
}
int
vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
{
        struct mount *mp;
        int error;

 retry:
        if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
                *mpp = NULL;
                if (error != EOPNOTSUPP)
                        return (error);
                return (0);
        }
        if ((mp = *mpp) == NULL)
                return (0);
        MNT_ILOCK(mp);
        if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
                mp->mnt_secondary_writes++;
                mp->mnt_secondary_accwrites++;
                MNT_IUNLOCK(mp);
                return (0);
        }
        if (flags & V_NOWAIT) {
                MNT_REL(mp);
                MNT_IUNLOCK(mp);
                return (EWOULDBLOCK);
        }
        /*
         * Wait for the suspension to finish.
         */
        error = msleep(&mp->mnt_flag, MNT_MTX(mp),
            (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
        vfs_rel(mp);
        if (error == 0)
                goto retry;
        return (error);
}
void
vn_finished_write(struct mount *mp)
{
        if (mp == NULL)
                return;
        MNT_ILOCK(mp);
        mp->mnt_writeopcount--;
        if (mp->mnt_writeopcount < 0)
                panic("vn_finished_write: neg cnt");
        if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
            mp->mnt_writeopcount <= 0)
                wakeup(&mp->mnt_writeopcount);
        MNT_IUNLOCK(mp);
}
void
vn_finished_secondary_write(struct mount *mp)
{
        if (mp == NULL)
                return;
        MNT_ILOCK(mp);
        mp->mnt_secondary_writes--;
        if (mp->mnt_secondary_writes < 0)
                panic("vn_finished_secondary_write: neg cnt");
        if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
            mp->mnt_secondary_writes <= 0)
                wakeup(&mp->mnt_secondary_writes);
        MNT_IUNLOCK(mp);
}
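/*
 * Request a filesystem to suspend write operations.
 */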
int
vfs_write_suspend(struct mount *mp)
{
        int error;

        MNT_ILOCK(mp);
        if (mp->mnt_susp_owner == curthread) {
                MNT_IUNLOCK(mp);
                return (EALREADY);
        }
        while (mp->mnt_kern_flag & MNTK_SUSPEND)
                msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
        mp->mnt_kern_flag |= MNTK_SUSPEND;
        mp->mnt_susp_owner = curthread;
        if (mp->mnt_writeopcount > 0)
                (void) msleep(&mp->mnt_writeopcount,
                    MNT_MTX(mp), (PUSER - 1) | PDROP, "suspwt", 0);
        else
                MNT_IUNLOCK(mp);
        if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
                vfs_write_resume(mp);
        return (error);
}
void
vfs_write_resume_flags(struct mount *mp, int flags)
{

        MNT_ILOCK(mp);
        if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
                KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
                mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
                    MNTK_SUSPENDED);
                mp->mnt_susp_owner = NULL;
                wakeup(&mp->mnt_writeopcount);
                curthread->td_pflags &= ~TDP_IGNSUSP;
                if ((flags & VR_START_WRITE) != 0) {
                        MNT_REF(mp);
                        mp->mnt_writeopcount++;
                }
                MNT_IUNLOCK(mp);
                if ((flags & VR_NO_SUSPCLR) == 0)
                        VFS_SUSP_CLEAN(mp);
        } else if ((flags & VR_START_WRITE) != 0) {
                MNT_REF(mp);
                vn_start_write_locked(mp, 0);
        } else {
                MNT_IUNLOCK(mp);
        }
}
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
        int vfslocked;
        int error;

        vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
        error = VOP_KQFILTER(fp->f_vnode, kn);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
        struct uio auio;
        struct iovec iov;
        int error;

        iov.iov_len = *buflen;
        iov.iov_base = buf;

        auio.uio_iov = &iov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_READ;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td = td;
        auio.uio_offset = 0;
        auio.uio_resid = *buflen;

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_SHARED | LK_RETRY);

        ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

        error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
            td);

        if ((ioflg & IO_NODELOCKED) == 0)
                VOP_UNLOCK(vp, 0);

        if (error == 0)
                *buflen = *buflen - auio.uio_resid;

        return (error);
}
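/*
 * An illustrative use of the getter above (a sketch only; the attribute
 * name is hypothetical):
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "posix1e.acl_access", &buflen, buf, td);
 *
 * On success, buflen is updated to the number of bytes actually read.
 * The setter below mirrors the getter, but writes under an exclusive
 * vnode lock and inside a vn_start_write() span.
 */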
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
        struct uio auio;
        struct iovec iov;
        struct mount *mp;
        int error;

        iov.iov_len = buflen;
        iov.iov_base = buf;

        auio.uio_iov = &iov;
        auio.uio_iovcnt = 1;
        auio.uio_rw = UIO_WRITE;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td = td;
        auio.uio_offset = 0;
        auio.uio_resid = buflen;

        if ((ioflg & IO_NODELOCKED) == 0) {
                if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
                        return (error);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

        error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_finished_write(mp);
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
        struct mount *mp;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0) {
                if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
                        return (error);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

        error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
        if (error == EOPNOTSUPP)
                error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
                    NULL, td);

        if ((ioflg & IO_NODELOCKED) == 0) {
                vn_finished_write(mp);
                VOP_UNLOCK(vp, 0);
        }

        return (error);
}
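/*
 * vn_vget_ino() temporarily drops the lock on vp so that VFS_VGET() can
 * safely look up another vnode (by inode number) on the same mount,
 * then relocks vp and returns ENOENT if vp was reclaimed meanwhile.
 */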
int
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
{
        struct mount *mp;
        int ltype, error;

        mp = vp->v_mount;
        ltype = VOP_ISLOCKED(vp);
        KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
            ("vn_vget_ino: vp not locked"));
        error = vfs_busy(mp, MBF_NOWAIT);
        if (error != 0) {
                vfs_ref(mp);
                VOP_UNLOCK(vp, 0);
                error = vfs_busy(mp, 0);
                vn_lock(vp, ltype | LK_RETRY);
                vfs_rel(mp);
                if (error != 0)
                        return (ENOENT);
                if (vp->v_iflag & VI_DOOMED) {
                        vfs_unbusy(mp);
                        return (ENOENT);
                }
        }
        VOP_UNLOCK(vp, 0);
        error = VFS_VGET(mp, ino, lkflags, rvp);
        vfs_unbusy(mp);
        vn_lock(vp, ltype | LK_RETRY);
        if (vp->v_iflag & VI_DOOMED) {
                if (error == 0)
                        vput(*rvp);
                error = ENOENT;
        }
        return (error);
}
int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    const struct thread *td)
{

        if (vp->v_type != VREG || td == NULL)
                return (0);
        PROC_LOCK(td->td_proc);
        if ((uoff_t)uio->uio_offset + uio->uio_resid >
            lim_cur(td->td_proc, RLIMIT_FSIZE)) {
                kern_psignal(td->td_proc, SIGXFSZ);
                PROC_UNLOCK(td->td_proc);
                return (EFBIG);
        }
        PROC_UNLOCK(td->td_proc);
        return (0);
}
int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
        struct vnode *vp;
        int error, vfslocked;

        vp = fp->f_vnode;
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        vn_lock(vp, LK_SHARED | LK_RETRY);
        AUDIT_ARG_VNODE1(vp);
        VOP_UNLOCK(vp, 0);
        error = setfmode(td, active_cred, vp, mode);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
        struct vnode *vp;
        int error, vfslocked;

        vp = fp->f_vnode;
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        vn_lock(vp, LK_SHARED | LK_RETRY);
        AUDIT_ARG_VNODE1(vp);
        VOP_UNLOCK(vp, 0);
        error = setfown(td, active_cred, vp, uid, gid);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}
void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
        vm_object_t object;

        if ((object = vp->v_object) == NULL)
                return;
        VM_OBJECT_LOCK(object);
        vm_object_page_remove(object, start, end, 0);
        VM_OBJECT_UNLOCK(object);
}
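/*
 * Generic FIOSEEKHOLE/FIOSEEKDATA implementation in terms of VOP_BMAP():
 * scan the file a block at a time until a hole (bnp == -1) or a data
 * block matches the request; EOF counts as an implicit hole.
 */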
int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
        struct vattr va;
        daddr_t bn, bnp;
        uint64_t bsize;
        off_t noff;
        int error;

        KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
            ("Wrong command %lu", cmd));

        if (vn_lock(vp, LK_SHARED) != 0)
                return (EBADF);
        if (vp->v_type != VREG) {
                error = ENOTTY;
                goto unlock;
        }
        error = VOP_GETATTR(vp, &va, cred);
        if (error != 0)
                goto unlock;
        noff = *off;
        if (noff >= va.va_size) {
                error = ENXIO;
                goto unlock;
        }
        bsize = vp->v_mount->mnt_stat.f_iosize;
        for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
                error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
                if (error == EOPNOTSUPP) {
                        error = ENOTTY;
                        goto unlock;
                }
                if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
                    (bnp != -1 && cmd == FIOSEEKDATA)) {
                        noff = bn * bsize;
                        if (noff < *off)
                                noff = *off;
                        goto unlock;
                }
        }
        if (noff > va.va_size)
                noff = va.va_size;
        /* noff == va.va_size.  There is an implicit hole at the end of file. */
        if (cmd == FIOSEEKDATA)
                error = ENXIO;
unlock:
        VOP_UNLOCK(vp, 0);
        if (error == 0)
                *off = noff;
        return (error);
}