#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual address
 * and insert it into the given address space at the given virtual
 * address, replacing any page that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/* The page is validated and dirtied here; it is unwired later. */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
	    VM_PROT_WRITE, &entry, &uobject,
	    &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return (EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock(user_pg);
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
		vm_page_unlock(user_pg);
	} else {
		/* A page from a backing object may be mapped read only. */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */
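
/*
 * Usage sketch (not part of the original file): the zero-copy receive path
 * hands a wired, page-aligned kernel page to the process at a page-aligned
 * user address, falling back to copyout() when the page cannot be moved,
 * as userspaceco() below does.  `kva' and `uva' are hypothetical addresses.
 *
 *	error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
 *	    (vm_offset_t)kva, (vm_offset_t)uva);
 *	if (error != KERN_SUCCESS)
 *		error = copyout(kva, (void *)uva, PAGE_SIZE);
 */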
int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(udaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyout(kaddr, udaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}
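
/*
 * Usage sketch (not part of the original file): the *_nofault variants wrap
 * copyin()/copyout() with vm_fault_disable_pagefaults(), so touching an
 * unmapped user address returns EFAULT instead of sleeping on a page-in.
 * That makes them usable while a non-sleepable lock is held; `sc' and its
 * mutex are hypothetical.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	error = copyin_nofault(uptr, &tmp, sizeof(tmp));
 *	mtx_unlock(&sc->sc_mtx);
 *	if (error != 0)
 *		error = copyin(uptr, &tmp, sizeof(tmp));
 */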
#define	PHYS_PAGE_COUNT(len)	(howmany(len, PAGE_SIZE) + 1)

int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = src;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(dst);
	return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}

int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = dst;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(src);
	return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}

#undef PHYS_PAGE_COUNT
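
/*
 * Usage sketch (not part of the original file): physcopyin()/physcopyout()
 * copy between a kernel virtual buffer and a physical address by building a
 * one-entry UIO_SYSSPACE uio and letting uiomove_fromphys() do the mapping.
 * `desc_pa' and `status_pa' are hypothetical physical addresses.
 *
 *	error = physcopyin(&desc, desc_pa, sizeof(desc));
 *	if (error == 0)
 *		error = physcopyout(status_pa, &status, sizeof(status));
 */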
/*
 * uiomove() and uiomove_nofault() are thin wrappers around this helper,
 * passing nofault as 0 and 1 respectively.
 */
static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
	struct thread *td;
	struct iovec *iov;
	size_t cnt;
	int error, newflags, save;

	td = curthread;
	error = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
	    ("uiomove proc"));
	if (!nofault)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "Calling uiomove()");

	newflags = TDP_DEADLKTREAT;
	if (uio->uio_segflg == UIO_USERSPACE && nofault) {
		/* Fail if a non-spurious page fault occurs. */
		newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
	}
	save = curthread_pflags_set(newflags);

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	curthread_pflags_restore(save);
	return (error);
}
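
/*
 * Usage sketch (not part of the original file): the canonical caller of
 * uiomove() is a read/write handler that moves the next chunk of a kernel
 * buffer to or from the caller, letting the uio track offset and residual.
 * `sc->sc_buf' and `sc->sc_len' are hypothetical; bounds checks omitted.
 *
 *	amt = MIN(uio->uio_resid, sc->sc_len - uio->uio_offset);
 *	error = uiomove(sc->sc_buf + uio->uio_offset, amt, uio);
 */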
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	size_t offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > IOSIZE_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}
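
/*
 * Usage sketch (not part of the original file): uiomove_frombuf() performs
 * the same transfer but does the offset/length clamping itself, which suits
 * handlers that expose a fixed-size buffer (same hypothetical `sc' as above):
 *
 *	error = uiomove_frombuf(sc->sc_buf, sc->sc_len, uio);
 */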
#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;
	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (disposable != 0)) {
			/*
			 * Only call vm_pgmoveco() on disposable pages,
			 * since it gives the kernel page to the user process.
			 */
			error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    (vm_offset_t)cp, (vm_offset_t)iov->iov_base);
			/* If the page move failed, fall back to copyout(). */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
	return (error);
}
int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			maybe_yield();
			error = userspaceco(cp, cnt, uio, disposable);
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
#endif /* ZERO_COPY_SOCKETS */
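
/*
 * Usage sketch (not part of the original file): uiomoveco() above is the
 * zero-copy-aware counterpart of uiomove(); the socket receive path passes
 * `disposable' != 0 when the backing page may be given away to the process.
 * `m' is a hypothetical mbuf and `len' a hypothetical chunk size.
 *
 *	error = uiomoveco(mtod(m, void *), len, uio, disposable);
 */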
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling ureadc()");

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		iov->iov_base = iov_base;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
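
/*
 * Usage sketch (not part of the original file): ureadc() returns a single
 * character to the caller's buffer, so producers that generate output one
 * byte at a time can loop until the request is satisfied.  `next_char()'
 * is hypothetical.
 *
 *	while (uio->uio_resid > 0 && (c = next_char(sc)) != -1)
 *		if ((error = ureadc(c, uio)) != 0)
 *			break;
 */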
int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyin(src, dst, len);
		break;
	case UIO_SYSSPACE:
		bcopy(src, dst, len);
		break;
	default:
		panic("copyinfrom: bad seg %d\n", seg);
	}
	return (error);
}
int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyinstr(src, dst, len, copied);
		break;
	case UIO_SYSSPACE:
		error = copystr(src, dst, len, copied);
		break;
	default:
		panic("copyinstrfrom: bad seg %d\n", seg);
	}
	return (error);
}
int
copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof (struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}
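
/*
 * Usage sketch (not part of the original file): copyiniov() pulls a user
 * iovec array into a kernel allocation that the caller later releases.
 * `uap' is a hypothetical syscall argument structure.
 *
 *	error = copyiniov(uap->iovp, uap->iovcnt, &iov, EMSGSIZE);
 *	if (error == 0) {
 *		... use iov[0 .. uap->iovcnt - 1] ...
 *		free(iov, M_IOV);
 *	}
 */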
int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}
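
/*
 * Usage sketch (not part of the original file): copyinuio() is the usual way
 * readv()/writev()-style syscalls build their uio; the iovec copy lives in
 * the same allocation, so a single free() releases both.  `uap' is a
 * hypothetical syscall argument structure.
 *
 *	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
 *	if (error == 0) {
 *		error = kern_readv(td, uap->fd, auio);
 *		free(auio, M_IOV);
 *	}
 */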
struct uio *
cloneuio(struct uio *uiop)
{
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	*uio = *uiop;
	uio->uio_iov = (struct iovec *)(uio + 1);
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}
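
/*
 * Usage sketch (not part of the original file): cloneuio() takes a private
 * copy of a caller's uio and iovec array, so the copy can be consumed (for
 * example by uiomove()) while the original stays intact for a retry.
 *
 *	clone = cloneuio(uio);
 *	... consume clone ...
 *	free(clone, M_IOV);
 */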
/*
 * Map some anonymous memory in user space of size sz, rounded up to
 * the page boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
	struct vmspace *vms;
	int error;
	vm_size_t size;

	vms = td->td_proc->p_vmspace;

	/* Map somewhere after the heap in process memory. */
	PROC_LOCK(td->td_proc);
	*addr = round_page((vm_offset_t)vms->vm_daddr +
	    lim_max(td->td_proc, RLIMIT_DATA));
	PROC_UNLOCK(td->td_proc);

	/* Round size up to the page boundary. */
	size = (vm_size_t)round_page(sz);

	error = vm_mmap(&vms->vm_map, addr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, OBJT_DEFAULT, NULL, 0);

	return (error);
}
/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
	vm_map_t map;
	vm_size_t size;

	if (sz == 0)
		return (0);

	map = &td->td_proc->p_vmspace->vm_map;
	size = (vm_size_t)round_page(sz);

	if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
		return (EINVAL);

	return (0);
}
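
/*
 * Usage sketch (not part of the original file): copyout_map() and
 * copyout_unmap() pair up when the kernel stages data in the target
 * process's address space, e.g. for an emulation layer.  `struct foo' is
 * hypothetical.
 *
 *	error = copyout_map(td, &uaddr, sizeof(struct foo));
 *	if (error == 0 &&
 *	    (error = copyout(&foo, (void *)uaddr, sizeof(foo))) != 0)
 *		copyout_unmap(td, uaddr, sizeof(foo));
 */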