#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <geom/geom.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include "opt_compat.h"
#include "opt_directio.h"
	.bop_name	=	"buf_ops_bio",
static int inmem(struct vnode *vp, daddr_t blkno);
static int vfs_bio_clcheck(struct vnode *vp, int size,
		daddr_t lblkno, daddr_t blkno);
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
    "Use the VM system for directory writes");
    "Amount of presently outstanding async buffer io");
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
    &bufspace, 0, sysctl_bufspace, "L",
    "Virtual memory used for buffers");
    "Virtual memory used for buffers");
    "Amount of unmapped buffers, inclusive in the bufspace");
    "Maximum allowed value of bufspace (including buf_daemon)");
    "Amount of malloced memory for buffers");
    "Maximum amount of malloced memory for buffers");
    "Minimum amount of buffers we want to have");
    "Maximum allowed value of bufspace (excluding buf_daemon)");
160 "Number of times we have reused a buffer");
163 "Number of times we have freed the KVA space from some buffer");
166 "Number of times we have had to repeat buffer allocation to defragment");
169 "Minimum preferred space used for in-progress I/O");
172 "Maximum amount of space to use for in-progress I/O");
175 0,
"Number of bdwrite to bawrite conversions to limit dirty buffers");
178 0,
"Number of buffers supplied to bdwrite with snapshot deadlock risk");
181 0,
"Number of fsync flushes to limit dirty buffers");
184 0,
"Number of flushes skipped due to being recursive");
187 "Number of buffers that are dirty (has unwritten changes) at the moment");
190 "How many buffers we want to have free before bufdaemon can sleep");
193 "When the number of dirty buffers is considered severe");
196 0,
"Number of bdwrite to bawrite conversions to clear dirty buffers");
199 "Number of free buffers");
205 "XXX Complicatedly unused");
208 "Number of calls to getnewbuf");
211 "Number of times getnewbuf has had to restart a buffer aquisition");
214 "Number of times getblk has had to restart a buffer mapping for "
218 "Amount of work to do in flushbufqueues when helping bufdaemon");
221 "Number of dirty buffer flushes done by the bufdaemon helpers");
224 "Number of barrier writes");
225 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
226 &unmapped_buf_allowed, 0,
227 "Permit the use of the unmapped i/o");
#define	BUFFER_QUEUES	6
#define	QUEUE_CLEAN	1
#define	QUEUE_DIRTY	2
#define	QUEUE_DIRTY_GIANT 3
#define	QUEUE_EMPTYKVA	4
#define	QUEUE_EMPTY	5
#define	QUEUE_SENTINEL	1024
#define	VFS_BIO_NEED_ANY	0x01
#define	VFS_BIO_NEED_DIRTYFLUSH	0x02
#define	VFS_BIO_NEED_FREE	0x04
#define	VFS_BIO_NEED_BUFSPACE	0x08
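/*
 * Overview (summary comment added for readability; inferred from how these
 * constants are used below): buffers sit on one of BUFFER_QUEUES free lists.
 * QUEUE_CLEAN holds clean buffers, QUEUE_DIRTY and QUEUE_DIRTY_GIANT hold
 * delayed-write buffers waiting for the buf daemon (the latter for
 * filesystems that still need Giant), QUEUE_EMPTYKVA holds empty buffers
 * that still own kernel virtual address space, and QUEUE_EMPTY holds empty
 * buffers without KVA.  QUEUE_SENTINEL is a b_qindex marker value used by
 * the flushbufqueues() scan, not a real queue.  The VFS_BIO_NEED_* bits
 * record which resource a sleeper on needsbuffer is waiting for.
 */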
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
sysctl_bufspace(SYSCTL_HANDLER_ARGS)
	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
	lvalue = *(long *)arg1;
	if (lvalue > INT_MAX)
extern void ffs_rawread_setup(void);
	if (bp->b_runningbufspace) {
		bp->b_runningbufspace = 0;
	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
	    ("buf %p already counted as free", bp));
	if (bp->b_bufobj != NULL)
		mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
	bp->b_vflags |= BV_INFREECNT;
	KASSERT(old >= 0 && old < nbuf,
	    ("numfreebuffers climbed to %d", old + 1));
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
#define	NSWBUF_MIN	16
#define	TRANSIENT_DENOM	5
#define	TRANSIENT_DENOM	10
	long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;
	physmem_est = physmem_est * (PAGE_SIZE / 1024);
		int factor = 4 * BKVASIZE / 1024;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
		if (physmem_est > 65536)
			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
			    32 * 1024 * 1024 / (factor * 5));
	maxbuf = (LONG_MAX / 3) / BKVASIZE;
		printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
	buf_sz = (long)nbuf * BKVASIZE;
		biotmap_sz = maxbuf_sz - buf_sz;
			buf_sz -= biotmap_sz;
		if (biotmap_sz / INT_MAX > MAXPHYS)
		nbuf = buf_sz / BKVASIZE;
	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
		TAILQ_INIT(&bufqueues[i]);
	for (i = 0; i < nbuf; i++) {
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vflags = BV_INFREECNT;
		LIST_INIT(&bp->b_dep);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
	    16 * 1024 * 1024), 1024 * 1024);
	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
	unmapped_buf = (caddr_t)kmem_alloc_nofault(kernel_map, MAXPHYS);
vfs_buf_check_mapped(struct buf *bp)
	KASSERT((bp->b_flags & B_UNMAPPED) == 0,
	    ("mapped buf %p %x", bp, bp->b_flags));
	    ("mapped buf: b_kvabase was not updated %p", bp));
	    ("mapped buf: b_data was not updated %p", bp));
vfs_buf_check_unmapped(struct buf *bp)
	KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED,
	    ("unmapped buf %p %x", bp, bp->b_flags));
	    ("unmapped buf: corrupted b_kvabase %p", bp));
	    ("unmapped buf: corrupted b_data %p", bp));
#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
#define	BUF_CHECK_MAPPED(bp) do {} while (0)
#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
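/*
 * Summary comment (added; based on the assertions above): a mapped buffer
 * must have B_UNMAPPED clear and b_kvabase/b_data pointing at its KVA, while
 * an unmapped buffer has B_UNMAPPED set and its b_data parked at
 * unmapped_buf.  The BUF_CHECK_MAPPED()/BUF_CHECK_UNMAPPED() macros expand
 * to these checks only in INVARIANTS kernels and to no-ops otherwise, which
 * is why both pairs of definitions appear here.
 */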
	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
	if (bp->b_kvasize == 0)
	atomic_subtract_long(&bufspace, bp->b_kvasize);
	if ((bp->b_flags & B_UNMAPPED) == 0) {
		vm_map_remove(buffer_map, (vm_offset_t)bp->b_kvabase,
		    (vm_offset_t)bp->b_kvabase + bp->b_kvasize);
	if ((bp->b_flags & B_KVAALLOC) != 0) {
		vm_map_remove(buffer_map, (vm_offset_t)bp->b_kvaalloc,
		    (vm_offset_t)bp->b_kvaalloc + bp->b_kvasize);
	bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT((bp->b_flags & B_REMFREE) == 0,
	    ("bremfree: buffer %p already marked for delayed removal.", bp));
	    ("bremfree: buffer %p not on a queue.", bp));
	bp->b_flags |= B_REMFREE;
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
		    ("buf %p not counted in numfreebuffers", bp));
		if (bp->b_bufobj != NULL)
			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
		bp->b_vflags &= ~BV_INFREECNT;
		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	    ("bremfreel: buffer %p not on a queue.", bp));
	mtx_assert(&bqlock, MA_OWNED);
	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
	bq_len[bp->b_qindex]--;
	if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~B_REMFREE;
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
		    ("buf %p not counted in numfreebuffers", bp));
		if (bp->b_bufobj != NULL)
			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
		bp->b_vflags &= ~BV_INFREECNT;
		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
	return (breadn_flags(vp, blkno, size, 0, 0, 0, cred, 0, bpp));
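/*
 * Illustrative usage sketch (added comment, not part of the original file):
 * a filesystem read path typically pairs bread() with brelse()/bqrelse():
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	consume bp->b_data, then release the buffer without discarding
 *	the cached data:
 *	bqrelse(bp);
 */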
breada(struct vnode *vp, daddr_t *rablkno, int *rabsize,
    int cnt, struct ucred *cred)
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
		if ((rabp->b_flags & B_CACHE) == 0) {
			if (!TD_IS_IDLETHREAD(curthread))
				curthread->td_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			rabp->b_iooffset = dbtob(rabp->b_blkno);
bread_gb(struct vnode *vp, daddr_t blkno, int cnt, struct ucred *cred,
    int gbflags, struct buf **bpp)
	    cred, gbflags, bpp));
breadn(struct vnode *vp, daddr_t blkno, int size,
    daddr_t *rablkno, int *rabsize,
    int cnt, struct ucred *cred, struct buf **bpp)
	return (breadn_flags(vp, blkno, size, rablkno, rabsize, cnt,
    int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
	int rv = 0, readwait = 0;
	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
	if ((bp->b_flags & B_CACHE) == 0) {
		if (!TD_IS_IDLETHREAD(curthread))
			curthread->td_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		bp->b_iooffset = dbtob(bp->b_blkno);
		breada(vp, rablkno, rabsize, cnt, cred);
	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
	if (bp->b_flags & B_BARRIER)
	oldflags = bp->b_flags;
	BUF_ASSERT_HELD(bp);
	if (bp->b_pin_count > 0)
	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
	    ("FFS background buffer should not get here %p", bp));
	vp_md = vp->v_vflag & VV_MD;
	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_CACHE;
	bp->b_iocmd = BIO_WRITE;
	bp->b_runningbufspace = bp->b_bufsize;
	if (!TD_IS_IDLETHREAD(curthread))
		curthread->td_ru.ru_oublock++;
	if (oldflags & B_ASYNC)
	bp->b_iooffset = dbtob(bp->b_blkno);
	if ((oldflags & B_ASYNC) == 0) {
	if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
	(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
	TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
		if ((nbp->b_vflags & BV_BKGRDINPROG) ||
		    LK_EXCLUSIVE | LK_NOWAIT, NULL))
			panic("bdwrite: found ourselves");
		if (buf_countdeps(nbp, 0)) {
		if (nbp->b_flags & B_CLUSTEROK) {
	struct thread *td = curthread;
	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT((bp->b_flags & B_BARRIER) == 0,
	    ("Barrier request in delayed write %p", bp));
	BUF_ASSERT_HELD(bp);
	if (bp->b_flags & B_INVAL) {
	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
		td->td_pflags |= TDP_INBDFLUSH;
		td->td_pflags &= ~TDP_INBDFLUSH;
	bp->b_flags |= B_CACHE;
	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
	bp->b_flags &= ~B_DEFERRED;
	bp->b_flags |= B_ASYNC;
	bp->b_flags |= B_ASYNC | B_BARRIER;
	bp->b_flags |= B_BARRIER;
	return (bwrite(bp));
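/*
 * Summary comment (added): bdwrite() marks a buffer dirty and delays the
 * write, bawrite() starts an asynchronous write immediately,
 * babarrierwrite() does the same but also sets B_BARRIER, and
 * bbarrierwrite() is the synchronous barrier variant that falls through to
 * bwrite().  bdirty()/bundirty() only flip the B_DELWRI accounting without
 * starting any I/O.
 */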
	    (PRIBIO + 4), "flswai", 0);
static __noinline int
	KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);
	return vm_page_count_severe();
	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
	if (BUF_LOCKRECURSED(bp)) {
	if (bp->b_flags & B_MANAGED) {
	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
		bp->b_ioflags &= ~BIO_ERROR;
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (!LIST_EMPTY(&bp->b_dep))
		if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
		if (!(bp->b_vflags & BV_BKGRDINPROG))
			bp->b_flags |= B_RELBUF;
		bp->b_flags |= B_RELBUF;
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_mount != NULL &&
	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
	    (bp->b_flags & B_DELWRI))
		obj = bp->b_bufobj->bo_object;
		resid = bp->b_bufsize;
		foff = bp->b_offset;
		VM_OBJECT_LOCK(obj);
		for (i = 0; i < bp->b_npages; i++) {
				poff = OFF_TO_IDX(bp->b_offset);
				for (j = i; j < bp->b_npages; j++) {
					mtmp = bp->b_pages[j];
						mtmp = vm_page_lookup(obj, poff + j);
							panic("brelse: page missing\n");
						bp->b_pages[j] = mtmp;
				if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) {
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_pages, bp->b_npages);
			if ((bp->b_flags & B_NOCACHE) ||
			    (bp->b_ioflags & BIO_ERROR &&
			    bp->b_iocmd == BIO_READ)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
				    (PAGE_SIZE - poffset) : resid;
				KASSERT(presid >= 0, ("brelse: extra page"));
				if (pmap_page_wired_mappings(m) == 0)
					vm_page_set_invalid(m, poffset, presid);
				printf("avoided corruption bug in bogus_page/brelse code\n");
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		VM_OBJECT_UNLOCK(obj);
		if (bp->b_flags & (B_INVAL | B_RELBUF))
	} else if (bp->b_flags & B_VMIO) {
		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
		if (bp->b_bufsize != 0)
		if (bp->b_vp != NULL)
	if (bp->b_flags & B_REMFREE) {
		panic("brelse: free buffer onto another queue???");
	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
		bp->b_flags |= B_INVAL;
	if (bp->b_flags & B_INVAL) {
		if (bp->b_flags & B_DELWRI)
	if (bp->b_bufsize == 0) {
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 2");
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
		if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
		    (B_DELWRI|B_NEEDSGIANT))
		else if (bp->b_flags & B_DELWRI)
		if (bp->b_flags & B_AGE) {
			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp,
			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp,
		bq_len[bp->b_qindex]++;
	if (!(bp->b_flags & B_DELWRI)) {
	if (bp->b_bufsize || bp->b_kvasize)
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
	if (BUF_LOCKRECURSED(bp)) {
	if (bp->b_flags & B_MANAGED) {
		if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if (bp->b_flags & B_REMFREE) {
		panic("bqrelse: free buffer onto another queue???");
	if (bp->b_flags & B_DELWRI) {
		if (bp->b_flags & B_NEEDSGIANT)
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
		bq_len[bp->b_qindex]++;
		if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	for (i = 0; i < bp->b_npages; i++) {
		bp->b_pages[i] = NULL;
		vm_page_unwire(m, 0);
		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
		    m->wire_count == 0) {
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
				vm_page_try_to_cache(m);
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	if (bp->b_bufsize) {
	bp->b_flags &= ~B_VMIO;
	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
	    (B_DELWRI | B_CLUSTEROK))
	if (bpa->b_bufsize != size)
	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;
		for (i = 1; i < maxcl; i++)
			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
	bp->b_flags |= B_ASYNC;
	nwritten = bp->b_bufsize;
	KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
	    bp->b_kvasize == 0, ("call bfreekva(%p)", bp));
	if ((gbflags & GB_UNMAPPED) == 0) {
		bp->b_kvabase = (caddr_t)addr;
	} else if ((gbflags & GB_KVAALLOC) != 0) {
		KASSERT((gbflags & GB_UNMAPPED) != 0,
		    ("GB_KVAALLOC without GB_UNMAPPED"));
		bp->b_kvaalloc = (caddr_t)addr;
		bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
	bp->b_kvasize = maxsize;
	vm_map_lock(buffer_map);
	if (vm_map_findspace(buffer_map, vm_map_min(buffer_map), maxsize,
		vm_map_unlock(buffer_map);
	rv = vm_map_insert(buffer_map, NULL, 0, addr, addr + maxsize,
	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
	KASSERT(rv == KERN_SUCCESS, ("vm_map_insert(buffer_map) rv %d", rv));
	vm_map_unlock(buffer_map);
	atomic_add_long(&bufspace, bp->b_kvasize);
	int fl, flags, norunbuf;
	mtx_assert(&bqlock, MA_OWNED);
	if ((gbflags & GB_NOWAIT_BD) != 0)
	if (vp != NULL && vp->v_type != VCHR &&
	    (td->td_pflags & TDP_BUFNEED) == 0) {
		norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
		    (td->td_pflags & TDP_NORUNNINGBUF);
		td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
		td->td_pflags &= norunbuf;
	CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
	    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
	    bp->b_kvasize, bp->b_bufsize, qindex);
	mtx_assert(&bqlock, MA_NOTOWNED);
	KASSERT((bp->b_flags & B_DELWRI) == 0,
	    ("delwri buffer %p found in queue %d", bp, qindex));
	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
	if (bp->b_vp != NULL)
	if (bp->b_rcred != NOCRED) {
		bp->b_rcred = NOCRED;
	if (bp->b_wcred != NOCRED) {
		bp->b_wcred = NOCRED;
	if (!LIST_EMPTY(&bp->b_dep))
	if (bp->b_vflags & BV_BKGRDINPROG)
		panic("losing buffer 3");
	KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p. qindex: %d",
	    bp, bp->b_vp, qindex));
	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
	bp->b_flags &= B_UNMAPPED | B_KVAALLOC;
	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
	    ("buf %p still counted as free?", bp));
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_offset = NOOFFSET;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_bufobj = NULL;
	bp->b_pin_count = 0;
	bp->b_fsprivate1 = NULL;
	bp->b_fsprivate2 = NULL;
	bp->b_fsprivate3 = NULL;
	LIST_INIT(&bp->b_dep);
	struct buf *bp, *nbp;
	int nqindex, qindex, pass;
	KASSERT(!unmapped || !defrag, ("both unmapped and defrag"));
	if (!defrag && unmapped) {
	if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) {
		nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
	while ((bp = nbp) != NULL) {
		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
			case QUEUE_EMPTYKVA:
				nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
				if (metadata && pass == 1) {
		if (defrag && bp->b_kvasize == 0) {
			printf("Warning: defrag empty buffer %p\n", bp);
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		BO_LOCK(bp->b_bufobj);
		if (bp->b_vflags & BV_BKGRDINPROG) {
			BO_UNLOCK(bp->b_bufobj);
		BO_UNLOCK(bp->b_bufobj);
		KASSERT(bp->b_qindex == qindex,
		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
		if (bp->b_bufobj != NULL)
			BO_LOCK(bp->b_bufobj);
		if (bp->b_bufobj != NULL)
			BO_UNLOCK(bp->b_bufobj);
		mtx_assert(&bqlock, MA_NOTOWNED);
		bp->b_flags |= B_INVAL;
		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
			bp->b_flags |= B_INVAL;
		if (flushingbufs && bp->b_kvasize != 0) {
			bp->b_flags |= B_INVAL;
getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
	int defrag, metadata;
	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
	if (!unmapped_buf_allowed)
		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
	    GB_KVAALLOC)) == GB_UNMAPPED, metadata);
		mtx_assert(&bqlock, MA_OWNED);
		mtx_assert(&bqlock, MA_NOTOWNED);
	} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
		mtx_assert(&bqlock, MA_NOTOWNED);
		bp->b_flags |= B_UNMAPPED;
		bp->b_kvasize = maxsize;
		atomic_add_long(&bufspace, bp->b_kvasize);
		mtx_assert(&bqlock, MA_NOTOWNED);
	maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
	if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED |
	    B_KVAALLOC)) == B_UNMAPPED) {
		bp->b_flags |= B_INVAL;
	} else if ((bp->b_flags & B_KVAALLOC) != 0 &&
	    (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) {
		bp->b_kvabase = bp->b_kvaalloc;
		bp->b_flags &= ~B_KVAALLOC;
	} else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
	    (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED |
		bp->b_kvaalloc = bp->b_kvabase;
		bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
	if ((gbflags & GB_UNMAPPED) == 0) {
		bp->b_saveaddr = bp->b_kvabase;
		bp->b_data = bp->b_saveaddr;
		bp->b_flags &= ~B_UNMAPPED;
	    &bufqueues[QUEUE_DIRTY_GIANT])) {
	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
    0, "Number of buffers flushed with dependencies that require rollbacks");
	struct buf *sentinel;
	if (flushdeps && target > 2)
	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
	while (flushed != target) {
		bp = TAILQ_NEXT(sentinel, b_freelist);
			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
		if (lvp != NULL && bp->b_vp != lvp)
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		if (bp->b_pin_count > 0) {
		BO_LOCK(bp->b_bufobj);
		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
		    (bp->b_flags & B_DELWRI) == 0) {
			BO_UNLOCK(bp->b_bufobj);
		BO_UNLOCK(bp->b_bufobj);
		if (bp->b_flags & B_INVAL) {
		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
			if (flushdeps == 0) {
			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
			ASSERT_VOP_LOCKED(vp, "getbuf");
			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
			    vn_lock(vp, LK_TRYUPGRADE);
			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
			    bp, bp->b_vp, bp->b_flags);
			flushwithdeps += hasdeps;
	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
	free(sentinel, M_TEMP);
	vm_offset_t toff, tinc, size;
	ASSERT_VOP_LOCKED(vp, "inmem");
	if (incore(&vp->v_bufobj, blkno))
	if (vp->v_mount == NULL)
	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
	VM_OBJECT_LOCK(obj);
	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
	VM_OBJECT_UNLOCK(obj);
	VM_OBJECT_UNLOCK(obj);
	vm_ooffset_t foff, noff, eoff;
	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
	foff = bp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET,
	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	for (i = 0; i < bp->b_npages; i++) {
		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		if (eoff > bp->b_offset + bp->b_bufsize)
			eoff = bp->b_offset + bp->b_bufsize;
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	object = bp->b_bufobj->bo_object;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		vm_offset_t boffset;
		vm_offset_t eoffset;
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty)
		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
		if (eoffset > bp->b_bcount)
			eoffset = bp->b_bcount;
		if (boffset < eoffset) {
			if (bp->b_dirtyoff > boffset)
				bp->b_dirtyoff = boffset;
			if (bp->b_dirtyend < eoffset)
				bp->b_dirtyend = eoffset;
	struct buf *scratch_bp;
	int bsize, maxsize, need_mapping, need_kva;
	need_mapping = (bp->b_flags & B_UNMAPPED) != 0 &&
	    (gbflags & GB_UNMAPPED) == 0;
	need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED &&
	    (gbflags & GB_KVAALLOC) != 0;
	if (!need_mapping && !need_kva)
	if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) {
		bp->b_flags &= ~B_KVAALLOC;
		KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0"));
		bp->b_kvabase = bp->b_kvaalloc;
	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
	offset = blkno * bsize;
	maxsize = size + (offset & PAGE_MASK);
	maxsize = imax(maxsize, bsize);
	scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags |
	    (GB_UNMAPPED | GB_KVAALLOC));
	if (scratch_bp == NULL) {
		if ((gbflags & GB_NOWAIT_BD) != 0) {
			panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp);
	KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0,
	    ("scratch bp !B_KVAALLOC %p", scratch_bp));
	setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc,
	    scratch_bp->b_kvasize, gbflags);
	scratch_bp->b_kvasize = 0;
	scratch_bp->b_flags |= B_INVAL;
	scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
	bp->b_saveaddr = bp->b_kvabase;
	bp->b_data = bp->b_saveaddr;
	bp->b_flags &= ~B_UNMAPPED;
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
	int bsize, error, maxsize, vmio;
	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
	ASSERT_VOP_LOCKED(vp, "getblk");
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
	if (!unmapped_buf_allowed)
		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
	if (TD_IS_IDLETHREAD(curthread))
		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
		if (flags & GB_LOCK_NOWAIT)
			lockflags |= LK_NOWAIT;
		error = BUF_TIMELOCK(bp, lockflags,
		    BO_MTX(bo), "getblk", slpflag, slptimeo);
		if (error == ENOLCK)
		else if (BUF_LOCKRECURSED(bp))
		if (bp->b_flags & B_INVAL)
			bp->b_flags &= ~B_CACHE;
		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
			bp->b_flags |= B_CACHE;
		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (size > bp->b_kvasize)) {
				if (bp->b_flags & B_DELWRI) {
					if (bp->b_pin_count > 0) {
						if (flags & GB_LOCK_NOWAIT) {
					bp->b_flags |= B_NOCACHE;
				if (LIST_EMPTY(&bp->b_dep)) {
					bp->b_flags |= B_RELBUF;
					bp->b_flags |= B_NOCACHE;
		if (bp->b_bcount != size)
		KASSERT(bp->b_offset != NOOFFSET,
		    ("getblk: no buffer offset"));
		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
			bp->b_flags |= B_NOCACHE;
		bp->b_flags &= ~B_DONE;
		if (flags & GB_NOCREAT)
		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
		offset = blkno * bsize;
		vmio = vp->v_object != NULL;
		maxsize = size + (offset & PAGE_MASK);
			flags &= ~GB_UNMAPPED;
		maxsize = imax(maxsize, bsize);
		bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
			if (slpflag || slptimeo)
		bp->b_flags |= B_INVAL;
		bp->b_blkno = bp->b_lblkno = blkno;
		bp->b_offset = offset;
			bp->b_flags |= B_VMIO;
			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
			    bp, vp->v_object, bp->b_bufobj->bo_object));
			bp->b_flags &= ~B_VMIO;
			KASSERT(bp->b_bufobj->bo_object == NULL,
			    ("ARGH! has b_bufobj->bo_object %p %p\n",
			    bp, bp->b_bufobj->bo_object));
		bp->b_flags &= ~B_DONE;
	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
	BUF_ASSERT_HELD(bp);
	KASSERT(bp->b_bufobj == bo,
	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
	maxsize = (size + BKVAMASK) & ~BKVAMASK;
	while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
		if ((flags & GB_NOWAIT_BD) &&
		    (curthread->td_pflags & TDP_BUFNEED) != 0)
	bp->b_flags |= B_INVAL;
	BUF_ASSERT_HELD(bp);
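/*
 * Illustrative usage sketch (added comment, not part of the original file):
 * geteblk() returns an empty, locked buffer with no vnode association;
 * callers typically fill b_data and then dispose of the buffer with
 * brelse(), which frees it because B_INVAL is set:
 *
 *	struct buf *bp;
 *
 *	bp = geteblk(size, 0);
 *	fill bp->b_data with up to size bytes of scratch data, then:
 *	brelse(bp);
 */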
	int newbsize, mbsize;
	BUF_ASSERT_HELD(bp);
	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
	if ((bp->b_flags & B_VMIO) == 0) {
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		if (bp->b_flags & B_MALLOC)
			newbsize = round_page(size);
		if (newbsize < bp->b_bufsize) {
			if (bp->b_flags & B_MALLOC) {
					bp->b_bcount = size;
					free(bp->b_data, M_BIOBUF);
					if (bp->b_bufsize) {
						atomic_subtract_long(
					bp->b_saveaddr = bp->b_kvabase;
					bp->b_data = bp->b_saveaddr;
					bp->b_flags &= ~B_MALLOC;
		} else if (newbsize > bp->b_bufsize) {
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {
				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				if (bp->b_bufsize) {
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (size == 0) ? 0 :
		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
		if (size == 0 || bp->b_bufsize == 0)
			bp->b_flags |= B_CACHE;
		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				if ((bp->b_flags & B_UNMAPPED) == 0) {
					pmap_qremove((vm_offset_t)trunc_page(
					    (vm_offset_t)bp->b_data) +
					    (desiredpages << PAGE_SHIFT),
					    (bp->b_npages - desiredpages));
				VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
				for (i = desiredpages; i < bp->b_npages; i++) {
					    ("allocbuf: bogus page found"));
					while (vm_page_sleep_if_busy(m, TRUE,
					bp->b_pages[i] = NULL;
					vm_page_unwire(m, 0);
				VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
				bp->b_npages = desiredpages;
		} else if (size > bp->b_bcount) {
			obj = bp->b_bufobj->bo_object;
			VM_OBJECT_LOCK(obj);
			while (bp->b_npages < desiredpages) {
				m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
				    bp->b_npages, VM_ALLOC_NOBUSY |
				    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
				    VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
				    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
					bp->b_flags &= ~B_CACHE;
				bp->b_pages[bp->b_npages] = m;
			toff = bp->b_bcount;
			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
			while ((bp->b_flags & B_CACHE) && toff < size) {
				if (tinc > (size - toff))
				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
			VM_OBJECT_UNLOCK(obj);
	if ((bp->b_flags & B_UNMAPPED) == 0)
	if (newbsize < bp->b_bufsize)
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	void (*done)(struct bio *);
	vm_offset_t start, end;
	bp->bio_flags |= BIO_DONE;
	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
		start = trunc_page((vm_offset_t)bp->bio_data);
		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
	done = bp->bio_done;
		pmap_qremove(start, OFF_TO_IDX(end - start));
		vm_map_remove(bio_transient_map, start, end);
		atomic_add_int(&inflight_transient_maps, -1);
	while ((bp->bio_flags & BIO_DONE) == 0)
		msleep(bp, mtxp, PRIBIO, wchan, hz / 10);
	if (bp->bio_error != 0)
		return (bp->bio_error);
	if (!(bp->bio_flags & BIO_ERROR))
	bp->bio_error = error;
	bp->bio_flags |= BIO_ERROR;
	if (bp->b_iocmd == BIO_READ)
		bwait(bp, PRIBIO, "biord");
		bwait(bp, PRIBIO, "biowr");
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
	if (bp->b_ioflags & BIO_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	bp = bip->bio_caller2;
	bp->b_resid = bp->b_bcount - bip->bio_completed;
	bp->b_resid = bip->bio_resid;
	bp->b_ioflags = bip->bio_flags;
	bp->b_error = bip->bio_error;
		bp->b_ioflags |= BIO_ERROR;
	KASSERT(dev->si_refcount > 0,
	    ("dev_strategy on un-referenced struct cdev *(%s) %p",
	KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE,
	KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) ||
	    dev->si_threadcount > 0,
	    ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev),
		bp->b_error = ENXIO;
		bp->b_ioflags = BIO_ERROR;
		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
	bip->bio_cmd = bp->b_iocmd;
	bip->bio_offset = bp->b_iooffset;
	bip->bio_length = bp->b_bcount;
	bip->bio_bcount = bp->b_bcount;
	bip->bio_caller2 = bp;
	(*csw->d_strategy)(bip);
	struct bufobj *dropobj;
	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
	BUF_ASSERT_HELD(bp);
	if (bp->b_iocmd == BIO_WRITE)
		dropobj = bp->b_bufobj;
	if (bp->b_iodone != NULL) {
		bp->b_iodone = NULL;
	BUF_ASSERT_HELD(bp);
	if (!LIST_EMPTY(&bp->b_dep))
	if (bp->b_flags & B_VMIO) {
		int bogus, i, iosize;
		obj = bp->b_bufobj->bo_object;
		KASSERT(obj->paging_in_progress >= bp->b_npages,
		    ("biodone_finish: paging in progress(%d) < b_npages(%d)",
		    obj->paging_in_progress, bp->b_npages));
		KASSERT(vp->v_holdcnt > 0,
		    ("biodone_finish: vnode %p has zero hold count", vp));
		KASSERT(vp->v_object != NULL,
		    ("biodone_finish: vnode %p has no vm_object", vp));
		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("biodone_finish: bp %p has no buffer offset", bp));
		iosize = bp->b_bcount - bp->b_resid;
		if (bp->b_iocmd == BIO_READ &&
		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
		    !(bp->b_ioflags & BIO_ERROR)) {
			bp->b_flags |= B_CACHE;
		VM_OBJECT_LOCK(obj);
		for (i = 0; i < bp->b_npages; i++) {
			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
				bogus = bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
					panic("biodone: page disappeared!");
			KASSERT(OFF_TO_IDX(foff) == m->pindex,
			    ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
			    (intmax_t)foff, (uintmax_t)m->pindex));
			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
				KASSERT((m->dirty & vm_page_bits(foff &
				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
				    " page %p has unexpected dirty bits", m));
			vm_page_io_finish(m);
			vm_object_pip_subtract(obj, 1);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		vm_object_pip_wakeupn(obj, 0);
		VM_OBJECT_UNLOCK(obj);
		if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    bp->b_pages, bp->b_npages);
	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
	if (!(bp->b_flags & B_VMIO))
	obj = bp->b_bufobj->bo_object;
	VM_OBJECT_LOCK(obj);
	for (i = 0; i < bp->b_npages; i++) {
		m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
			panic("vfs_unbusy_pages: page missing\n");
			if ((bp->b_flags & B_UNMAPPED) == 0) {
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
				    bp->b_pages, bp->b_npages);
		vm_object_pip_subtract(obj, 1);
		vm_page_io_finish(m);
	vm_object_pip_wakeupn(obj, 0);
	VM_OBJECT_UNLOCK(obj);
	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
	if (eoff > bp->b_offset + bp->b_bcount)
		eoff = bp->b_offset + bp->b_bcount;
		vm_page_set_valid(m, off & PAGE_MASK, eoff - off);
	vm_ooffset_t soff, eoff;
	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
	if (eoff > bp->b_offset + bp->b_bcount)
		eoff = bp->b_offset + bp->b_bcount;
		vm_page_set_validclean(
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff)
	VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, MA_OWNED);
	for (i = 0; i < bp->b_npages; i++) {
		if ((m->oflags & VPO_BUSY) != 0) {
			for (; last_busied < i; last_busied++)
				vm_page_busy(bp->b_pages[last_busied]);
			while ((m->oflags & VPO_BUSY) != 0)
				vm_page_sleep(m, "vbpage");
	for (i = 0; i < last_busied; i++)
		vm_page_wakeup(bp->b_pages[i]);
	if (!(bp->b_flags & B_VMIO))
	obj = bp->b_bufobj->bo_object;
	foff = bp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET,
	    ("vfs_busy_pages: no buffer offset"));
	VM_OBJECT_LOCK(obj);
	if (bp->b_bufsize != 0)
	for (i = 0; i < bp->b_npages; i++) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vm_object_pip_add(obj, 1);
			vm_page_io_start(m);
			pmap_remove_write(m);
		} else if (m->valid == VM_PAGE_BITS_ALL &&
		    (bp->b_flags & B_CACHE) == 0) {
		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
	VM_OBJECT_UNLOCK(obj);
	if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
		    bp->b_pages, bp->b_npages);
	if (!(bp->b_flags & B_VMIO))
	base += (bp->b_offset & PAGE_MASK);
	n = PAGE_SIZE - (base & PAGE_MASK);
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
		vm_page_set_valid(m, base & PAGE_MASK, n);
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	int i, j, mask, sa, ea, slide;
	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
	    (bp->b_offset & PAGE_MASK) == 0) {
		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
		VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
		if ((bp->b_pages[0]->valid & mask) == mask)
		if ((bp->b_pages[0]->valid & mask) == 0) {
			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
			bp->b_pages[0]->valid |= mask;
	sa = bp->b_offset & PAGE_MASK;
	for (i = 0; i < bp->b_npages; i++, sa = 0) {
		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
		ea = slide & PAGE_MASK;
		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
		VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
		if ((bp->b_pages[i]->valid & mask) == mask)
		if ((bp->b_pages[i]->valid & mask) == 0)
			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
			for (; sa < ea; sa += DEV_BSIZE, j++) {
				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
					pmap_zero_page_area(bp->b_pages[i],
		bp->b_pages[i]->valid |= mask;
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	if ((bp->b_flags & B_UNMAPPED) == 0) {
		bzero(bp->b_data + base, size);
		n = PAGE_SIZE - (base & PAGE_MASK);
		VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
			pmap_zero_page_area(m, base & PAGE_MASK, n);
		VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
		pmap_qenter(pg, &p, 1);
		bp->b_pages[index] = p;
	bp->b_npages = index;
	int index, newnpages;
	from = round_page((vm_offset_t)bp->b_data + newbsize);
	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
	if (bp->b_npages > newnpages)
		pmap_qremove(from, bp->b_npages - newnpages);
	for (index = newnpages; index < bp->b_npages; index++) {
		p = bp->b_pages[index];
		bp->b_pages[index] = NULL;
		printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
		    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
		atomic_subtract_int(&cnt.v_wire_count, 1);
	bp->b_npages = newnpages;
	if (bp->b_bufsize < 0)
	prot = VM_PROT_READ;
	if (bp->b_iocmd == BIO_READ)
		prot |= VM_PROT_WRITE;
	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
	    btoc(MAXPHYS))) < 0)
	bp->b_npages = pidx;
	if (mapbuf || !unmapped_buf_allowed) {
		pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
		kva = bp->b_saveaddr;
		bp->b_saveaddr = bp->b_data;
		bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
		bp->b_flags &= ~B_UNMAPPED;
		bp->b_flags |= B_UNMAPPED;
		bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
		bp->b_saveaddr = bp->b_data;
	npages = bp->b_npages;
	if (bp->b_flags & B_UNMAPPED)
		bp->b_flags &= ~B_UNMAPPED;
		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
	vm_page_unhold_pages(bp->b_pages, npages);
	bp->b_data = bp->b_saveaddr;
	bp->b_flags |= B_DONE;
	while ((bp->b_flags & B_DONE) == 0)
		msleep(bp, mtxp, pri, wchan, 0);
	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
	i = VOP_STRATEGY(vp, bp);
	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
	ASSERT_BO_LOCKED(bo);
	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
		bo->bo_flag &= ~BO_WWAIT;
		wakeup(&bo->bo_numoutput);
	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
	ASSERT_BO_LOCKED(bo);
	while (bo->bo_numoutput) {
		bo->bo_flag |= BO_WWAIT;
		error = msleep(&bo->bo_numoutput, BO_MTX(bo),
		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
	if (--bp->b_pin_count == 0)
	while (bp->b_pin_count > 0)
		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
	if ((bp->b_flags & B_UNMAPPED) != 0) {
		KASSERT(unmapped_buf_allowed, ("unmapped"));
		bip->bio_ma = bp->b_pages;
		bip->bio_ma_n = bp->b_npages;
		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
		bip->bio_flags |= BIO_UNMAPPED;
		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
		    PAGE_SIZE == bp->b_npages,
		    ("Buffer %p too short: %d %jd %d", bp, bip->bio_ma_offset,
		    (uintmax_t)bip->bio_length, bip->bio_ma_n));
		bip->bio_data = bp->b_data;
#include "opt_ddb.h"
#include <ddb/ddb.h>
DB_SHOW_COMMAND(buffer, db_show_buffer)
	struct buf *bp = (struct buf *)addr;
		db_printf("usage: show buffer <addr>\n");
	db_printf("buf at %p\n", bp);
	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
	db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
	for (i = 0; i < bp->b_npages; i++) {
		db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
		    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
		if ((i + 1) < bp->b_npages)
	BUF_LOCKPRINTINFO(bp);
DB_SHOW_COMMAND(lockedbufs, lockedbufs)
	for (i = 0; i < nbuf; i++) {
		if (BUF_ISLOCKED(bp)) {
			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
		db_printf("usage: show vnodebufs <addr>\n");
	vp = (struct vnode *)addr;
	db_printf("Clean buffers:\n");
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
	db_printf("Dirty buffers:\n");
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
DB_COMMAND(countfreebufs, db_coundfreebufs)
	int i, used = 0, nfree = 0;
		db_printf("usage: countfreebufs\n");
	for (i = 0; i < nbuf; i++) {
		if ((bp->b_vflags & BV_INFREECNT) != 0)
	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,