#include <sys/cdefs.h>

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif
static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");
static struct cluster_save *cluster_collectbufs(struct vnode *vp,
	    struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
	    struct buf *fbp);
static void cluster_callback(struct buf *);
72 "Cluster write-behind; 0: disable, 1: enable, 2: backed off");
76 "Cluster read-ahead max block count");
80 "Cluster read min block count");
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, struct buf **bpp)
{

	return (cluster_read_gb(vp, filesize, lblkno, size, cred, totread,
	    seqcount, 0, bpp));
}
int
cluster_read_gb(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	struct bufobj *bo;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;
	bo = &vp->v_bufobj;
	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;
	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, gbflags);
	origblkno = lblkno;
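	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */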
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return (0);
		} else if ((bp->b_flags & B_RAM) == 0) {
			return (0);
		} else {
			bp->b_flags &= ~B_RAM;
			BO_LOCK(bo);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?).
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno + i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again (if we can lock the buffer
				 * without waiting).
				 */
				if ((((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1))) &&
				    (0 == BUF_LOCK(rbp,
				    LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
					rbp->b_flags |= B_RAM;
					BUF_UNLOCK(rbp);
				}
			}
			BO_UNLOCK(bo);
			if (i >= maxra)
				return (0);
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;
		long minread;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));
		/*
		 * Adjust totread if needed.
		 */
		minread = read_min * size;
		if (minread > totread)
			totread = minread;
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);

		/*
		 * If this failed to map just do the original block.
		 */
		if (error || blkno == -1)
			ncontig = 0;

		/*
		 * If we have contiguous data available do a cluster,
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, gbflags, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno++;
		}
	}
	/*
	 * Handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		curthread->td_ru.ru_inblock++;
	}
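	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */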
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, gbflags, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
		curthread->td_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}
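/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */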
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
	struct buf *bp, *tbp;
	daddr_t bn;
	off_t off;
	long tinc, tsize;
	int i, inc, j, toff;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != filesize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));
	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
		if (tbp->b_flags & B_CACHE)
			return (tbp);
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}
	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return (tbp);
	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return (tbp);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_flags |= B_UNMAPPED;
		bp->b_data = unmapped_buf;
	} else {
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	}
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET,
	    ("cluster_rbuild: no buffer offset"));
	bp->b_npages = 0;

	inc = btodb(size);
	TAILQ_INIT(&bp->b_cluster.cluster_head);
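	/*
	 * Walk the would-be cluster, pulling each component buffer in with
	 * getblk() and chaining it onto cluster_head until we hit a buffer
	 * that cannot participate.
	 */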
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
			    (gbflags & GB_UNMAPPED));

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;
			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			off = tbp->b_offset;
			tsize = size;
			VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
			for (j = 0; tsize > 0; j++) {
				toff = off & PAGE_MASK;
				tinc = tsize;
				if (toff + tinc > PAGE_SIZE)
					tinc = PAGE_SIZE - toff;
				VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
				    MA_OWNED);
				if ((tbp->b_pages[j]->valid &
				    vm_page_bits(toff, tinc)) != 0)
					break;
				off += tinc;
				tsize -= tinc;
			}
			if (tsize > 0) {
				VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
				bqrelse(tbp);
				break;
			}
			VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
			/*
			 * Set a read-ahead mark as appropriate.
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}

		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway.
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;

			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if (m->valid == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}
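	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page.
	 */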
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
		if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_pages, bp->b_npages);
	}
	return (bp);
}
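/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */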
static void
cluster_callback(struct buf *bp)
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		    bp->b_npages);
	}
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}
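/*
 * The next two functions survive in this extract only as signatures, so the
 * bodies below are reconstructed sketches, not verbatim source.
 * cluster_wbuild_wb() applies the vfs.write_behind policy declared above
 * (0: disable, 1: enable, 2: backed off) before calling cluster_wbuild_gb();
 * cluster_write() is assumed to be a thin wrapper that mirrors the
 * cluster_read()/cluster_read_gb() pairing.
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		/* Backed off: only write behind once a full run is ready. */
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild_gb(vp, size, start_lbn, len, gbflags);
		/* FALLTHROUGH */
	default:
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize,
    int seqcount)
{

	cluster_write_gb(vp, bp, filesize, seqcount, 0);
}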
void
cluster_write_gb(struct vnode *vp, struct buf *bp, u_quad_t filesize,
    int seqcount, int gbflags)
{
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	if (vp->v_type == VREG) {
		async = DOINGASYNC(vp);
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET,
	    ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize, gbflags);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, gbflags);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize, gbflags);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		    bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1) {
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1, gbflags);
		}
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW.
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
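/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * As with cluster_write() above, the plain cluster_wbuild() entry point is
 * missing from this extract; the wrapper below is an assumed reconstruction
 * that passes gbflags == 0 through to cluster_wbuild_gb().
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len)
{

	return (cluster_wbuild_gb(vp, size, start_lbn, len, 0));
}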
int
cluster_wbuild_gb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	struct buf *bp, *tbp;
	struct bufobj *bo;
	int i, j;
	int totalwritten = 0;
	int dbsize = btodb(size);

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	bo = &vp->v_bufobj;
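	/*
	 * Scan forward from start_lbn, gathering delayed-write buffers
	 * into clusters until len blocks have been covered.
	 */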
	while (len > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		BO_LOCK(bo);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			BO_UNLOCK(bo);
			++start_lbn;
			--len;
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_MTX(bo))) {
			++start_lbn;
			--len;
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		if (tbp->b_pin_count > 0) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = (vp->v_vflag & VV_MD) != 0 ?
		    trypbuf(&cluster_pbuf_freecnt) :
		    getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}
		/*
		 * We got a pbuf to make the cluster in.
		 * So initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		if ((gbflags & GB_UNMAPPED) == 0 ||
		    (tbp->b_flags & B_VMIO) == 0) {
			bp->b_data = (char *)((vm_offset_t)bp->b_data |
			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		} else {
			bp->b_flags |= B_UNMAPPED;
			bp->b_data = unmapped_buf;
		}
		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
		    B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
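		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that can be
		 * merged with the current one.
		 */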
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				/*
				 * If the adjacent data is not even in core
				 * it can't need to be written.
				 */
				BO_LOCK(bo);
				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					BO_UNLOCK(bo);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    BO_MTX(bo)))
					break;
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large.
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Do not pull in pinned buffers.
				 */
				if (tbp->b_pin_count > 0) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->oflags & VPO_BUSY) {
							VM_OBJECT_UNLOCK(
							    tbp->b_bufobj->bo_object);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;
			/*
			 * If any of the clustered buffers have their
			 * B_BARRIER flag set, transfer that request to
			 * the cluster.
			 */
			bp->b_flags |= (tbp->b_flags & B_BARRIER);
			tbp->b_flags &= ~(B_DONE | B_BARRIER);
			tbp->b_flags |= B_ASYNC;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_iocmd = BIO_WRITE;
			bundirty(tbp);
			reassignbuf(tbp);		/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		if ((bp->b_flags & B_UNMAPPED) == 0) {
			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *)bp->b_pages, bp->b_npages);
		}
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return (totalwritten);
}
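/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed in by the caller.
 */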
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void)bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
		    gbflags, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
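/*
 * Usage sketch (not part of the original file): a filesystem's VOP_READ
 * typically chooses between cluster_read() and a plain bread() based on
 * whether the vnode is being read sequentially.  The helper and its
 * parameter names below are hypothetical; only cluster_read() and bread()
 * are real interfaces.
 */
#if 0
static int
example_read_block(struct vnode *vp, struct uio *uio, u_quad_t filesize,
    long bsize, daddr_t lbn, int seqcount, struct ucred *cred,
    struct buf **bpp)
{

	if (seqcount > 1)
		/* Sequential access: let the cluster code read ahead. */
		return (cluster_read(vp, filesize, lbn, bsize, cred,
		    uio->uio_resid, seqcount, bpp));
	/* Random access: single-block read, no read-ahead. */
	return (bread(vp, lbn, bsize, cred, bpp));
}
#endif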