#include <sys/cdefs.h>

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
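/*
 * This file implements advisory byte-range locking (lockf) for the
 * kernel.  For orientation, a hypothetical userland caller that ends
 * up here via VOP_ADVLOCK() might look like:
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;	(exclusive; F_RDLCK would be shared)
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;		(bytes 0..99; 0 would mean to EOF)
 *	fcntl(fd, F_SETLKW, &fl);	(F_SETLKW waits, F_SETLK does not)
 *
 * This is standard fcntl(2) usage, not code from this file.
 */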
static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
struct owner_vertex_list;

#define NOLOCKF	(struct lockf_entry *)0
static struct lockf_entry *
		lf_alloc_lock(struct lock_owner *);
static int	lf_clearlock(struct lockf *, struct lockf_entry *);
static int	lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	lf_blocks(struct lockf_entry *, struct lockf_entry *);
static struct lockf_edge *
		lf_alloc_edge(void);
static int	lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static int	lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
		    int);
static struct lockf_entry *
		lf_getblock(struct lockf *, struct lockf_entry *);
static int	lf_getlock(struct lockf *, struct lockf_entry *,
		    struct flock *);
static void	lf_update_dependancies(struct lockf *, struct lockf_entry *,
		    int all, struct lockf_entry_list *);
static void	lf_set_start(struct lockf *, struct lockf_entry *, off_t,
		    struct lockf_entry_list *);
static void	lf_set_end(struct lockf *, struct lockf_entry *, off_t,
		    struct lockf_entry_list *);
static int	lf_setlock(struct lockf *, struct lockf_entry *,
		    struct vnode *, void **cookiep);
static int	lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	lf_split(struct lockf *, struct lockf_entry *,
		    struct lockf_entry *, struct lockf_entry_list *);
static int	graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
		    struct owner_vertex_list *path);
static void	graph_check(struct owner_graph *g, int checkorder);
static void	graph_print_vertices(struct owner_vertex_list *set);
static int	graph_delta_forward(struct owner_graph *g,
		    struct owner_vertex *x, struct owner_vertex *y,
		    struct owner_vertex_list *delta);
static int	graph_delta_backward(struct owner_graph *g,
		    struct owner_vertex *x, struct owner_vertex *y,
		    struct owner_vertex_list *delta);
static int	graph_add_indices(int *indices, int n,
		    struct owner_vertex_list *set);
static int	graph_assign_indices(struct owner_graph *g, int *indices,
		    int nextunused, struct owner_vertex_list *set);
static void	lf_print(char *, struct lockf_entry *);
static void	lf_printlist(char *, struct lockf_entry *);
static void	lf_print_owner(struct lock_owner *);
#define LOCK_OWNER_HASH_SIZE	256
	struct owner_edge_list v_outedges;	/* out-edges of this owner vertex */
	struct owner_edge_list v_inedges;	/* in-edges of this owner vertex */
	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t)id) >> 7;
	} else {
		struct proc *p = (struct proc *)id;
		h = HASHSTEP(0, p->p_pid);
	}
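/*
 * Owner identity is hashed three ways: remote (e.g. NFS) owners by
 * l_pid and l_sysid, flock(2)-style owners by the struct file
 * pointer passed in "id", and local POSIX owners by process id.
 * HASHSTEP is a mixing macro defined earlier in the file (not shown
 * in this excerpt); the result is presumably reduced modulo
 * LOCK_OWNER_HASH_SIZE to index the owner hash table.
 */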
	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
		    && lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

	printf("Allocated lock %p\n", lf);
	KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
	if (--lock->lf_refs > 0)
		return (0);

	KASSERT(LIST_EMPTY(&lock->lf_outedges),
	    ("freeing lock with dependencies"));
	KASSERT(LIST_EMPTY(&lock->lf_inedges),
	    ("freeing lock with dependents"));

	KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
	if (lo->lo_refs == 0) {

		printf("lf_free_lock: freeing lock owner %p\n", lo);

		LIST_REMOVE(lo, lo_link);

		printf("Freed lock owner %p\n", lo);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}

	printf("Freed lock %p\n", lock);
	struct lockf *state, *freestate = NULL;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	off_t start, end, oadd;
	if (ap->a_op == F_UNLCKSYS) {

	switch (fl->l_whence) {

		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;

	} else if (fl->l_len == 0) {

		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);

	if ((*statep) == NULL) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
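/*
 * Range translation, as computed above: with SEEK_END the lock start
 * becomes size + l_start; l_len == 0 extends the lock to end of
 * file, otherwise end = start + l_len - 1.  Every addition is
 * checked against OFF_MAX first so that overflowing ranges are
 * rejected with EOVERFLOW instead of wrapping.
 */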
		printf("Allocated lock owner %p\n", lo);

		lo->lo_flags = flags;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			/* ... */
		} else {
			struct proc *p = (struct proc *)id;
			lo->lo_pid = p->p_pid;
		}
		lo->lo_vertex = NULL;

		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
	lock->lf_start = start;

	if (flags & F_REMOTE) {

	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;
	if (vp->v_iflag & VI_DOOMED) {

	ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
	sx_init(&ls->ls_lock, "ls_lock");
	LIST_INIT(&ls->ls_active);
	LIST_INIT(&ls->ls_pending);

	if (vp->v_iflag & VI_DOOMED) {

		LIST_REMOVE(ls, ls_link);

	if ((*statep) == NULL) {
		state = *statep = ls;

		LIST_REMOVE(ls, ls_link);

	sx_xlock(&state->ls_lock);

	if (vp->v_iflag & VI_DOOMED) {

		sx_xunlock(&state->ls_lock);

		error = lf_setlock(state, lock, vp, ap->a_cookiep);

		error = lf_cancel(state, lock, *ap->a_cookiep);
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
			    <= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}

	sx_xunlock(&state->ls_lock);
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeing state with pending locks"));

	LIST_REMOVE(freestate, ls_link);

	free(freestate, M_LOCKF);

	if (error == EDOOFUS) {
		KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_flags = ap->a_flags;
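/*
 * A filesystem's VOP_ADVLOCK implementation typically just forwards
 * here with its per-file lock state pointer and current size; a
 * minimal sketch along the lines of UFS (field names shown for
 * illustration, not verbatim from ufs_vnops.c):
 *
 *	static int
 *	ufs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */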
	struct lockf_entry *lock, *nlock;

	KASSERT(vp->v_iflag & VI_DOOMED,
	    ("lf_purgelocks: vp %p has not vgone yet", vp));

	sx_xlock(&state->ls_lock);

	LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
		LIST_REMOVE(lock, lf_link);

		if (lock->lf_async_task) {

		lock->lf_flags |= F_INTR;

	sx_xunlock(&state->ls_lock);

	while (state->ls_threads > 1)
		msleep(state, VI_MTX(vp), 0, "purgelocks", 0);

	KASSERT(LIST_EMPTY(&state->ls_pending),
	    ("lock pending for %p", state));
	LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
		LIST_REMOVE(lock, lf_link);

	LIST_REMOVE(state, ls_link);

	free(state, M_LOCKF);
	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);

	return x->lf_owner != y->lf_owner
	    && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
	    && lf_overlaps(x, y);
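/*
 * In other words, two locks conflict only when all three hold: they
 * belong to different owners, at least one of them is exclusive
 * (F_WRLCK), and their byte ranges overlap.  Two read locks never
 * block each other.
 */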
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}
	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
		    graph_alloc_vertex(&lf_owner_graph, lock->lf_owner);
	struct lockf_edge *e;

	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));

	error = graph_add_edge(&lf_owner_graph, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);

	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
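/*
 * Each blocking relationship is recorded twice: as an edge between
 * the two lockf_entry structures (lf_outedges/lf_inedges) and, via
 * graph_add_edge(), as a reference-counted edge between their owners
 * in the global owner graph that deadlock detection walks.
 */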
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {

	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {

	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {

		if (overlap->lf_start > lock->lf_end)

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {

	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);

	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);

	LIST_INSERT_AFTER(lfprev, lock, lf_link);
	LIST_REMOVE(wakelock, lf_link);

	if (lockf_debug & 1)
		lf_print("lf_wakeup_lock: awakening", wakelock);

	if (wakelock->lf_async_task) {
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
    struct lockf_entry_list *granted)
{
	struct lockf_edge *e, *ne;
	struct lockf_entry *deplock;

	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
		deplock = e->le_from;

		if (LIST_EMPTY(&deplock->lf_outedges)) {

			LIST_INSERT_HEAD(granted, deplock, lf_link);
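/*
 * A pending lock whose last outgoing (blocking) edge has been
 * removed no longer waits on anything, so it is pushed onto the
 * caller's "granted" list for activation.
 */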
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
    struct lockf_entry_list *granted)
{

	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
	lock->lf_start = new_start;
	LIST_REMOVE(lock, lf_link);
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
    struct lockf_entry_list *granted)
{

	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
	lock->lf_end = new_end;
	struct lockf_entry *overlap, *lf;
	struct lockf_entry_list granted;

	LIST_INIT(&granted);
	LIST_INSERT_HEAD(&granted, lock, lf_link);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);

		overlap = LIST_FIRST(&state->ls_active);

		if (ovcase && (lockf_debug & 2)) {
			printf("lf_setlock: overlap %d", ovcase);
			lf_print("", overlap);
		}

		LIST_REMOVE(overlap, lf_link);

		lf_split(state, overlap, lock, &granted);

		lf = LIST_NEXT(overlap, lf_link);
		LIST_REMOVE(overlap, lf_link);

		lf_set_end(state, overlap, lock->lf_start - 1,
		    &granted);

		overlap = LIST_NEXT(overlap, lf_link);
	if (lockf_debug & 1) {
		if (lock->lf_type != F_UNLCK)
			lf_print("lf_activate_lock: activated", lock);
		else
			lf_print("lf_activate_lock: unlocked", lock);
		lf_printlist("lf_activate_lock", lock);
	}

	if (lock->lf_type != F_UNLCK)
		lf_insert_lock(state, lock);
	struct lockf_entry_list granted;

	LIST_REMOVE(lock, lf_link);

	LIST_INIT(&granted);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
	static char lockstr[] = "lockf";

	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
	if (lock->lf_type == F_WRLCK)

	if (!(lock->lf_flags & F_NOINTR))

	if ((lock->lf_flags & F_WAIT) == 0
	    && lock->lf_async_task == NULL) {

		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;

			lock->lf_type = F_WRLCK;
		}
		if (lockf_debug & 1)
			lf_print("lf_setlock: deadlock", lock);

		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);

		if (lockf_debug & 1) {
			struct lockf_edge *e;
			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
				lf_print("lf_setlock: blocking on", e->le_to);
				lf_printlist("lf_setlock", e->le_to);
			}
		}
		if ((lock->lf_flags & F_WAIT) == 0) {

			*cookiep = (void *)lock;
			error = EINPROGRESS;

		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);

		if (lock->lf_flags & F_INTR) {

		if (LIST_EMPTY(&lock->lf_outedges)) {

		if (lockf_debug & 1) {
			lf_print("lf_setlock: granted", lock);

	if (lockf_debug & 1)
		lf_print("lf_setlock: deadlock", lock);
	struct lockf_entry *overlap;

	overlap = LIST_FIRST(&state->ls_active);

	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");

	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
static int
lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
{
	struct lockf_entry *block;

	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);

	if ((block = lf_getblock(state, lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == OFF_MAX)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_owner->lo_pid;
		fl->l_sysid = block->lf_owner->lo_sysid;
	} else {
		fl->l_type = F_UNLCK;
	}
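/*
 * F_GETLK semantics: if another owner holds a conflicting lock,
 * describe the first blocking lock in *fl, with l_len == 0 encoding
 * "to end of file"; otherwise report F_UNLCK.
 */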
static int
lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
{
	struct lockf_entry *reallock;

	LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
		if ((void *)reallock == cookie) {

			if (!(reallock->lf_vnode == lock->lf_vnode
			    && reallock->lf_start == lock->lf_start
			    && reallock->lf_end == lock->lf_end)) {

			if (!reallock->lf_async_task) {
static struct lockf_entry *
lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {

		if (overlap->lf_start > lock->lf_end)
	struct lockf_entry *lf;

	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);

	start = lock->lf_start;

		if (lf->lf_start > end)
			break;
		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}

		if (lockf_debug & 2)
			lf_print("\tchecking", lf);

		if (start > lf->lf_end) {
			/* Case 0: no overlap. */
			if (lockf_debug & 2)
				printf("no overlap\n");
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
		if (lf->lf_start == start && lf->lf_end == end) {
			/* Case 1 */
			if (lockf_debug & 2)
				printf("overlap == lock\n");
		}
		if (lf->lf_start <= start && lf->lf_end >= end) {
			/* Case 2 */
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
		}
		if (start <= lf->lf_start && end >= lf->lf_end) {
			/* Case 3 */
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
		}
		if (lf->lf_start < start && lf->lf_end >= start) {
			/* Case 4 */
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
		}
		if (lf->lf_start > start && lf->lf_end > end) {
			/* Case 5 */
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
		}
	panic("lf_findoverlap: default");
static void
lf_split(struct lockf *state, struct lockf_entry *lock1,
    struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
	struct lockf_entry *splitlock;

	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}

	if (lock1->lf_start == lock2->lf_start) {
		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
		return;
	}

	memcpy(splitlock, lock1, sizeof *splitlock);
	splitlock->lf_refs = 1;
	if (splitlock->lf_flags & F_REMOTE)
		vref(splitlock->lf_vnode);

	splitlock->lf_start = lock2->lf_end + 1;
	LIST_INIT(&splitlock->lf_outedges);
	LIST_INIT(&splitlock->lf_inedges);

	lf_set_end(state, lock1, lock2->lf_start - 1, granted);
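/*
 * Three split cases: if lock2 shares lock1's start, lock1 is trimmed
 * to begin just past lock2; if it shares lock1's end, lock1 is
 * trimmed to end just before lock2; otherwise lock2 falls in the
 * middle, so lock1 keeps the front piece and the cloned "splitlock"
 * takes the piece after lock2.
 */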
	struct lockf_entry *lf;
	struct lockdesclist locks;

	STAILQ_INIT(&locks);

	sx_xlock(&ls->ls_lock);
	LIST_FOREACH(lf, &ls->ls_active, lf_link) {
		if (lf->lf_owner->lo_sysid != sysid)
			continue;

		ldesc->vp = lf->lf_vnode;

		ldesc->fl.l_start = lf->lf_start;
		if (lf->lf_end == OFF_MAX)
			ldesc->fl.l_len = 0;
		else
			ldesc->fl.l_len =
			    lf->lf_end - lf->lf_start + 1;
		ldesc->fl.l_whence = SEEK_SET;
		ldesc->fl.l_type = F_UNLCK;
		ldesc->fl.l_pid = lf->lf_owner->lo_pid;
		ldesc->fl.l_sysid = sysid;
		STAILQ_INSERT_TAIL(&locks, ldesc, link);
	}
	sx_xunlock(&ls->ls_lock);

	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
		STAILQ_REMOVE_HEAD(&locks, link);

		error = fn(ldesc->vp, &ldesc->fl, arg);

		free(ldesc, M_LOCKF);
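/*
 * Two-phase iteration: descriptors for matching locks are collected
 * while ls_lock is held, then the callback runs over the private
 * list with the lock dropped, so "fn" can safely re-enter the lock
 * manager (e.g. to unlock each range).
 */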
	struct lockf_entry *lf;
	struct lockdesclist locks;

	STAILQ_INIT(&locks);

	sx_xlock(&ls->ls_lock);
	LIST_FOREACH(lf, &ls->ls_active, lf_link) {

		ldesc->vp = lf->lf_vnode;

		ldesc->fl.l_start = lf->lf_start;
		if (lf->lf_end == OFF_MAX)
			ldesc->fl.l_len = 0;
		else
			ldesc->fl.l_len =
			    lf->lf_end - lf->lf_start + 1;
		ldesc->fl.l_whence = SEEK_SET;
		ldesc->fl.l_type = F_UNLCK;
		ldesc->fl.l_pid = lf->lf_owner->lo_pid;
		ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
		STAILQ_INSERT_TAIL(&locks, ldesc, link);
	}
	sx_xunlock(&ls->ls_lock);

	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
		STAILQ_REMOVE_HEAD(&locks, link);

		error = fn(ldesc->vp, &ldesc->fl, arg);

		free(ldesc, M_LOCKF);
	VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);

	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));

		if (lo->lo_sysid == sysid)
			count += lo->lo_refs;
static int
graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path)
{

		TAILQ_INSERT_HEAD(path, x, v_link);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (graph_reaches(e->e_to, y, path)) {

			TAILQ_INSERT_HEAD(path, x, v_link);
static void
graph_check(struct owner_graph *g, int checkorder)
{
	int i, j;

	for (i = 0; i < g->g_size; i++) {

		KASSERT(g->g_vertices[i]->v_order == i,
		    ("lock graph vertices disordered"));

		for (j = 0; j < i; j++) {

			KASSERT(!graph_reaches(g->g_vertices[i],
			    g->g_vertices[j], NULL),
			    ("lock graph vertices disordered"));
static void
graph_print_vertices(struct owner_vertex_list *set)
{
	struct owner_vertex *v;

	TAILQ_FOREACH(v, set, v_link) {
		printf("%d:", v->v_order);
		lf_print_owner(v->v_owner);
		if (TAILQ_NEXT(v, v_link))
static int
graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{

	TAILQ_INSERT_TAIL(delta, y, v_link);

		LIST_FOREACH(e, &v->v_outedges, e_outlink) {

			if (e->e_to->v_order < x->v_order
			    && e->e_to->v_gen != gen) {
				e->e_to->v_gen = gen;
				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
			}
		}
		v = TAILQ_NEXT(v, v_link);
static int
graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{

	TAILQ_INSERT_TAIL(delta, x, v_link);

		LIST_FOREACH(e, &v->v_inedges, e_inlink) {
			if (e->e_from->v_order > y->v_order
			    && e->e_from->v_gen != gen) {
				e->e_from->v_gen = gen;
				TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
			}
		}
		v = TAILQ_PREV(v, owner_vertex_list, v_link);
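/*
 * deltaF collects the vertices reachable from y whose order is still
 * below x's; deltaB collects the vertices that reach x with order
 * above y's.  Together they bound the affected region whose
 * topological indices must be reshuffled when the new edge x -> y
 * points "backwards" in the current order.
 */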
	TAILQ_FOREACH(v, set, v_link) {
		for (i = n;
		    i > 0 && indices[i - 1] > v->v_order; i--)
			;
		for (j = n - 1; j >= i; j--)
			indices[j + 1] = indices[j];
		indices[i] = v->v_order;
		n++;
	}
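/*
 * Insertion sort: each vertex's old order index is slotted into the
 * sorted "indices" array; graph_assign_indices() then hands these
 * indices back out in ascending order.
 */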
static int
graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
    struct owner_vertex_list *set)
{
	struct owner_vertex *v, *vlowest;

	while (!TAILQ_EMPTY(set)) {
		vlowest = NULL;
		TAILQ_FOREACH(v, set, v_link) {
			if (!vlowest || v->v_order < vlowest->v_order)
				vlowest = v;
		}
		TAILQ_REMOVE(set, vlowest, v_link);
		vlowest->v_order = indices[nextunused];

	return (nextunused);
	struct owner_vertex_list deltaF, deltaB;
	int nF, nB, n, vi, i;

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {

	if (lockf_debug & 8) {
		printf("adding edge %d:", x->v_order);
		lf_print_owner(x->v_owner);
		printf(" -> %d:", y->v_order);
		lf_print_owner(y->v_owner);
	}

	if (y->v_order < x->v_order) {

		if (g->g_gen == 0) {

			for (vi = 0; vi < g->g_size; vi++) {

			if (lockf_debug & 8) {
				struct owner_vertex_list path;

				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}

		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");

			graph_print_vertices(&deltaF);
		}

		if (lockf_debug & 8) {

			graph_print_vertices(&deltaB);
		}

		if (lockf_debug & 8) {
			struct owner_vertex_list set;

			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);

			printf("new ordering = ");
			graph_print_vertices(&set);
		}
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
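/*
 * Order maintenance on edge insertion, as seen above: if the new
 * edge already agrees with the topological order (x before y),
 * nothing moves.  If y orders before x, a path y -> x would mean a
 * deadlock cycle; failing that, the deltaF/deltaB sets are
 * re-indexed so that x again precedes y.  This appears to follow the
 * Pearce/Kelly style of incremental topological ordering.
 */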
	LIST_FOREACH(e, &x->v_outedges, e_outlink) {

	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	if (e->e_refs == 0) {

		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
		}

		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
	v->v_gen = g->g_gen;

	LIST_INIT(&v->v_outedges);
	LIST_INIT(&v->v_inedges);
	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));

	for (i = v->v_order + 1; i < g->g_size; i++) {
	if (lo->lo_flags & F_REMOTE) {
		printf("remote pid %d, system %d",
		    lo->lo_pid, lo->lo_sysid);
	} else if (lo->lo_flags & F_FLOCK) {
		printf("file %p", lo->lo_id);
	} else {
		printf("local pid %d", lo->lo_pid);
	}
static void
lf_print(char *tag, struct lockf_entry *lock)
{

	printf("%s: lock %p for ", tag, (void *)lock);
	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>,",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev));
	printf(" %s, start %jd, end ",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
	    (intmax_t)lock->lf_start);
	if (lock->lf_end == OFF_MAX)
		printf("EOF");
	else
		printf("%jd", (intmax_t)lock->lf_end);
	if (!LIST_EMPTY(&lock->lf_outedges))
		printf(" block %p\n",
		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
static void
lf_printlist(char *tag, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *blk;
	struct lockf_edge *e;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
		printf("\tlock %p for ", (void *)lf);
		lf_print_owner(lf->lf_owner);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
			blk = e->le_to;
			printf("\n\t\tlock request %p for ", (void *)blk);
			lf_print_owner(blk->lf_owner);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" : "unknown",
			    (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!LIST_EMPTY(&blk->lf_inedges))
				panic("lf_printlist: bad list");