28 #include <sys/cdefs.h>
31 #include "opt_compat.h"
32 #include "opt_umtx_profiling.h"
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
42 #include <sys/sched.h>
44 #include <sys/sysctl.h>
45 #include <sys/sysent.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/eventhandler.h>
53 #include <vm/vm_param.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_object.h>
58 #include <machine/cpu.h>
61 #include <compat/compat32bit/compat32bit_proto.h>
/*
 * Mode value for umutex lock operations.  NOTE(review): presumably the
 * legacy "wait" mode passed to do_lock_umutex() -- confirm against the
 * _umtx_op(2) op table.
 */
#define _UMUTEX_WAIT	2
79 TAILQ_ENTRY(
umtx_pi) pi_hashlink;
85 struct umtx_key pi_key;
91 TAILQ_ENTRY(umtx_q) uq_link;
94 struct umtx_key uq_key;
98 #define UQF_UMTXQ 0x0001
101 struct thread *uq_thread;
111 TAILQ_ENTRY(umtx_q) uq_lockq;
117 u_char uq_inherited_pri;
130 struct umtxq_head head;
144 struct umtxq_list uc_queue[2];
/*
 * Indices into uc_queue[2]: ordinary/shared waiters vs. exclusive
 * (writer-side) waiters.
 */
#define UMTX_SHARED_QUEUE	0
#define UMTX_EXCLUSIVE_QUEUE	1
159 #ifdef UMTX_PROFILING
/* Assert that the caller holds this chain's mutex. */
#define UMTXQ_LOCKED_ASSERT(uc)	mtx_assert(&(uc)->uc_lock, MA_OWNED)
/*
 * Assert that the chain is marked busy.
 *
 * Bug fix: the original tested &(uc)->uc_busy -- the ADDRESS of the
 * field, which is always non-NULL -- so the assertion could never fire.
 * Test the value itself, consistent with the open-coded
 * KASSERT(uc->uc_busy != 0, ...) used elsewhere in this file.
 */
#define UMTXQ_BUSY_ASSERT(uc)	KASSERT((uc)->uc_busy != 0, ("umtx chain is not busy"))
/*
 * Effective user priority of a thread for priority-inheritance
 * comparisons.  Priorities within the timeshare range are all clamped
 * to PRI_MAX_TIMESHARE so timesharing threads compare equal; NOTE(review):
 * presumably this prevents PI boosting among ordinary timeshare threads
 * -- confirm against the scheduler's priority ranges.
 */
#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
/* Multiplicative hash constant used to spread umtx keys over the chains. */
#define GOLDEN_RATIO_PRIME	2654404609U
/* Number of hash chains; must remain a power of two (2^9). */
#define UMTX_CHAINS		512
/*
 * Right shift that keeps the top 9 bits of the hash product --
 * paired with UMTX_CHAINS == 2^9 above.
 */
#define UMTX_SHIFTS		(__WORD_BIT - 9)
/*
 * Translate the user-visible flag word into a key sharing type:
 * process-shared synchronization objects get PROCESS_SHARE keys,
 * everything else is private to the owning process.
 */
#define GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) != 0 ? PROCESS_SHARE : THREAD_SHARE)
/* Spin this many iterations on a busy chain before sleeping on it. */
#define BUSY_SPINS		200
195 static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0,
"umtx debug");
196 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
197 &umtx_pi_allocated, 0,
"Allocated umtx_pi");
199 #ifdef UMTX_PROFILING
200 static long max_length;
201 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0,
"max_length");
202 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0,
"umtx chain stats");
218 static int do_unlock_pp(
struct thread *td,
struct umutex *m, uint32_t flags);
220 static void umtx_exec_hook(
void *arg __unused,
struct proc *p __unused,
221 struct image_params *imgp __unused);
/* Convenience wrappers that act on the default (shared) wait queue. */
#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
230 #ifdef UMTX_PROFILING
232 umtx_init_profiling(
void)
234 struct sysctl_oid *chain_oid;
239 snprintf(chain_name,
sizeof(chain_name),
"%d", i);
240 chain_oid = SYSCTL_ADD_NODE(NULL,
241 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
242 chain_name, CTLFLAG_RD, NULL,
"umtx hash stats");
243 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
244 "max_length0", CTLFLAG_RD, &
umtxq_chains[0][i].max_length, 0, NULL);
245 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
246 "max_length1", CTLFLAG_RD, &
umtxq_chains[1][i].max_length, 0, NULL);
256 umtx_pi_zone = uma_zcreate(
"umtx pi",
sizeof(
struct umtx_pi),
257 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
258 for (i = 0; i < 2; ++i) {
261 MTX_DEF | MTX_DUPOK);
268 #ifdef UMTX_PROFILING
274 #ifdef UMTX_PROFILING
275 umtx_init_profiling();
279 EVENTHANDLER_PRI_ANY);
287 uq =
malloc(
sizeof(
struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
288 uq->uq_spare_queue =
malloc(
sizeof(
struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO);
289 TAILQ_INIT(&uq->uq_spare_queue->head);
290 TAILQ_INIT(&uq->uq_pi_contested);
291 uq->uq_inherited_pri = PRI_MAX;
298 MPASS(uq->uq_spare_queue != NULL);
299 free(uq->uq_spare_queue, M_UMTX);
306 unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
310 static inline struct umtxq_chain *
313 if (key->type <= TYPE_SEM)
324 struct umtxq_chain *uc;
336 struct umtxq_chain *uc;
349 struct umtxq_chain *uc;
352 mtx_assert(&uc->
uc_lock, MA_OWNED);
359 while (uc->uc_busy && --count > 0)
365 while (uc->uc_busy) {
367 msleep(uc, &uc->
uc_lock, 0,
"umtxqb", 0);
380 struct umtxq_chain *uc;
383 mtx_assert(&uc->
uc_lock, MA_OWNED);
384 KASSERT(uc->uc_busy != 0, (
"not busy"));
390 static struct umtxq_queue *
393 struct umtxq_queue *uh;
394 struct umtxq_chain *uc;
398 LIST_FOREACH(uh, &uc->
uc_queue[q], link) {
399 if (umtx_key_match(&uh->
key, key))
409 struct umtxq_queue *uh;
410 struct umtxq_chain *uc;
414 KASSERT((uq->uq_flags &
UQF_UMTXQ) == 0, (
"umtx_q is already on queue"));
417 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
419 uh = uq->uq_spare_queue;
420 uh->
key = uq->uq_key;
421 LIST_INSERT_HEAD(&uc->
uc_queue[q], uh, link);
423 uq->uq_spare_queue = NULL;
425 TAILQ_INSERT_TAIL(&uh->
head, uq, uq_link);
427 #ifdef UMTX_PROFILING
429 if (uc->length > uc->max_length) {
430 uc->max_length = uc->length;
431 if (uc->max_length > max_length)
432 max_length = uc->max_length;
436 uq->uq_cur_queue = uh;
443 struct umtxq_chain *uc;
444 struct umtxq_queue *uh;
449 uh = uq->uq_cur_queue;
450 TAILQ_REMOVE(&uh->
head, uq, uq_link);
452 #ifdef UMTX_PROFILING
456 if (TAILQ_EMPTY(&uh->
head)) {
457 KASSERT(uh->length == 0,
458 (
"inconsistent umtxq_queue length"));
459 LIST_REMOVE(uh, link);
461 uh = LIST_FIRST(&uc->uc_spare_queue);
462 KASSERT(uh != NULL, (
"uc_spare_queue is empty"));
463 LIST_REMOVE(uh, link);
465 uq->uq_spare_queue = uh;
466 uq->uq_cur_queue = NULL;
476 struct umtxq_chain *uc;
477 struct umtxq_queue *uh;
494 struct umtxq_chain *uc;
495 struct umtxq_queue *uh;
502 *first = TAILQ_FIRST(&uh->
head);
518 if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
523 if (P_SHOULDSTOP(p) ||
524 ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
525 if (p->p_flag & P_SINGLE_EXIT)
541 struct umtxq_chain *uc;
542 struct umtxq_queue *uh;
551 while ((uq = TAILQ_FIRST(&uh->
head)) != NULL) {
568 struct umtxq_chain *uc;
583 struct umtxq_chain *uc;
590 error = msleep(uq, &uc->
uc_lock, PCATCH, wmesg, timo);
591 if (error == EWOULDBLOCK)
602 struct thread *td = curthread;
604 vm_map_entry_t entry;
610 if (share == THREAD_SHARE) {
612 key->info.private.vs = td->td_proc->p_vmspace;
613 key->info.private.addr = (uintptr_t)addr;
615 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
616 map = &td->td_proc->p_vmspace->vm_map;
617 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
618 &entry, &key->info.shared.object, &pindex, &prot,
619 &wired) != KERN_SUCCESS) {
623 if ((share == PROCESS_SHARE) ||
624 (share == AUTO_SHARE &&
625 VM_INHERIT_SHARE == entry->inheritance)) {
627 key->info.shared.offset = entry->offset + entry->start -
629 vm_object_reference(key->info.shared.object);
632 key->info.private.vs = td->td_proc->p_vmspace;
633 key->info.private.addr = (uintptr_t)addr;
635 vm_map_lookup_done(map, entry);
649 vm_object_deallocate(key->info.shared.object);
673 owner = casuword(&umtx->u_owner, UMTX_UNOWNED,
id);
676 if (owner == UMTX_UNOWNED)
684 if (owner == UMTX_CONTESTED) {
685 owner = casuword(&umtx->u_owner,
686 UMTX_CONTESTED,
id | UMTX_CONTESTED);
688 if (owner == UMTX_CONTESTED)
711 AUTO_SHARE, &uq->uq_key)) != 0)
726 old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);
763 struct timespec ts, ts2, ts3;
767 if (timeout == NULL) {
774 timespecadd(&ts, timeout);
775 TIMESPEC_TO_TIMEVAL(&tv, timeout);
778 if (error != ETIMEDOUT)
781 if (timespeccmp(&ts2, &ts, >=)) {
786 timespecsub(&ts3, &ts2);
787 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
790 if (error == ERESTART)
811 owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
815 if ((owner & ~UMTX_CONTESTED) !=
id)
819 if ((owner & UMTX_CONTESTED) == 0) {
820 old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
829 if ((error =
umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
843 old = casuword(&umtx->u_owner, owner,
844 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
863 _do_lock_umtx32(
struct thread *td, uint32_t *m, uint32_t
id,
int timo)
880 owner = casuword32(m, UMUTEX_UNOWNED,
id);
883 if (owner == UMUTEX_UNOWNED)
891 if (owner == UMUTEX_CONTESTED) {
892 owner = casuword32(m,
893 UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED);
894 if (owner == UMUTEX_CONTESTED)
917 AUTO_SHARE, &uq->uq_key)) != 0)
932 old = casuword32(m, owner, owner | UMUTEX_CONTESTED);
966 do_lock_umtx32(
struct thread *td,
void *m, uint32_t
id,
969 struct timespec
ts, ts2, ts3;
973 if (timeout == NULL) {
974 error = _do_lock_umtx32(td, m,
id, 0);
980 timespecadd(&
ts, timeout);
981 TIMESPEC_TO_TIMEVAL(&tv, timeout);
983 error = _do_lock_umtx32(td, m,
id,
tvtohz(&tv));
984 if (error != ETIMEDOUT)
987 if (timespeccmp(&ts2, &
ts, >=)) {
992 timespecsub(&ts3, &ts2);
993 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
996 if (error == ERESTART)
1006 do_unlock_umtx32(
struct thread *td, uint32_t *m, uint32_t
id)
1008 struct umtx_key key;
1017 owner = fuword32(m);
1021 if ((owner & ~UMUTEX_CONTESTED) !=
id)
1025 if ((owner & UMUTEX_CONTESTED) == 0) {
1026 old = casuword32(m, owner, UMUTEX_UNOWNED);
1035 if ((error =
umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
1049 old = casuword32(m, owner,
1050 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1069 struct timespec *timeout,
int compat32,
int is_private)
1072 struct timespec ts, ts2, ts3;
1079 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
1088 tmp = (
unsigned int)fuword32(addr);
1093 }
else if (timeout == NULL) {
1100 timespecadd(&ts, timeout);
1101 TIMESPEC_TO_TIMEVAL(&tv, timeout);
1109 if (error != ETIMEDOUT)
1113 if (timespeccmp(&ts2, &ts, >=)) {
1119 timespecsub(&ts3, &ts2);
1120 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
1127 if (error == ERESTART)
1138 struct umtx_key key;
1142 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1159 uint32_t owner, old, id;
1170 owner = fuword32(__DEVOLATILE(
void *, &m->m_owner));
1172 if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
1178 owner = casuword32(&m->m_owner, UMUTEX_UNOWNED,
id);
1181 if (owner == UMUTEX_UNOWNED)
1189 if (owner == UMUTEX_CONTESTED) {
1190 owner = casuword32(&m->m_owner,
1191 UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED);
1193 if (owner == UMUTEX_CONTESTED)
1209 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1210 (owner & ~UMUTEX_CONTESTED) ==
id)
1238 old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1279 struct umtx_key key;
1280 uint32_t owner, old, id;
1288 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1292 if ((owner & ~UMUTEX_CONTESTED) !=
id)
1295 if ((owner & UMUTEX_CONTESTED) == 0) {
1296 old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1319 old = casuword32(&m->m_owner, owner,
1320 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1340 struct umtx_key key;
1346 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1350 if ((owner & ~UMUTEX_CONTESTED) != 0)
1353 flags = fuword32(&m->m_flags);
1366 owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);
1369 if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1383 struct umtx_key key;
1384 uint32_t owner, old;
1389 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
1391 type = TYPE_NORMAL_UMUTEX;
1393 case UMUTEX_PRIO_INHERIT:
1394 type = TYPE_PI_UMUTEX;
1396 case UMUTEX_PRIO_PROTECT:
1397 type = TYPE_PP_UMUTEX;
1417 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1418 while ((owner & UMUTEX_CONTESTED) ==0) {
1419 old = casuword32(&m->m_owner, owner,
1420 owner|UMUTEX_CONTESTED);
1430 }
else if (count == 1) {
1431 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1432 while ((owner & ~UMUTEX_CONTESTED) != 0 &&
1433 (owner & UMUTEX_CONTESTED) == 0) {
1434 old = casuword32(&m->m_owner, owner,
1435 owner|UMUTEX_CONTESTED);
1451 else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1459 static inline struct umtx_pi *
1464 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1465 TAILQ_INIT(&pi->pi_blocked);
1466 atomic_add_int(&umtx_pi_allocated, 1);
1473 uma_zfree(umtx_pi_zone, pi);
1474 atomic_add_int(&umtx_pi_allocated, -1);
1484 struct umtx_q *uq, *uq1, *uq2;
1498 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1499 uq2 = TAILQ_NEXT(uq, uq_lockq);
1500 if ((uq1 != NULL &&
UPRI(td) <
UPRI(uq1->uq_thread)) ||
1501 (uq2 != NULL &&
UPRI(td) >
UPRI(uq2->uq_thread))) {
1506 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1507 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1508 td1 = uq1->uq_thread;
1509 MPASS(td1->td_proc->p_magic == P_MAGIC);
1515 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1517 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1536 pi = uq->uq_pi_blocked;
1542 if (td == NULL || td == curthread)
1545 MPASS(td->td_proc != NULL);
1546 MPASS(td->td_proc->p_magic == P_MAGIC);
1549 if (td->td_lend_user_pri > pri)
1561 pi = uq->uq_pi_blocked;
1576 struct umtx_q *uq, *uq_owner;
1582 while (pi != NULL && pi->
pi_owner != NULL) {
1586 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1587 uq = TAILQ_FIRST(&pi2->pi_blocked);
1589 if (pri >
UPRI(uq->uq_thread))
1590 pri =
UPRI(uq->uq_thread);
1594 if (pri > uq_owner->uq_inherited_pri)
1595 pri = uq_owner->uq_inherited_pri;
1599 if ((pi = uq_owner->uq_pi_blocked) != NULL)
1612 uq_owner = owner->td_umtxq;
1615 panic(
"pi_ower != NULL");
1617 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1626 struct umtx_q *uq, *uq_owner;
1628 uq_owner = owner->td_umtxq;
1643 uq = TAILQ_FIRST(&pi->pi_blocked);
1647 pri =
UPRI(uq->uq_thread);
1649 if (pri <
UPRI(owner))
1651 thread_unlock(owner);
1672 pi = uq->uq_pi_blocked;
1685 uint32_t owner,
const char *wmesg,
int timo)
1687 struct umtxq_chain *uc;
1688 struct thread *td, *td1;
1694 KASSERT(td == curthread, (
"inconsistent uq_thread"));
1703 td1 =
tdfind(owner, curproc->p_pid);
1708 PROC_UNLOCK(td1->td_proc);
1712 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1713 pri =
UPRI(uq1->uq_thread);
1719 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1721 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1723 uq->uq_pi_blocked = pi;
1725 td->td_flags |= TDF_UPIBLOCKED;
1732 error = msleep(uq, &uc->
uc_lock, PCATCH, wmesg, timo);
1733 if (error == EWOULDBLOCK)
1740 uq->uq_pi_blocked = NULL;
1742 td->td_flags &= ~TDF_UPIBLOCKED;
1744 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1758 struct umtxq_chain *uc;
1772 struct umtxq_chain *uc;
1776 KASSERT(pi->
pi_refcount > 0, (
"invalid reference count"));
1780 TAILQ_REMOVE(&pi->
pi_owner->td_umtxq->uq_pi_contested,
1784 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1785 (
"blocked queue not empty"));
1787 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1798 struct umtxq_chain *uc;
1804 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
1805 if (umtx_key_match(&pi->pi_key, key)) {
1818 struct umtxq_chain *uc;
1822 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
1829 _do_lock_pi(
struct thread *td,
struct umutex *m, uint32_t flags,
int timo,
1834 uint32_t id, owner, old;
1847 if (new_pi == NULL) {
1857 if (new_pi != NULL) {
1858 new_pi->pi_key = uq->uq_key;
1874 owner = casuword32(&m->m_owner, UMUTEX_UNOWNED,
id);
1877 if (owner == UMUTEX_UNOWNED) {
1889 if (owner == UMUTEX_CONTESTED) {
1890 owner = casuword32(&m->m_owner,
1891 UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED);
1893 if (owner == UMUTEX_CONTESTED) {
1916 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1917 (owner & ~UMUTEX_CONTESTED) ==
id) {
1944 old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1988 struct umtx_key key;
1989 struct umtx_q *uq_first, *uq_first2, *uq_me;
1991 uint32_t owner, old, id;
2000 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
2004 if ((owner & ~UMUTEX_CONTESTED) !=
id)
2008 if ((owner & UMUTEX_CONTESTED) == 0) {
2009 old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
2025 if (uq_first != NULL) {
2027 pi = uq_first->uq_pi_blocked;
2028 KASSERT(pi != NULL, (
"pi == NULL?"));
2037 uq_me = curthread->td_umtxq;
2039 TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link);
2041 uq_first = TAILQ_FIRST(&pi->pi_blocked);
2042 while (uq_first != NULL &&
2043 (uq_first->uq_flags &
UQF_UMTXQ) == 0) {
2044 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
2047 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
2048 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
2049 if (uq_first2 != NULL) {
2050 if (pri >
UPRI(uq_first2->uq_thread))
2051 pri =
UPRI(uq_first2->uq_thread);
2054 thread_lock(curthread);
2056 thread_unlock(curthread);
2068 old = casuword32(&m->m_owner, owner,
2069 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
2086 _do_lock_pp(
struct thread *td,
struct umutex *m, uint32_t flags,
int timo,
2093 int error, pri, old_inherited_pri, su;
2100 su = (
priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2102 old_inherited_pri = uq->uq_inherited_pri;
2107 ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]);
2108 if (ceiling > RTP_PRIO_MAX) {
2114 if (
UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2119 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2120 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2122 if (uq->uq_inherited_pri <
UPRI(td))
2128 owner = casuword32(&m->m_owner,
2129 UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED);
2131 if (owner == UMUTEX_CONTESTED) {
2142 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
2143 (owner & ~UMUTEX_CONTESTED) ==
id) {
2168 uq->uq_inherited_pri = old_inherited_pri;
2170 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2171 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2173 if (pri >
UPRI(uq2->uq_thread))
2174 pri =
UPRI(uq2->uq_thread);
2177 if (pri > uq->uq_inherited_pri)
2178 pri = uq->uq_inherited_pri;
2187 uq->uq_inherited_pri = old_inherited_pri;
2189 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2190 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2192 if (pri >
UPRI(uq2->uq_thread))
2193 pri =
UPRI(uq2->uq_thread);
2196 if (pri > uq->uq_inherited_pri)
2197 pri = uq->uq_inherited_pri;
2218 struct umtx_key key;
2223 int error, pri, new_inherited_pri, su;
2227 su = (
priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2232 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
2236 if ((owner & ~UMUTEX_CONTESTED) !=
id)
2239 error = copyin(&m->m_ceilings[1], &rceiling,
sizeof(uint32_t));
2244 new_inherited_pri = PRI_MAX;
2246 rceiling = RTP_PRIO_MAX - rceiling;
2247 if (rceiling > RTP_PRIO_MAX)
2249 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2264 error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2278 uq->uq_inherited_pri = new_inherited_pri;
2280 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2281 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2283 if (pri >
UPRI(uq2->uq_thread))
2284 pri =
UPRI(uq2->uq_thread);
2287 if (pri > uq->uq_inherited_pri)
2288 pri = uq->uq_inherited_pri;
2300 uint32_t *old_ceiling)
2303 uint32_t save_ceiling;
2308 flags = fuword32(&m->m_flags);
2309 if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2311 if (ceiling > RTP_PRIO_MAX)
2323 save_ceiling = fuword32(&m->m_ceilings[0]);
2325 owner = casuword32(&m->m_owner,
2326 UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED);
2328 if (owner == UMUTEX_CONTESTED) {
2329 suword32(&m->m_ceilings[0], ceiling);
2330 suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2342 if ((owner & ~UMUTEX_CONTESTED) ==
id) {
2343 suword32(&m->m_ceilings[0], ceiling);
2373 if (error == 0 && old_ceiling != NULL)
2374 suword32(old_ceiling, save_ceiling);
2382 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2385 case UMUTEX_PRIO_INHERIT:
2387 case UMUTEX_PRIO_PROTECT:
2398 struct timespec *timeout,
int mode)
2400 struct timespec ts, ts2, ts3;
2405 flags = fuword32(&m->m_flags);
2409 if (timeout == NULL) {
2416 timespecadd(&ts, timeout);
2417 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2420 if (error != ETIMEDOUT)
2423 if (timespeccmp(&ts2, &ts, >=)) {
2428 timespecsub(&ts3, &ts2);
2429 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2432 if (error == ERESTART)
2446 flags = fuword32(&m->m_flags);
2450 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2453 case UMUTEX_PRIO_INHERIT:
2455 case UMUTEX_PRIO_PROTECT:
2463 do_cv_wait(
struct thread *td,
struct ucond *cv,
struct umutex *m,
2464 struct timespec *timeout, u_long wflags)
2468 struct timespec cts, ets, tts;
2474 flags = fuword32(&cv->c_flags);
2479 if ((wflags & CVWAIT_CLOCKID) != 0) {
2480 clockid = fuword32(&cv->c_clockid);
2481 if (clockid < CLOCK_REALTIME ||
2482 clockid >= CLOCK_THREAD_CPUTIME_ID) {
2487 clockid = CLOCK_REALTIME;
2499 if (fuword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters)) == 0)
2500 suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1);
2510 if (timeout == NULL) {
2513 if ((wflags & CVWAIT_ABSTIME) == 0) {
2515 timespecadd(&ets, timeout);
2521 timespecsub(&tts, &cts);
2523 TIMESPEC_TO_TIMEVAL(&tv, &tts);
2526 if (error != ETIMEDOUT)
2529 if (timespeccmp(&cts, &ets, >=)) {
2534 timespecsub(&tts, &cts);
2535 TIMESPEC_TO_TIMEVAL(&tv, &tts);
2550 int oldlen = uq->uq_cur_queue->length;
2555 __DEVOLATILE(uint32_t *,
2556 &cv->c_has_waiters), 0);
2561 if (error == ERESTART)
2576 struct umtx_key key;
2577 int error, cnt, nwake;
2580 flags = fuword32(&cv->c_flags);
2590 __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2602 struct umtx_key key;
2606 flags = fuword32(&cv->c_flags);
2615 error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2626 do_rw_rdlock(
struct thread *td,
struct urwlock *rwlock,
long fflag,
int timo)
2629 uint32_t flags, wrflags;
2630 int32_t state, oldstate;
2631 int32_t blocked_readers;
2635 flags = fuword32(&rwlock->rw_flags);
2640 wrflags = URWLOCK_WRITE_OWNER;
2641 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2642 wrflags |= URWLOCK_WRITE_WAITERS;
2645 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2647 while (!(state & wrflags)) {
2648 if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
2652 oldstate = casuword32(&rwlock->rw_state, state, state + 1);
2653 if (oldstate == -1) {
2657 if (oldstate == state) {
2679 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2682 while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
2683 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
2684 if (oldstate == -1) {
2688 if (oldstate == state)
2703 if (!(state & wrflags)) {
2715 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2716 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
2718 while (state & wrflags) {
2730 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2734 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2735 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
2736 if (blocked_readers == 1) {
2737 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2739 oldstate = casuword32(&rwlock->rw_state, state,
2740 state & ~URWLOCK_READ_WAITERS);
2741 if (oldstate == -1) {
2745 if (oldstate == state)
2767 struct timespec ts, ts2, ts3;
2772 timespecadd(&ts, timeout);
2773 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2776 if (error != ETIMEDOUT)
2779 if (timespeccmp(&ts2, &ts, >=)) {
2784 timespecsub(&ts3, &ts2);
2785 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2787 if (error == ERESTART)
2797 int32_t state, oldstate;
2798 int32_t blocked_writers;
2799 int32_t blocked_readers;
2803 flags = fuword32(&rwlock->rw_flags);
2808 blocked_readers = 0;
2810 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2811 while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2812 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
2813 if (oldstate == -1) {
2817 if (oldstate == state) {
2828 if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
2829 blocked_readers != 0) {
2849 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2851 while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
2852 (state & URWLOCK_WRITE_WAITERS) == 0) {
2853 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
2854 if (oldstate == -1) {
2858 if (oldstate == state)
2872 if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2882 blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2883 suword32(&rwlock->rw_blocked_writers, blocked_writers+1);
2885 while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
2897 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2900 blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2901 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
2902 if (blocked_writers == 1) {
2903 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2905 oldstate = casuword32(&rwlock->rw_state, state,
2906 state & ~URWLOCK_WRITE_WAITERS);
2907 if (oldstate == -1) {
2911 if (oldstate == state)
2923 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2925 blocked_readers = 0;
2939 struct timespec ts, ts2, ts3;
2944 timespecadd(&ts, timeout);
2945 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2948 if (error != ETIMEDOUT)
2951 if (timespeccmp(&ts2, &ts, >=)) {
2956 timespecsub(&ts3, &ts2);
2957 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2959 if (error == ERESTART)
2969 int32_t state, oldstate;
2970 int error, q,
count;
2973 flags = fuword32(&rwlock->rw_flags);
2978 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2979 if (state & URWLOCK_WRITE_OWNER) {
2981 oldstate = casuword32(&rwlock->rw_state, state,
2982 state & ~URWLOCK_WRITE_OWNER);
2983 if (oldstate == -1) {
2987 if (oldstate != state) {
2989 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
2999 }
else if (URWLOCK_READER_COUNT(state) != 0) {
3001 oldstate = casuword32(&rwlock->rw_state, state,
3003 if (oldstate == -1) {
3007 if (oldstate != state) {
3009 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3026 if (!(flags & URWLOCK_PREFER_READER)) {
3027 if (state & URWLOCK_WRITE_WAITERS) {
3030 }
else if (state & URWLOCK_READ_WAITERS) {
3035 if (state & URWLOCK_READ_WAITERS) {
3038 }
else if (state & URWLOCK_WRITE_WAITERS) {
3061 struct timespec cts, ets, tts;
3062 uint32_t flags,
count;
3066 flags = fuword32(&sem->_flags);
3075 if (fuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters)) == 0)
3076 casuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0, 1);
3078 count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count));
3093 if (timeout == NULL) {
3097 timespecadd(&ets, timeout);
3098 TIMESPEC_TO_TIMEVAL(&tv, timeout);
3101 if (error != ETIMEDOUT)
3104 if (timespeccmp(&cts, &ets, >=)) {
3109 timespecsub(&tts, &cts);
3110 TIMESPEC_TO_TIMEVAL(&tv, &tts);
3119 if (error == ERESTART && timeout != NULL)
3133 struct umtx_key key;
3134 int error, cnt, nwake;
3137 flags = fuword32(&sem->_flags);
3147 __DEVOLATILE(uint32_t *, &sem->_has_waiters), 0);
3175 error = copyin(addr, tsp,
sizeof(
struct timespec));
3177 if (tsp->tv_sec < 0 ||
3178 tsp->tv_nsec >= 1000000000 ||
3192 if (uap->uaddr2 == NULL)
3215 if (uap->uaddr2 == NULL)
3223 return do_wait(td, uap->obj, uap->val, ts, 0, 0);
3232 if (uap->uaddr2 == NULL)
3240 return do_wait(td, uap->obj, uap->val, ts, 1, 0);
3249 if (uap->uaddr2 == NULL)
3257 return do_wait(td, uap->obj, uap->val, ts, 1, 1);
/*
 * Copy user pointers in from userland this many at a time when waking
 * batches of private words (bounds the kernel-side temporary array).
 */
#define BATCH_SIZE	128
3270 int count = uap->val;
3272 char **upp = (
char **)uap->obj;
3281 error = copyin(upp+pos, uaddrs, tocopy *
sizeof(
char *));
3284 for (i = 0; i < tocopy; ++i)
3305 if (uap->uaddr2 == NULL)
3329 if (uap->uaddr2 == NULL)
3365 if (uap->uaddr2 == NULL)
3373 return (
do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3391 struct timespec timeout;
3395 if (uap->uaddr2 == NULL) {
3409 struct timespec timeout;
3413 if (uap->uaddr2 == NULL) {
3438 if (uap->uaddr2 == NULL)
3492 if ((
unsigned)uap->op < UMTX_OP_MAX)
3493 return (*op_table[uap->op])(td, uap);
3499 compat32bit_umtx_lock(
struct thread *td,
struct compat32bit_umtx_lock_args *uap)
3502 return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
3506 compat32bit_umtx_unlock(
struct thread *td,
struct compat32bit_umtx_unlock_args *uap)
3509 return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
3518 umtx_copyin_timeout32(
void *addr,
struct timespec *tsp)
3520 struct timespec32 ts32;
3523 error = copyin(addr, &ts32,
sizeof(
struct timespec32));
3525 if (ts32.tv_sec < 0 ||
3526 ts32.tv_nsec >= 1000000000 ||
3530 tsp->tv_sec = ts32.tv_sec;
3531 tsp->tv_nsec = ts32.tv_nsec;
3538 __umtx_op_lock_umtx_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3544 if (uap->uaddr2 == NULL)
3547 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3552 return (do_lock_umtx32(td, uap->obj, uap->val, ts));
3556 __umtx_op_unlock_umtx_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3558 return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val));
3562 __umtx_op_wait_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3567 if (uap->uaddr2 == NULL)
3570 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3575 return do_wait(td, uap->obj, uap->val, ts, 1, 0);
3579 __umtx_op_lock_umutex_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3585 if (uap->uaddr2 == NULL)
3588 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3597 __umtx_op_wait_umutex_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3603 if (uap->uaddr2 == NULL)
3606 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3615 __umtx_op_cv_wait_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3621 if (uap->uaddr2 == NULL)
3624 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3629 return (
do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3633 __umtx_op_rw_rdlock_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3635 struct timespec timeout;
3639 if (uap->uaddr2 == NULL) {
3642 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3651 __umtx_op_rw_wrlock_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3653 struct timespec timeout;
3657 if (uap->uaddr2 == NULL) {
3660 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3670 __umtx_op_wait_uint_private_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3675 if (uap->uaddr2 == NULL)
3678 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3683 return do_wait(td, uap->obj, uap->val, ts, 1, 1);
3687 __umtx_op_sem_wait_compat32(
struct thread *td,
struct _umtx_op_args *uap)
3693 if (uap->uaddr2 == NULL)
3696 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3705 __umtx_op_nwake_private32(
struct thread *td,
struct _umtx_op_args *uap)
3707 int count = uap->val;
3709 uint32_t **upp = (uint32_t **)uap->obj;
3718 error = copyin(upp+pos, uaddrs, tocopy *
sizeof(uint32_t));
3721 for (i = 0; i < tocopy; ++i)
3731 __umtx_op_lock_umtx_compat32,
3732 __umtx_op_unlock_umtx_compat32,
3733 __umtx_op_wait_compat32,
3736 __umtx_op_lock_umutex_compat32,
3739 __umtx_op_cv_wait_compat32,
3742 __umtx_op_wait_compat32,
3743 __umtx_op_rw_rdlock_compat32,
3744 __umtx_op_rw_wrlock_compat32,
3746 __umtx_op_wait_uint_private_compat32,
3748 __umtx_op_wait_umutex_compat32,
3750 __umtx_op_sem_wait_compat32,
3752 __umtx_op_nwake_private32,
3757 compat32bit_umtx_op(
struct thread *td,
struct compat32bit_umtx_op_args *uap)
3759 if ((
unsigned)uap->op < UMTX_OP_MAX)
3760 return (*op_table_compat32[uap->op])(td,
3761 (
struct _umtx_op_args *)uap);
3770 td->td_umtxq->uq_thread = td;
3788 uq->uq_inherited_pri = PRI_MAX;
3790 KASSERT(uq->uq_flags == 0, (
"uq_flags != 0"));
3791 KASSERT(uq->uq_thread == td, (
"uq_thread != td"));
3792 KASSERT(uq->uq_pi_blocked == NULL, (
"uq_pi_blocked != NULL"));
3793 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), (
"uq_pi_contested is not empty"));
3801 struct image_params *imgp __unused)
3824 if ((uq = td->td_umtxq) == NULL)
3828 uq->uq_inherited_pri = PRI_MAX;
3829 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
3831 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
static MALLOC_DEFINE(M_UMTX,"umtx","UMTX queue memory")
void umtx_pi_adjust(struct thread *td, u_char oldpri)
TAILQ_HEAD(umtxq_head, umtx_q)
#define UMTX_EXCLUSIVE_QUEUE
void umtx_thread_fini(struct thread *td)
void umtx_thread_exit(struct thread *td)
static int umtxq_count(struct umtx_key *key)
static int do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
int tvtohz(struct timeval *tv)
static int __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
void umtx_thread_alloc(struct thread *td)
struct callout_handle timeout(timeout_t *ftn, void *arg, int to_ticks)
static int __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
static int do_lock_umutex(struct thread *td, struct umutex *m, struct timespec *timeout, int mode)
static int __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
static int do_sem_wake(struct thread *td, struct _usem *sem)
int snprintf(char *str, size_t size, const char *format,...)
static void umtx_pi_ref(struct umtx_pi *pi)
void umtx_key_release(struct umtx_key *key)
static int __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
static int do_rw_rdlock2(struct thread *td, void *obj, long val, struct timespec *timeout)
void * malloc(unsigned long size, struct malloc_type *mtp, int flags)
static int __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
static int __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
static void umtxq_sysinit(void *)
static int do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, int timo)
static void umtx_pi_unref(struct umtx_pi *pi)
void panic(const char *fmt,...)
int(* _umtx_op_func)(struct thread *td, struct _umtx_op_args *uap)
static int __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
int umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
static int umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
int sys__umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
static int _do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, int timo, int try)
static void umtxq_remove_queue(struct umtx_q *uq, int q)
static int __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
static int _do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo, int try)
static void umtx_propagate_priority(struct thread *td)
static int _do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, int timo, int mode)
int sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
static int do_cv_broadcast(struct thread *td, struct ucond *cv)
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused, struct image_params *imgp __unused)
struct umtxq_list uc_queue[2]
void getnanouptime(struct timespec *tsp)
static void umtx_thread_cleanup(struct thread *td)
void wakeup_one(void *ident)
void umtxq_free(struct umtx_q *uq)
static int __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
static int do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id, struct timespec *timeout)
static int __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
static int do_rw_wrlock2(struct thread *td, void *obj, struct timespec *timeout)
static int do_unlock_umutex(struct thread *td, struct umutex *m)
int sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
int priv_check(struct thread *td, int priv)
static int umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner, const char *wmesg, int timo)
static void umtxq_insert_queue(struct umtx_q *uq, int q)
static void umtx_repropagate_priority(struct umtx_pi *pi)
static int _do_lock_umutex(struct thread *td, struct umutex *m, int flags, int timo, int mode)
static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0,"umtx debug")
#define UMTX_SHARED_QUEUE
static int __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, int timo)
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL)
static int umtx_pi_allocated
static void umtx_pi_free(struct umtx_pi *pi)
static int do_rw_wrlock(struct thread *td, struct urwlock *rwlock, int timo)
static int __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
#define GOLDEN_RATIO_PRIME
static int do_rw_unlock(struct thread *td, struct urwlock *rwlock)
static int umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
int kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
int umtx_copyin_timeout(const void *addr, struct timespec *tsp)
struct umtx_q * umtxq_alloc(void)
static void umtxq_hash(struct umtx_key *key)
static int __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
SYSCTL_LONG(_hw, OID_AUTO, availpages, CTLFLAG_RD,&physmem, 0,"")
static int __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
static int __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
static struct mtx umtx_lock
static int __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
static int do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m, struct timespec *timeout, u_long wflags)
LIST_HEAD(umtxq_list, umtxq_queue)
static int umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
static int __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
static int umtxq_check_susp(struct thread *td)
static void umtxq_unlock(struct umtx_key *key)
void umtx_thread_init(struct thread *td)
#define UMTXQ_LOCKED_ASSERT(uc)
void free(void *addr, struct malloc_type *mtp)
static struct umtxq_chain * umtxq_getchain(struct umtx_key *key)
static int __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
#define umtxq_signal(key, nwake)
static int do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
void sched_lend_user_prio(struct thread *td, u_char prio)
static _umtx_op_func op_table[]
static struct umtxq_queue * umtxq_queue_lookup(struct umtx_key *key, int q)
static void umtxq_lock(struct umtx_key *key)
static uma_zone_t umtx_pi_zone
void mtx_init(struct mtx *m, const char *name, const char *type, int opts)
static void umtxq_signal_thread(struct umtx_q *uq)
static int umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
static int do_sem_wait(struct thread *td, struct _usem *sem, struct timespec *timeout)
static int do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
static int do_cv_signal(struct thread *td, struct ucond *cv)
static void umtxq_unbusy(struct umtx_key *key)
static int do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
static int __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
static int do_wake_umutex(struct thread *td, struct umutex *m)
static void umtx_pi_insert(struct umtx_pi *pi)
static struct umtx_pi * umtx_pi_alloc(int)
struct thread * tdfind(lwpid_t tid, pid_t pid)
static int _do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id, int timo)
static struct umtx_pi * umtx_pi_lookup(struct umtx_key *key)
static int __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
static int __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,&umtx_pi_allocated, 0,"Allocated umtx_pi")
static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS]
static int __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
#define UMTXQ_BUSY_ASSERT(uc)
static int do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling, uint32_t *old_ceiling)
static int __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
static void umtxq_busy(struct umtx_key *key)
static int do_wait(struct thread *td, void *addr, u_long id, struct timespec *timeout, int compat32, int is_private)
static void umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)