28 #include <sys/cdefs.h>
31 #include "opt_param.h"
33 #include <sys/param.h>
34 #include <sys/malloc.h>
35 #include <sys/systm.h>
37 #include <sys/domain.h>
38 #include <sys/eventhandler.h>
39 #include <sys/kernel.h>
40 #include <sys/protosw.h>
42 #include <sys/sysctl.h>
44 #include <security/mac/mac_framework.h>
47 #include <vm/vm_extern.h>
48 #include <vm/vm_kern.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_map.h>
52 #include <vm/uma_int.h>
53 #include <vm/uma_dbg.h>
110 "Maximum real memory allocateable to various mbuf types");
125 realmem = qmin((quad_t)physmem * PAGE_SIZE,
126 vm_map_max(kmem_map) - vm_map_min(kmem_map));
128 TUNABLE_QUAD_FETCH(
"kern.ipc.maxmbufmem", &
maxmbufmem);
132 TUNABLE_INT_FETCH(
"kern.ipc.nmbclusters", &
nmbclusters);
136 TUNABLE_INT_FETCH(
"kern.ipc.nmbjumbop", &
nmbjumbop);
140 TUNABLE_INT_FETCH(
"kern.ipc.nmbjumbo9", &
nmbjumbo9);
144 TUNABLE_INT_FETCH(
"kern.ipc.nmbjumbo16", &
nmbjumbo16);
152 TUNABLE_INT_FETCH(
"kern.ipc.nmbufs", &
nmbufs);
162 int error, newnmbclusters;
166 if (error == 0 && req->newptr && newnmbclusters !=
nmbclusters) {
171 EVENTHANDLER_INVOKE(nmbclusters_change);
179 "Maximum number of mbuf clusters allowed");
184 int error, newnmbjumbop;
188 if (error == 0 && req->newptr && newnmbjumbop !=
nmbjumbop) {
200 "Maximum number of mbuf page size jumbo clusters allowed");
205 int error, newnmbjumbo9;
209 if (error == 0 && req->newptr && newnmbjumbo9 !=
nmbjumbo9) {
221 "Maximum number of mbuf 9k jumbo clusters allowed");
226 int error, newnmbjumbo16;
230 if (error == 0 && req->newptr && newnmbjumbo16 !=
nmbjumbo16) {
242 "Maximum number of mbuf 16k jumbo clusters allowed");
247 int error, newnmbufs;
251 if (error == 0 && req->newptr && newnmbufs !=
nmbufs) {
255 EVENTHANDLER_INVOKE(nmbufs_change);
263 "Maximum number of mbufs allowed");
266 "Mbuf general information and statistics");
/*
 * Compile-time check that MSIZE is a power of 2: for MSIZE == 2^k,
 * ((MSIZE-1) ^ MSIZE) == 2^(k+1)-1, so adding 1 and shifting right
 * yields MSIZE again only when exactly one bit is set.
 */
295 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
307 zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
310 trash_init, trash_fini,
314 MSIZE - 1, UMA_ZONE_MAXBUCKET);
318 zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
321 trash_init, trash_fini,
325 UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
333 zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
336 trash_init, trash_fini,
340 UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
344 zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
347 trash_init, trash_fini,
351 UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
356 zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
359 trash_init, trash_fini,
363 UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
371 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
380 EVENTHANDLER_REGISTER(vm_lowmem,
mb_reclaim, NULL,
381 EVENTHANDLER_PRI_FIRST);
392 mbstat.m_mclbytes = MCLBYTES;
393 mbstat.m_minclsize = MINCLSIZE;
396 mbstat.m_numtypes = MT_NTYPES;
415 *flags = UMA_SLAB_KERNEL;
416 return ((
void *)kmem_alloc_contig(kernel_map, bytes, wait,
417 (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
431 struct mb_args *args;
439 trash_ctor(mem, size, arg, how);
441 m = (
struct mbuf *)mem;
442 args = (
struct mb_args *)arg;
450 if (type == MT_NOINIT)
458 if (flags & M_PKTHDR) {
459 m->m_data = m->m_pktdat;
460 m->m_pkthdr.rcvif = NULL;
461 m->m_pkthdr.header = NULL;
463 m->m_pkthdr.csum_flags = 0;
464 m->m_pkthdr.csum_data = 0;
465 m->m_pkthdr.tso_segsz = 0;
466 m->m_pkthdr.ether_vtag = 0;
467 m->m_pkthdr.flowid = 0;
468 SLIST_INIT(&m->m_pkthdr.tags);
471 error = mac_mbuf_init(m, how);
476 m->m_data = m->m_dat;
489 m = (
struct mbuf *)mem;
490 flags = (
unsigned long)arg;
492 if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
494 KASSERT((m->m_flags & M_EXT) == 0, (
"%s: M_EXT set", __func__));
495 KASSERT((m->m_flags & M_NOFREE) == 0, (
"%s: M_NOFREE set", __func__));
497 trash_dtor(mem, size, arg);
509 m = (
struct mbuf *)mem;
510 if ((m->m_flags & M_PKTHDR) != 0)
514 KASSERT((m->m_flags & M_EXT) == M_EXT, (
"%s: M_EXT not set", __func__));
515 KASSERT(m->m_ext.ext_buf != NULL, (
"%s: ext_buf == NULL", __func__));
516 KASSERT(m->m_ext.ext_free == NULL, (
"%s: ext_free != NULL", __func__));
517 KASSERT(m->m_ext.ext_arg1 == NULL, (
"%s: ext_arg1 != NULL", __func__));
518 KASSERT(m->m_ext.ext_arg2 == NULL, (
"%s: ext_arg2 != NULL", __func__));
519 KASSERT(m->m_ext.ext_size == MCLBYTES, (
"%s: ext_size != MCLBYTES", __func__));
520 KASSERT(m->m_ext.ext_type == EXT_PACKET, (
"%s: ext_type != EXT_PACKET", __func__));
521 KASSERT(*m->m_ext.ref_cnt == 1, (
"%s: ref_cnt != 1", __func__));
523 trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
554 trash_ctor(mem, size, arg, how);
561 #if MJUMPAGESIZE != MCLBYTES
576 panic(
"unknown cluster size");
580 m = (
struct mbuf *)arg;
581 refcnt = uma_find_refcnt(zone, mem);
584 m->m_ext.ext_buf = (caddr_t)mem;
585 m->m_data = m->m_ext.ext_buf;
587 m->m_ext.ext_free = NULL;
588 m->m_ext.ext_arg1 = NULL;
589 m->m_ext.ext_arg2 = NULL;
590 m->m_ext.ext_size = size;
591 m->m_ext.ext_type =
type;
592 m->m_ext.ref_cnt = refcnt;
607 zone = m_getzone(size);
608 KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
609 (
"%s: refcnt incorrect %u", __func__,
610 *(uma_find_refcnt(zone, mem))) );
612 trash_dtor(mem, size, arg);
625 m = (
struct mbuf *)mem;
626 if (uma_zalloc_arg(
zone_clust, m, how) == NULL ||
627 m->m_ext.ext_buf == NULL)
629 m->m_ext.ext_type = EXT_PACKET;
631 trash_init(m->m_ext.ext_buf, MCLBYTES, how);
645 m = (
struct mbuf *)mem;
647 trash_fini(m->m_ext.ext_buf, MCLBYTES);
649 uma_zfree_arg(
zone_clust, m->m_ext.ext_buf, NULL);
651 trash_dtor(mem, size, NULL);
662 struct mb_args *args;
669 m = (
struct mbuf *)mem;
670 args = (
struct mb_args *)arg;
675 trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
679 m->m_data = m->m_ext.ext_buf;
681 m->m_flags = (flags | M_EXT);
684 if (flags & M_PKTHDR) {
685 m->m_pkthdr.rcvif = NULL;
687 m->m_pkthdr.header = NULL;
688 m->m_pkthdr.csum_flags = 0;
689 m->m_pkthdr.csum_data = 0;
690 m->m_pkthdr.tso_segsz = 0;
691 m->m_pkthdr.ether_vtag = 0;
692 m->m_pkthdr.flowid = 0;
693 SLIST_INIT(&m->m_pkthdr.tags);
696 error = mac_mbuf_init(m, how);
712 m->m_data = m->m_pktdat;
713 SLIST_INIT(&m->m_pkthdr.tags);
714 m->m_pkthdr.rcvif = NULL;
715 m->m_pkthdr.header = NULL;
717 m->m_pkthdr.flowid = 0;
718 m->m_pkthdr.csum_flags = 0;
719 m->m_pkthdr.csum_data = 0;
720 m->m_pkthdr.tso_segsz = 0;
721 m->m_pkthdr.ether_vtag = 0;
724 error = mac_mbuf_init(m, how);
745 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
748 for (dp =
domains; dp != NULL; dp = dp->dom_next)
749 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
750 if (pr->pr_drain != NULL)
static int mb_ctor_pack(void *, int, void *, int)
static void tunable_mbinit(void *dummy)
SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD,&mbstat, mbstat,"Mbuf general information and statistics")
uma_zone_t zone_ext_refcnt
CTASSERT((((MSIZE-1)^MSIZE)+1) >> 1==MSIZE)
static void mb_dtor_pack(void *, int, void *)
SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN,&maxmbufmem, 0,"Maximum real memory allocateable to various mbuf types")
void panic(const char *fmt,...)
static int sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
static void mb_dtor_mbuf(void *, int, void *)
static void mb_zfini_pack(void *, int)
static void mb_reclaim(void *)
static int mb_ctor_clust(void *, int, void *, int)
static int mb_ctor_mbuf(void *, int, void *, int)
int m_pkthdr_init(struct mbuf *m, int how)
int sysctl_handle_int(SYSCTL_HANDLER_ARGS)
static int sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
static void mbuf_init(void *dummy)
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,&nmbclusters, 0, sysctl_nmbclusters,"IU","Maximum number of mbuf clusters allowed")
static void * mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int)
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL)
static int sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
static int mb_zinit_pack(void *, int, int)
static void mb_dtor_clust(void *, int, void *)
static struct pollrec pr[POLL_LIST_LEN]
static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
void m_tag_delete_chain(struct mbuf *m, struct m_tag *t)