#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"
#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* Export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0,
    "Max number of CPUs that the system was compiled for.");

int smp_active;
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled;
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many cpus are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology;	/* 0 means use the topology the hardware reports */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
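/*
 * These variables are also the canonical way for kernel code to ask about
 * CPU availability.  A minimal sketch (hypothetical module code, not from
 * this file):
 *
 *	if (smp_started && mp_ncpus > 1) {
 *		... take the SMP path ...
 *	}
 *
 * From userland the same values are exported through sysctl(8) under the
 * kern.smp tree (e.g. "sysctl kern.smp.cpus").
 */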
#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");
/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];
/* Spin mutex serializing rendezvous and other IPI-based operations. */
struct mtx smp_ipi_mtx;
/* Let the MD SMP code initialize mp_maxid very early if it can. */
static void
mp_setmaxid(void *dummy)
{

	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
/* Call the MD SMP initialization code. */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("BSDSUniX/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING on this thread, so
	 * all we need to do is poke it if it is currently executing so
	 * that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
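/*
 * Usage sketch (illustrative, not from this file): the caller must hold
 * the thread lock and know the thread is on-CPU, e.g. in a signal-posting
 * path:
 *
 *	thread_lock(td);
 *	if (TD_IS_RUNNING(td))
 *		forward_signal(td);
 *	thread_unlock(td);
 */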
/*
 * Send the stop IPI of 'type' to all CPUs in 'map' and spin until they
 * have all acknowledged it by setting their bit in stopped_cpus.
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

	/* Only one CPU at a time may initiate a stop. */
	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* Send the stop IPI to all CPUs in the map. */
	ipi_selected(map, type);

	i = 0;
	while (!CPU_SUBSET(&stopped_cpus, &map)) {
		/* spin */
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}
int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}
int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}
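/*
 * Note: stop_cpus() and stop_cpus_hard() differ only in the IPI type.
 * IPI_STOP_HARD is typically delivered via NMI on platforms that support
 * it, so it can also stop CPUs that are spinning with interrupts disabled
 * (e.g. from the debugger or during a panic).
 */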
#if defined(__amd64__) || defined(__i386__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif
/*
 * Signal the set of stopped CPUs in 'map' to restart and spin until each
 * has cleared its bit in stopped_cpus.
 */
int
restart_cpus(cpuset_t map)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	/* Signal the other CPUs to restart. */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* Wait for each CPU to clear its bit. */
	while (CPU_OVERLAP(&stopped_cpus, &map))
		cpu_spinwait();

	return (1);
}
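/*
 * stop_cpus() and restart_cpus() are meant to be used as a bracketed pair.
 * A minimal sketch (the cpuset `other` is hypothetical; callers usually
 * stop every CPU except their own):
 *
 *	cpuset_t other = all_cpus;
 *
 *	CPU_CLR(PCPU_GET(cpuid), &other);
 *	stop_cpus(other);
 *	... examine or patch global state while the others are halted ...
 *	restart_cpus(stopped_cpus);
 */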
/*
 * All-CPU rendezvous: each CPU first waits for every other target CPU to
 * enter the handler, then runs the setup, action and teardown functions
 * with barriers between the phases, and finally signals completion.
 * smp_rv_waiters[0..3] implement those four barriers in order; [3] tells
 * the initiator that the global smp_rv_* parameters are free for reuse.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions from
	 * occurring during a rendezvous action routine.  td_owepreempt is
	 * deliberately ignored when the critical section is left, so it
	 * must not change while the handlers run (asserted below).
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup before
	 * moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (smp_rv_setup_func != NULL)
			smp_rv_setup_func(smp_rv_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, all CPUs wait here
		 * until every CPU has finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/* Signal that this CPU has completed the full rendezvous. */
	atomic_add_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}
void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case below. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal the other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map. */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other CPUs to
	 * finish the rendezvous, so that the smp_rv_* pseudo-structure
	 * and the arg are guaranteed to not be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
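/*
 * A minimal sketch of a targeted rendezvous (the handler name
 * do_invalidate and its purpose are hypothetical; it runs on every CPU
 * in the map, roughly in lock-step):
 *
 *	static void
 *	do_invalidate(void *arg)
 *	{
 *		...
 *	}
 *
 *	smp_rendezvous_cpus(all_cpus, smp_no_rendevous_barrier,
 *	    do_invalidate, smp_no_rendevous_barrier, NULL);
 */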
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
	    arg);
}
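/*
 * Note the asymmetry: a NULL setup_func or teardown_func still takes part
 * in the corresponding barrier, merely calling no function, while passing
 * smp_no_rendevous_barrier skips that barrier entirely (see
 * smp_rendezvous_action() above).
 */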
static struct cpu_group group[MAXCPU];
struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 among each package, private L2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, 2 dual-core parts on each package share L2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT. */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}

	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d", top,
		    top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)", top,
		    cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}
struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}
static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}
struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}
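/*
 * Worked example (hypothetical 4-CPU system): smp_topo_1level(CG_SHARE_L2,
 * 2, 0) computes packages = 4 / 2 = 2 and produces a root covering all
 * four CPUs with two CG_SHARE_L2 children, group[1] for CPUs 0-1 and
 * group[2] for CPUs 2-3.
 */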
struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
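/*
 * Worked example (hypothetical 8-CPU system): smp_topo_2level(CG_SHARE_L3,
 * 2, CG_SHARE_L1, 2, CG_FLAG_HTT) creates 8 / (2 * 2) = 2 CG_SHARE_L3
 * groups (group[1] for CPUs 0-3, group[2] for CPUs 4-7), each holding two
 * CG_SHARE_L1 leaves of two hardware threads built by smp_topo_addleaf().
 */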
struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
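/*
 * Usage sketch: find the leaf sharing domain of the current CPU (as a
 * scheduler might when looking for a nearby idle sibling):
 *
 *	struct cpu_group *cg;
 *
 *	cg = smp_topo_find(smp_topo(), PCPU_GET(cpuid));
 *	if (cg != NULL)
 *		... cg->cg_mask holds the CPUs sharing cg->cg_level ...
 */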
#else /* !SMP */

/*
 * In the UP case there is no other CPU to signal; just establish the same
 * initial conditions as the SMP case and run the handlers locally.
 */
void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}
void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	/* See the comments in smp_rendezvous_cpus() above. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}
/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{

	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */
void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started),
	    ("smp_no_rendevous_barrier called and smp is started"));
#endif
}
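/*
 * Example of intent (sketch; do_work is a hypothetical action handler):
 * a caller that needs no setup or teardown synchronization passes this
 * function for both, so that only the entry gather and the action run:
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, do_work,
 *	    smp_no_rendevous_barrier, NULL);
 */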