kern_racct.c
1 /*-
2  * Copyright (c) 2010 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Edward Tomasz Napierala under sponsorship
6  * from the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in the
15  * documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $BSDSUniX$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$BSDSUniX$");
34 
35 #include "opt_kdtrace.h"
36 #include "opt_sched.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/eventhandler.h>
41 #include <sys/jail.h>
42 #include <sys/kernel.h>
43 #include <sys/kthread.h>
44 #include <sys/lock.h>
45 #include <sys/loginclass.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/racct.h>
50 #include <sys/resourcevar.h>
51 #include <sys/sbuf.h>
52 #include <sys/sched.h>
53 #include <sys/sdt.h>
54 #include <sys/smp.h>
55 #include <sys/sx.h>
56 #include <sys/sysctl.h>
57 #include <sys/sysent.h>
58 #include <sys/sysproto.h>
59 #include <sys/umtx.h>
60 #include <machine/smp.h>
61 
62 #ifdef RCTL
63 #include <sys/rctl.h>
64 #endif
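
/*
 * The bulk of this file is compiled only for kernels built with the RACCT
 * option (see the #ifdef RACCT below); the rctl_*() enforcement hooks
 * additionally require the RCTL option. A minimal kernel configuration
 * fragment enabling both (the standard option names are assumed):
 *
 *        options RACCT
 *        options RCTL
 */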
65 
66 #ifdef RACCT
67 
68 FEATURE(racct, "Resource Accounting");
69 
70 /*
71  * Do not block processes that have their %cpu usage <= pcpu_threshold.
72  */
73 static int pcpu_threshold = 1;
74 
75 SYSCTL_NODE(_kern, OID_AUTO, racct, CTLFLAG_RW, 0, "Resource Accounting");
76 SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
77  0, "Processes with higher %cpu usage than this value can be throttled.");
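
/*
 * The threshold is exported read-write as the kern.racct.pcpu_threshold
 * sysctl, so it can be tuned at runtime, e.g.:
 *
 *        sysctl kern.racct.pcpu_threshold=5
 */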
78 
79 /*
80  * How many seconds to wait before using the scheduler %cpu calculations. When a
81  * process starts, we compute its %cpu usage by dividing its runtime by the
82  * process wall clock time. After RACCT_PCPU_SECS pass, we use the value
83  * provided by the scheduler.
84  */
85 #define RACCT_PCPU_SECS 3
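
/*
 * As a worked example of the early estimate (using the formula from
 * racctd() and racct_proc_exit() below): a process that has accumulated
 * 1 second of cpu time over 2 seconds of wall clock time gets
 * (1000000 * 1000000 * 100) / 2000000 = 50 * 1000000, i.e. 50% in the
 * "in millions" representation used for RACCT_PCTCPU.
 */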
86 
87 static struct mtx racct_lock;
88 MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);
89 
90 static uma_zone_t racct_zone;
91 
92 static void racct_sub_racct(struct racct *dest, const struct racct *src);
93 static void racct_sub_cred_locked(struct ucred *cred, int resource,
94  uint64_t amount);
95 static void racct_add_cred_locked(struct ucred *cred, int resource,
96  uint64_t amount);
97 
98 SDT_PROVIDER_DEFINE(racct);
99 SDT_PROBE_DEFINE3(racct, kernel, rusage, add, "struct proc *", "int",
100  "uint64_t");
101 SDT_PROBE_DEFINE3(racct, kernel, rusage, add__failure,
102  "struct proc *", "int", "uint64_t");
103 SDT_PROBE_DEFINE3(racct, kernel, rusage, add__cred, "struct ucred *",
104  "int", "uint64_t");
105 SDT_PROBE_DEFINE3(racct, kernel, rusage, add__force, "struct proc *",
106  "int", "uint64_t");
107 SDT_PROBE_DEFINE3(racct, kernel, rusage, set, "struct proc *", "int",
108  "uint64_t");
109 SDT_PROBE_DEFINE3(racct, kernel, rusage, set__failure,
110  "struct proc *", "int", "uint64_t");
111 SDT_PROBE_DEFINE3(racct, kernel, rusage, sub, "struct proc *", "int",
112  "uint64_t");
113 SDT_PROBE_DEFINE3(racct, kernel, rusage, sub__cred, "struct ucred *",
114  "int", "uint64_t");
115 SDT_PROBE_DEFINE1(racct, kernel, racct, create, "struct racct *");
116 SDT_PROBE_DEFINE1(racct, kernel, racct, destroy, "struct racct *");
117 SDT_PROBE_DEFINE2(racct, kernel, racct, join, "struct racct *",
118  "struct racct *");
119 SDT_PROBE_DEFINE2(racct, kernel, racct, join__failure,
120  "struct racct *", "struct racct *");
121 SDT_PROBE_DEFINE2(racct, kernel, racct, leave, "struct racct *",
122  "struct racct *");
123 
124 int racct_types[] = {
125  [RACCT_CPU] =
126  RACCT_IN_MILLIONS,
127  [RACCT_DATA] =
128  RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
129  [RACCT_STACK] =
130  RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
131  [RACCT_CORE] =
132  RACCT_DENIABLE,
133  [RACCT_RSS] =
134  RACCT_RECLAIMABLE,
135  [RACCT_MEMLOCK] =
136  RACCT_RECLAIMABLE | RACCT_DENIABLE,
137  [RACCT_NPROC] =
138  RACCT_RECLAIMABLE | RACCT_DENIABLE,
139  [RACCT_NOFILE] =
140  RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
141  [RACCT_VMEM] =
142  RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
143  [RACCT_NPTS] =
144  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
145  [RACCT_SWAP] =
146  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
147  [RACCT_NTHR] =
148  RACCT_RECLAIMABLE | RACCT_DENIABLE,
149  [RACCT_MSGQQUEUED] =
150  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
151  [RACCT_MSGQSIZE] =
152  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
153  [RACCT_NMSGQ] =
154  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
155  [RACCT_NSEM] =
156  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
157  [RACCT_NSEMOP] =
158  RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
159  [RACCT_NSHM] =
160  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
161  [RACCT_SHMSIZE] =
162  RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
163  [RACCT_WALLCLOCK] =
164  RACCT_IN_MILLIONS,
165  [RACCT_PCTCPU] =
166  RACCT_DECAYING | RACCT_DENIABLE | RACCT_IN_MILLIONS };
167 
168 static const fixpt_t RACCT_DECAY_FACTOR = 0.3 * FSCALE;
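
/*
 * For example, with RACCT_DECAY_FACTOR == 0.3 * FSCALE, decaying resources
 * keep 30% of their previous value on each pass of racct_decay() below:
 * a RACCT_PCTCPU reading of 90 * 1000000 decays to 27 * 1000000 after one
 * iteration with no new usage added.
 */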
169 
170 #ifdef SCHED_4BSD
171 /*
172  * Contains intermediate values for %cpu calculations to avoid using floating
173  * point in the kernel.
174  * ccpu_exp[k] = FSCALE * (ccpu/FSCALE)^k = FSCALE * exp(-k/20)
175  * It is needed only for the 4BSD scheduler, because in ULE ccpu equals
176  * zero, so the calculations are more straightforward.
177  */
178 fixpt_t ccpu_exp[] = {
179  [0] = FSCALE * 1,
180  [1] = FSCALE * 0.95122942450071400909,
181  [2] = FSCALE * 0.90483741803595957316,
182  [3] = FSCALE * 0.86070797642505780722,
183  [4] = FSCALE * 0.81873075307798185866,
184  [5] = FSCALE * 0.77880078307140486824,
185  [6] = FSCALE * 0.74081822068171786606,
186  [7] = FSCALE * 0.70468808971871343435,
187  [8] = FSCALE * 0.67032004603563930074,
188  [9] = FSCALE * 0.63762815162177329314,
189  [10] = FSCALE * 0.60653065971263342360,
190  [11] = FSCALE * 0.57694981038048669531,
191  [12] = FSCALE * 0.54881163609402643262,
192  [13] = FSCALE * 0.52204577676101604789,
193  [14] = FSCALE * 0.49658530379140951470,
194  [15] = FSCALE * 0.47236655274101470713,
195  [16] = FSCALE * 0.44932896411722159143,
196  [17] = FSCALE * 0.42741493194872666992,
197  [18] = FSCALE * 0.40656965974059911188,
198  [19] = FSCALE * 0.38674102345450120691,
199  [20] = FSCALE * 0.36787944117144232159,
200  [21] = FSCALE * 0.34993774911115535467,
201  [22] = FSCALE * 0.33287108369807955328,
202  [23] = FSCALE * 0.31663676937905321821,
203  [24] = FSCALE * 0.30119421191220209664,
204  [25] = FSCALE * 0.28650479686019010032,
205  [26] = FSCALE * 0.27253179303401260312,
206  [27] = FSCALE * 0.25924026064589150757,
207  [28] = FSCALE * 0.24659696394160647693,
208  [29] = FSCALE * 0.23457028809379765313,
209  [30] = FSCALE * 0.22313016014842982893,
210  [31] = FSCALE * 0.21224797382674305771,
211  [32] = FSCALE * 0.20189651799465540848,
212  [33] = FSCALE * 0.19204990862075411423,
213  [34] = FSCALE * 0.18268352405273465022,
214  [35] = FSCALE * 0.17377394345044512668,
215  [36] = FSCALE * 0.16529888822158653829,
216  [37] = FSCALE * 0.15723716631362761621,
217  [38] = FSCALE * 0.14956861922263505264,
218  [39] = FSCALE * 0.14227407158651357185,
219  [40] = FSCALE * 0.13533528323661269189,
220  [41] = FSCALE * 0.12873490358780421886,
221  [42] = FSCALE * 0.12245642825298191021,
222  [43] = FSCALE * 0.11648415777349695786,
223  [44] = FSCALE * 0.11080315836233388333,
224  [45] = FSCALE * 0.10539922456186433678,
225  [46] = FSCALE * 0.10025884372280373372,
226  [47] = FSCALE * 0.09536916221554961888,
227  [48] = FSCALE * 0.09071795328941250337,
228  [49] = FSCALE * 0.08629358649937051097,
229  [50] = FSCALE * 0.08208499862389879516,
230  [51] = FSCALE * 0.07808166600115315231,
231  [52] = FSCALE * 0.07427357821433388042,
232  [53] = FSCALE * 0.07065121306042958674,
233  [54] = FSCALE * 0.06720551273974976512,
234  [55] = FSCALE * 0.06392786120670757270,
235  [56] = FSCALE * 0.06081006262521796499,
236  [57] = FSCALE * 0.05784432087483846296,
237  [58] = FSCALE * 0.05502322005640722902,
238  [59] = FSCALE * 0.05233970594843239308,
239  [60] = FSCALE * 0.04978706836786394297,
240  [61] = FSCALE * 0.04735892439114092119,
241  [62] = FSCALE * 0.04504920239355780606,
242  [63] = FSCALE * 0.04285212686704017991,
243  [64] = FSCALE * 0.04076220397836621516,
244  [65] = FSCALE * 0.03877420783172200988,
245  [66] = FSCALE * 0.03688316740124000544,
246  [67] = FSCALE * 0.03508435410084502588,
247  [68] = FSCALE * 0.03337326996032607948,
248  [69] = FSCALE * 0.03174563637806794323,
249  [70] = FSCALE * 0.03019738342231850073,
250  [71] = FSCALE * 0.02872463965423942912,
251  [72] = FSCALE * 0.02732372244729256080,
252  [73] = FSCALE * 0.02599112877875534358,
253  [74] = FSCALE * 0.02472352647033939120,
254  [75] = FSCALE * 0.02351774585600910823,
255  [76] = FSCALE * 0.02237077185616559577,
256  [77] = FSCALE * 0.02127973643837716938,
257  [78] = FSCALE * 0.02024191144580438847,
258  [79] = FSCALE * 0.01925470177538692429,
259  [80] = FSCALE * 0.01831563888873418029,
260  [81] = FSCALE * 0.01742237463949351138,
261  [82] = FSCALE * 0.01657267540176124754,
262  [83] = FSCALE * 0.01576441648485449082,
263  [84] = FSCALE * 0.01499557682047770621,
264  [85] = FSCALE * 0.01426423390899925527,
265  [86] = FSCALE * 0.01356855901220093175,
266  [87] = FSCALE * 0.01290681258047986886,
267  [88] = FSCALE * 0.01227733990306844117,
268  [89] = FSCALE * 0.01167856697039544521,
269  [90] = FSCALE * 0.01110899653824230649,
270  [91] = FSCALE * 0.01056720438385265337,
271  [92] = FSCALE * 0.01005183574463358164,
272  [93] = FSCALE * 0.00956160193054350793,
273  [94] = FSCALE * 0.00909527710169581709,
274  [95] = FSCALE * 0.00865169520312063417,
275  [96] = FSCALE * 0.00822974704902002884,
276  [97] = FSCALE * 0.00782837754922577143,
277  [98] = FSCALE * 0.00744658307092434051,
278  [99] = FSCALE * 0.00708340892905212004,
279  [100] = FSCALE * 0.00673794699908546709,
280  [101] = FSCALE * 0.00640933344625638184,
281  [102] = FSCALE * 0.00609674656551563610,
282  [103] = FSCALE * 0.00579940472684214321,
283  [104] = FSCALE * 0.00551656442076077241,
284  [105] = FSCALE * 0.00524751839918138427,
285  [106] = FSCALE * 0.00499159390691021621,
286  [107] = FSCALE * 0.00474815099941147558,
287  [108] = FSCALE * 0.00451658094261266798,
288  [109] = FSCALE * 0.00429630469075234057,
289  [110] = FSCALE * 0.00408677143846406699,
290 };
291 #endif
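
/*
 * The table above follows directly from the formula in the comment; a
 * throwaway userland program along these lines regenerates it (FSHIFT and
 * FSCALE are assumed to match the definitions in <sys/param.h>):
 *
 *        #include <math.h>
 *        #include <stdio.h>
 *
 *        int
 *        main(void)
 *        {
 *                for (int k = 0; k <= 110; k++)
 *                        printf("\t[%d] = FSCALE * %.20f,\n", k, exp(-k / 20.0));
 *                return (0);
 *        }
 */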
292 
293 #define CCPU_EXP_MAX 110
294 
295 /*
296  * This function is analogous to the getpcpu() function in the ps(1) command.
297  * They should both calculate in the same way so that the racct %cpu
298  * calculations are consistent with the values shown by the ps(1) tool.
299  * The calculations are more complex in the 4BSD scheduler because of the value
300  * of the ccpu variable. In ULE it is defined to be zero, which saves us some
301  * work.
302  */
303 static uint64_t
304 racct_getpcpu(struct proc *p, u_int pcpu)
305 {
306  u_int swtime;
307 #ifdef SCHED_4BSD
308  fixpt_t pctcpu, pctcpu_next;
309 #endif
310 #ifdef SMP
311  struct pcpu *pc;
312  int found;
313 #endif
314  fixpt_t p_pctcpu;
315  struct thread *td;
316 
317  /*
318  * If the process is swapped out, we count its %cpu usage as zero.
319  * This behaviour is consistent with the userland ps(1) tool.
320  */
321  if ((p->p_flag & P_INMEM) == 0)
322  return (0);
323  swtime = (ticks - p->p_swtick) / hz;
324 
325  /*
326  * For short-lived processes, sched_pctcpu() returns small values
327  * even when they are cpu intensive. Therefore we use our own
328  * estimate in this case.
329  */
330  if (swtime < RACCT_PCPU_SECS)
331  return (pcpu);
332 
333  p_pctcpu = 0;
334  FOREACH_THREAD_IN_PROC(p, td) {
335  if (td == PCPU_GET(idlethread))
336  continue;
337 #ifdef SMP
338  found = 0;
339  STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
340  if (td == pc->pc_idlethread) {
341  found = 1;
342  break;
343  }
344  }
345  if (found)
346  continue;
347 #endif
348  thread_lock(td);
349 #ifdef SCHED_4BSD
350  pctcpu = sched_pctcpu(td);
351  /* Also count the still-unfinished second. */
352  pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
353  pctcpu_next += sched_pctcpu_delta(td);
354  p_pctcpu += max(pctcpu, pctcpu_next);
355 #else
356  /*
357  * In ULE the %cpu statistics are updated on every
358  * sched_pctcpu() call. So special calculations to
359  * account for the latest (unfinished) second are
360  * not needed.
361  */
362  p_pctcpu += sched_pctcpu(td);
363 #endif
364  thread_unlock(td);
365  }
366 
367 #ifdef SCHED_4BSD
368  if (swtime <= CCPU_EXP_MAX)
369  return ((100 * (uint64_t)p_pctcpu * 1000000) /
370  (FSCALE - ccpu_exp[swtime]));
371 #endif
372 
373  return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
374 }
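
/*
 * The return value is in the same "in millions" units as RACCT_PCTCPU:
 * e.g. a long-running, single-threaded process with p_pctcpu == FSCALE / 2
 * yields (100 * (FSCALE / 2) * 1000000) / FSCALE == 50 * 1000000, i.e. 50%.
 * The division by (FSCALE - ccpu_exp[swtime]) in the 4BSD case mirrors the
 * correction ps(1) applies for processes whose decaying average has not yet
 * had time to converge.
 */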
375 
376 static void
377 racct_add_racct(struct racct *dest, const struct racct *src)
378 {
379  int i;
380 
381  mtx_assert(&racct_lock, MA_OWNED);
382 
383  /*
384  * Update resource usage in dest.
385  */
386  for (i = 0; i <= RACCT_MAX; i++) {
387  KASSERT(dest->r_resources[i] >= 0,
388  ("%s: resource %d propagation meltdown: dest < 0",
389  __func__, i));
390  KASSERT(src->r_resources[i] >= 0,
391  ("%s: resource %d propagation meltdown: src < 0",
392  __func__, i));
393  dest->r_resources[i] += src->r_resources[i];
394  }
395 }
396 
397 static void
398 racct_sub_racct(struct racct *dest, const struct racct *src)
399 {
400  int i;
401 
402  mtx_assert(&racct_lock, MA_OWNED);
403 
404  /*
405  * Update resource usage in dest.
406  */
407  for (i = 0; i <= RACCT_MAX; i++) {
408  if (!RACCT_IS_SLOPPY(i) && !RACCT_IS_DECAYING(i)) {
409  KASSERT(dest->r_resources[i] >= 0,
410  ("%s: resource %d propagation meltdown: dest < 0",
411  __func__, i));
412  KASSERT(src->r_resources[i] >= 0,
413  ("%s: resource %d propagation meltdown: src < 0",
414  __func__, i));
415  KASSERT(src->r_resources[i] <= dest->r_resources[i],
416  ("%s: resource %d propagation meltdown: src > dest",
417  __func__, i));
418  }
419  if (RACCT_CAN_DROP(i)) {
420  dest->r_resources[i] -= src->r_resources[i];
421  if (dest->r_resources[i] < 0) {
422  KASSERT(RACCT_IS_SLOPPY(i) ||
423  RACCT_IS_DECAYING(i),
424  ("%s: resource %d usage < 0", __func__, i));
425  dest->r_resources[i] = 0;
426  }
427  }
428  }
429 }
430 
431 void
432 racct_create(struct racct **racctp)
433 {
434 
435  SDT_PROBE1(racct, kernel, racct, create, racctp);
436 
437  KASSERT(*racctp == NULL, ("racct already allocated"));
438 
439  *racctp = uma_zalloc(racct_zone, M_WAITOK | M_ZERO);
440 }
441 
442 static void
443 racct_destroy_locked(struct racct **racctp)
444 {
445  int i;
446  struct racct *racct;
447 
448  SDT_PROBE1(racct, kernel, racct, destroy, racctp);
449 
450  mtx_assert(&racct_lock, MA_OWNED);
451  KASSERT(racctp != NULL, ("NULL racctp"));
452  KASSERT(*racctp != NULL, ("NULL racct"));
453 
454  racct = *racctp;
455 
456  for (i = 0; i <= RACCT_MAX; i++) {
457  if (RACCT_IS_SLOPPY(i))
458  continue;
459  if (!RACCT_IS_RECLAIMABLE(i))
460  continue;
461  KASSERT(racct->r_resources[i] == 0,
462  ("destroying non-empty racct: "
463  "%ju allocated for resource %d\n",
464  racct->r_resources[i], i));
465  }
466  uma_zfree(racct_zone, racct);
467  *racctp = NULL;
468 }
469 
470 void
471 racct_destroy(struct racct **racct)
472 {
473 
474  mtx_lock(&racct_lock);
475  racct_destroy_locked(racct);
476  mtx_unlock(&racct_lock);
477 }
478 
479 /*
480  * Increase consumption of 'resource' by 'amount' for 'racct',
481  * but not its parents. Unlike in other cases, 'amount' here
482  * may be less than zero.
483  */
484 static void
485 racct_alloc_resource(struct racct *racct, int resource,
486  uint64_t amount)
487 {
488 
489  mtx_assert(&racct_lock, MA_OWNED);
490  KASSERT(racct != NULL, ("NULL racct"));
491 
492  racct->r_resources[resource] += amount;
493  if (racct->r_resources[resource] < 0) {
494  KASSERT(RACCT_IS_SLOPPY(resource) || RACCT_IS_DECAYING(resource),
495  ("%s: resource %d usage < 0", __func__, resource));
496  racct->r_resources[resource] = 0;
497  }
498 
499  /*
500  * There are some cases where the racct %cpu resource would grow
501  * beyond 100%.
502  * For example in racct_proc_exit() we add the process %cpu usage
503  * to the ucred racct containers. If too many processes terminated
504  * in a short time span, the ucred %cpu resource could grow too much.
505  * Also, the 4BSD scheduler sometimes returns more than 100% cpu
506  * usage for a thread. So we cap the value at 100% here.
507  */
508  if ((resource == RACCT_PCTCPU) &&
509  (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000))
510  racct->r_resources[RACCT_PCTCPU] = 100 * 1000000;
511 }
512 
513 static int
514 racct_add_locked(struct proc *p, int resource, uint64_t amount)
515 {
516 #ifdef RCTL
517  int error;
518 #endif
519 
520  SDT_PROBE3(racct, kernel, rusage, add, p, resource, amount);
521 
522  /*
523  * We need proc lock to dereference p->p_ucred.
524  */
525  PROC_LOCK_ASSERT(p, MA_OWNED);
526 
527 #ifdef RCTL
528  error = rctl_enforce(p, resource, amount);
529  if (error && RACCT_IS_DENIABLE(resource)) {
530  SDT_PROBE3(racct, kernel, rusage, add__failure, p, resource,
531  amount);
532  return (error);
533  }
534 #endif
535  racct_alloc_resource(p->p_racct, resource, amount);
536  racct_add_cred_locked(p->p_ucred, resource, amount);
537 
538  return (0);
539 }
540 
541 /*
542  * Increase allocation of 'resource' by 'amount' for process 'p'.
543  * Return 0 if it's below limits, or an errno value if it's not.
544  */
545 int
546 racct_add(struct proc *p, int resource, uint64_t amount)
547 {
548  int error;
549 
550  mtx_lock(&racct_lock);
551  error = racct_add_locked(p, resource, amount);
552  mtx_unlock(&racct_lock);
553  return (error);
554 }
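
/*
 * A minimal sketch of a caller (illustrative only; the resource and error
 * handling are not taken from any particular subsystem). The proc lock must
 * be held, as asserted in racct_add_locked():
 *
 *        PROC_LOCK(p);
 *        error = racct_add(p, RACCT_NOFILE, 1);
 *        PROC_UNLOCK(p);
 *        if (error != 0)
 *                return (error);
 */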
555 
556 static void
557 racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount)
558 {
559  struct prison *pr;
560 
561  SDT_PROBE3(racct, kernel, rusage, add__cred, cred, resource, amount);
562 
563  racct_alloc_resource(cred->cr_ruidinfo->ui_racct, resource, amount);
564  for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
565  racct_alloc_resource(pr->pr_prison_racct->prr_racct, resource,
566  amount);
567  racct_alloc_resource(cred->cr_loginclass->lc_racct, resource, amount);
568 }
569 
570 /*
571  * Increase allocation of 'resource' by 'amount' for credential 'cred'.
572  * Doesn't check for limits and never fails.
573  *
574  * XXX: Shouldn't this ever return an error?
575  */
576 void
577 racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
578 {
579 
580  mtx_lock(&racct_lock);
581  racct_add_cred_locked(cred, resource, amount);
582  mtx_unlock(&racct_lock);
583 }
584 
585 /*
586  * Increase allocation of 'resource' by 'amount' for process 'p'.
587  * Doesn't check for limits and never fails.
588  */
589 void
590 racct_add_force(struct proc *p, int resource, uint64_t amount)
591 {
592 
593  SDT_PROBE3(racct, kernel, rusage, add__force, p, resource, amount);
594 
595  /*
596  * We need proc lock to dereference p->p_ucred.
597  */
598  PROC_LOCK_ASSERT(p, MA_OWNED);
599 
600  mtx_lock(&racct_lock);
601  racct_alloc_resource(p->p_racct, resource, amount);
602  mtx_unlock(&racct_lock);
603  racct_add_cred(p->p_ucred, resource, amount);
604 }
605 
606 static int
607 racct_set_locked(struct proc *p, int resource, uint64_t amount)
608 {
609  int64_t old_amount, decayed_amount;
610  int64_t diff_proc, diff_cred;
611 #ifdef RCTL
612  int error;
613 #endif
614 
615  SDT_PROBE3(racct, kernel, rusage, set, p, resource, amount);
616 
617  /*
618  * We need proc lock to dereference p->p_ucred.
619  */
620  PROC_LOCK_ASSERT(p, MA_OWNED);
621 
622  old_amount = p->p_racct->r_resources[resource];
623  /*
624  * The diffs may be negative.
625  */
626  diff_proc = amount - old_amount;
627  if (RACCT_IS_DECAYING(resource)) {
628  /*
629  * Resources in per-credential racct containers may decay.
630  * If this is the case, we need to calculate the difference
631  * between the new amount and the proportional value of the
632  * old amount that has decayed in the ucred racct containers.
633  */
634  decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
635  diff_cred = amount - decayed_amount;
636  } else
637  diff_cred = diff_proc;
638 #ifdef notyet
639  KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
640  ("%s: usage of non-droppable resource %d dropping", __func__,
641  resource));
642 #endif
643 #ifdef RCTL
644  if (diff_proc > 0) {
645  error = rctl_enforce(p, resource, diff_proc);
646  if (error && RACCT_IS_DENIABLE(resource)) {
647  SDT_PROBE3(racct, kernel, rusage, set__failure, p,
648  resource, amount);
649  return (error);
650  }
651  }
652 #endif
653  racct_alloc_resource(p->p_racct, resource, diff_proc);
654  if (diff_cred > 0)
655  racct_add_cred_locked(p->p_ucred, resource, diff_cred);
656  else if (diff_cred < 0)
657  racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);
658 
659  return (0);
660 }
661 
662 /*
663  * Set allocation of 'resource' to 'amount' for process 'p'.
664  * Return 0 if it's below limits, or an errno value if it's not.
665  *
666  * Note that decreasing the allocation always returns 0,
667  * even if it's above the limit.
668  */
669 int
670 racct_set(struct proc *p, int resource, uint64_t amount)
671 {
672  int error;
673 
674  mtx_lock(&racct_lock);
675  error = racct_set_locked(p, resource, amount);
676  mtx_unlock(&racct_lock);
677  return (error);
678 }
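
/*
 * A sketch of typical usage for a "level" style resource (illustrative,
 * not copied from any specific caller; new_size is a placeholder): after a
 * change to a process's address space, its RACCT_VMEM usage is set to the
 * new total rather than adjusted by a delta:
 *
 *        PROC_LOCK(p);
 *        error = racct_set(p, RACCT_VMEM, new_size);
 *        PROC_UNLOCK(p);
 */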
679 
680 static void
681 racct_set_force_locked(struct proc *p, int resource, uint64_t amount)
682 {
683  int64_t old_amount, decayed_amount;
684  int64_t diff_proc, diff_cred;
685 
686  SDT_PROBE3(racct, kernel, rusage, set, p, resource, amount);
687 
688  /*
689  * We need proc lock to dereference p->p_ucred.
690  */
691  PROC_LOCK_ASSERT(p, MA_OWNED);
692 
693  old_amount = p->p_racct->r_resources[resource];
694  /*
695  * The diffs may be negative.
696  */
697  diff_proc = amount - old_amount;
698  if (RACCT_IS_DECAYING(resource)) {
699  /*
700  * Resources in per-credential racct containers may decay.
701  * If this is the case, we need to calculate the difference
702  * between the new amount and the proportional value of the
703  * old amount that has decayed in the ucred racct containers.
704  */
705  decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
706  diff_cred = amount - decayed_amount;
707  } else
708  diff_cred = diff_proc;
709 
710  racct_alloc_resource(p->p_racct, resource, diff_proc);
711  if (diff_cred > 0)
712  racct_add_cred_locked(p->p_ucred, resource, diff_cred);
713  else if (diff_cred < 0)
714  racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);
715 }
716 
717 void
718 racct_set_force(struct proc *p, int resource, uint64_t amount)
719 {
720  mtx_lock(&racct_lock);
721  racct_set_force_locked(p, resource, amount);
722  mtx_unlock(&racct_lock);
723 }
724 
725 /*
726  * Returns the amount of 'resource' the process 'p' can keep allocated.
727  * Allocating more than that would be denied, unless the resource
728  * is marked undeniable. The amount of resource already allocated
729  * does not matter.
730  */
731 uint64_t
732 racct_get_limit(struct proc *p, int resource)
733 {
734 
735 #ifdef RCTL
736  return (rctl_get_limit(p, resource));
737 #else
738  return (UINT64_MAX);
739 #endif
740 }
741 
742 /*
743  * Returns the amount of 'resource' the process 'p' can keep allocated.
744  * Allocating more than that would be denied, unless the resource
745  * is marked undeniable. The amount of resource already allocated
746  * does matter.
747  */
748 uint64_t
749 racct_get_available(struct proc *p, int resource)
750 {
751 
752 #ifdef RCTL
753  return (rctl_get_available(p, resource));
754 #else
755  return (UINT64_MAX);
756 #endif
757 }
758 
759 /*
760  * Returns the amount of the %cpu resource that process 'p' can add to its %cpu
761  * utilization. Adding more than that would lead to the process being
762  * throttled.
763  */
764 static int64_t
765 racct_pcpu_available(struct proc *p)
766 {
767 
768 #ifdef RCTL
769  return (rctl_pcpu_available(p));
770 #else
771  return (INT64_MAX);
772 #endif
773 }
774 
775 /*
776  * Decrease allocation of 'resource' by 'amount' for process 'p'.
777  */
778 void
779 racct_sub(struct proc *p, int resource, uint64_t amount)
780 {
781 
782  SDT_PROBE3(racct, kernel, rusage, sub, p, resource, amount);
783 
784  /*
785  * We need proc lock to dereference p->p_ucred.
786  */
787  PROC_LOCK_ASSERT(p, MA_OWNED);
788  KASSERT(RACCT_CAN_DROP(resource),
789  ("%s: called for non-droppable resource %d", __func__, resource));
790 
791  mtx_lock(&racct_lock);
792  KASSERT(amount <= p->p_racct->r_resources[resource],
793  ("%s: freeing %ju of resource %d, which is more "
794  "than allocated %jd for %s (pid %d)", __func__, amount, resource,
795  (intmax_t)p->p_racct->r_resources[resource], p->p_comm, p->p_pid));
796 
797  racct_alloc_resource(p->p_racct, resource, -amount);
798  racct_sub_cred_locked(p->p_ucred, resource, amount);
799  mtx_unlock(&racct_lock);
800 }
801 
802 static void
803 racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount)
804 {
805  struct prison *pr;
806 
807  SDT_PROBE3(racct, kernel, rusage, sub__cred, cred, resource, amount);
808 
809 #ifdef notyet
810  KASSERT(RACCT_CAN_DROP(resource),
811  ("%s: called for resource %d which can not drop", __func__,
812  resource));
813 #endif
814 
815  racct_alloc_resource(cred->cr_ruidinfo->ui_racct, resource, -amount);
816  for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
817  racct_alloc_resource(pr->pr_prison_racct->prr_racct, resource,
818  -amount);
819  racct_alloc_resource(cred->cr_loginclass->lc_racct, resource, -amount);
820 }
821 
822 /*
823  * Decrease allocation of 'resource' by 'amount' for credential 'cred'.
824  */
825 void
826 racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
827 {
828 
829  mtx_lock(&racct_lock);
830  racct_sub_cred_locked(cred, resource, amount);
831  mtx_unlock(&racct_lock);
832 }
833 
834 /*
835  * Inherit resource usage information from the parent process.
836  */
837 int
838 racct_proc_fork(struct proc *parent, struct proc *child)
839 {
840  int i, error = 0;
841 
842  /*
843  * Create racct for the child process.
844  */
845  racct_create(&child->p_racct);
846 
847  PROC_LOCK(parent);
848  PROC_LOCK(child);
849  mtx_lock(&racct_lock);
850 
851 #ifdef RCTL
852  error = rctl_proc_fork(parent, child);
853  if (error != 0)
854  goto out;
855 #endif
856 
857  /* Init process cpu time. */
858  child->p_prev_runtime = 0;
859  child->p_throttled = 0;
860 
861  /*
862  * Inherit resource usage.
863  */
864  for (i = 0; i <= RACCT_MAX; i++) {
865  if (parent->p_racct->r_resources[i] == 0 ||
866  !RACCT_IS_INHERITABLE(i))
867  continue;
868 
869  error = racct_set_locked(child, i,
870  parent->p_racct->r_resources[i]);
871  if (error != 0)
872  goto out;
873  }
874 
875  error = racct_add_locked(child, RACCT_NPROC, 1);
876  error += racct_add_locked(child, RACCT_NTHR, 1);
877 
878 out:
879  mtx_unlock(&racct_lock);
880  PROC_UNLOCK(child);
881  PROC_UNLOCK(parent);
882 
883  if (error != 0)
884  racct_proc_exit(child);
885 
886  return (error);
887 }
888 
889 /*
890  * Called at the end of fork1(), to handle rules that require the process
891  * to be fully initialized.
892  */
893 void
894 racct_proc_fork_done(struct proc *child)
895 {
896 
897 #ifdef RCTL
898  PROC_LOCK(child);
899  mtx_lock(&racct_lock);
900  rctl_enforce(child, RACCT_NPROC, 0);
901  rctl_enforce(child, RACCT_NTHR, 0);
902  mtx_unlock(&racct_lock);
903  PROC_UNLOCK(child);
904 #endif
905 }
906 
907 void
908 racct_proc_exit(struct proc *p)
909 {
910  int i;
911  uint64_t runtime;
912  struct timeval wallclock;
913  uint64_t pct_estimate, pct;
914 
915  PROC_LOCK(p);
916  /*
917  * We don't need to calculate rux, proc_reap() has already done this.
918  */
919  runtime = cputick2usec(p->p_rux.rux_runtime);
920 #ifdef notyet
921  KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
922 #else
923  if (runtime < p->p_prev_runtime)
924  runtime = p->p_prev_runtime;
925 #endif
926  microuptime(&wallclock);
927  timevalsub(&wallclock, &p->p_stats->p_start);
928  if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
929  pct_estimate = (1000000 * runtime * 100) /
930  ((uint64_t)wallclock.tv_sec * 1000000 +
931  wallclock.tv_usec);
932  } else
933  pct_estimate = 0;
934  pct = racct_getpcpu(p, pct_estimate);
935 
936  mtx_lock(&racct_lock);
937  racct_set_locked(p, RACCT_CPU, runtime);
938  racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);
939 
940  for (i = 0; i <= RACCT_MAX; i++) {
941  if (p->p_racct->r_resources[i] == 0)
942  continue;
943  if (!RACCT_IS_RECLAIMABLE(i))
944  continue;
945  racct_set_locked(p, i, 0);
946  }
947 
948  mtx_unlock(&racct_lock);
949  PROC_UNLOCK(p);
950 
951 #ifdef RCTL
952  rctl_racct_release(p->p_racct);
953 #endif
954  racct_destroy(&p->p_racct);
955 }
956 
957 /*
958  * Called after credentials change, to move resource utilisation
959  * between raccts.
960  */
961 void
962 racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
963  struct ucred *newcred)
964 {
965  struct uidinfo *olduip, *newuip;
966  struct loginclass *oldlc, *newlc;
967  struct prison *oldpr, *newpr, *pr;
968 
969  PROC_LOCK_ASSERT(p, MA_NOTOWNED);
970 
971  newuip = newcred->cr_ruidinfo;
972  olduip = oldcred->cr_ruidinfo;
973  newlc = newcred->cr_loginclass;
974  oldlc = oldcred->cr_loginclass;
975  newpr = newcred->cr_prison;
976  oldpr = oldcred->cr_prison;
977 
978  mtx_lock(&racct_lock);
979  if (newuip != olduip) {
980  racct_sub_racct(olduip->ui_racct, p->p_racct);
981  racct_add_racct(newuip->ui_racct, p->p_racct);
982  }
983  if (newlc != oldlc) {
984  racct_sub_racct(oldlc->lc_racct, p->p_racct);
985  racct_add_racct(newlc->lc_racct, p->p_racct);
986  }
987  if (newpr != oldpr) {
988  for (pr = oldpr; pr != NULL; pr = pr->pr_parent)
989  racct_sub_racct(pr->pr_prison_racct->prr_racct,
990  p->p_racct);
991  for (pr = newpr; pr != NULL; pr = pr->pr_parent)
992  racct_add_racct(pr->pr_prison_racct->prr_racct,
993  p->p_racct);
994  }
995  mtx_unlock(&racct_lock);
996 
997 #ifdef RCTL
998  rctl_proc_ucred_changed(p, newcred);
999 #endif
1000 }
1001 
1002 void
1003 racct_move(struct racct *dest, struct racct *src)
1004 {
1005 
1006  mtx_lock(&racct_lock);
1007 
1008  racct_add_racct(dest, src);
1009  racct_sub_racct(src, src);
1010 
1011  mtx_unlock(&racct_lock);
1012 }
1013 
1014 static void
1015 racct_proc_throttle(struct proc *p)
1016 {
1017  struct thread *td;
1018 #ifdef SMP
1019  int cpuid;
1020 #endif
1021 
1022  PROC_LOCK_ASSERT(p, MA_OWNED);
1023 
1024  /*
1025  * Do not block kernel processes. Also do not block processes with
1026  * low %cpu utilization to improve interactivity.
1027  */
1028  if (((p->p_flag & (P_SYSTEM | P_KTHREAD)) != 0) ||
1029  (p->p_racct->r_resources[RACCT_PCTCPU] <= pcpu_threshold))
1030  return;
1031  p->p_throttled = 1;
1032 
1033  FOREACH_THREAD_IN_PROC(p, td) {
1034  thread_lock(td);
1035  switch (td->td_state) {
1036  case TDS_RUNQ:
1037  /*
1038  * If the thread is on the scheduler run-queue, we cannot
1039  * just remove it from there. So we set the flag
1040  * TDF_NEEDRESCHED for the thread, so that once it is
1041  * running, it is taken off the cpu as soon as possible.
1042  */
1043  td->td_flags |= TDF_NEEDRESCHED;
1044  break;
1045  case TDS_RUNNING:
1046  /*
1047  * If the thread is running, we request a context
1048  * switch for it by setting the TDF_NEEDRESCHED flag.
1049  */
1050  td->td_flags |= TDF_NEEDRESCHED;
1051 #ifdef SMP
1052  cpuid = td->td_oncpu;
1053  if ((cpuid != NOCPU) && (td != curthread))
1054  ipi_cpu(cpuid, IPI_AST);
1055 #endif
1056  break;
1057  default:
1058  break;
1059  }
1060  thread_unlock(td);
1061  }
1062 }
1063 
1064 static void
1065 racct_proc_wakeup(struct proc *p)
1066 {
1067  PROC_LOCK_ASSERT(p, MA_OWNED);
1068 
1069  if (p->p_throttled) {
1070  p->p_throttled = 0;
1071  wakeup(p->p_racct);
1072  }
1073 }
1074 
1075 static void
1076 racct_decay_resource(struct racct *racct, void *res, void *dummy)
1077 {
1078  int resource;
1079  int64_t r_old, r_new;
1080 
1081  resource = *(int *)res;
1082  r_old = racct->r_resources[resource];
1083 
1084  /* If there is nothing to decay, just exit. */
1085  if (r_old <= 0)
1086  return;
1087 
1088  mtx_lock(&racct_lock);
1089  r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
1090  racct->r_resources[resource] = r_new;
1091  mtx_unlock(&racct_lock);
1092 }
1093 
1094 static void
1095 racct_decay(int resource)
1096 {
1097  ui_racct_foreach(racct_decay_resource, &resource, NULL);
1098  loginclass_racct_foreach(racct_decay_resource, &resource, NULL);
1099  prison_racct_foreach(racct_decay_resource, &resource, NULL);
1100 }
1101 
1102 static void
1103 racctd(void)
1104 {
1105  struct thread *td;
1106  struct proc *p;
1107  struct timeval wallclock;
1108  uint64_t runtime;
1109  uint64_t pct, pct_estimate;
1110 
1111  for (;;) {
1112  racct_decay(RACCT_PCTCPU);
1113 
1114  sx_slock(&allproc_lock);
1115 
1116  LIST_FOREACH(p, &zombproc, p_list) {
1117  PROC_LOCK(p);
1118  racct_set(p, RACCT_PCTCPU, 0);
1119  PROC_UNLOCK(p);
1120  }
1121 
1122  FOREACH_PROC_IN_SYSTEM(p) {
1123  PROC_LOCK(p);
1124  if (p->p_state != PRS_NORMAL) {
1125  PROC_UNLOCK(p);
1126  continue;
1127  }
1128 
1129  microuptime(&wallclock);
1130  timevalsub(&wallclock, &p->p_stats->p_start);
1131  PROC_SLOCK(p);
1132  FOREACH_THREAD_IN_PROC(p, td)
1133  ruxagg(p, td);
1134  runtime = cputick2usec(p->p_rux.rux_runtime);
1135  PROC_SUNLOCK(p);
1136 #ifdef notyet
1137  KASSERT(runtime >= p->p_prev_runtime,
1138  ("runtime < p_prev_runtime"));
1139 #else
1140  if (runtime < p->p_prev_runtime)
1141  runtime = p->p_prev_runtime;
1142 #endif
1143  p->p_prev_runtime = runtime;
1144  if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
1145  pct_estimate = (1000000 * runtime * 100) /
1146  ((uint64_t)wallclock.tv_sec * 1000000 +
1147  wallclock.tv_usec);
1148  } else
1149  pct_estimate = 0;
1150  pct = racct_getpcpu(p, pct_estimate);
1151  mtx_lock(&racct_lock);
1152  racct_set_force_locked(p, RACCT_PCTCPU, pct);
1153  racct_set_locked(p, RACCT_CPU, runtime);
1154  racct_set_locked(p, RACCT_WALLCLOCK,
1155  (uint64_t)wallclock.tv_sec * 1000000 +
1156  wallclock.tv_usec);
1157  mtx_unlock(&racct_lock);
1158  PROC_UNLOCK(p);
1159  }
1160 
1161  /*
1162  * To ensure that processes are throttled in a fair way, we need
1163  * to iterate over all processes again and check the %cpu resource
1164  * limits only after the ucred racct containers have been
1165  * properly filled.
1166  */
1167  FOREACH_PROC_IN_SYSTEM(p) {
1168  PROC_LOCK(p);
1169  if (p->p_state != PRS_NORMAL) {
1170  PROC_UNLOCK(p);
1171  continue;
1172  }
1173 
1174  if (racct_pcpu_available(p) <= 0)
1175  racct_proc_throttle(p);
1176  else if (p->p_throttled)
1177  racct_proc_wakeup(p);
1178  PROC_UNLOCK(p);
1179  }
1180  sx_sunlock(&allproc_lock);
1181  pause("-", hz);
1182  }
1183 }
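
/*
 * Because each iteration ends with pause("-", hz), the loop above runs
 * roughly once per second: %cpu, cpu time and wall clock usage are
 * refreshed, RACCT_PCTCPU decays by RACCT_DECAY_FACTOR, and throttling
 * decisions are re-evaluated on that cadence.
 */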
1184 
1185 static struct kproc_desc racctd_kp = {
1186  "racctd",
1187  racctd,
1188  NULL
1189 };
1190 SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, kproc_start, &racctd_kp);
1191 
1192 static void
1193 racct_init(void)
1194 {
1195 
1196  racct_zone = uma_zcreate("racct", sizeof(struct racct),
1197  NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
1198  /*
1199  * XXX: Move this somewhere.
1200  */
1201  prison0.pr_prison_racct = prison_racct_find("0");
1202 }
1203 SYSINIT(racct, SI_SUB_RACCT, SI_ORDER_FIRST, racct_init, NULL);
1204 
1205 #else /* !RACCT */
1206 
1207 int
1208 racct_add(struct proc *p, int resource, uint64_t amount)
1209 {
1210 
1211  return (0);
1212 }
1213 
1214 void
1215 racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
1216 {
1217 }
1218 
1219 void
1220 racct_add_force(struct proc *p, int resource, uint64_t amount)
1221 {
1222 
1223  return;
1224 }
1225 
1226 int
1227 racct_set(struct proc *p, int resource, uint64_t amount)
1228 {
1229 
1230  return (0);
1231 }
1232 
1233 void
1234 racct_set_force(struct proc *p, int resource, uint64_t amount)
1235 {
1236 }
1237 
1238 void
1239 racct_sub(struct proc *p, int resource, uint64_t amount)
1240 {
1241 }
1242 
1243 void
1244 racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
1245 {
1246 }
1247 
1248 uint64_t
1249 racct_get_limit(struct proc *p, int resource)
1250 {
1251 
1252  return (UINT64_MAX);
1253 }
1254 
1255 uint64_t
1256 racct_get_available(struct proc *p, int resource)
1257 {
1258 
1259  return (UINT64_MAX);
1260 }
1261 
1262 void
1263 racct_create(struct racct **racctp)
1264 {
1265 }
1266 
1267 void
1268 racct_destroy(struct racct **racctp)
1269 {
1270 }
1271 
1272 int
1273 racct_proc_fork(struct proc *parent, struct proc *child)
1274 {
1275 
1276  return (0);
1277 }
1278 
1279 void
1280 racct_proc_fork_done(struct proc *child)
1281 {
1282 }
1283 
1284 void
1285 racct_proc_exit(struct proc *p)
1286 {
1287 }
1288 
1289 #endif /* !RACCT */