FreeBSD kernel kern code
subr_rman.c
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
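
/*
 * Illustrative sketch (not from the original file; the manager, region,
 * and device names are hypothetical): a bus driver typically consumes
 * this API roughly as follows.
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_start = 0;
 *	irq_rman.rm_end = 15;
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "example interrupt lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup");
 *
 *	-- later, on behalf of a child device:
 *	struct resource *res = rman_reserve_resource(&irq_rman, 5, 5, 1,
 *	    RF_SHAREABLE, child_dev);
 *	if (res != NULL) {
 *		rman_activate_resource(res);
 *		...
 *		rman_release_resource(res);
 *	}
 */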

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$BSDSUniX$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct device *r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
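
/*
 * struct resource is the public handle declared in <sys/rman.h>; its
 * __r_i member points back to the private struct resource_i above.
 * int_alloc_resource() links the two so that the rman_get_*() and
 * rman_set_*() accessors below can reach the internal state.
 */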

static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

/* do/while wrapper so the expansion is safe beside a dangling else */
#define DPRINTF(params) do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}
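
/*
 * Note on defaults, with a hypothetical manager for illustration: a
 * caller that zeroes its struct rman before rman_init() gets the full
 * index space, because a zero start/end pair is rewritten above to
 * [0, ~0ul]:
 *
 *	static struct rman any_rman;		-- hypothetical
 *
 *	any_rman.rm_type = RMAN_ARRAY;
 *	any_rman.rm_descr = "whole address space";
 *	error = rman_init(&any_rman);		-- now spans [0, ULONG_MAX]
 */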

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}

out:
	mtx_unlock(rm->rm_mtx);
	/* On failure the new region was never inserted; release it. */
	if (rv != 0)
		free(r, M_RMAN);
	return rv;
}
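
/*
 * Illustrative coalescing behavior (hypothetical values): after
 *
 *	rman_manage_region(rm, 0x000, 0x0ff);
 *	rman_manage_region(rm, 0x100, 0x1ff);
 *
 * the manager holds a single free region [0x000, 0x1ff], because the
 * second call merges with the exactly-adjacent first region; a third
 * call overlapping either range would fail with EBUSY.
 */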

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}
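
/*
 * Illustrative teardown (hypothetical manager): rman_fini() refuses to
 * tear down a manager that still has allocated regions, so release
 * every reservation first.
 *
 *	if (rman_fini(&irq_rman) == EBUSY)
 *		printf("irq_rman still in use\n");
 */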

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
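
/*
 * Illustrative use (hypothetical values): a resource currently at
 * [0x1000, 0x1fff] can be grown downward into an adjacent free region
 * with
 *
 *	error = rman_adjust_resource(res, 0x0c00, 0x1fff);
 *
 * which fails with EBUSY if [0x0c00, 0x0fff] is not entirely free, and
 * with EINVAL if res is shared or the new range misses the old one.
 */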

#define SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
    u_long count, u_long bound, u_int flags, struct device *dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	u_long rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= ULONG_MAX - amask,
	    ("start (%#lx) + amask (%#lx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
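
/*
 * Worked example of the alignment/boundary adjustment above, using
 * hypothetical numbers: with count = 0x20, an alignment mask
 * amask = 0xf, and bound = 0x100 (so bmask = ~0xff), a candidate
 * rstart = 0xf8 is first rounded up to 0x100 for alignment; since
 * [0x100, 0x11f] then lies within one 0x100 boundary window
 * ((0x100 ^ 0x11f) & bmask == 0), no further boundary bump is needed
 * and the allocation is placed there.
 */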

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
    u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
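
/*
 * Illustrative call (hypothetical manager and device): reserve any
 * free, 0x100-aligned, 0x100-byte range below 1MB, shareable with
 * other consumers:
 *
 *	res = rman_reserve_resource(&mem_rman, 0, 0xfffff, 0x100,
 *	    RF_SHAREABLE | rman_make_alignment_flags(0x100), dev);
 */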

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main list, appoint another sharer
		 * to take our place.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
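
/*
 * Worked examples: size 0x1000 has a single bit set, so the loop finds
 * i = 12 and the result requests 2^12-byte alignment; size 0x1800 has
 * two bits set, so i is bumped to 13 and the alignment rounds up to
 * 2^13.
 */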

void
rman_set_start(struct resource *r, u_long start)
{

	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{

	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{

	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input values: the bus data generation, the index of the
 * resource manager in the global list, and the index of the resource
 * within that manager (or -1 to return details about the manager itself).
 */
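
/*
 * Illustrative only: a userland consumer (devinfo(8) works roughly this
 * way) snapshots the current bus generation, then probes hw.bus.rman
 * with { generation, rman_idx, -1 } for each manager's struct u_rman,
 * followed by { generation, rman_idx, 0..n } for its struct u_resource
 * entries until ENOENT is returned.
 */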
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif
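
/*
 * Illustrative ddb(4) session (kernel built with DDB), based on the
 * commands defined above:
 *
 *	db> show rmans		-- headers for every manager
 *	db> show rman <addr>	-- one manager and its regions
 *	db> show all rman	-- everything (also "show allrman")
 */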