1 | /* $NetBSD: subr_extent.c,v 1.87 2017/12/31 09:25:19 skrll Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 1996, 1998, 2007 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe and Matthias Drochner. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /* |
33 | * General purpose extent manager. |
34 | */ |
35 | |
36 | #include <sys/cdefs.h> |
37 | __KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.87 2017/12/31 09:25:19 skrll Exp $"); |
38 | |
39 | #ifdef _KERNEL |
40 | #ifdef _KERNEL_OPT |
41 | #include "opt_lockdebug.h" |
42 | #endif |
43 | |
44 | #include <sys/param.h> |
45 | #include <sys/extent.h> |
46 | #include <sys/kmem.h> |
47 | #include <sys/pool.h> |
48 | #include <sys/time.h> |
49 | #include <sys/systm.h> |
50 | #include <sys/proc.h> |
51 | |
52 | #include <uvm/uvm_extern.h> |
53 | |
54 | #elif defined(_EXTENT_TESTING) |
55 | |
56 | /* |
57 | * User-land definitions, so this code can be built into a testing harness. |
58 | */ |
59 | #include <sys/param.h> |
60 | #include <sys/pool.h> |
61 | #include <sys/extent.h> |
62 | |
63 | #include <errno.h> |
64 | #include <stdlib.h> |
65 | #include <stdio.h> |
66 | #include <string.h> |
67 | |
68 | static inline void no_op(void) { return; } |
69 | |
70 | /* |
71 | * Use multi-line #defines to avoid screwing up the kernel tags file; |
72 | * without this, ctags produces a tags file where panic() shows up |
73 | * in subr_extent.c rather than subr_prf.c. |
74 | */ |
75 | #define \ |
76 | kmem_alloc(s, flags) malloc(s) |
77 | #define \ |
78 | kmem_free(p, s) free(p) |
79 | #define \ |
80 | cv_wait_sig(cv, lock) (EWOULDBLOCK) |
81 | #define \ |
82 | pool_get(pool, flags) kmem_alloc((pool)->pr_size,0) |
83 | #define \ |
84 | pool_put(pool, rp) kmem_free(rp,0) |
85 | #define \ |
86 | panic(a ...) printf(a) |
87 | #define mutex_init(a, b, c) no_op() |
88 | #define mutex_destroy(a) no_op() |
89 | #define mutex_enter(l) no_op() |
90 | #define mutex_exit(l) no_op() |
91 | #define cv_wait(cv, lock) no_op() |
92 | #define cv_broadcast(cv) no_op() |
93 | #define cv_init(a, b) no_op() |
94 | #define cv_destroy(a) no_op() |
95 | #define KMEM_IS_RUNNING (1) |
96 | #define IPL_VM (0) |
97 | #define MUTEX_DEFAULT (0) |
98 | #define KASSERT(exp) |
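| |
| /* |
| * The stubs above map kernel memory allocation to malloc(3)/free(3) and |
| * reduce locking, waiting and assertions to no-ops (cv_wait_sig() simply |
| * evaluates to EWOULDBLOCK), so the allocator logic below can be exercised |
| * single-threaded from a user-land test program. |
| */ |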
99 | #endif |
100 | |
101 | static struct pool expool; |
102 | |
103 | /* |
104 | * Macro to align to an arbitrary power-of-two boundary. |
105 | */ |
106 | #define EXTENT_ALIGN(_start, _align, _skew) \ |
107 | (((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew)) |
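| |
| /* |
| * For illustration: with "_align" a power of two, the macro rounds |
| * (_start - _skew) up to the next multiple of _align and adds the skew |
| * back, so the result is aligned to _align as measured from _skew rather |
| * than from zero.  For example: |
| * |
| *     EXTENT_ALIGN(0x1003, 0x100, 0x00) == 0x1100 |
| *     EXTENT_ALIGN(0x1013, 0x100, 0x10) == 0x1110 |
| */ |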
108 | |
109 | /* |
110 | * Create the extent_region pool. |
111 | */ |
112 | void |
113 | extent_init(void) |
114 | { |
115 | |
116 | #if defined(_KERNEL) |
117 | pool_init(&expool, sizeof(struct extent_region), 0, 0, 0, |
118 | "extent" , NULL, IPL_VM); |
119 | #else |
120 | expool.pr_size = sizeof(struct extent_region); |
121 | #endif |
122 | } |
123 | |
124 | /* |
125 | * Allocate an extent region descriptor. EXTENT MUST NOT BE LOCKED. |
126 | * We will handle any locking we may need. |
127 | */ |
128 | static struct extent_region * |
129 | extent_alloc_region_descriptor(struct extent *ex, int flags) |
130 | { |
131 | struct extent_region *rp; |
132 | int error; |
133 | |
134 | if (ex->ex_flags & EXF_FIXED) { |
135 | struct extent_fixed *fex = (struct extent_fixed *)ex; |
136 | |
137 | if (!(ex->ex_flags & EXF_EARLY)) |
138 | mutex_enter(&ex->ex_lock); |
139 | for (;;) { |
140 | if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) { |
141 | /* |
142 | * Don't muck with flags after pulling it off |
143 | * the freelist; it may have been dynamically |
144 | * allocated, and kindly given to us. We |
145 | * need to remember that information. |
146 | */ |
147 | LIST_REMOVE(rp, er_link); |
148 | if (!(ex->ex_flags & EXF_EARLY)) |
149 | mutex_exit(&ex->ex_lock); |
150 | return (rp); |
151 | } |
152 | if (flags & EX_MALLOCOK) { |
153 | if (!(ex->ex_flags & EXF_EARLY)) |
154 | mutex_exit(&ex->ex_lock); |
155 | goto alloc; |
156 | } |
157 | if ((flags & EX_WAITOK) == 0) { |
158 | if (!(ex->ex_flags & EXF_EARLY)) |
159 | mutex_exit(&ex->ex_lock); |
160 | return (NULL); |
161 | } |
162 | KASSERT(mutex_owned(&ex->ex_lock)); |
163 | ex->ex_flwanted = true; |
164 | if ((flags & EX_CATCH) != 0) |
165 | error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock); |
166 | else { |
167 | cv_wait(&ex->ex_cv, &ex->ex_lock); |
168 | error = 0; |
169 | } |
170 | if (error != 0) { |
171 | mutex_exit(&ex->ex_lock); |
172 | return (NULL); |
173 | } |
174 | } |
175 | } |
176 | |
177 | alloc: |
178 | rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : PR_NOWAIT); |
179 | |
180 | if (rp != NULL) |
181 | rp->er_flags = ER_ALLOC; |
182 | |
183 | return (rp); |
184 | } |
185 | |
186 | /* |
187 | * Free an extent region descriptor. EXTENT _MUST_ BE LOCKED! |
188 | */ |
189 | static void |
190 | extent_free_region_descriptor(struct extent *ex, struct extent_region *rp) |
191 | { |
192 | |
193 | if (ex->ex_flags & EXF_FIXED) { |
194 | struct extent_fixed *fex = (struct extent_fixed *)ex; |
195 | |
196 | /* |
197 | * If someone's waiting for a region descriptor, |
198 | * be nice and give them this one, rather than |
199 | * just free'ing it back to the system. |
200 | */ |
201 | if (rp->er_flags & ER_ALLOC) { |
202 | if (ex->ex_flwanted) { |
203 | /* Clear all but ER_ALLOC flag. */ |
204 | rp->er_flags = ER_ALLOC; |
205 | LIST_INSERT_HEAD(&fex->fex_freelist, rp, |
206 | er_link); |
207 | goto wake_em_up; |
208 | } else |
209 | pool_put(&expool, rp); |
210 | } else { |
211 | /* Clear all flags. */ |
212 | rp->er_flags = 0; |
213 | LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link); |
214 | } |
215 | |
216 | wake_em_up: |
217 | if (!(ex->ex_flags & EXF_EARLY)) { |
218 | ex->ex_flwanted = false; |
219 | cv_broadcast(&ex->ex_cv); |
220 | } |
221 | return; |
222 | } |
223 | |
224 | /* |
225 | * We know it's dynamically allocated if we get here. |
226 | */ |
227 | pool_put(&expool, rp); |
228 | } |
229 | |
230 | /* |
231 | * Allocate and initialize an extent map. |
232 | */ |
233 | struct extent * |
234 | extent_create(const char *name, u_long start, u_long end, |
235 | void *storage, size_t storagesize, int flags) |
236 | { |
237 | struct extent *ex; |
238 | char *cp = storage; |
239 | size_t sz = storagesize; |
240 | struct extent_region *rp; |
241 | int fixed_extent = (storage != NULL); |
242 | |
243 | #ifndef _KERNEL |
244 | extent_init(); |
245 | #endif |
246 | |
247 | #ifdef DIAGNOSTIC |
248 | /* Check arguments. */ |
249 | if (name == NULL) |
250 | panic("extent_create: name == NULL" ); |
251 | if (end < start) { |
252 | printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n" , |
253 | name, start, end); |
254 | panic("extent_create: end < start" ); |
255 | } |
256 | if (fixed_extent && (storagesize < sizeof(struct extent_fixed))) |
257 | panic("extent_create: fixed extent, bad storagesize 0x%lx" , |
258 | (u_long)storagesize); |
259 | if (fixed_extent == 0 && (storagesize != 0 || storage != NULL)) |
260 | panic("extent_create: storage provided for non-fixed" ); |
261 | #endif |
262 | |
263 | /* Allocate extent descriptor. */ |
264 | if (fixed_extent) { |
265 | struct extent_fixed *fex; |
266 | |
267 | memset(storage, 0, storagesize); |
268 | |
269 | /* |
270 | * Align all descriptors on "long" boundaries. |
271 | */ |
272 | fex = (struct extent_fixed *)cp; |
273 | ex = (struct extent *)fex; |
274 | cp += ALIGN(sizeof(struct extent_fixed)); |
275 | sz -= ALIGN(sizeof(struct extent_fixed)); |
276 | fex->fex_storage = storage; |
277 | fex->fex_storagesize = storagesize; |
278 | |
279 | /* |
280 | * In a fixed extent, we have to pre-allocate region |
281 | * descriptors and place them in the extent's freelist. |
282 | */ |
283 | LIST_INIT(&fex->fex_freelist); |
284 | while (sz >= ALIGN(sizeof(struct extent_region))) { |
285 | rp = (struct extent_region *)cp; |
286 | cp += ALIGN(sizeof(struct extent_region)); |
287 | sz -= ALIGN(sizeof(struct extent_region)); |
288 | LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link); |
289 | } |
290 | } else { |
291 | ex = kmem_alloc(sizeof(*ex), |
292 | (flags & EX_WAITOK) ? KM_SLEEP : KM_NOSLEEP); |
293 | if (ex == NULL) |
294 | return (NULL); |
295 | } |
296 | |
297 | /* Fill in the extent descriptor and return it to the caller. */ |
298 | if ((flags & EX_EARLY) == 0) { |
299 | mutex_init(&ex->ex_lock, MUTEX_DEFAULT, IPL_VM); |
300 | cv_init(&ex->ex_cv, "extent"); |
301 | } |
302 | LIST_INIT(&ex->ex_regions); |
303 | ex->ex_name = name; |
304 | ex->ex_start = start; |
305 | ex->ex_end = end; |
306 | ex->ex_flags = 0; |
307 | ex->ex_flwanted = false; |
308 | if (fixed_extent) |
309 | ex->ex_flags |= EXF_FIXED; |
310 | if (flags & EX_NOCOALESCE) |
311 | ex->ex_flags |= EXF_NOCOALESCE; |
312 | if (flags & EX_EARLY) |
313 | ex->ex_flags |= EXF_EARLY; |
314 | return (ex); |
315 | } |
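| |
| /* |
| * A minimal usage sketch (illustrative only; the name, range and sizes |
| * below are made up).  A caller managing an I/O port range might do: |
| * |
| *     struct extent *ioex; |
| *     u_long addr; |
| *     int error; |
| * |
| *     ioex = extent_create("ioport", 0x0, 0xffff, NULL, 0, EX_NOWAIT); |
| *     error = extent_alloc(ioex, 0x20, 0x10, 0, EX_NOWAIT, &addr); |
| *     if (error == 0) { |
| *             ... use the 0x20 ports at "addr" ... |
| *             extent_free(ioex, addr, 0x20, EX_NOWAIT); |
| *     } |
| *     extent_destroy(ioex); |
| * |
| * See extent(9) for the full set of flags and their semantics. |
| */ |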
316 | |
317 | /* |
318 | * Destroy an extent map. |
319 | * Since we're freeing the data, there can't be any references |
320 | * so we don't need any locking. |
321 | */ |
322 | void |
323 | extent_destroy(struct extent *ex) |
324 | { |
325 | struct extent_region *rp, *orp; |
326 | |
327 | #ifdef DIAGNOSTIC |
328 | /* Check arguments. */ |
329 | if (ex == NULL) |
330 | panic("extent_destroy: NULL extent" ); |
331 | #endif |
332 | |
333 | /* Free all region descriptors in extent. */ |
334 | for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) { |
335 | orp = rp; |
336 | rp = LIST_NEXT(rp, er_link); |
337 | LIST_REMOVE(orp, er_link); |
338 | extent_free_region_descriptor(ex, orp); |
339 | } |
340 | |
341 | cv_destroy(&ex->ex_cv); |
342 | mutex_destroy(&ex->ex_lock); |
343 | |
344 | /* If we're not a fixed extent, free the extent descriptor itself. */ |
345 | if ((ex->ex_flags & EXF_FIXED) == 0) |
346 | kmem_free(ex, sizeof(*ex)); |
347 | } |
348 | |
349 | /* |
350 | * Insert a region descriptor into the sorted region list after the |
351 | * entry "after" or at the head of the list (if "after" is NULL). |
352 | * The region descriptor we insert is passed in "rp". We must |
353 | * allocate the region descriptor before calling this function! |
354 | * If we don't need the region descriptor, it will be freed here. |
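| * |
| * For example (with coalescing allowed), inserting [0x200, 0x2ff] between |
| * existing regions [0x100, 0x1ff] and [0x300, 0x3ff] extends the first |
| * region to cover [0x100, 0x3ff], removes and frees the second region's |
| * descriptor, and frees "rp" as well. |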
355 | */ |
356 | static void |
357 | extent_insert_and_optimize(struct extent *ex, u_long start, u_long size, |
358 | int flags, struct extent_region *after, struct extent_region *rp) |
359 | { |
360 | struct extent_region *nextr; |
361 | int appended = 0; |
362 | |
363 | if (after == NULL) { |
364 | /* |
365 | * We're the first in the region list. If there's |
366 | * a region after us, attempt to coalesce to save |
367 | * descriptor overhead. |
368 | */ |
369 | if (((ex->ex_flags & EXF_NOCOALESCE) == 0) && |
370 | (LIST_FIRST(&ex->ex_regions) != NULL) && |
371 | ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) { |
372 | /* |
373 | * We can coalesce. Prepend us to the first region. |
374 | */ |
375 | LIST_FIRST(&ex->ex_regions)->er_start = start; |
376 | extent_free_region_descriptor(ex, rp); |
377 | return; |
378 | } |
379 | |
380 | /* |
381 | * Can't coalesce. Fill in the region descriptor |
382 | * and insert us at the head of the region list. |
383 | */ |
384 | rp->er_start = start; |
385 | rp->er_end = start + (size - 1); |
386 | LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link); |
387 | return; |
388 | } |
389 | |
390 | /* |
391 | * If EXF_NOCOALESCE is set, coalescing is disallowed. |
392 | */ |
393 | if (ex->ex_flags & EXF_NOCOALESCE) |
394 | goto cant_coalesce; |
395 | |
396 | /* |
397 | * Attempt to coalesce with the region before us. |
398 | */ |
399 | if ((after->er_end + 1) == start) { |
400 | /* |
401 | * We can coalesce. Append ourselves and make |
402 | * note of it. |
403 | */ |
404 | after->er_end = start + (size - 1); |
405 | appended = 1; |
406 | } |
407 | |
408 | /* |
409 | * Attempt to coalesce with the region after us. |
410 | */ |
411 | if ((LIST_NEXT(after, er_link) != NULL) && |
412 | ((start + size) == LIST_NEXT(after, er_link)->er_start)) { |
413 | /* |
414 | * We can coalesce. Note that if we appended ourselves |
415 | * to the previous region, we exactly fit the gap, and |
416 | * can free the "next" region descriptor. |
417 | */ |
418 | if (appended) { |
419 | /* |
420 | * Yup, we can free it up. |
421 | */ |
422 | after->er_end = LIST_NEXT(after, er_link)->er_end; |
423 | nextr = LIST_NEXT(after, er_link); |
424 | LIST_REMOVE(nextr, er_link); |
425 | extent_free_region_descriptor(ex, nextr); |
426 | } else { |
427 | /* |
428 | * Nope, just prepend us to the next region. |
429 | */ |
430 | LIST_NEXT(after, er_link)->er_start = start; |
431 | } |
432 | |
433 | extent_free_region_descriptor(ex, rp); |
434 | return; |
435 | } |
436 | |
437 | /* |
438 | * We weren't able to coalesce with the next region, but |
439 | * we don't need to allocate a region descriptor if we |
440 | * appended ourselves to the previous region. |
441 | */ |
442 | if (appended) { |
443 | extent_free_region_descriptor(ex, rp); |
444 | return; |
445 | } |
446 | |
447 | cant_coalesce: |
448 | |
449 | /* |
450 | * Fill in the region descriptor and insert ourselves |
451 | * into the region list. |
452 | */ |
453 | rp->er_start = start; |
454 | rp->er_end = start + (size - 1); |
455 | LIST_INSERT_AFTER(after, rp, er_link); |
456 | } |
457 | |
458 | /* |
459 | * Allocate a specific region in an extent map. |
460 | */ |
461 | int |
462 | extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags) |
463 | { |
464 | struct extent_region *rp, *last, *myrp; |
465 | u_long end = start + (size - 1); |
466 | int error; |
467 | |
468 | #ifdef DIAGNOSTIC |
469 | /* Check arguments. */ |
470 | if (ex == NULL) |
471 | panic("extent_alloc_region: NULL extent" ); |
472 | if (size < 1) { |
473 | printf("extent_alloc_region: extent `%s', size 0x%lx\n" , |
474 | ex->ex_name, size); |
475 | panic("extent_alloc_region: bad size" ); |
476 | } |
477 | if (end < start) { |
478 | printf( |
479 | "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n" , |
480 | ex->ex_name, start, size); |
481 | panic("extent_alloc_region: overflow" ); |
482 | } |
483 | #endif |
484 | #ifdef LOCKDEBUG |
485 | if (flags & EX_WAITSPACE) { |
486 | ASSERT_SLEEPABLE(); |
487 | } |
488 | #endif |
489 | |
490 | /* |
491 | * Make sure the requested region lies within the |
492 | * extent. |
493 | * |
494 | * We don't lock to check the range, because those values |
495 | * are never modified, and if another thread deletes the |
496 | * extent, we're screwed anyway. |
497 | */ |
498 | if ((start < ex->ex_start) || (end > ex->ex_end)) { |
499 | #ifdef DIAGNOSTIC |
500 | printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n" , |
501 | ex->ex_name, ex->ex_start, ex->ex_end); |
502 | printf("extent_alloc_region: start 0x%lx, end 0x%lx\n" , |
503 | start, end); |
504 | panic("extent_alloc_region: region lies outside extent" ); |
505 | #else |
506 | return (EINVAL); |
507 | #endif |
508 | } |
509 | |
510 | /* |
511 | * Allocate the region descriptor. It will be freed later |
512 | * if we can coalesce with another region. Don't lock before |
513 | * here! This could block. |
514 | */ |
515 | myrp = extent_alloc_region_descriptor(ex, flags); |
516 | if (myrp == NULL) { |
517 | #ifdef DIAGNOSTIC |
518 | printf( |
519 | "extent_alloc_region: can't allocate region descriptor\n" ); |
520 | #endif |
521 | return (ENOMEM); |
522 | } |
523 | |
524 | if (!(ex->ex_flags & EXF_EARLY)) |
525 | mutex_enter(&ex->ex_lock); |
526 | alloc_start: |
527 | |
528 | /* |
529 | * Attempt to place ourselves in the desired area of the |
530 | * extent. We save ourselves some work by keeping the list sorted. |
531 | * In other words, if the start of the current region is greater |
532 | * than the end of our region, we don't have to search any further. |
533 | */ |
534 | |
535 | /* |
536 | * Keep a pointer to the last region we looked at so |
537 | * that we don't have to traverse the list again when |
538 | * we insert ourselves. If "last" is NULL when we |
539 | * finally insert ourselves, we go at the head of the |
540 | * list. See extent_insert_and_optimize() for details. |
541 | */ |
542 | last = NULL; |
543 | |
544 | LIST_FOREACH(rp, &ex->ex_regions, er_link) { |
545 | if (rp->er_start > end) { |
546 | /* |
547 | * We lie before this region and don't |
548 | * conflict. |
549 | */ |
550 | break; |
551 | } |
552 | |
553 | /* |
554 | * The current region begins before we end. |
555 | * Check for a conflict. |
556 | */ |
557 | if (rp->er_end >= start) { |
558 | /* |
559 | * We conflict. If we can (and want to) wait, |
560 | * do so. |
561 | */ |
562 | if (flags & EX_WAITSPACE) { |
563 | KASSERT(!(ex->ex_flags & EXF_EARLY)); |
564 | if ((flags & EX_CATCH) != 0) |
565 | error = cv_wait_sig(&ex->ex_cv, |
566 | &ex->ex_lock); |
567 | else { |
568 | cv_wait(&ex->ex_cv, &ex->ex_lock); |
569 | error = 0; |
570 | } |
571 | if (error == 0) |
572 | goto alloc_start; |
573 | mutex_exit(&ex->ex_lock); |
574 | } else { |
575 | if (!(ex->ex_flags & EXF_EARLY)) |
576 | mutex_exit(&ex->ex_lock); |
577 | error = EAGAIN; |
578 | } |
579 | extent_free_region_descriptor(ex, myrp); |
580 | return error; |
581 | } |
582 | /* |
583 | * We don't conflict, but this region lies before |
584 | * us. Keep a pointer to this region, and keep |
585 | * trying. |
586 | */ |
587 | last = rp; |
588 | } |
589 | |
590 | /* |
591 | * We don't conflict with any regions. "last" points |
592 | * to the region we fall after, or is NULL if we belong |
593 | * at the beginning of the region list. Insert ourselves. |
594 | */ |
595 | extent_insert_and_optimize(ex, start, size, flags, last, myrp); |
596 | if (!(ex->ex_flags & EXF_EARLY)) |
597 | mutex_exit(&ex->ex_lock); |
598 | return (0); |
599 | } |
600 | |
601 | /* |
602 | * Macro to check (x + y) <= z. This check is designed to fail |
603 | * if an overflow occurs. |
604 | */ |
605 | #define LE_OV(x, y, z) ((((x) + (y)) >= (x)) && (((x) + (y)) <= (z))) |
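| |
| /* |
| * For illustration, with a 32-bit u_long: LE_OV(0x1000, 0x100, 0x2000) is |
| * true, while LE_OV(0xfffffff0, 0x100, 0xffffffff) is false because |
| * (x + y) wraps around and the first term of the conjunction catches the |
| * overflow. |
| */ |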
606 | |
607 | /* |
608 | * Allocate a region in an extent map subregion. |
609 | * |
610 | * If EX_FAST is specified, we return the first fit in the map. |
611 | * Otherwise, we try to minimize fragmentation by finding the |
612 | * smallest gap that will hold the request. |
613 | * |
614 | * The allocated region is aligned to "alignment", which must be |
615 | * a power of 2. |
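| * |
| * The skew shifts the alignment origin: with alignment 0x100 and skew 0x10 |
| * the region is placed at an address of the form N * 0x100 + 0x10.  A |
| * non-zero boundary keeps the region from crossing any multiple of |
| * "boundary", measured from the start of the extent (or from zero if |
| * EX_BOUNDZERO is set); a region may begin on such a boundary but must |
| * end before the next one. |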
616 | */ |
617 | int |
618 | extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend, |
619 | u_long size, u_long alignment, u_long skew, u_long boundary, |
620 | int flags, u_long *result) |
621 | { |
622 | struct extent_region *rp, *myrp, *last, *bestlast; |
623 | u_long newstart, newend, exend, beststart, bestovh, ovh; |
624 | u_long dontcross; |
625 | int error; |
626 | |
627 | #ifdef DIAGNOSTIC |
628 | /* |
629 | * Check arguments. |
630 | * |
631 | * We don't lock to check these, because these values |
632 | * are never modified, and if another thread deletes the |
633 | * extent, we're screwed anyway. |
634 | */ |
635 | if (ex == NULL) |
636 | panic("extent_alloc_subregion: NULL extent" ); |
637 | if (result == NULL) |
638 | panic("extent_alloc_subregion: NULL result pointer" ); |
639 | if ((substart < ex->ex_start) || (substart > ex->ex_end) || |
640 | (subend > ex->ex_end) || (subend < ex->ex_start)) { |
641 | printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n" , |
642 | ex->ex_name, ex->ex_start, ex->ex_end); |
643 | printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n" , |
644 | substart, subend); |
645 | panic("extent_alloc_subregion: bad subregion" ); |
646 | } |
647 | if ((size < 1) || ((size - 1) > (subend - substart))) { |
648 | printf("extent_alloc_subregion: extent `%s', size 0x%lx\n" , |
649 | ex->ex_name, size); |
650 | panic("extent_alloc_subregion: bad size" ); |
651 | } |
652 | if (alignment == 0) |
653 | panic("extent_alloc_subregion: bad alignment" ); |
654 | if (boundary && (boundary < size)) { |
655 | printf( |
656 | "extent_alloc_subregion: extent `%s', size 0x%lx, " |
657 | "boundary 0x%lx\n" , ex->ex_name, size, boundary); |
658 | panic("extent_alloc_subregion: bad boundary" ); |
659 | } |
660 | #endif |
661 | #ifdef LOCKDEBUG |
662 | if (flags & EX_WAITSPACE) { |
663 | ASSERT_SLEEPABLE(); |
664 | } |
665 | #endif |
666 | |
667 | /* |
668 | * Allocate the region descriptor. It will be freed later |
669 | * if we can coalesce with another region. Don't lock before |
670 | * here! This could block. |
671 | */ |
672 | myrp = extent_alloc_region_descriptor(ex, flags); |
673 | if (myrp == NULL) { |
674 | #ifdef DIAGNOSTIC |
675 | printf( |
676 | "extent_alloc_subregion: can't allocate region descriptor\n" ); |
677 | #endif |
678 | return (ENOMEM); |
679 | } |
680 | |
681 | alloc_start: |
682 | mutex_enter(&ex->ex_lock); |
683 | |
684 | /* |
685 | * Keep a pointer to the last region we looked at so |
686 | * that we don't have to traverse the list again when |
687 | * we insert ourselves. If "last" is NULL when we |
688 | * finally insert ourselves, we go at the head of the |
689 | * list. See extent_insert_and_optimize() for details. |
690 | */ |
691 | last = NULL; |
692 | |
693 | /* |
694 | * Keep track of size and location of the smallest |
695 | * chunk we fit in. |
696 | * |
697 | * Since the extent can be as large as the numeric range |
698 | * of the CPU (0 - 0xffffffff for 32-bit systems), the |
699 | * best overhead value can be the maximum unsigned integer. |
700 | * Thus, we initialize "bestovh" to 0, since we insert ourselves |
701 | * into the region list immediately on an exact match (which |
702 | * is the only case where "bestovh" would be set to 0). |
703 | */ |
704 | bestovh = 0; |
705 | beststart = 0; |
706 | bestlast = NULL; |
707 | |
708 | /* |
709 | * Keep track of the end of the free region. This is either the end of |
710 | * the extent or the start of a region past the subend. |
711 | */ |
712 | exend = ex->ex_end; |
713 | |
714 | /* |
715 | * For N allocated regions, we must make (N + 1) |
716 | * checks for unallocated space. The first chunk we |
717 | * check is the area from the beginning of the subregion |
718 | * to the first allocated region after that point. |
719 | */ |
720 | newstart = EXTENT_ALIGN(substart, alignment, skew); |
721 | if (newstart < ex->ex_start) { |
722 | #ifdef DIAGNOSTIC |
723 | printf( |
724 | "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n" , |
725 | ex->ex_name, ex->ex_start, ex->ex_end, alignment); |
726 | mutex_exit(&ex->ex_lock); |
727 | panic("extent_alloc_subregion: overflow after alignment" ); |
728 | #else |
729 | extent_free_region_descriptor(ex, myrp); |
730 | mutex_exit(&ex->ex_lock); |
731 | return (EINVAL); |
732 | #endif |
733 | } |
734 | |
735 | /* |
736 | * Find the first allocated region that begins on or after |
737 | * the subregion start, advancing the "last" pointer along |
738 | * the way. |
739 | */ |
740 | LIST_FOREACH(rp, &ex->ex_regions, er_link) { |
741 | if (rp->er_start >= newstart) |
742 | break; |
743 | last = rp; |
744 | } |
745 | |
746 | /* |
747 | * Relocate the start of our candidate region to the end of |
748 | * the last allocated region (if there was one overlapping |
749 | * our subrange). |
750 | */ |
751 | if (last != NULL && last->er_end >= newstart) |
752 | newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew); |
753 | |
754 | for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) { |
755 | /* |
756 | * If the region starts past the subend, bail out and see |
757 | * if we fit against the subend. |
758 | */ |
759 | if (rp->er_start > subend) { |
760 | exend = rp->er_start; |
761 | break; |
762 | } |
763 | |
764 | /* |
765 | * Check the chunk before "rp". Note that our |
766 | * comparison is safe from overflow conditions. |
767 | */ |
768 | if (LE_OV(newstart, size, rp->er_start)) { |
769 | /* |
770 | * Do a boundary check, if necessary. Note |
771 | * that a region may *begin* on the boundary, |
772 | * but it must end before the boundary. |
773 | */ |
774 | if (boundary) { |
775 | newend = newstart + (size - 1); |
776 | |
777 | /* |
778 | * Calculate the next boundary after the start |
779 | * of this region. |
780 | */ |
781 | dontcross = EXTENT_ALIGN(newstart+1, boundary, |
782 | (flags & EX_BOUNDZERO) ? 0 : ex->ex_start) |
783 | - 1; |
784 | |
785 | #if 0 |
786 | printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n" , |
787 | newstart, newend, ex->ex_start, ex->ex_end, |
788 | boundary, dontcross); |
789 | #endif |
790 | |
791 | /* Check for overflow */ |
792 | if (dontcross < ex->ex_start) |
793 | dontcross = ex->ex_end; |
794 | else if (newend > dontcross) { |
795 | /* |
796 | * Candidate region crosses boundary. |
797 | * Throw away the leading part and see |
798 | * if we still fit. |
799 | */ |
800 | newstart = dontcross + 1; |
801 | newend = newstart + (size - 1); |
802 | dontcross += boundary; |
803 | if (!LE_OV(newstart, size, rp->er_start)) |
804 | goto skip; |
805 | } |
806 | |
807 | /* |
808 | * If we run past the end of |
809 | * the extent or the boundary |
810 | * overflows, then the request |
811 | * can't fit. |
812 | */ |
813 | if (newstart + size - 1 > ex->ex_end || |
814 | dontcross < newstart) |
815 | goto fail; |
816 | } |
817 | |
818 | /* |
819 | * We would fit into this space. Calculate |
820 | * the overhead (wasted space). If we exactly |
821 | * fit, or we're taking the first fit, insert |
822 | * ourselves into the region list. |
823 | */ |
824 | ovh = rp->er_start - newstart - size; |
825 | if ((flags & EX_FAST) || (ovh == 0)) |
826 | goto found; |
827 | |
828 | /* |
829 | * Don't exactly fit, but check to see |
830 | * if we're better than any current choice. |
831 | */ |
832 | if ((bestovh == 0) || (ovh < bestovh)) { |
833 | bestovh = ovh; |
834 | beststart = newstart; |
835 | bestlast = last; |
836 | } |
837 | } |
838 | |
839 | skip: |
840 | /* |
841 | * Skip past the current region and check again. |
842 | */ |
843 | newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew); |
844 | if (newstart < rp->er_end) { |
845 | /* |
846 | * Overflow condition. Don't error out, since |
847 | * we might have a chunk of space that we can |
848 | * use. |
849 | */ |
850 | goto fail; |
851 | } |
852 | |
853 | last = rp; |
854 | } |
855 | |
856 | /* |
857 | * The final check is from the current starting point to the |
858 | * end of the subregion. If there were no allocated regions, |
859 | * "newstart" is set to the beginning of the subregion, or |
860 | * just past the end of the last allocated region, adjusted |
861 | * for alignment in either case. |
862 | */ |
863 | if (LE_OV(newstart, (size - 1), subend)) { |
864 | /* |
865 | * Do a boundary check, if necessary. Note |
866 | * that a region may *begin* on the boundary, |
867 | * but it must end before the boundary. |
868 | */ |
869 | if (boundary) { |
870 | newend = newstart + (size - 1); |
871 | |
872 | /* |
873 | * Calculate the next boundary after the start |
874 | * of this region. |
875 | */ |
876 | dontcross = EXTENT_ALIGN(newstart+1, boundary, |
877 | (flags & EX_BOUNDZERO) ? 0 : ex->ex_start) |
878 | - 1; |
879 | |
880 | #if 0 |
881 | printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n" , |
882 | newstart, newend, ex->ex_start, ex->ex_end, |
883 | boundary, dontcross); |
884 | #endif |
885 | |
886 | /* Check for overflow */ |
887 | if (dontcross < ex->ex_start) |
888 | dontcross = ex->ex_end; |
889 | else if (newend > dontcross) { |
890 | /* |
891 | * Candidate region crosses boundary. |
892 | * Throw away the leading part and see |
893 | * if we still fit. |
894 | */ |
895 | newstart = dontcross + 1; |
896 | newend = newstart + (size - 1); |
897 | dontcross += boundary; |
898 | if (!LE_OV(newstart, (size - 1), subend)) |
899 | goto fail; |
900 | } |
901 | |
902 | /* |
903 | * If we run past the end of |
904 | * the extent or the boundary |
905 | * overflows, then the request |
906 | * can't fit. |
907 | */ |
908 | if (newstart + size - 1 > ex->ex_end || |
909 | dontcross < newstart) |
910 | goto fail; |
911 | } |
912 | |
913 | /* |
914 | * We would fit into this space. Calculate |
915 | * the overhead (wasted space). If we exactly |
916 | * fit, or we're taking the first fit, insert |
917 | * ourselves into the region list. |
918 | */ |
919 | ovh = exend - newstart - (size - 1); |
920 | if ((flags & EX_FAST) || (ovh == 0)) |
921 | goto found; |
922 | |
923 | /* |
924 | * Don't exactly fit, but check to see |
925 | * if we're better than any current choice. |
926 | */ |
927 | if ((bestovh == 0) || (ovh < bestovh)) { |
928 | bestovh = ovh; |
929 | beststart = newstart; |
930 | bestlast = last; |
931 | } |
932 | } |
933 | |
934 | fail: |
935 | /* |
936 | * One of the following two conditions has |
937 | * occurred: |
938 | * |
939 | * There is no chunk large enough to hold the request. |
940 | * |
941 | * If EX_FAST was not specified, there is not an |
942 | * exact match for the request. |
943 | * |
944 | * Note that if we reach this point and EX_FAST is |
945 | * set, then we know there is no space in the extent for |
946 | * the request. |
947 | */ |
948 | if (((flags & EX_FAST) == 0) && (bestovh != 0)) { |
949 | /* |
950 | * We have a match that's "good enough". |
951 | */ |
952 | newstart = beststart; |
953 | last = bestlast; |
954 | goto found; |
955 | } |
956 | |
957 | /* |
958 | * No space currently available. Wait for it to free up, |
959 | * if possible. |
960 | */ |
961 | if (flags & EX_WAITSPACE) { |
962 | if ((flags & EX_CATCH) != 0) { |
963 | error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock); |
964 | } else { |
965 | cv_wait(&ex->ex_cv, &ex->ex_lock); |
966 | error = 0; |
967 | } |
968 | if (error == 0) |
969 | goto alloc_start; |
970 | mutex_exit(&ex->ex_lock); |
971 | } else { |
972 | mutex_exit(&ex->ex_lock); |
973 | error = EAGAIN; |
974 | } |
975 | |
976 | extent_free_region_descriptor(ex, myrp); |
977 | return error; |
978 | |
979 | found: |
980 | /* |
981 | * Insert ourselves into the region list. |
982 | */ |
983 | extent_insert_and_optimize(ex, newstart, size, flags, last, myrp); |
984 | mutex_exit(&ex->ex_lock); |
985 | *result = newstart; |
986 | return (0); |
987 | } |
988 | |
989 | int |
990 | extent_alloc_subregion(struct extent *ex, u_long start, u_long end, u_long size, |
991 | u_long alignment, u_long boundary, int flags, u_long *result) |
992 | { |
993 | |
994 | return (extent_alloc_subregion1(ex, start, end, size, alignment, |
995 | 0, boundary, flags, result)); |
996 | } |
997 | |
998 | int |
999 | extent_alloc(struct extent *ex, u_long size, u_long alignment, u_long boundary, |
1000 | int flags, u_long *result) |
1001 | { |
1002 | |
1003 | return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end, |
1004 | size, alignment, 0, boundary, |
1005 | flags, result)); |
1006 | } |
1007 | |
1008 | int |
1009 | extent_alloc1(struct extent *ex, u_long size, u_long alignment, u_long skew, |
1010 | u_long boundary, int flags, u_long *result) |
1011 | { |
1012 | |
1013 | return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end, |
1014 | size, alignment, skew, boundary, |
1015 | flags, result)); |
1016 | } |
1017 | |
1018 | int |
1019 | extent_free(struct extent *ex, u_long start, u_long size, int flags) |
1020 | { |
1021 | struct extent_region *rp, *nrp = NULL; |
1022 | u_long end = start + (size - 1); |
1023 | |
1024 | #ifdef DIAGNOSTIC |
1025 | /* |
1026 | * Check arguments. |
1027 | * |
1028 | * We don't lock to check these, because these values |
1029 | * are never modified, and if another thread deletes the |
1030 | * extent, we're screwed anyway. |
1031 | */ |
1032 | if (ex == NULL) |
1033 | panic("extent_free: NULL extent" ); |
1034 | if ((start < ex->ex_start) || (end > ex->ex_end)) { |
1035 | extent_print(ex); |
1036 | printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n" , |
1037 | ex->ex_name, start, size); |
1038 | panic("extent_free: extent `%s', region not within extent" , |
1039 | ex->ex_name); |
1040 | } |
1041 | /* Check for an overflow. */ |
1042 | if (end < start) { |
1043 | extent_print(ex); |
1044 | printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n" , |
1045 | ex->ex_name, start, size); |
1046 | panic("extent_free: overflow" ); |
1047 | } |
1048 | #endif |
1049 | |
1050 | /* |
1051 | * If we're allowing coalescing, we must allocate a region |
1052 | * descriptor now, since it might block. |
1053 | */ |
1054 | const bool coalesce = (ex->ex_flags & EXF_NOCOALESCE) == 0; |
1055 | |
1056 | if (coalesce) { |
1057 | /* Allocate a region descriptor. */ |
1058 | nrp = extent_alloc_region_descriptor(ex, flags); |
1059 | if (nrp == NULL) |
1060 | return (ENOMEM); |
1061 | } |
1062 | |
1063 | if (!(ex->ex_flags & EXF_EARLY)) |
1064 | mutex_enter(&ex->ex_lock); |
1065 | |
1066 | /* |
1067 | * Find region and deallocate. Several possibilities: |
1068 | * |
1069 | * 1. (start == er_start) && (end == er_end): |
1070 | * Free descriptor. |
1071 | * |
1072 | * 2. (start == er_start) && (end < er_end): |
1073 | * Adjust er_start. |
1074 | * |
1075 | * 3. (start > er_start) && (end == er_end): |
1076 | * Adjust er_end. |
1077 | * |
1078 | * 4. (start > er_start) && (end < er_end): |
1079 | * Fragment region. Requires descriptor alloc. |
1080 | * |
1081 | * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag |
1082 | * is not set. |
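| * |
| * As an example of case 4: freeing [0x20, 0x2f] out of an allocated |
| * region [0x00, 0xff] shrinks that region to [0x00, 0x1f] and uses the |
| * pre-allocated descriptor "nrp" for the remainder [0x30, 0xff]. |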
1083 | */ |
1084 | LIST_FOREACH(rp, &ex->ex_regions, er_link) { |
1085 | /* |
1086 | * Save ourselves some comparisons; does the current |
1087 | * region end before chunk to be freed begins? If so, |
1088 | * then we haven't found the appropriate region descriptor. |
1089 | */ |
1090 | if (rp->er_end < start) |
1091 | continue; |
1092 | |
1093 | /* |
1094 | * Save ourselves some traversal; does the current |
1095 | * region begin after the chunk to be freed ends? If so, |
1096 | * then we've already passed any possible region descriptors |
1097 | * that might have contained the chunk to be freed. |
1098 | */ |
1099 | if (rp->er_start > end) |
1100 | break; |
1101 | |
1102 | /* Case 1. */ |
1103 | if ((start == rp->er_start) && (end == rp->er_end)) { |
1104 | LIST_REMOVE(rp, er_link); |
1105 | extent_free_region_descriptor(ex, rp); |
1106 | goto done; |
1107 | } |
1108 | |
1109 | /* |
1110 | * The following cases all require that EXF_NOCOALESCE |
1111 | * is not set. |
1112 | */ |
1113 | if (!coalesce) |
1114 | continue; |
1115 | |
1116 | /* Case 2. */ |
1117 | if ((start == rp->er_start) && (end < rp->er_end)) { |
1118 | rp->er_start = (end + 1); |
1119 | goto done; |
1120 | } |
1121 | |
1122 | /* Case 3. */ |
1123 | if ((start > rp->er_start) && (end == rp->er_end)) { |
1124 | rp->er_end = (start - 1); |
1125 | goto done; |
1126 | } |
1127 | |
1128 | /* Case 4. */ |
1129 | if ((start > rp->er_start) && (end < rp->er_end)) { |
1130 | /* Fill in new descriptor. */ |
1131 | nrp->er_start = end + 1; |
1132 | nrp->er_end = rp->er_end; |
1133 | |
1134 | /* Adjust current descriptor. */ |
1135 | rp->er_end = start - 1; |
1136 | |
1137 | /* Insert new descriptor after current. */ |
1138 | LIST_INSERT_AFTER(rp, nrp, er_link); |
1139 | |
1140 | /* We used the new descriptor, so don't free it below */ |
1141 | nrp = NULL; |
1142 | goto done; |
1143 | } |
1144 | } |
1145 | |
1146 | /* Region not found, or request otherwise invalid. */ |
1147 | if (!(ex->ex_flags & EXF_EARLY)) |
1148 | mutex_exit(&ex->ex_lock); |
1149 | extent_print(ex); |
1150 | printf("extent_free: start 0x%lx, end 0x%lx\n" , start, end); |
1151 | panic("extent_free: region not found" ); |
1152 | |
1153 | done: |
1154 | if (nrp != NULL) |
1155 | extent_free_region_descriptor(ex, nrp); |
1156 | if (!(ex->ex_flags & EXF_EARLY)) { |
1157 | cv_broadcast(&ex->ex_cv); |
1158 | mutex_exit(&ex->ex_lock); |
1159 | } |
1160 | return (0); |
1161 | } |
1162 | |
1163 | void |
1164 | extent_print(struct extent *ex) |
1165 | { |
1166 | struct extent_region *rp; |
1167 | |
1168 | if (ex == NULL) |
1169 | panic("extent_print: NULL extent" ); |
1170 | |
1171 | if (!(ex->ex_flags & EXF_EARLY)) |
1172 | mutex_enter(&ex->ex_lock); |
1173 | |
1174 | printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n" , ex->ex_name, |
1175 | ex->ex_start, ex->ex_end, ex->ex_flags); |
1176 | |
1177 | LIST_FOREACH(rp, &ex->ex_regions, er_link) |
1178 | printf(" 0x%lx - 0x%lx\n" , rp->er_start, rp->er_end); |
1179 | |
1180 | if (!(ex->ex_flags & EXF_EARLY)) |
1181 | mutex_exit(&ex->ex_lock); |
1182 | } |
1183 | |