/*	$NetBSD: malloc.c,v 1.2 2003/08/07 16:42:01 agc Exp $	*/

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)malloc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: malloc.c,v 1.2 2003/08/07 16:42:01 agc Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */
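/*
 * A worked sizing example (a sketch, assuming 4-byte pointers and no
 * RCHECK, so the per-block overhead is 4 bytes): a 100-byte request
 * needs 104 bytes counting overhead, which is passed up to the
 * 128-byte size class, leaving 124 usable bytes.
 */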

#include <sys/types.h>
#if defined(DEBUG) || defined(RCHECK)
#include <sys/uio.h>
#endif
#if defined(RCHECK) || defined(MSTATS)
#include <stdio.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "reentrant.h"

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_long	ovu_size;	/* actual block size */
#endif
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};
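/*
 * A sketch of the resulting layout, assuming 4-byte pointers and no
 * RCHECK: in a 16-byte block the first 4 bytes hold ov_magic (0xef),
 * ov_index and two pad bytes, and malloc() returns op + 1, giving the
 * caller the remaining 12 bytes.  When the block is freed, ov_next is
 * written over those same 4 bytes, which is why ov_magic must overlay
 * ov_next and must not look like a valid free-list pointer.
 */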

#define	MAGIC		0xef		/* magic # on accounting info */
#ifdef RCHECK
#define	RMAGIC		0x5555		/* magic # on range info */
#endif

#ifdef RCHECK
#define	RSLOP		sizeof (u_short)
#else
#define	RSLOP		0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	long pagesz;			/* page size */
static	int pagebucket;			/* page size bucket */
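/*
 * For example, nextf[0] chains the 8-byte blocks and nextf[1] the
 * 16-byte blocks; on a machine with 4096-byte pages, nextf[9] chains
 * the page-sized blocks and pagebucket is set to 9 at initialization.
 */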

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#endif

#ifdef _REENT
static	mutex_t malloc_mutex = MUTEX_INITIALIZER;
#endif
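/*
 * The mutex_lock()/mutex_unlock() calls below come from "reentrant.h";
 * in a non-threaded build they presumably compile away to nothing,
 * which is why they are used unconditionally even though malloc_mutex
 * itself is only defined under _REENT.
 */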

static void morecore __P((int));
static int findbucket __P((union overhead *, int));
#ifdef MSTATS
void mstats __P((const char *));
#endif

#if defined(DEBUG) || defined(RCHECK)
#define	ASSERT(p)   do { if (!(p)) botch(__STRING(p)); } while (0)

static void botch __P((const char *));

/*
 * NOTE: since this may be called while malloc_mutex is locked, stdio must not
 * be used in this function.
 */
static void
botch(s)
	const char *s;
{
	struct iovec iov[3];

	iov[0].iov_base = "\nassertion botched: ";
	iov[0].iov_len = 20;
	iov[1].iov_base = (void *)s;
	iov[1].iov_len = strlen(s);
	iov[2].iov_base = "\n";
	iov[2].iov_len = 1;

	/*
	 * This place deserves a word of warning: a cancellation point will
	 * occur when executing writev(), and we might still be holding
	 * malloc_mutex.  At this point we need to disable cancellation
	 * until `after' abort() because i) establishing a cancellation
	 * handler might, depending on the implementation, result in another
	 * malloc() being executed, and ii) it is really not desirable to
	 * let execution continue.  `Fix me.'
	 *
	 * Note that holding malloc_mutex during abort() is safe.
	 */

	(void)writev(STDERR_FILENO, iov, 3);
	abort();
}
#else
#define	ASSERT(p)
#endif

void *
malloc(nbytes)
	size_t nbytes;
{
	union overhead *op;
	int bucket;
	long n;
	unsigned amt;

	mutex_lock(&malloc_mutex);

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = getpagesize();
		ASSERT(pagesz > 0);
		op = (union overhead *)(void *)sbrk(0);
		n = n - sizeof (*op) - ((long)op & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			if (sbrk((int)n) == (void *)-1) {
				mutex_unlock(&malloc_mutex);
				return (NULL);
			}
		}
		bucket = 0;
		amt = 8;
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
#ifndef RCHECK
		amt = 8;	/* size of first bucket */
		bucket = 0;
#else
		amt = 16;	/* size of first bucket */
		bucket = 1;
#endif
		n = -((long)sizeof (*op) + RSLOP);
	} else {
		amt = (unsigned)pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0) {
			mutex_unlock(&malloc_mutex);
			return (NULL);
		}
		bucket++;
	}
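	/*
	 * Worked example (assuming no RCHECK and 4-byte overhead): for
	 * nbytes == 100 the loop starts at amt == 8 with n == -4 and
	 * doubles amt until nbytes <= amt + n holds, stopping at
	 * amt == 128, bucket == 4, i.e. the 2^(4+3)-byte size class.
	 */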
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL) {
			mutex_unlock(&malloc_mutex);
			return (NULL);
		}
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
#ifdef MSTATS
	nmalloc[bucket]++;
#endif
	mutex_unlock(&malloc_mutex);
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
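	/*
	 * RCHECK example: malloc(10) stores ov_size == 10 (nbytes rounded
	 * up to a multiple of sizeof(u_short)) and plants the u_short
	 * RMAGIC trailer 10 bytes past the returned pointer, where free()
	 * will verify it.
	 */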
	return ((void *)(op + 1));
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(bucket)
	int bucket;
{
	union overhead *op;
	long sz;		/* size of desired block */
	long amt;		/* amount to allocate */
	long nblks;		/* how many blocks we get */

	/*
	 * sz <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	sz = 1 << (bucket + 3);
#ifdef DEBUG
	ASSERT(sz > 0);
#else
	if (sz <= 0)
		return;
#endif
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
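	/*
	 * For example, with 4096-byte pages: bucket 3 (sz == 64) carves
	 * one page into nblks == 64 blocks, while bucket 12 (sz == 32768)
	 * yields a single block from an sbrk() of sz + pagesz bytes.
	 */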
	op = (union overhead *)(void *)sbrk((int)amt);
	/* no more room! */
	if ((long)op == -1)
		return;
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next =
		    (union overhead *)(void *)((caddr_t)(void *)op+(size_t)sz);
		op = op->ov_next;
	}
}

void
free(cp)
	void *cp;
{
	long size;
	union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
#ifdef DEBUG
	ASSERT(op->ov_magic == MAGIC);	/* make sure it was in use */
#else
	if (op->ov_magic != MAGIC)
		return;			/* sanity */
#endif
#ifdef RCHECK
	ASSERT(op->ov_rmagic == RMAGIC);
	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
#endif
	size = op->ov_index;
	ASSERT(size < NBUCKETS);
	mutex_lock(&malloc_mutex);
	op->ov_next = nextf[(unsigned int)size];	/* also clobbers ov_magic */
	nextf[(unsigned int)size] = op;
#ifdef MSTATS
	nmalloc[(size_t)size]--;
#endif
	mutex_unlock(&malloc_mutex);
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``__realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes were given to realloc() and hope it's not huge.
 */
int __realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */

void *
realloc(cp, nbytes)
	void *cp;
	size_t nbytes;
{
	u_long onb;
	long i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (malloc(nbytes));
	if (nbytes == 0) {
		free(cp);
		return (NULL);
	}
	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
	mutex_lock(&malloc_mutex);
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``__realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into it).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, __realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	onb = (u_long)1 << (u_long)(i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
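	/*
	 * For example, with 4096-byte pages and 4-byte overhead (no
	 * RCHECK): i == 1 gives onb == 16 - 4 == 12 usable bytes, while
	 * i == 9 (the page bucket) gives onb == 4096 + 4096 - 4, since
	 * morecore() obtains sz + pagesz bytes for page-sized buckets.
	 */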
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			i = (long)1 << (long)(i + 2);
			if (i < pagesz)
				i -= sizeof (*op) + RSLOP;
			else
				i += pagesz - sizeof (*op) - RSLOP;
		}
		if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
			mutex_unlock(&malloc_mutex);
			return (cp);
		}
#ifndef _REENT
		else
			free(cp);
#endif
	}
	mutex_unlock(&malloc_mutex);
	if ((res = malloc(nbytes)) == NULL) {
#ifdef _REENT
		free(cp);
#endif
		return (NULL);
	}
#ifndef _REENT
	if (cp != res)		/* common optimization if "compacting" */
		(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
#else
	(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
	free(cp);
#endif
	return (res);
}

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(freep, srchlen)
	union overhead *freep;
	int srchlen;
{
	union overhead *p;
	int i, j;

	for (i = 0; i < NBUCKETS; i++) {
		j = 0;
		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
			if (p == freep)
				return (i);
			j++;
		}
	}
	return (-1);
}

#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
mstats(s)
	const char *s;
{
	int i, j;
	union overhead *p;
	int totfree = 0,
	    totused = 0;

	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
	for (i = 0; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		fprintf(stderr, " %d", j);
		totfree += j * (1 << (i + 3));
	}
	fprintf(stderr, "\nused:\t");
	for (i = 0; i < NBUCKETS; i++) {
		fprintf(stderr, " %d", nmalloc[i]);
		totused += nmalloc[i] * (1 << (i + 3));
	}
	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
	    totused, totfree);
}
#endif