1 #include "Python.h"
2
3 #ifdef WITH_PYMALLOC
4
5 #ifdef WITH_VALGRIND
6 #include <valgrind/valgrind.h>
7
8 /* If we're using GCC, use __builtin_expect() to reduce overhead of
9 the valgrind checks */
10 #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
11 # define UNLIKELY(value) __builtin_expect((value), 0)
12 #else
13 # define UNLIKELY(value) (value)
14 #endif
15
16 /* -1 indicates that we haven't checked that we're running on valgrind yet. */
17 static int running_on_valgrind = -1;
18 #endif
19
20 /* An object allocator for Python.
21
22 Here is an introduction to the layers of the Python memory architecture,
23 showing where the object allocator is actually used (layer +2). It is
24 called for every object allocation and deallocation (PyObject_New/Del),
25 unless the object-specific allocators implement a proprietary allocation
26 scheme (ex.: ints use a simple free list). This is also the place where
27 the cyclic garbage collector operates selectively on container objects.
28
29
30 Object-specific allocators
31 _____ ______ ______ ________
32 [ int ] [ dict ] [ list ] ... [ string ] Python core |
33 +3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
34 _______________________________ | |
35 [ Python's object allocator ] | |
36 +2 | ####### Object memory ####### | <------ Internal buffers ------> |
37 ______________________________________________________________ |
38 [ Python's raw memory allocator (PyMem_ API) ] |
39 +1 | <----- Python memory (under PyMem manager's control) ------> | |
40 __________________________________________________________________
41 [ Underlying general-purpose allocator (ex: C library malloc) ]
42 0 | <------ Virtual memory allocated for the python process -------> |
43
44 =========================================================================
45 _______________________________________________________________________
46 [ OS-specific Virtual Memory Manager (VMM) ]
47 -1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
48 __________________________________ __________________________________
49 [ ] [ ]
50 -2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
51
52 */
53 /*==========================================================================*/
54
55 /* A fast, special-purpose memory allocator for small blocks, to be used
56 on top of a general-purpose malloc -- heavily based on previous art. */
57
58 /* Vladimir Marangozov -- August 2000 */
59
60 /*
61 * "Memory management is where the rubber meets the road -- if we do the wrong
62 * thing at any level, the results will not be good. And if we don't make the
63 * levels work well together, we are in serious trouble." (1)
64 *
65 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
66 * "Dynamic Storage Allocation: A Survey and Critical Review",
67 * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
68 */
69
70 /* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
71
72 /*==========================================================================*/
73
74 /*
75 * Allocation strategy abstract:
76 *
77 * For small requests, the allocator sub-allocates <Big> blocks of memory.
78 * Requests greater than 256 bytes are routed to the system's allocator.
79 *
80 * Small requests are grouped in size classes spaced 8 bytes apart, due
81 * to the required valid alignment of the returned address. Requests of
82 * a particular size are serviced from memory pools of 4K (one VMM page).
83 * Pools are fragmented on demand and contain free lists of blocks of one
84 * particular size class. In other words, there is a fixed-size allocator
85 * for each size class. Free pools are shared by the different allocators
86 * thus minimizing the space reserved for a particular size class.
87 *
88 * This allocation strategy is a variant of what is known as "simple
89 * segregated storage based on array of free lists". The main drawback of
90 * simple segregated storage is that we might end up with a lot of reserved
91 * memory for the different free lists, which degenerate over time. To avoid
92 * this, we partition each free list in pools and we share dynamically the
93 * reserved space between all free lists. This technique is quite efficient
94 * for memory-intensive programs that allocate mainly small blocks.
95 *
96 * For small requests we have the following table:
97 *
98 * Request in bytes Size of allocated block Size class idx
99 * ----------------------------------------------------------------
100 * 1-8 8 0
101 * 9-16 16 1
102 * 17-24 24 2
103 * 25-32 32 3
104 * 33-40 40 4
105 * 41-48 48 5
106 * 49-56 56 6
107 * 57-64 64 7
108 * 65-72 72 8
109 * ... ... ...
110 * 241-248 248 30
111 * 249-256 256 31
112 *
113 * 0, 257 and up: routed to the underlying allocator.
114 */
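/*
 * Worked example (for illustration only): the table above follows from the
 * same arithmetic PyObject_Malloc() and INDEX2SIZE() use below, assuming
 * ALIGNMENT == 8 (ALIGNMENT_SHIFT == 3):
 *
 *     nbytes = 21
 *     idx    = (nbytes - 1) >> 3    =  (21 - 1) >> 3  =  2
 *     block  = (idx + 1) << 3       =  (2 + 1)  << 3  =  24
 *
 * So a 21-byte request is served from the 24-byte size class (idx 2); a
 * 24-byte request maps to the same class, while a 25-byte request maps to
 * idx 3 (32-byte blocks).
 */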
115
116 /*==========================================================================*/
117
118 /*
119 * -- Main tunable settings section --
120 */
121
122 /*
123 * Alignment of addresses returned to the user. 8-byte alignment works
124 * on most current architectures (with 32-bit or 64-bit address busses).
125 * The alignment value is also used for grouping small requests in size
126 * classes spaced ALIGNMENT bytes apart.
127 *
128 * You shouldn't change this unless you know what you are doing.
129 */
130 #define ALIGNMENT 8 /* must be 2^N */
131 #define ALIGNMENT_SHIFT 3
132 #define ALIGNMENT_MASK (ALIGNMENT - 1)
133
134 /* Return the number of bytes in size class I, as a uint. */
135 #define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
136
137 /*
138 * Max size threshold below which malloc requests are considered to be
139 * small enough to use the preallocated memory pools. You can tune
140 * this value according to your application behaviour and memory needs.
141 *
142 * The following invariants must hold:
143 * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
144 * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
145 *
146 * Although not required, for better performance and space efficiency,
147 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
148 */
149 #define SMALL_REQUEST_THRESHOLD 256
150 #define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
151
152 /*
153 * The system's VMM page size can be obtained on most unices with a
154 * getpagesize() call or deduced from various header files. To make
155 * things simpler, we assume that it is 4K, which is OK for most systems.
156 * It is probably better if this is the native page size, but it doesn't
157 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
158 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
159 * violation. 4K is apparently OK for all the platforms that Python
160 * currently targets.
161 */
162 #define SYSTEM_PAGE_SIZE (4 * 1024)
163 #define SYSTEM_PAGE_SIZE_MASK (SYSTEM_PAGE_SIZE - 1)
164
165 /*
166 * Maximum amount of memory managed by the allocator for small requests.
167 */
168 #ifdef WITH_MEMORY_LIMITS
169 #ifndef SMALL_MEMORY_LIMIT
170 #define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
171 #endif
172 #endif
173
174 /*
175 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
176 * on a page boundary. This is a reserved virtual address space for the
177 * current process (obtained through a malloc call). In no way this means
178 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
179 * an address range reservation for <Big> bytes, unless all pages within this
180 * space are referenced subsequently. So malloc'ing big blocks and not using
181 * them does not mean "wasting memory". It's an addressable range wastage...
182 *
183 * Therefore, allocating arenas with malloc is not optimal, because there is
184 * some address space wastage, but this is the most portable way to request
185 * memory from the system across various platforms.
186 */
187 #define ARENA_SIZE (256 << 10) /* 256KB */
188
189 #ifdef WITH_MEMORY_LIMITS
190 #define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
191 #endif
192
193 /*
194 * Size of the pools used for small blocks. Should be a power of 2,
195 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
196 */
197 #define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */
198 #define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK
199
200 /*
201 * -- End of tunable settings section --
202 */
203
204 /*==========================================================================*/
205
206 /*
207 * Locking
208 *
209 * To reduce lock contention, it would probably be better to refine the
210 * crude function locking with per-size-class locking. I'm not positive,
211 * however, that it's worth switching to such a locking policy because
212 * of the performance penalty it might introduce.
213 *
214 * The following macros describe the simplest (should also be the fastest)
215 * lock object on a particular platform and the init/fini/lock/unlock
216 * operations on it. The locks defined here are not expected to be recursive
217 * because it is assumed that they will always be called in the order:
218 * INIT, [LOCK, UNLOCK]*, FINI.
219 */
220
221 /*
222 * Python's threads are serialized, so object malloc locking is disabled.
223 */
224 #define SIMPLELOCK_DECL(lock) /* simple lock declaration */
225 #define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */
226 #define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */
227 #define SIMPLELOCK_LOCK(lock) /* acquire released lock */
228 #define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */
229
230 /*
231 * Basic types
232 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
233 */
234 #undef uchar
235 #define uchar unsigned char /* assuming == 8 bits */
236
237 #undef uint
238 #define uint unsigned int /* assuming >= 16 bits */
239
240 #undef ulong
241 #define ulong unsigned long /* assuming >= 32 bits */
242
243 #undef uptr
244 #define uptr Py_uintptr_t
245
246 /* When you say memory, my mind reasons in terms of (pointers to) blocks */
247 typedef uchar block;
248
249 /* Pool for small blocks. */
250 struct pool_header {
251 union { block *_padding;
252 uint count; } ref; /* number of allocated blocks */
253 block *freeblock; /* pool's free list head */
254 struct pool_header *nextpool; /* next pool of this size class */
255 struct pool_header *prevpool; /* previous pool "" */
256 uint arenaindex; /* index into arenas of base adr */
257 uint szidx; /* block size class index */
258 uint nextoffset; /* bytes to virgin block */
259 uint maxnextoffset; /* largest valid nextoffset */
260 };
261
262 typedef struct pool_header *poolp;
263
264 /* Record keeping for arenas. */
265 struct arena_object {
266 /* The address of the arena, as returned by malloc. Note that 0
267 * will never be returned by a successful malloc, and is used
268 * here to mark an arena_object that doesn't correspond to an
269 * allocated arena.
270 */
271 uptr address;
272
273 /* Pool-aligned pointer to the next pool to be carved off. */
274 block* pool_address;
275
276 /* The number of available pools in the arena: free pools + never-
277 * allocated pools.
278 */
279 uint nfreepools;
280
281 /* The total number of pools in the arena, whether or not available. */
282 uint ntotalpools;
283
284 /* Singly-linked list of available pools. */
285 struct pool_header* freepools;
286
287 /* Whenever this arena_object is not associated with an allocated
288 * arena, the nextarena member is used to link all unassociated
289 * arena_objects in the singly-linked `unused_arena_objects` list.
290 * The prevarena member is unused in this case.
291 *
292 * When this arena_object is associated with an allocated arena
293 * with at least one available pool, both members are used in the
294 * doubly-linked `usable_arenas` list, which is maintained in
295 * increasing order of `nfreepools` values.
296 *
297 * Else this arena_object is associated with an allocated arena
298 * all of whose pools are in use. `nextarena` and `prevarena`
299 * are both meaningless in this case.
300 */
301 struct arena_object* nextarena;
302 struct arena_object* prevarena;
303 };
304
305 #undef ROUNDUP
306 #define ROUNDUP(x) (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
307 #define POOL_OVERHEAD ROUNDUP(sizeof(struct pool_header))
308
309 #define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
310
311 /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
312 #define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))
313
314 /* Return total number of blocks in pool of size index I, as a uint. */
315 #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
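/*
 * Worked example (for illustration only): on a hypothetical 64-bit build
 * where sizeof(struct pool_header) == 48, the macros above give
 *
 *     POOL_OVERHEAD  =  ROUNDUP(48)        =  48
 *     NUMBLOCKS(0)   =  (4096 - 48) / 8    =  506   (8-byte blocks)
 *     NUMBLOCKS(31)  =  (4096 - 48) / 256  =  15    (256-byte blocks)
 *
 * POOL_ADDR(p) just clears the low POOL_SIZE_MASK bits of p, so any address
 * inside a pool maps back to that pool's header.
 */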
316
317 /*==========================================================================*/
318
319 /*
320 * This malloc lock
321 */
322 SIMPLELOCK_DECL(_malloc_lock)
323 #define LOCK() SIMPLELOCK_LOCK(_malloc_lock)
324 #define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)
325 #define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)
326 #define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)
327
328 /*
329 * Pool table -- headed, circular, doubly-linked lists of partially used pools.
330
331 This is involved. For an index i, usedpools[i+i] is the header for a list of
332 all partially used pools holding small blocks with "size class idx" i. So
333 usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
334 16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
335
336 Pools are carved off an arena's highwater mark (an arena_object's pool_address
337 member) as needed. Once carved off, a pool is in one of three states forever
338 after:
339
340 used == partially used, neither empty nor full
341 At least one block in the pool is currently allocated, and at least one
342 block in the pool is not currently allocated (note this implies a pool
343 has room for at least two blocks).
344 This is a pool's initial state, as a pool is created only when malloc
345 needs space.
346 The pool holds blocks of a fixed size, and is in the circular list headed
347 at usedpools[i] (see above). It's linked to the other used pools of the
348 same size class via the pool_header's nextpool and prevpool members.
349 If all but one block is currently allocated, a malloc can cause a
350 transition to the full state. If all but one block is not currently
351 allocated, a free can cause a transition to the empty state.
352
353 full == all the pool's blocks are currently allocated
354 On transition to full, a pool is unlinked from its usedpools[] list.
355 It's not linked to from anything then anymore, and its nextpool and
356 prevpool members are meaningless until it transitions back to used.
357 A free of a block in a full pool puts the pool back in the used state.
358 Then it's linked in at the front of the appropriate usedpools[] list, so
359 that the next allocation for its size class will reuse the freed block.
360
361 empty == all the pool's blocks are currently available for allocation
362 On transition to empty, a pool is unlinked from its usedpools[] list,
363 and linked to the front of its arena_object's singly-linked freepools list,
364 via its nextpool member. The prevpool member has no meaning in this case.
365 Empty pools have no inherent size class: the next time a malloc finds
366 an empty list in usedpools[], it takes the first pool off of freepools.
367 If the size class needed happens to be the same as the size class the pool
368 last had, some pool initialization can be skipped.
369
370
371 Block Management
372
373 Blocks within pools are again carved out as needed. pool->freeblock points to
374 the start of a singly-linked list of free blocks within the pool. When a
375 block is freed, it's inserted at the front of its pool's freeblock list. Note
376 that the available blocks in a pool are *not* linked all together when a pool
377 is initialized. Instead only "the first two" (lowest addresses) blocks are
378 set up, returning the first such block, and setting pool->freeblock to a
379 one-block list holding the second such block. This is consistent with the fact that
380 pymalloc strives at all levels (arena, pool, and block) never to touch a piece
381 of memory until it's actually needed.
382
383 So long as a pool is in the used state, we're certain there *is* a block
384 available for allocating, and pool->freeblock is not NULL. If pool->freeblock
385 points to the end of the free list before we've carved the entire pool into
386 blocks, that means we simply haven't yet gotten to one of the higher-address
387 blocks. The offset from the pool_header to the start of "the next" virgin
388 block is stored in the pool_header nextoffset member, and the largest value
389 of nextoffset that makes sense is stored in the maxnextoffset member when a
390 pool is initialized. All the blocks in a pool have been passed out at least
391 once when and only when nextoffset > maxnextoffset.
392
393
394 Major obscurity: While the usedpools vector is declared to have poolp
395 entries, it doesn't really. It really contains two pointers per (conceptual)
396 poolp entry, the nextpool and prevpool members of a pool_header. The
397 excruciating initialization code below fools C so that
398
399 usedpools[i+i]
400
401 "acts like" a genuine poolp, but only so long as you only reference its
402 nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish
403 compensates for the fact that a pool_header's nextpool and prevpool members
404 immediately follow a pool_header's first two members:
405
406 union { block *_padding;
407 uint count; } ref;
408 block *freeblock;
409
410 each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
411 contains is a fudged-up pointer p such that *if* C believes it's a poolp
412 pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
413 circular list is empty).
414
415 It's unclear why the usedpools setup is so convoluted. It could be to
416 minimize the amount of cache required to hold this heavily-referenced table
417 (which only *needs* the two interpool pointer members of a pool_header). OTOH,
418 referencing code has to remember to "double the index" and doing so isn't
419 free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
420 on C not inserting any padding anywhere in a pool_header at or before
421 the prevpool member.
422 **************************************************************************** */
423
424 #define PTA(x) ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
425 #define PT(x) PTA(x), PTA(x)
426
427 static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
428 PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
429 #if NB_SMALL_SIZE_CLASSES > 8
430 , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
431 #if NB_SMALL_SIZE_CLASSES > 16
432 , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
433 #if NB_SMALL_SIZE_CLASSES > 24
434 , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
435 #if NB_SMALL_SIZE_CLASSES > 32
436 , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
437 #if NB_SMALL_SIZE_CLASSES > 40
438 , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
439 #if NB_SMALL_SIZE_CLASSES > 48
440 , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
441 #if NB_SMALL_SIZE_CLASSES > 56
442 , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
443 #endif /* NB_SMALL_SIZE_CLASSES > 56 */
444 #endif /* NB_SMALL_SIZE_CLASSES > 48 */
445 #endif /* NB_SMALL_SIZE_CLASSES > 40 */
446 #endif /* NB_SMALL_SIZE_CLASSES > 32 */
447 #endif /* NB_SMALL_SIZE_CLASSES > 24 */
448 #endif /* NB_SMALL_SIZE_CLASSES > 16 */
449 #endif /* NB_SMALL_SIZE_CLASSES > 8 */
450 };
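/*
 * Illustrative sketch of why this works (it assumes, as the trick itself
 * does, that these pointer types all have the same size and that pool_header
 * has no padding before prevpool). For size class i,
 *
 *     p = PTA(i) = (poolp)((uchar *)&usedpools[2*i] - 2*sizeof(block *))
 *
 * Within a pool_header, nextpool sits at byte offset 2*sizeof(block *)
 * (right after the ref union and freeblock), so
 *
 *     &p->nextpool  ==  (void *)&usedpools[2*i]      which holds PTA(i) == p
 *     &p->prevpool  ==  (void *)&usedpools[2*i + 1]  which holds PTA(i) == p
 *
 * i.e. p->nextpool == p->prevpool == p: every size class starts out as an
 * empty headed circular list, and "pool != pool->nextpool" in
 * PyObject_Malloc() below is exactly the "list is non-empty" test.
 */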
451
452 /*==========================================================================
453 Arena management.
454
455 `arenas` is a vector of arena_objects. It contains maxarenas entries, some of
456 which may not be currently used (== they're arena_objects that aren't
457 currently associated with an allocated arena). Note that arenas proper are
458 separately malloc'ed.
459
460 Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
461 we do try to free() arenas, and use some mild heuristic strategies to increase
462 the likelihood that arenas eventually can be freed.
463
464 unused_arena_objects
465
466 This is a singly-linked list of the arena_objects that are currently not
467 being used (no arena is associated with them). Objects are taken off the
468 head of the list in new_arena(), and are pushed on the head of the list in
469 PyObject_Free() when the arena is empty. Key invariant: an arena_object
470 is on this list if and only if its .address member is 0.
471
472 usable_arenas
473
474 This is a doubly-linked list of the arena_objects associated with arenas
475 that have pools available. These pools are either waiting to be reused,
476 or have not been used before. The list is sorted to have the most-
477 allocated arenas first (ascending order based on the nfreepools member).
478 This means that the next allocation will come from a heavily used arena,
479 which gives the nearly empty arenas a chance to be returned to the system.
480 In my unscientific tests this dramatically improved the number of arenas
481 that could be freed.
482
483 Note that an arena_object associated with an arena all of whose pools are
484 currently in use isn't on either list.
485 */
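/*
 * The invariants described above, summarized for any arena_object `ao` in
 * the arenas vector:
 *
 *     ao->address == 0
 *         <=>  ao is on unused_arena_objects (and on no other list);
 *     ao->address != 0 && ao->nfreepools == 0
 *          =>  the arena is wholly allocated; ao is on neither list;
 *     ao->address != 0 && ao->nfreepools > 0
 *          =>  ao is on usable_arenas, which is kept sorted by ascending
 *              nfreepools so the most heavily used arenas are tried first.
 */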
486
487 /* Array of objects used to track chunks of memory (arenas). */
488 static struct arena_object* arenas = NULL;
489 /* Number of slots currently allocated in the `arenas` vector. */
490 static uint maxarenas = 0;
491
492 /* The head of the singly-linked, NULL-terminated list of available
493 * arena_objects.
494 */
495 static struct arena_object* unused_arena_objects = NULL;
496
497 /* The head of the doubly-linked, NULL-terminated at each end, list of
498 * arena_objects associated with arenas that have pools available.
499 */
500 static struct arena_object* usable_arenas = NULL;
501
502 /* How many arena_objects do we initially allocate?
503 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
504 * `arenas` vector.
505 */
506 #define INITIAL_ARENA_OBJECTS 16
507
508 /* Number of arenas allocated that haven't been free()'d. */
509 static size_t narenas_currently_allocated = 0;
510
511 /* Total number of times malloc() called to allocate an arena. */
512 static size_t ntimes_arena_allocated = 0;
513 /* High water mark (max value ever seen) for narenas_currently_allocated. */
514 static size_t narenas_highwater = 0;
515
516 /* Allocate a new arena. If we run out of memory, return NULL. Else
517 * allocate a new arena, and return the address of an arena_object
518 * describing the new arena. It's expected that the caller will set
519 * `usable_arenas` to the return value.
520 */
521 static struct arena_object*
522 new_arena(void)
523 {
524 struct arena_object* arenaobj;
525 uint excess; /* number of bytes above pool alignment */
526
527 #ifdef PYMALLOC_DEBUG
528 if (Py_GETENV("PYTHONMALLOCSTATS"))
529 _PyObject_DebugMallocStats(stderr);
530 #endif
531 if (unused_arena_objects == NULL) {
532 uint i;
533 uint numarenas;
534 size_t nbytes;
535
536 /* Double the number of arena objects on each allocation.
537 * Note that it's possible for `numarenas` to overflow.
538 */
539 numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
540 if (numarenas <= maxarenas)
541 return NULL; /* overflow */
542 #if SIZEOF_SIZE_T <= SIZEOF_INT
543 if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
544 return NULL; /* overflow */
545 #endif
546 nbytes = numarenas * sizeof(*arenas);
547 arenaobj = (struct arena_object *)realloc(arenas, nbytes);
548 if (arenaobj == NULL)
549 return NULL;
550 arenas = arenaobj;
551
552 /* We might need to fix pointers that were copied. However,
553 * new_arena only gets called when all the pages in the
554 * previous arenas are full. Thus, there are *no* pointers
555 * into the old array. Thus, we don't have to worry about
556 * invalid pointers. Just to be sure, some asserts:
557 */
558 assert(usable_arenas == NULL);
559 assert(unused_arena_objects == NULL);
560
561 /* Put the new arenas on the unused_arena_objects list. */
562 for (i = maxarenas; i < numarenas; ++i) {
563 arenas[i].address = 0; /* mark as unassociated */
564 arenas[i].nextarena = i < numarenas - 1 ?
565 &arenas[i+1] : NULL;
566 }
567
568 /* Update globals. */
569 unused_arena_objects = &arenas[maxarenas];
570 maxarenas = numarenas;
571 }
572
573 /* Take the next available arena object off the head of the list. */
574 assert(unused_arena_objects != NULL);
575 arenaobj = unused_arena_objects;
576 unused_arena_objects = arenaobj->nextarena;
577 assert(arenaobj->address == 0);
578 arenaobj->address = (uptr)malloc(ARENA_SIZE);
579 if (arenaobj->address == 0) {
580 /* The allocation failed: return NULL after putting the
581 * arenaobj back.
582 */
583 arenaobj->nextarena = unused_arena_objects;
584 unused_arena_objects = arenaobj;
585 return NULL;
586 }
587
588 ++narenas_currently_allocated;
589 ++ntimes_arena_allocated;
590 if (narenas_currently_allocated > narenas_highwater)
591 narenas_highwater = narenas_currently_allocated;
592 arenaobj->freepools = NULL;
593 /* pool_address <- first pool-aligned address in the arena
594 nfreepools <- number of whole pools that fit after alignment */
595 arenaobj->pool_address = (block*)arenaobj->address;
596 arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
597 assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
598 excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
599 if (excess != 0) {
600 --arenaobj->nfreepools;
601 arenaobj->pool_address += POOL_SIZE - excess;
602 }
603 arenaobj->ntotalpools = arenaobj->nfreepools;
604
605 return arenaobj;
606 }
607
608 /*
609 Py_ADDRESS_IN_RANGE(P, POOL)
610
611 Return true if and only if P is an address that was allocated by pymalloc.
612 POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
613 (the caller is asked to compute this because the macro expands POOL more than
614 once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
615 variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
616 called on every alloc/realloc/free, micro-efficiency is important here).
617
618 Tricky: Let B be the arena base address associated with the pool, B =
619 arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
620
621 B <= P < B + ARENA_SIZE
622
623 Subtracting B throughout, this is true iff
624
625 0 <= P-B < ARENA_SIZE
626
627 By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
628
629 Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
630 before the first arena has been allocated. `arenas` is still NULL in that
631 case. We're relying on maxarenas also being 0 in that case, so that
632 (POOL)->arenaindex < maxarenas must be false, saving us from trying to index
633 into a NULL arenas.
634
635 Details: given P and POOL, the arena_object corresponding to P is AO =
636 arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
637 stores, etc), POOL is the correct address of P's pool, AO.address is the
638 correct base address of the pool's arena, and P must be within ARENA_SIZE of
639 AO.address. In addition, AO.address is not 0 (no arena can start at address 0
640 (NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
641 controls P.
642
643 Now suppose obmalloc does not control P (e.g., P was obtained via a direct
644 call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
645 in this case -- it may even be uninitialized trash. If the trash arenaindex
646 is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
647 control P.
648
649 Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
650 allocated arena, obmalloc controls all the memory in slice AO.address :
651 AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
652 so P doesn't lie in that slice, so the macro correctly reports that P is not
653 controlled by obmalloc.
654
655 Finally, if P is not controlled by obmalloc and AO corresponds to an unused
656 arena_object (one not currently associated with an allocated arena),
657 AO.address is 0, and the second test in the macro reduces to:
658
659 P < ARENA_SIZE
660
661 If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
662 that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
663 of the test still passes, and the third clause (AO.address != 0) is necessary
664 to get the correct result: AO.address is 0 in this case, so the macro
665 correctly reports that P is not controlled by obmalloc (despite that P lies in
666 slice AO.address : AO.address + ARENA_SIZE).
667
668 Note: The third (AO.address != 0) clause was added in Python 2.5. Before
669 2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
670 corresponded to a currently-allocated arena, so the "P is not controlled by
671 obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
672 was impossible.
673
674 Note that the logic is excruciating, and reading up possibly uninitialized
675 memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
676 creates problems for some memory debuggers. The overwhelming advantage is
677 that this test determines whether an arbitrary address is controlled by
678 obmalloc in a small constant time, independent of the number of arenas
679 obmalloc controls. Since this test is needed at every entry point, it's
680 extremely desirable that it be this fast.
681
682 Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated
683 by Python, it is important that (POOL)->arenaindex is read only once, as
684 another thread may be concurrently modifying the value without holding the
685 GIL. To accomplish this, the arenaindex_temp variable is used to store
686 (POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's
687 execution. The caller of the macro is responsible for declaring this
688 variable.
689 */
690 #define Py_ADDRESS_IN_RANGE(P, POOL) \
691 ((arenaindex_temp = (POOL)->arenaindex) < maxarenas && \
692 (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \
693 arenas[arenaindex_temp].address != 0)
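/*
 * Worked example (for illustration only) of the unsigned-arithmetic trick
 * above: with B = arenas[arenaindex_temp].address, the single compare
 *
 *     (uptr)(P) - B < (uptr)ARENA_SIZE
 *
 * covers both halves of B <= P < B + ARENA_SIZE.  If P < B, the unsigned
 * subtraction wraps to a huge value; e.g. with a 32-bit uptr, B == 0x40000
 * and P == 0x3fff8 give 0xfffffff8, which is not < ARENA_SIZE (0x40000), so
 * addresses below the arena are rejected just like addresses at or beyond
 * B + ARENA_SIZE.
 */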
694
695
696 /* This is only useful when running memory debuggers such as
697 * Purify or Valgrind. Uncomment to use.
698 *
699 #define Py_USING_MEMORY_DEBUGGER
700 */
701
702 #ifdef Py_USING_MEMORY_DEBUGGER
703
704 /* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
705 * This leads to thousands of spurious warnings when using
706 * Purify or Valgrind. By making a function, we can easily
707 * suppress the uninitialized memory reads in this one function.
708 * So we won't ignore real errors elsewhere.
709 *
710 * Disable the macro and use a function.
711 */
712
713 #undef Py_ADDRESS_IN_RANGE
714
715 #if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
716 (__GNUC__ >= 4))
717 #define Py_NO_INLINE __attribute__((__noinline__))
718 #else
719 #define Py_NO_INLINE
720 #endif
721
722 /* Don't make static, to try to ensure this isn't inlined. */
723 int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
724 #undef Py_NO_INLINE
725 #endif
726
727 /*==========================================================================*/
728
729 /* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
730 * from all other currently live pointers. This may not be possible.
731 */
732
733 /*
734 * The basic blocks are ordered by decreasing execution frequency,
735 * which minimizes the number of jumps in the most common cases,
736 * improves branch prediction and instruction scheduling (small
737 * block allocations typically result in a couple of instructions).
738 * Unless the optimizer reorders everything, being too smart...
739 */
740
741 #undef PyObject_Malloc
742 void *
743 PyObject_Malloc(size_t nbytes)
744 {
745 block *bp;
746 poolp pool;
747 poolp next;
748 uint size;
749
750 #ifdef WITH_VALGRIND
751 if (UNLIKELY(running_on_valgrind == -1))
752 running_on_valgrind = RUNNING_ON_VALGRIND;
753 if (UNLIKELY(running_on_valgrind))
754 goto redirect;
755 #endif
756
757 /*
758 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
759 * Most Python internals blindly use a signed Py_ssize_t to track
760 * things without checking for overflows or negatives.
761 * As size_t is unsigned, checking for nbytes < 0 is not required.
762 */
763 if (nbytes > PY_SSIZE_T_MAX)
764 return NULL;
765
766 /*
767 * This implicitly redirects malloc(0).
768 */
769 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
770 LOCK();
771 /*
772 * Most frequent paths first
773 */
774 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
775 pool = usedpools[size + size];
776 if (pool != pool->nextpool) {
777 /*
778 * There is a used pool for this size class.
779 * Pick up the head block of its free list.
780 */
781 ++pool->ref.count;
782 bp = pool->freeblock;
783 assert(bp != NULL);
784 if ((pool->freeblock = *(block **)bp) != NULL) {
785 UNLOCK();
786 return (void *)bp;
787 }
788 /*
789 * Reached the end of the free list, try to extend it.
790 */
791 if (pool->nextoffset <= pool->maxnextoffset) {
792 /* There is room for another block. */
793 pool->freeblock = (block*)pool +
794 pool->nextoffset;
795 pool->nextoffset += INDEX2SIZE(size);
796 *(block **)(pool->freeblock) = NULL;
797 UNLOCK();
798 return (void *)bp;
799 }
800 /* Pool is full, unlink from used pools. */
801 next = pool->nextpool;
802 pool = pool->prevpool;
803 next->prevpool = pool;
804 pool->nextpool = next;
805 UNLOCK();
806 return (void *)bp;
807 }
808
809 /* There isn't a pool of the right size class immediately
810 * available: use a free pool.
811 */
812 if (usable_arenas == NULL) {
813 /* No arena has a free pool: allocate a new arena. */
814 #ifdef WITH_MEMORY_LIMITS
815 if (narenas_currently_allocated >= MAX_ARENAS) {
816 UNLOCK();
817 goto redirect;
818 }
819 #endif
820 usable_arenas = new_arena();
821 if (usable_arenas == NULL) {
822 UNLOCK();
823 goto redirect;
824 }
825 usable_arenas->nextarena =
826 usable_arenas->prevarena = NULL;
827 }
828 assert(usable_arenas->address != 0);
829
830 /* Try to get a cached free pool. */
831 pool = usable_arenas->freepools;
832 if (pool != NULL) {
833 /* Unlink from cached pools. */
834 usable_arenas->freepools = pool->nextpool;
835
836 /* This arena already had the smallest nfreepools
837 * value, so decreasing nfreepools doesn't change
838 * that, and we don't need to rearrange the
839 * usable_arenas list. However, if the arena has
840 * become wholly allocated, we need to remove its
841 * arena_object from usable_arenas.
842 */
843 --usable_arenas->nfreepools;
844 if (usable_arenas->nfreepools == 0) {
845 /* Wholly allocated: remove. */
846 assert(usable_arenas->freepools == NULL);
847 assert(usable_arenas->nextarena == NULL ||
848 usable_arenas->nextarena->prevarena ==
849 usable_arenas);
850
851 usable_arenas = usable_arenas->nextarena;
852 if (usable_arenas != NULL) {
853 usable_arenas->prevarena = NULL;
854 assert(usable_arenas->address != 0);
855 }
856 }
857 else {
858 /* nfreepools > 0: it must be that freepools
859 * isn't NULL, or that we haven't yet carved
860 * off all the arena's pools for the first
861 * time.
862 */
863 assert(usable_arenas->freepools != NULL ||
864 usable_arenas->pool_address <=
865 (block*)usable_arenas->address +
866 ARENA_SIZE - POOL_SIZE);
867 }
868 init_pool:
869 /* Frontlink to used pools. */
870 next = usedpools[size + size]; /* == prev */
871 pool->nextpool = next;
872 pool->prevpool = next;
873 next->nextpool = pool;
874 next->prevpool = pool;
875 pool->ref.count = 1;
876 if (pool->szidx == size) {
877 /* Luckily, this pool last contained blocks
878 * of the same size class, so its header
879 * and free list are already initialized.
880 */
881 bp = pool->freeblock;
882 pool->freeblock = *(block **)bp;
883 UNLOCK();
884 return (void *)bp;
885 }
886 /*
887 * Initialize the pool header, set up the free list to
888 * contain just the second block, and return the first
889 * block.
890 */
891 pool->szidx = size;
892 size = INDEX2SIZE(size);
893 bp = (block *)pool + POOL_OVERHEAD;
894 pool->nextoffset = POOL_OVERHEAD + (size << 1);
895 pool->maxnextoffset = POOL_SIZE - size;
896 pool->freeblock = bp + size;
897 *(block **)(pool->freeblock) = NULL;
898 UNLOCK();
899 return (void *)bp;
900 }
901
902 /* Carve off a new pool. */
903 assert(usable_arenas->nfreepools > 0);
904 assert(usable_arenas->freepools == NULL);
905 pool = (poolp)usable_arenas->pool_address;
906 assert((block*)pool <= (block*)usable_arenas->address +
907 ARENA_SIZE - POOL_SIZE);
908 pool->arenaindex = usable_arenas - arenas;
909 assert(&arenas[pool->arenaindex] == usable_arenas);
910 pool->szidx = DUMMY_SIZE_IDX;
911 usable_arenas->pool_address += POOL_SIZE;
912 --usable_arenas->nfreepools;
913
914 if (usable_arenas->nfreepools == 0) {
915 assert(usable_arenas->nextarena == NULL ||
916 usable_arenas->nextarena->prevarena ==
917 usable_arenas);
918 /* Unlink the arena: it is completely allocated. */
919 usable_arenas = usable_arenas->nextarena;
920 if (usable_arenas != NULL) {
921 usable_arenas->prevarena = NULL;
922 assert(usable_arenas->address != 0);
923 }
924 }
925
926 goto init_pool;
927 }
928
929 /* The small block allocator ends here. */
930
931 redirect:
932 /* Redirect the original request to the underlying (libc) allocator.
933 * We jump here on bigger requests, on error in the code above (as a
934 * last chance to serve the request) or when the max memory limit
935 * has been reached.
936 */
937 if (nbytes == 0)
938 nbytes = 1;
939 return (void *)malloc(nbytes);
940 }
941
942 /* free */
943
944 #undef PyObject_Free
945 void
946 PyObject_Free(void *p)
947 {
948 poolp pool;
949 block *lastfree;
950 poolp next, prev;
951 uint size;
952 #ifndef Py_USING_MEMORY_DEBUGGER
953 uint arenaindex_temp;
954 #endif
955
956 if (p == NULL) /* free(NULL) has no effect */
957 return;
958
959 #ifdef WITH_VALGRIND
960 if (UNLIKELY(running_on_valgrind > 0))
961 goto redirect;
962 #endif
963
964 pool = POOL_ADDR(p);
965 if (Py_ADDRESS_IN_RANGE(p, pool)) {
966 /* We allocated this address. */
967 LOCK();
968 /* Link p to the start of the pool's freeblock list. Since
969 * the pool had at least the p block outstanding, the pool
970 * wasn't empty (so it's already in a usedpools[] list, or
971 * was full and is in no list -- it's not in the freeblocks
972 * list in any case).
973 */
974 assert(pool->ref.count > 0); /* else it was empty */
975 *(block **)p = lastfree = pool->freeblock;
976 pool->freeblock = (block *)p;
977 if (lastfree) {
978 struct arena_object* ao;
979 uint nf; /* ao->nfreepools */
980
981 /* freeblock wasn't NULL, so the pool wasn't full,
982 * and the pool is in a usedpools[] list.
983 */
984 if (--pool->ref.count != 0) {
985 /* pool isn't empty: leave it in usedpools */
986 UNLOCK();
987 return;
988 }
989 /* Pool is now empty: unlink from usedpools, and
990 * link to the front of freepools. This ensures that
991 * previously freed pools will be allocated later
992 * (since they are no longer referenced, they may have been paged out).
993 */
994 next = pool->nextpool;
995 prev = pool->prevpool;
996 next->prevpool = prev;
997 prev->nextpool = next;
998
999 /* Link the pool to freepools. This is a singly-linked
1000 * list, and pool->prevpool isn't used there.
1001 */
1002 ao = &arenas[pool->arenaindex];
1003 pool->nextpool = ao->freepools;
1004 ao->freepools = pool;
1005 nf = ++ao->nfreepools;
1006
1007 /* All the rest is arena management. We just freed
1008 * a pool, and there are 4 cases for arena mgmt:
1009 * 1. If all the pools are free, return the arena to
1010 * the system free().
1011 * 2. If this is the only free pool in the arena,
1012 * add the arena back to the `usable_arenas` list.
1013 * 3. If the "next" arena has a smaller count of free
1014 * pools, we have to "slide this arena right" to
1015 * restore the invariant that usable_arenas is sorted in order of
1016 * nfreepools.
1017 * 4. Else there's nothing more to do.
1018 */
1019 if (nf == ao->ntotalpools) {
1020 /* Case 1. First unlink ao from usable_arenas.
1021 */
1022 assert(ao->prevarena == NULL ||
1023 ao->prevarena->address != 0);
1024 assert(ao->nextarena == NULL ||
1025 ao->nextarena->address != 0);
1026
1027 /* Fix the pointer in the prevarena, or the
1028 * usable_arenas pointer.
1029 */
1030 if (ao->prevarena == NULL) {
1031 usable_arenas = ao->nextarena;
1032 assert(usable_arenas == NULL ||
1033 usable_arenas->address != 0);
1034 }
1035 else {
1036 assert(ao->prevarena->nextarena == ao);
1037 ao->prevarena->nextarena =
1038 ao->nextarena;
1039 }
1040 /* Fix the pointer in the nextarena. */
1041 if (ao->nextarena != NULL) {
1042 assert(ao->nextarena->prevarena == ao);
1043 ao->nextarena->prevarena =
1044 ao->prevarena;
1045 }
1046 /* Record that this arena_object slot is
1047 * available to be reused.
1048 */
1049 ao->nextarena = unused_arena_objects;
1050 unused_arena_objects = ao;
1051
1052 /* Free the entire arena. */
1053 free((void *)ao->address);
1054 ao->address = 0; /* mark unassociated */
1055 --narenas_currently_allocated;
1056
1057 UNLOCK();
1058 return;
1059 }
1060 if (nf == 1) {
1061 /* Case 2. Put ao at the head of
1062 * usable_arenas. Note that because
1063 * ao->nfreepools was 0 before, ao isn't
1064 * currently on the usable_arenas list.
1065 */
1066 ao->nextarena = usable_arenas;
1067 ao->prevarena = NULL;
1068 if (usable_arenas)
1069 usable_arenas->prevarena = ao;
1070 usable_arenas = ao;
1071 assert(usable_arenas->address != 0);
1072
1073 UNLOCK();
1074 return;
1075 }
1076 /* If this arena is now out of order, we need to keep
1077 * the list sorted. The list is kept sorted so that
1078 * the "most full" arenas are used first, which allows
1079 * the nearly empty arenas to be completely freed. In
1080 * a few unscientific tests, it seems like this
1081 * approach allowed a lot more memory to be freed.
1082 */
1083 if (ao->nextarena == NULL ||
1084 nf <= ao->nextarena->nfreepools) {
1085 /* Case 4. Nothing to do. */
1086 UNLOCK();
1087 return;
1088 }
1089 /* Case 3: We have to move the arena towards the end
1090 * of the list, because it has more free pools than
1091 * the arena to its right.
1092 * First unlink ao from usable_arenas.
1093 */
1094 if (ao->prevarena != NULL) {
1095 /* ao isn't at the head of the list */
1096 assert(ao->prevarena->nextarena == ao);
1097 ao->prevarena->nextarena = ao->nextarena;
1098 }
1099 else {
1100 /* ao is at the head of the list */
1101 assert(usable_arenas == ao);
1102 usable_arenas = ao->nextarena;
1103 }
1104 ao->nextarena->prevarena = ao->prevarena;
1105
1106 /* Locate the new insertion point by iterating over
1107 * the list, using our nextarena pointer.
1108 */
1109 while (ao->nextarena != NULL &&
1110 nf > ao->nextarena->nfreepools) {
1111 ao->prevarena = ao->nextarena;
1112 ao->nextarena = ao->nextarena->nextarena;
1113 }
1114
1115 /* Insert ao at this point. */
1116 assert(ao->nextarena == NULL ||
1117 ao->prevarena == ao->nextarena->prevarena);
1118 assert(ao->prevarena->nextarena == ao->nextarena);
1119
1120 ao->prevarena->nextarena = ao;
1121 if (ao->nextarena != NULL)
1122 ao->nextarena->prevarena = ao;
1123
1124 /* Verify that the swaps worked. */
1125 assert(ao->nextarena == NULL ||
1126 nf <= ao->nextarena->nfreepools);
1127 assert(ao->prevarena == NULL ||
1128 nf > ao->prevarena->nfreepools);
1129 assert(ao->nextarena == NULL ||
1130 ao->nextarena->prevarena == ao);
1131 assert((usable_arenas == ao &&
1132 ao->prevarena == NULL) ||
1133 ao->prevarena->nextarena == ao);
1134
1135 UNLOCK();
1136 return;
1137 }
1138 /* Pool was full, so doesn't currently live in any list:
1139 * link it to the front of the appropriate usedpools[] list.
1140 * This mimics LRU pool usage for new allocations and
1141 * targets optimal filling when several pools contain
1142 * blocks of the same size class.
1143 */
1144 --pool->ref.count;
1145 assert(pool->ref.count > 0); /* else the pool is empty */
1146 size = pool->szidx;
1147 next = usedpools[size + size];
1148 prev = next->prevpool;
1149 /* insert pool before next: prev <-> pool <-> next */
1150 pool->nextpool = next;
1151 pool->prevpool = prev;
1152 next->prevpool = pool;
1153 prev->nextpool = pool;
1154 UNLOCK();
1155 return;
1156 }
1157
1158 #ifdef WITH_VALGRIND
1159 redirect:
1160 #endif
1161 /* We didn't allocate this address. */
1162 free(p);
1163 }
1164
1165 /* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1166 * then as the Python docs promise, we do not treat this like free(p), and
1167 * return a non-NULL result.
1168 */
1169
1170 #undef PyObject_Realloc
1171 void *
1172 PyObject_Realloc(void *p, size_t nbytes)
1173 {
1174 void *bp;
1175 poolp pool;
1176 size_t size;
1177 #ifndef Py_USING_MEMORY_DEBUGGER
1178 uint arenaindex_temp;
1179 #endif
1180
1181 if (p == NULL)
1182 return PyObject_Malloc(nbytes);
1183
1184 /*
1185 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
1186 * Most Python internals blindly use a signed Py_ssize_t to track
1187 * things without checking for overflows or negatives.
1188 * As size_t is unsigned, checking for nbytes < 0 is not required.
1189 */
1190 if (nbytes > PY_SSIZE_T_MAX)
1191 return NULL;
1192
1193 #ifdef WITH_VALGRIND
1194 /* Treat running_on_valgrind == -1 the same as 0 */
1195 if (UNLIKELY(running_on_valgrind > 0))
1196 goto redirect;
1197 #endif
1198
1199 pool = POOL_ADDR(p);
1200 if (Py_ADDRESS_IN_RANGE(p, pool)) {
1201 /* We're in charge of this block */
1202 size = INDEX2SIZE(pool->szidx);
1203 if (nbytes <= size) {
1204 /* The block is staying the same or shrinking. If
1205 * it's shrinking, there's a tradeoff: it costs
1206 * cycles to copy the block to a smaller size class,
1207 * but it wastes memory not to copy it. The
1208 * compromise here is to copy on shrink only if at
1209 * least 25% of size can be shaved off.
1210 */
1211 if (4 * nbytes > 3 * size) {
1212 /* It's the same,
1213 * or shrinking and new/old > 3/4.
1214 */
1215 return p;
1216 }
1217 size = nbytes;
1218 }
1219 bp = PyObject_Malloc(nbytes);
1220 if (bp != NULL) {
1221 memcpy(bp, p, size);
1222 PyObject_Free(p);
1223 }
1224 return bp;
1225 }
1226 #ifdef WITH_VALGRIND
1227 redirect:
1228 #endif
1229 /* We're not managing this block. If nbytes <=
1230 * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1231 * block. However, if we do, we need to copy the valid data from
1232 * the C-managed block to one of our blocks, and there's no portable
1233 * way to know how much of the memory space starting at p is valid.
1234 * As bug 1185883 pointed out the hard way, it's possible that the
1235 * C-managed block is "at the end" of allocated VM space, so that
1236 * a memory fault can occur if we try to copy nbytes bytes starting
1237 * at p. Instead we punt: let C continue to manage this block.
1238 */
1239 if (nbytes)
1240 return realloc(p, nbytes);
1241 /* C doesn't define the result of realloc(p, 0) (it may or may not
1242 * return NULL then), but Python's docs promise that nbytes==0 never
1243 * returns NULL. We don't pass 0 to realloc(), to avoid that endcase
1244 * to begin with. Even then, we can't be sure that realloc() won't
1245 * return NULL.
1246 */
1247 bp = realloc(p, 1);
1248 return bp ? bp : p;
1249 }
1250
1251 #else /* ! WITH_PYMALLOC */
1252
1253 /*==========================================================================*/
1254 /* pymalloc not enabled: Redirect the entry points to malloc. These will
1255 * only be used by extensions that are compiled with pymalloc enabled. */
1256
1257 void *
1258 PyObject_Malloc(size_t n)
1259 {
1260 return PyMem_MALLOC(n);
1261 }
1262
1263 void *
1264 PyObject_Realloc(void *p, size_t n)
1265 {
1266 return PyMem_REALLOC(p, n);
1267 }
1268
1269 void
1270 PyObject_Free(void *p)
1271 {
1272 PyMem_FREE(p);
1273 }
1274 #endif /* WITH_PYMALLOC */
1275
1276 #ifdef PYMALLOC_DEBUG
1277 /*==========================================================================*/
1278 /* A cross-platform debugging allocator. This doesn't manage memory directly,
1279 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1280 */
1281
1282 /* Special bytes broadcast into debug memory blocks at appropriate times.
1283 * Strings of these are unlikely to be valid addresses, floats, ints or
1284 * 7-bit ASCII.
1285 */
1286 #undef CLEANBYTE
1287 #undef DEADBYTE
1288 #undef FORBIDDENBYTE
1289 #define CLEANBYTE 0xCB /* clean (newly allocated) memory */
1290 #define DEADBYTE 0xDB /* dead (newly freed) memory */
1291 #define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
1292
1293 /* We tag each block with an API ID in order to detect API violations */
1294 #define _PYMALLOC_MEM_ID 'm' /* the PyMem_Malloc() API */
1295 #define _PYMALLOC_OBJ_ID 'o' /* The PyObject_Malloc() API */
1296
1297 static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
1298
1299 /* serialno is always incremented via calling this routine. The point is
1300 * to supply a single place to set a breakpoint.
1301 */
1302 static void
1303 bumpserialno(void)
1304 {
1305 ++serialno;
1306 }
1307
1308 #define SST SIZEOF_SIZE_T
1309
1310 /* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1311 static size_t
1312 read_size_t(const void *p)
1313 {
1314 const uchar *q = (const uchar *)p;
1315 size_t result = *q++;
1316 int i;
1317
1318 for (i = SST; --i > 0; ++q)
1319 result = (result << 8) | *q;
1320 return result;
1321 }
1322
1323 /* Write n as a big-endian size_t, MSB at address p, LSB at
1324 * p + sizeof(size_t) - 1.
1325 */
1326 static void
1327 write_size_t(void *p, size_t n)
1328 {
1329 uchar *q = (uchar *)p + SST - 1;
1330 int i;
1331
1332 for (i = SST; --i >= 0; --q) {
1333 *q = (uchar)(n & 0xff);
1334 n >>= 8;
1335 }
1336 }
1337
1338 #ifdef Py_DEBUG
1339 /* Is target in the list? The list is traversed via the nextpool pointers.
1340 * The list may be NULL-terminated, or circular. Return 1 if target is in
1341 * list, else 0.
1342 */
1343 static int
1344 pool_is_in_list(const poolp target, poolp list)
1345 {
1346 poolp origlist = list;
1347 assert(target != NULL);
1348 if (list == NULL)
1349 return 0;
1350 do {
1351 if (target == list)
1352 return 1;
1353 list = list->nextpool;
1354 } while (list != NULL && list != origlist);
1355 return 0;
1356 }
1357
1358 #else
1359 #define pool_is_in_list(X, Y) 1
1360
1361 #endif /* Py_DEBUG */
1362
1363 /* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1364 fills them with useful stuff, here calling the underlying malloc's result p:
1365
1366 p[0: S]
1367 Number of bytes originally asked for. This is a size_t, big-endian (easier
1368 to read in a memory dump).
1369 p[S: 2*S]
1370 The API identifier (one byte, 'm' or 'o') followed by S-1 copies of FORBIDDENBYTE. Used to catch under-writes and reads.
1371 p[2*S: 2*S+n]
1372 The requested memory, filled with copies of CLEANBYTE.
1373 Used to catch references to uninitialized memory.
1374 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
1375 handled the request itself.
1376 p[2*S+n: 2*S+n+S]
1377 Copies of FORBIDDENBYTE. Used to catch over-writes and reads.
1378 p[2*S+n+S: 2*S+n+2*S]
1379 A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
1380 and _PyObject_DebugRealloc.
1381 This is a big-endian size_t.
1382 If "bad memory" is detected later, the serial number gives an
1383 excellent way to set a breakpoint on the next run, to capture the
1384 instant at which this block was passed out.
1385 */
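/*
 * Worked example (for illustration only), assuming S == 8 and a 3-byte
 * request through the PyObject_* API: _PyObject_DebugMallocApi('o', 3) asks
 * the underlying allocator for 3 + 4*8 == 35 bytes and lays them out as
 *
 *     p[0:8]    00 00 00 00 00 00 00 03    requested size, big-endian
 *     p[8]      'o'                        API id
 *     p[9:16]   FB FB FB FB FB FB FB       leading pad (FORBIDDENBYTE)
 *     p[16:19]  CB CB CB                   the 3 user bytes (CLEANBYTE)
 *     p[19:27]  FB FB FB FB FB FB FB FB    trailing pad (FORBIDDENBYTE)
 *     p[27:35]  serial number, big-endian
 *
 * and returns &p[16] to the caller.
 */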
1386
1387 /* debug replacements for the PyMem_* memory API */
1388 void *
1389 _PyMem_DebugMalloc(size_t nbytes)
1390 {
1391 return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);
1392 }
1393 void *
1394 _PyMem_DebugRealloc(void *p, size_t nbytes)
1395 {
1396 return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);
1397 }
1398 void
1399 _PyMem_DebugFree(void *p)
1400 {
1401 _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);
1402 }
1403
1404 /* debug replacements for the PyObject_* memory API */
1405 void *
1406 _PyObject_DebugMalloc(size_t nbytes)
1407 {
1408 return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);
1409 }
1410 void *
1411 _PyObject_DebugRealloc(void *p, size_t nbytes)
1412 {
1413 return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);
1414 }
1415 void
1416 _PyObject_DebugFree(void *p)
1417 {
1418 _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);
1419 }
1420 void
1421 _PyObject_DebugCheckAddress(const void *p)
1422 {
1423 _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);
1424 }
1425
1426
1427 /* generic debug memory api, with an "id" to identify the API in use */
1428 void *
1429 _PyObject_DebugMallocApi(char id, size_t nbytes)
1430 {
1431 uchar *p; /* base address of malloc'ed block */
1432 uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */
1433 size_t total; /* nbytes + 4*SST */
1434
1435 bumpserialno();
1436 total = nbytes + 4*SST;
1437 if (total < nbytes)
1438 /* overflow: can't represent total as a size_t */
1439 return NULL;
1440
1441 p = (uchar *)PyObject_Malloc(total);
1442 if (p == NULL)
1443 return NULL;
1444
1445 /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
1446 write_size_t(p, nbytes);
1447 p[SST] = (uchar)id;
1448 memset(p + SST + 1 , FORBIDDENBYTE, SST-1);
1449
1450 if (nbytes > 0)
1451 memset(p + 2*SST, CLEANBYTE, nbytes);
1452
1453 /* at tail, write pad (SST bytes) and serialno (SST bytes) */
1454 tail = p + 2*SST + nbytes;
1455 memset(tail, FORBIDDENBYTE, SST);
1456 write_size_t(tail + SST, serialno);
1457
1458 return p + 2*SST;
1459 }
1460
1461 /* The debug free first checks the 2*SST bytes on each end for sanity (in
1462 particular, that the FORBIDDENBYTEs with the api ID are still intact).
1463 Then fills the original bytes with DEADBYTE.
1464 Then calls the underlying free.
1465 */
1466 void
1467 _PyObject_DebugFreeApi(char api, void *p)
1468 {
1469 uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */
1470 size_t nbytes;
1471
1472 if (p == NULL)
1473 return;
1474 _PyObject_DebugCheckAddressApi(api, p);
1475 nbytes = read_size_t(q);
1476 nbytes += 4*SST;
1477 if (nbytes > 0)
1478 memset(q, DEADBYTE, nbytes);
1479 PyObject_Free(q);
1480 }
1481
1482 void *
1483 _PyObject_DebugReallocApi(char api, void *p, size_t nbytes)
1484 {
1485 uchar *q = (uchar *)p;
1486 uchar *tail;
1487 size_t total; /* nbytes + 4*SST */
1488 size_t original_nbytes;
1489 int i;
1490
1491 if (p == NULL)
1492 return _PyObject_DebugMallocApi(api, nbytes);
1493
1494 _PyObject_DebugCheckAddressApi(api, p);
1495 bumpserialno();
1496 original_nbytes = read_size_t(q - 2*SST);
1497 total = nbytes + 4*SST;
1498 if (total < nbytes)
1499 /* overflow: can't represent total as a size_t */
1500 return NULL;
1501
1502 if (nbytes < original_nbytes) {
1503 /* shrinking: mark old extra memory dead */
1504 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST);
1505 }
1506
1507 /* Resize and add decorations. We may get a new pointer here, in which
1508 * case we didn't get the chance to mark the old memory with DEADBYTE,
1509 * but we live with that.
1510 */
1511 q = (uchar *)PyObject_Realloc(q - 2*SST, total);
1512 if (q == NULL)
1513 return NULL;
1514
1515 write_size_t(q, nbytes);
1516 assert(q[SST] == (uchar)api);
1517 for (i = 1; i < SST; ++i)
1518 assert(q[SST + i] == FORBIDDENBYTE);
1519 q += 2*SST;
1520 tail = q + nbytes;
1521 memset(tail, FORBIDDENBYTE, SST);
1522 write_size_t(tail + SST, serialno);
1523
1524 if (nbytes > original_nbytes) {
1525 /* growing: mark new extra memory clean */
1526 memset(q + original_nbytes, CLEANBYTE,
1527 nbytes - original_nbytes);
1528 }
1529
1530 return q;
1531 }
1532
1533 /* Check the forbidden bytes on both ends of the memory allocated for p.
1534 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
1535 * and call Py_FatalError to kill the program.
1536 * The API id is also checked.
1537 */
1538 void
1539 _PyObject_DebugCheckAddressApi(char api, const void *p)
1540 {
1541 const uchar *q = (const uchar *)p;
1542 char msgbuf[64];
1543 char *msg;
1544 size_t nbytes;
1545 const uchar *tail;
1546 int i;
1547 char id;
1548
1549 if (p == NULL) {
1550 msg = "didn't expect a NULL pointer";
1551 goto error;
1552 }
1553
1554 /* Check the API id */
1555 id = (char)q[-SST];
1556 if (id != api) {
1557 msg = msgbuf;
1558 snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
1559 msgbuf[sizeof(msgbuf)-1] = 0;
1560 goto error;
1561 }
1562
1563 /* Check the stuff at the start of p first: if there's underwrite
1564 * corruption, the number-of-bytes field may be nuts, and checking
1565 * the tail could lead to a segfault then.
1566 */
1567 for (i = SST-1; i >= 1; --i) {
1568 if (*(q-i) != FORBIDDENBYTE) {
1569 msg = "bad leading pad byte";
1570 goto error;
1571 }
1572 }
1573
1574 nbytes = read_size_t(q - 2*SST);
1575 tail = q + nbytes;
1576 for (i = 0; i < SST; ++i) {
1577 if (tail[i] != FORBIDDENBYTE) {
1578 msg = "bad trailing pad byte";
1579 goto error;
1580 }
1581 }
1582
1583 return;
1584
1585 error:
1586 _PyObject_DebugDumpAddress(p);
1587 Py_FatalError(msg);
1588 }
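
/* Illustrative sketch (an editorial addition, not part of the original
 * sources): the id check above is what catches a caller that allocates
 * through one debug API and releases through another, e.g.
 *
 *     void *u = _PyObject_DebugMallocApi('o', 16);
 *     _PyObject_DebugFreeApi('m', u);
 *
 * The free path calls _PyObject_DebugCheckAddressApi('m', u), which finds 'o'
 * stored at u[-SST], so it dumps the block via _PyObject_DebugDumpAddress()
 * and then aborts through Py_FatalError() with
 * "bad ID: Allocated using API 'o', verified using API 'm'".
 */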
1589
1590 /* Display info to stderr about the memory block at p. */
1591 void
1592 _PyObject_DebugDumpAddress(const void *p)
1593 {
1594 const uchar *q = (const uchar *)p;
1595 const uchar *tail;
1596 size_t nbytes, serial;
1597 int i;
1598 int ok;
1599 char id;
1600
1601 fprintf(stderr, "Debug memory block at address p=%p:", p);
1602 if (p == NULL) {
1603 fprintf(stderr, "\n");
1604 return;
1605 }
1606 id = (char)q[-SST];
1607 fprintf(stderr, " API '%c'\n", id);
1608
1609 nbytes = read_size_t(q - 2*SST);
1610 fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
1611 "requested\n", nbytes);
1612
1613 /* In case this is nuts, check the leading pad bytes first. */
1614 fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
1615 ok = 1;
1616 for (i = 1; i <= SST-1; ++i) {
1617 if (*(q-i) != FORBIDDENBYTE) {
1618 ok = 0;
1619 break;
1620 }
1621 }
1622 if (ok)
1623 fputs("FORBIDDENBYTE, as expected.\n", stderr);
1624 else {
1625 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1626 FORBIDDENBYTE);
1627 for (i = SST-1; i >= 1; --i) {
1628 const uchar byte = *(q-i);
1629 fprintf(stderr, " at p-%d: 0x%02x", i, byte);
1630 if (byte != FORBIDDENBYTE)
1631 fputs(" *** OUCH", stderr);
1632 fputc('\n', stderr);
1633 }
1634
1635 fputs(" Because memory is corrupted at the start, the "
1636 "count of bytes requested\n"
1637 " may be bogus, and checking the trailing pad "
1638 "bytes may segfault.\n", stderr);
1639 }
1640
1641 tail = q + nbytes;
1642 fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
1643 ok = 1;
1644 for (i = 0; i < SST; ++i) {
1645 if (tail[i] != FORBIDDENBYTE) {
1646 ok = 0;
1647 break;
1648 }
1649 }
1650 if (ok)
1651 fputs("FORBIDDENBYTE, as expected.\n", stderr);
1652 else {
1653 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1654 FORBIDDENBYTE);
1655 for (i = 0; i < SST; ++i) {
1656 const uchar byte = tail[i];
1657 fprintf(stderr, " at tail+%d: 0x%02x",
1658 i, byte);
1659 if (byte != FORBIDDENBYTE)
1660 fputs(" *** OUCH", stderr);
1661 fputc('\n', stderr);
1662 }
1663 }
1664
1665 serial = read_size_t(tail + SST);
1666 fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
1667 "u to debug malloc/realloc.\n", serial);
1668
1669 if (nbytes > 0) {
1670 i = 0;
1671 fputs(" Data at p:", stderr);
1672 /* print up to 8 bytes at the start */
1673 while (q < tail && i < 8) {
1674 fprintf(stderr, " %02x", *q);
1675 ++i;
1676 ++q;
1677 }
1678 /* and up to 8 at the end */
1679 if (q < tail) {
1680 if (tail - q > 8) {
1681 fputs(" ...", stderr);
1682 q = tail - 8;
1683 }
1684 while (q < tail) {
1685 fprintf(stderr, " %02x", *q);
1686 ++q;
1687 }
1688 }
1689 fputc('\n', stderr);
1690 }
1691 }
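
/* Reader's note (an editorial addition, not part of the original sources):
 * when interpreting the "Data at p" bytes printed above, the fill patterns
 * used by the debug allocator are the quickest clue to what went wrong:
 * CLEANBYTE marks memory that was handed out but never written, DEADBYTE
 * marks memory that was freed (or shrunk away by realloc), and FORBIDDENBYTE
 * should only ever appear in the pad regions on either side of the block.
 */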
1692
1693 #endif /* PYMALLOC_DEBUG */
1694
1695 static size_t
1696 printone(FILE *out, const char* msg, size_t value)
1697 {
1698 int i, k;
1699 char buf[100];
1700 size_t origvalue = value;
1701
1702 fputs(msg, out);
1703 for (i = (int)strlen(msg); i < 35; ++i)
1704 fputc(' ', out);
1705 fputc('=', out);
1706
1707 /* Write the value with commas. */
1708 i = 22;
1709 buf[i--] = '\0';
1710 buf[i--] = '\n';
1711 k = 3;
1712 do {
1713 size_t nextvalue = value / 10;
1714 uint digit = (uint)(value - nextvalue * 10);
1715 value = nextvalue;
1716 buf[i--] = (char)(digit + '0');
1717 --k;
1718 if (k == 0 && value && i >= 0) {
1719 k = 3;
1720 buf[i--] = ',';
1721 }
1722 } while (value && i >= 0);
1723
1724 while (i >= 0)
1725 buf[i--] = ' ';
1726 fputs(buf, out);
1727
1728 return origvalue;
1729 }
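
/* Illustrative sketch (an editorial addition, not part of the original
 * sources): printone() pads the label with spaces out to 35 columns, prints
 * '=', and then right-justifies the value, inserting a comma every three
 * digits, in a fixed-width field.  For example,
 *
 *     printone(out, "# bytes in allocated blocks", 1234567);
 *
 * writes a line of roughly this shape (spacing schematic):
 *
 *     # bytes in allocated blocks        =             1,234,567
 */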
1730
1731 void
1732 _PyDebugAllocatorStats(FILE *out,
1733 const char *block_name, int num_blocks, size_t sizeof_block)
1734 {
1735 char buf1[128];
1736 char buf2[128];
1737 PyOS_snprintf(buf1, sizeof(buf1),
1738 "%d %ss * %zd bytes each",
1739 num_blocks, block_name, sizeof_block);
1740 PyOS_snprintf(buf2, sizeof(buf2),
1741 "%48s ", buf1);
1742 (void)printone(out, buf2, num_blocks * sizeof_block);
1743 }
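
/* Illustrative sketch (hypothetical caller, an editorial addition): object
 * implementations that maintain their own free lists can report them through
 * this helper; a type caching numfree objects of sizeof(SomeObject) bytes
 * each might call
 *
 *     _PyDebugAllocatorStats(out, "free SomeObject", numfree, sizeof(SomeObject));
 *
 * which prints a right-aligned "<numfree> free SomeObjects * <size> bytes
 * each" label followed by the total byte count, using printone() above.
 */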
1744
1745
1746 /* Print summary info to "out" about the state of pymalloc's structures.
1747 * In Py_DEBUG mode, also perform some expensive internal consistency
1748 * checks.
1749 */
1750 void
1751 _PyObject_DebugMallocStats(FILE *out)
1752 {
1753 uint i;
1754 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
1755 /* # of pools, allocated blocks, and free blocks per class index */
1756 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1757 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1758 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1759 /* total # of allocated bytes in used and full pools */
1760 size_t allocated_bytes = 0;
1761 /* total # of available bytes in used pools */
1762 size_t available_bytes = 0;
1763 /* # of free pools + pools not yet carved out of current arena */
1764 uint numfreepools = 0;
1765 /* # of bytes for arena alignment padding */
1766 size_t arena_alignment = 0;
1767 /* # of bytes in used and full pools used for pool_headers */
1768 size_t pool_header_bytes = 0;
1769 /* # of bytes in used and full pools wasted due to quantization,
1770 * i.e. the necessarily leftover space at the ends of used and
1771 * full pools.
1772 */
1773 size_t quantization = 0;
1774 /* # of arenas actually allocated. */
1775 size_t narenas = 0;
1776 /* running total -- should equal narenas * ARENA_SIZE */
1777 size_t total;
1778 char buf[128];
1779
1780 fprintf(out, "Small block threshold = %d, in %u size classes.\n",
1781 SMALL_REQUEST_THRESHOLD, numclasses);
1782
1783 for (i = 0; i < numclasses; ++i)
1784 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
1785
1786 /* Because full pools aren't linked to from anything, it's easiest
1787 * to march over all the arenas. If we're lucky, most of the memory
1788 * will be living in full pools -- would be a shame to miss them.
1789 */
1790 for (i = 0; i < maxarenas; ++i) {
1791 uint j;
1792 uptr base = arenas[i].address;
1793
1794 /* Skip arenas which are not allocated. */
1795 if (arenas[i].address == (uptr)NULL)
1796 continue;
1797 narenas += 1;
1798
1799 numfreepools += arenas[i].nfreepools;
1800
1801 /* round up to pool alignment */
1802 if (base & (uptr)POOL_SIZE_MASK) {
1803 arena_alignment += POOL_SIZE;
1804 base &= ~(uptr)POOL_SIZE_MASK;
1805 base += POOL_SIZE;
1806 }
1807
1808 /* visit every pool in the arena */
1809 assert(base <= (uptr) arenas[i].pool_address);
1810 for (j = 0;
1811 base < (uptr) arenas[i].pool_address;
1812 ++j, base += POOL_SIZE) {
1813 poolp p = (poolp)base;
1814 const uint sz = p->szidx;
1815 uint freeblocks;
1816
1817 if (p->ref.count == 0) {
1818 /* currently unused */
1819 assert(pool_is_in_list(p, arenas[i].freepools));
1820 continue;
1821 }
1822 ++numpools[sz];
1823 numblocks[sz] += p->ref.count;
1824 freeblocks = NUMBLOCKS(sz) - p->ref.count;
1825 numfreeblocks[sz] += freeblocks;
1826 #ifdef Py_DEBUG
1827 if (freeblocks > 0)
1828 assert(pool_is_in_list(p, usedpools[sz + sz]));
1829 #endif
1830 }
1831 }
1832 assert(narenas == narenas_currently_allocated);
1833
1834 fputc('\n', out);
1835 fputs("class size num pools blocks in use avail blocks\n"
1836 "----- ---- --------- ------------- ------------\n",
1837 out);
1838
1839 for (i = 0; i < numclasses; ++i) {
1840 size_t p = numpools[i];
1841 size_t b = numblocks[i];
1842 size_t f = numfreeblocks[i];
1843 uint size = INDEX2SIZE(i);
1844 if (p == 0) {
1845 assert(b == 0 && f == 0);
1846 continue;
1847 }
1848 fprintf(out, "%5u %6u "
1849 "%11" PY_FORMAT_SIZE_T "u "
1850 "%15" PY_FORMAT_SIZE_T "u "
1851 "%13" PY_FORMAT_SIZE_T "u\n",
1852 i, size, p, b, f);
1853 allocated_bytes += b * size;
1854 available_bytes += f * size;
1855 pool_header_bytes += p * POOL_OVERHEAD;
1856 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
1857 }
1858 fputc('\n', out);
1859 #ifdef PYMALLOC_DEBUG
1860 (void)printone(out, "# times object malloc called", serialno);
1861 #endif
1862 (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
1863 (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
1864 (void)printone(out, "# arenas highwater mark", narenas_highwater);
1865 (void)printone(out, "# arenas allocated current", narenas);
1866
1867 PyOS_snprintf(buf, sizeof(buf),
1868 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
1869 narenas, ARENA_SIZE);
1870 (void)printone(out, buf, narenas * ARENA_SIZE);
1871
1872 fputc('\n', out);
1873
1874 total = printone(out, "# bytes in allocated blocks", allocated_bytes);
1875 total += printone(out, "# bytes in available blocks", available_bytes);
1876
1877 PyOS_snprintf(buf, sizeof(buf),
1878 "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
1879 total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);
1880
1881 total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
1882 total += printone(out, "# bytes lost to quantization", quantization);
1883 total += printone(out, "# bytes lost to arena alignment", arena_alignment);
1884 (void)printone(out, "Total", total);
1885 }
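
/* Reader's note (an editorial addition, not part of the original sources):
 * in interpreter builds that expose it, this report is reachable from Python
 * code via sys._debugmallocstats(); calling _PyObject_DebugMallocStats(stderr)
 * directly from C, or from a debugger, produces the same pool/arena summary.
 */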
1886
1887 #ifdef Py_USING_MEMORY_DEBUGGER
1888 /* Make this function last so gcc won't inline it since the definition is
1889 * after the reference.
1890 */
1891 int
1892 Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1893 {
1894 uint arenaindex_temp = pool->arenaindex;
1895
1896 return arenaindex_temp < maxarenas &&
1897 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
1898 arenas[arenaindex_temp].address != 0;
1899 }
1900 #endif