void
_int_free(mstate av, Void_t* mem)
{
  mchunkptr       p;           /* chunk corresponding to mem */
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr*    fb;          /* associated fastbin */
  mchunkptr       nextchunk;   /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int             nextinuse;   /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr       bck;         /* misc temp for linking */
  mchunkptr       fwd;         /* misc temp for linking */
const char *errstr = NULL;
  p = mem2chunk(mem);
  size = chunksize(p);
  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      malloc_printerr (check_action, errstr, mem);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size.  */
  if (__builtin_expect (size < MINSIZE, 0))
    {
      errstr = "free(): invalid size";
      goto errout;
    }
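  /* Annotation (not in glibc): on a 64-bit build, (uintptr_t) -size
     equals 2^64 - size, so "p > -size" rejects exactly those pointers
     for which p + size would wrap past the end of the address space;
     e.g. with size = 0x20, any p above 0xffffffffffffffe0 is refused.  */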
check_inuse_chunk(av, p);
  /*
    For free requests no larger than 64 bytes, the freed chunk is
    simply pushed onto a fastbin so that the next malloc can reuse it
    directly.  If TRIM_FASTBINS is defined, however, a freed chunk
    that happens to border the top chunk is not placed into a fastbin
    even when it is that small; instead it is consolidated with its
    neighboring chunks.
  */
if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
#if TRIM_FASTBINS
      /*
        If TRIM_FASTBINS set, don't place chunks
        bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {
    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size))
                             >= av->system_mem, 0))
      {
        errstr = "free(): invalid next size (fast)";
        goto errout;
      }
    set_fastchunks(av);
    fb = &(av->fastbins[fastbin_index(size)]);
    /* Another simple check: make sure the top of the bin is not the
       record we are going to add (i.e., double free).  */
    if (__builtin_expect (*fb == p, 0))
      {
        errstr = "double free or corruption (fasttop)";
        goto errout;
      }
    if (__builtin_expect (perturb_byte, 0))
      free_perturb (mem, size - SIZE_SZ);
    p->fd = *fb;
    *fb = p;
  }
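  /* Annotation (not in glibc): the two stores above are a plain LIFO
     push, so after free(a); free(b); the bin reads b -> a -> ...  The
     fasttop check above only inspects the top record, which is why a
     back-to-back double free is caught while a buried one, e.g.
     free(a); free(b); free(a), slips past it.  */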
  /*
    Any chunk larger than 64 bytes that was not obtained through mmap
    is first consolidated with its neighboring chunks, including the
    top chunk.  The merged chunk is then placed on the unsorted list;
    on its next invocation, malloc may try to satisfy requests larger
    than 64 bytes from the unsorted list, moving the remaining
    unsorted chunks into the normal bins as it goes.
  */
  else if (!chunk_is_mmapped(p)) {
    nextchunk = chunk_at_offset(p, size);
    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__builtin_expect (p == av->top, 0))
      {
        errstr = "double free or corruption (top)";
        goto errout;
      }
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
                          && (char *) nextchunk
                             >= ((char *) av->top + chunksize(av->top)), 0))
      {
        errstr = "double free or corruption (out)";
        goto errout;
      }
    /* Or whether the block is actually not marked used.  */
    if (__builtin_expect (!prev_inuse(nextchunk), 0))
      {
        errstr = "double free or corruption (!prev)";
        goto errout;
      }
    nextsize = chunksize(nextchunk);
    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (nextsize >= av->system_mem, 0))
      {
        errstr = "free(): invalid next size (normal)";
        goto errout;
      }
    if (__builtin_expect (perturb_byte, 0))
      free_perturb (mem, size - SIZE_SZ);
    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }
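    /* Annotation (not in glibc): when PREV_INUSE is clear,
       p->prev_size holds the size of the free chunk immediately below
       p.  With prevsize = 0x90, for example, p moves back 0x90 bytes
       onto that chunk's header and size grows by 0x90; unlink() then
       removes the old free chunk from its bin so the merged chunk can
       be relinked as a single one.  */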
    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
      /* consolidate forward */
      if (!nextinuse) {
        unlink(nextchunk, bck, fwd);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);
      /*
        Place the chunk in unsorted chunk list. Chunks are
        not placed into regular bins until after they have
        been given one chance to be used in malloc.
      */
      bck = unsorted_chunks(av);
      fwd = bck->fd;
      p->bk = bck;
      p->fd = fwd;
      bck->fd = p;
      fwd->bk = p;
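      /* Annotation (not in glibc): unsorted_chunks(av) is the list
         head of a circular doubly linked list, and the pointer stores
         above splice p in directly behind it:
         head <-> p <-> old-first-chunk.  */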
      set_head(p, size | PREV_INUSE);
      set_foot(p, size);
      check_free_chunk(av, p);
    }
    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */
    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }
    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.

      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */
    /*
      Only when the consolidated free chunk is larger than 64KB will
      free call sbrk(-size) to return memory to the system.  Note,
      however, that even with 64KB of free chunks, whether memory can
      actually be returned also depends on the position of the top
      pointer: free can only give back the space of the top chunk.
    */
    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (have_fastchunks(av))
        malloc_consolidate(av);
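      /* Annotation (not in glibc): FASTBIN_CONSOLIDATION_THRESHOLD is
         65536, so the "64KB" above is the merged chunk size at which a
         malloc_consolidate() sweep of the fastbins is triggered.  */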
      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
        if ((unsigned long)(chunksize(av->top)) >=
            (unsigned long)(mp_.trim_threshold))
          sYSTRIm(mp_.top_pad, av);
#endif
      } else {
        /* Always try heap_trim(), even if the top chunk is not
           large, because the corresponding heap might go away.  */
        heap_info *heap = heap_for_ptr(top(av));
        assert(heap->ar_ptr == av);
        heap_trim(heap, mp_.top_pad);
      }
    }
  } /* END else if (!chunk_is_mmapped(p)) */

  /*
    If the chunk was allocated via mmap, release via munmap(). Note
    that if HAVE_MMAP is false but chunk_is_mmapped is true, then
    user must have overwritten memory. There's nothing we can do to
    catch this error unless MALLOC_DEBUG is set, in which case
    check_inuse_chunk (above) will have triggered error.
  */
  /*
    Memory obtained through mmap is released directly with munmap.
  */
  else {
#if HAVE_MMAP
    munmap_chunk (p);
#endif
  }
}
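/*
  A minimal sketch (annotation, not part of glibc) showing the fasttop
  double-free check firing.  Assuming a libc whose _int_free matches
  the listing above (fastbins with no tcache in front), freeing the
  same small allocation twice in a row leaves p at the top of its
  fastbin, so the second free hits the *fb == p test and the process
  aborts with "double free or corruption (fasttop)".  Checking only
  the bin top keeps the test O(1); scanning the whole fastbin on every
  free would defeat the point of the fast path.
*/
#include <stdlib.h>

int main(void) {
  void *p = malloc(32);  /* small request: the freed chunk goes to a fastbin */
  free(p);               /* p becomes the top record of its fastbin */
  free(p);               /* *fb == p -> "double free or corruption (fasttop)" */
  return 0;
}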