推荐: blog.csdn.net/aquester https://github.com/eyjian https://www.cnblogs.com/aquester http://blog.chinaunix.net/uid/20682147.html
全部博文(594)
分类: C/C++
2015-10-09 15:31:49
__lll_mutex_lock_wait的错误原因.pdf
free时:
(gdb) bt #0 0x00002b9405ea1c38 in __lll_mutex_lock_wait () from /lib64/libc.so.6 #1 0x00002b9405e45e5f in _L_lock_4026 () from /lib64/libc.so.6 #2 0x00002b9405e42df1 in free () from /lib64/libc.so.6 #3 0x00002b9405e5b148 in tzset_internal () from /lib64/libc.so.6 #4 0x00002b9405e5b9d0 in tzset () from /lib64/libc.so.6 #5 0x00002b9405e5fe44 in strftime_l () from /lib64/libc.so.6 #6 0x00002b9405e93701 in __vsyslog_chk () from /lib64/libc.so.6 #7 0x00002b9405e3c6d0 in __libc_message () from /lib64/libc.so.6 #8 0x00002b9405e4177e in malloc_printerr () from /lib64/libc.so.6 #9 0x00002b9405e42dfc in free () from /lib64/libc.so.6 #10 0x00000000004007c9 in main (argc=1, argv=0x7fffa524f4d8) at x.cpp:17 |
malloc时:
#0 0x00002afca8597c38 in __lll_mutex_lock_wait () from /lib64/libc.so.6 #1 0x00002afca853be5f in _L_lock_4026 () from /lib64/libc.so.6 #2 0x00002afca8538df1 in free () from /lib64/libc.so.6 #3 0x00002afca8551148 in tzset_internal () from /lib64/libc.so.6 #4 0x00002afca85519d0 in tzset () from /lib64/libc.so.6 #5 0x00002afca8555e44 in strftime_l () from /lib64/libc.so.6 #6 0x00002afca8589701 in __vsyslog_chk () from /lib64/libc.so.6 #7 0x00002afca85326d0 in __libc_message () from /lib64/libc.so.6 #8 0x00002afca853777e in malloc_printerr () from /lib64/libc.so.6 #9 0x00002afca8539774 in _int_malloc () from /lib64/libc.so.6 #10 0x00002afca853b1b6 in malloc () from /lib64/libc.so.6 #11 0x00002afca8125fcd in operator new () from /usr/lib64/libstdc++.so.6 |
(gdb) bt
#0 0xbfffe402 in __kernel_vsyscall () #1 0xb7e4a18e in __lll_mutex_lock_wait () from /lib/libc.so.6 #2 0xb7de8d81 in _L_mutex_lock_4119 () from /lib/libc.so.6 #3 0xb7e8d509 in __PRETTY_FUNCTION__.7757 () from /lib/libc.so.6 #4 0x00000000 in ?? () |
/*
 * Minimal repro for the article's __lll_mutex_lock_wait hang:
 * with LIBC_FATAL_STDERR_ set and stderr closed, glibc's double-free
 * report falls back to syslog; vsyslog -> strftime -> tzset re-enters
 * free() while malloc's lock is already held, deadlocking the process.
 *
 * NOTE(review): the original listing's #include header names were lost
 * in extraction; restored to the headers these calls require.
 */
#include <stdlib.h>   /* setenv, malloc, free */
#include <unistd.h>   /* close, STDERR_FILENO */
#include <time.h>     /* time, localtime */

int main()
{
    setenv("LIBC_FATAL_STDERR_", "1", 1); /* make __libc_message target stderr */
    close(STDERR_FILENO);                 /* force that write to fail -> syslog path */

    time_t now = time(NULL);
    struct tm* current = localtime(&now); /* primes the tz state used later by vsyslog */
    (void)current;

    char* str = (char*)malloc(100);
    free(str);
    free(str);  /* intentional double free: triggers malloc_printerr */
    return 0;
}
运行下面的代码,即可重现上面的__lll_mutex_lock_wait()问题:
// g++ -g -o x x.cpp
//
// Repro for the hang via heap-header corruption instead of double free:
// overwriting the size field of p2's malloc_chunk makes delete report
// "free(): invalid next size"; with stderr closed the report goes to
// syslog and deadlocks in __lll_mutex_lock_wait.
//
// NOTE(review): the original listing lost its header names (and the '#'
// on one include) during extraction; restored to the required headers.
#include <stdlib.h>   // setenv
#include <unistd.h>   // close, STDERR_FILENO
#include <time.h>     // time, localtime

int main(int argc, char** argv)
{
    setenv("LIBC_FATAL_STDERR_", "1", 1); // make __libc_message write to stderr
    close(STDERR_FILENO);                 // make that write fail, so it logs to syslog

    time_t now = time(NULL);
    struct tm* t = localtime(&now);
    (void)t;

    char *p1 = new char[1024];
    char *p2 = new char[4096];

    p1[1024 + sizeof(size_t)] = 1; // corrupts the size member of p2's malloc_chunk
    delete []p2;
    delete []p1;
    return 0;
}
当将上述代码中的“close(STDERR_FILENO)”注释掉后,亦即出错信息写标准输出:
// g++ -g -o b b.cpp
//
// Same corruption repro, but with stderr left open: glibc prints the
// "double free or corruption (!prev)" report plus backtrace/memory map
// to stderr and aborts with a core, instead of deadlocking in syslog.
//
// NOTE(review): the original listing's #include header names were lost
// in extraction; restored to the headers these calls require.
#include <stdlib.h>   // setenv
#include <unistd.h>   // close, STDERR_FILENO (unused here, kept for symmetry)
#include <time.h>     // time, localtime

int main(int argc, char** argv)
{
    setenv("LIBC_FATAL_STDERR_", "1", 1);
    // close(STDERR_FILENO);  // kept open: error text goes to stderr, not syslog

    time_t now = time(NULL);
    struct tm* current = localtime(&now);
    (void)current;

    char *p1 = new char[1024];
    char *p2 = new char[4096];

    p1[1024 + sizeof(size_t)] = 1; // corrupts the size member of p2's malloc_chunk
    delete []p2;
    delete []p1;
    return 0;
}
则看到的运行结果为:
*** glibc detected *** ./b: double free or corruption (!prev): 0x00000000005015a0 *** ======= Backtrace: ========= /lib64/libc.so.6[0x2acfbb87d77e] /lib64/libc.so.6(__libc_free+0x6c)[0x2acfbb87edfc] ./b(__gxx_personality_v0+0x13f)[0x40076f] /lib64/libc.so.6(__libc_start_main+0xf4)[0x2acfbb82f304] ./b(__gxx_personality_v0+0x39)[0x400669] ======= Memory map: ======== 00400000-00401000 r-xp 00000000 08:01 1261572 /tmp/X/b 00500000-00501000 rw-p 00000000 08:01 1261572 /tmp/X/b 00501000-00522000 rw-p 00501000 00:00 0 [heap] 2acfbb295000-2acfbb2b0000 r-xp 00000000 08:01 229419 /lib64/ld-2.4.so 2acfbb2b0000-2acfbb2b1000 rw-p 2acfbb2b0000 00:00 0 2acfbb2bf000-2acfbb2c0000 rw-p 2acfbb2bf000 00:00 0 2acfbb3af000-2acfbb3b1000 rw-p 0001a000 08:01 229419 /lib64/ld-2.4.so 2acfbb3b1000-2acfbb494000 r-xp 00000000 08:01 529172 /usr/lib64/libstdc++.so.6.0.8 2acfbb494000-2acfbb594000 ---p 000e3000 08:01 529172 /usr/lib64/libstdc++.so.6.0.8 2acfbb594000-2acfbb59a000 r--p 000e3000 08:01 529172 /usr/lib64/libstdc++.so.6.0.8 2acfbb59a000-2acfbb59d000 rw-p 000e9000 08:01 529172 /usr/lib64/libstdc++.so.6.0.8 2acfbb59d000-2acfbb5af000 rw-p 2acfbb59d000 00:00 0 2acfbb5af000-2acfbb603000 r-xp 00000000 08:01 229463 /lib64/libm-2.4.so 2acfbb603000-2acfbb702000 ---p 00054000 08:01 229463 /lib64/libm-2.4.so 2acfbb702000-2acfbb704000 rw-p 00053000 08:01 229463 /lib64/libm-2.4.so 2acfbb704000-2acfbb711000 r-xp 00000000 08:01 229456 /lib64/libgcc_s.so.1 2acfbb711000-2acfbb810000 ---p 0000d000 08:01 229456 /lib64/libgcc_s.so.1 2acfbb810000-2acfbb811000 rw-p 0000c000 08:01 229456 /lib64/libgcc_s.so.1 2acfbb811000-2acfbb812000 rw-p 2acfbb811000 00:00 0 2acfbb812000-2acfbb948000 r-xp 00000000 08:01 229436 /lib64/libc-2.4.so 2acfbb948000-2acfbba48000 ---p 00136000 08:01 229436 /lib64/libc-2.4.so 2acfbba48000-2acfbba4b000 r--p 00136000 08:01 229436 /lib64/libc-2.4.so 2acfbba4b000-2acfbba4d000 rw-p 00139000 08:01 229436 /lib64/libc-2.4.so 2acfbba4d000-2acfbba53000 rw-p 2acfbba4d000 00:00 0 
2acfbbb00000-2acfbbb21000 rw-p 2acfbbb00000 00:00 0 2acfbbb21000-2acfbbc00000 ---p 2acfbbb21000 00:00 0 7fffef800000-7fffef815000 rw-p 7fffef800000 00:00 0 [stack] ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso] Aborted (core dumped) |
并会产生core文件:
(gdb) bt #0 0x00002abaa26ddf45 in raise () from /lib64/libc.so.6 #1 0x00002abaa26df340 in abort () from /lib64/libc.so.6 #2 0x00002abaa271478b in __libc_message () from /lib64/libc.so.6 #3 0x00002abaa271977e in malloc_printerr () from /lib64/libc.so.6 #4 0x00002abaa271adfc in free () from /lib64/libc.so.6 #5 0x000000000040076f in main (argc=1, argv=0x7fff08975c28) at b.cpp:18 |
如果将上面代码中的“delete [] p1;”和“delete [] p2;”先后顺序对调一下:
// g++ -g -o b b.cpp
//
// Variant with the delete order swapped ("delete []p1" first): the
// corruption is now detected while freeing p1, so the core happens on
// the first delete.
//
// NOTE(review): the original listing's #include header names were lost
// in extraction; restored to the headers these calls require.
#include <stdlib.h>   // setenv
#include <unistd.h>   // close, STDERR_FILENO (unused here, kept for symmetry)
#include <time.h>     // time, localtime

int main(int argc, char** argv)
{
    setenv("LIBC_FATAL_STDERR_", "1", 1);
    //close(STDERR_FILENO);

    time_t now = time(NULL);
    struct tm* current = localtime(&now);
    (void)current;

    char *p1 = new char[1024];
    char *p2 = new char[4096];

    p1[1024 + sizeof(size_t)] = 1; // corrupts the size member of p2's malloc_chunk
    delete []p1; // cores here
    delete []p2;
    return 0;
}
则运行时core:
(gdb) bt #0 0x00002b6e669df99e in () from /lib64/libc.so.6 #1 0x00002b6e669dfdfc in free () from /lib64/libc.so.6 #2 0x000000000040076f in main (argc=1, argv=0x7fff446b0938) at d.cpp:18 |
如果将代码改成如下(不调用time相关函数):
// g++ -g -o b b.cpp
//
// Variant with the time()/localtime() calls removed: the process still
// cores on the corrupted free, showing the time calls are not needed to
// trigger the detection itself (only the syslog deadlock path involves
// tzset/strftime).
//
// NOTE(review): the original listing's #include header names were lost
// in extraction; restored to the headers these calls require.
#include <stdlib.h>   // setenv
#include <unistd.h>   // close, STDERR_FILENO
#include <time.h>     // time, localtime (intentionally not called below)

int main(int argc, char** argv)
{
    setenv("LIBC_FATAL_STDERR_", "1", 1);
    close(STDERR_FILENO);

    //time_t now = time(NULL);
    //struct tm* current = localtime(&now);

    char *p1 = new char[1024];
    char *p2 = new char[4096];

    p1[1024 + sizeof(size_t)] = 1; // corrupts the size member of p2's malloc_chunk
    delete [] p2;
    delete [] p1;
    return 0;
}
运行时会core:
(gdb) bt #0 0x00002ad55252af45 in raise () from /lib64/libc.so.6 #1 0x00002ad55252c340 in abort () from /lib64/libc.so.6 #2 0x00002ad55256178b in () from /lib64/libc.so.6 #3 0x00002ad55256677e in () from /lib64/libc.so.6 #4 0x00002ad552567dfc in free () from /lib64/libc.so.6 #5 0x000000000040070e in main (argc=1, argv=0x7fff58b28db8) at d.cpp:18 |
同时,系统日志文件/var/log/messages会包含(被认为已free过):
Oct 9 15:06:17 Tencent64 d: *** glibc detected *** /tmp/X/d: double free or corruption (!prev): 0x0000000000501670 *** |
malloc_chunk 有两种使用形态——“已分配”和“空闲”两种状态下，同一块头部内存有两种不同的解释结构：
malloc_chunk相关的源代码:
/* ---- Quoted from glibc malloc.c: chunk-header constants, the
   malloc_chunk "view" struct, and the accessor macros referenced by the
   _int_free/_int_malloc excerpts elsewhere in this article.
   NOTE(review): extraction collapsed the original multi-line source onto
   single lines; tokens below are kept byte-identical to the quote. ---- */
#ifndef Void_t #if (__STD_C || defined(WIN32)) #define Void_t void #else #define Void_t char #endif #endif /*Void_t*/
#define INTERNAL_SIZE_T size_t #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
/* The low three bits of the size field are flag bits, possible because
   all chunk sizes are multiples of the alignment: */
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ #define PREV_INUSE 0x1
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ #define IS_MMAPPED 0x2
/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained from a non-main arena. This is only set immediately before handing the chunk to the user, if necessary. */ #define NON_MAIN_ARENA 0x4
static struct malloc_state main_arena;
/* pad request bytes into a usable size -- internal version */
#define request2size(req) \ (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \ MINSIZE : \ ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
/* Same, except also perform argument check */
#define checked_request2size(req, sz) \ if (REQUEST_OUT_OF_RANGE(req)) { \ MALLOC_FAILURE_ACTION; \ return 0; \ } \ (sz) = request2size(req);
/* Bits to mask off when extracting size
Note: IS_MMAPPED is intentionally not masked off from size field in macros for which mmapped chunks should never be seen. This should cause helpful core dumps to occur if it is tried by accident by people extending or adapting this malloc. */ #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
/* extract inuse bit of previous chunk */ #define prev_inuse(p) ((p)->size & PREV_INUSE)
/* Get size, ignoring use bits */ #define chunksize(p) ((p)->size & ~(SIZE_BITS))
typedef struct malloc_chunk* mfastbinptr; #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
/* This struct declaration is misleading (but accurate and necessary). It declares a "view" into memory allowing access to necessary fields at known offsets from a given base. See explanation below. */
struct malloc_chunk {
INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
struct malloc_chunk* fd; /* double links -- used only if free, Forward pointer to next chunk in list. */ struct malloc_chunk* bk; /* Back pointer to previous chunk in list */
/* Only used for large blocks: pointer to next larger size. */ struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */ struct malloc_chunk* bk_nextsize; };
/* Conversions between the user pointer ("mem") and the chunk header:
   the user pointer sits 2*SIZE_SZ bytes past the start of the chunk. */
#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
/* Ptr to next physical malloc_chunk. */ #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) )) /* Ptr to previous physical malloc_chunk */ #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
/* extract p's inuse bit */ #define inuse(p)\ ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE) /* set/clear chunk as being inuse without otherwise disturbing */ #define set_inuse(p)\ ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE #define clear_inuse(p)\ ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE) |
对于char* str = (char*)malloc(1024);，返回地址str之前是chunk头部的prev_size和size两个字段，在64位系统上各占8字节（32位x86上各占4字节）：
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `head:' | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Forward pointer to next chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Back pointer to previous chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Unused space (may be 0 bytes long) . . . . | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `foot:' | Size of chunk, in bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
/* Quoted from glibc malloc.c: report a heap-corruption error via
   __libc_message.  check_action bits: bit 0 = print the message,
   bit 1 = abort afterwards; (action & 5) == 5 prints str without the
   offending pointer.  This is the function that emits the
   "*** glibc detected *** ...: double free or corruption ..." lines
   seen in the article's logs.
   NOTE(review): the original quote was truncated inside the final
   __libc_message call ('__libc_argv[0] ?: "'); the arguments are
   restored from the glibc source. */
static void
malloc_printerr(int action, const char *str, void *ptr)
{
  if ((action & 5) == 5)
    __libc_message (action & 2, "%s\n", str);
  else if (action & 1)
    {
      /* Render the pointer as a fixed-width, zero-padded hex string. */
      char buf[2 * sizeof (uintptr_t) + 1];

      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
        *--cp = '0';

      __libc_message (action & 2, "*** glibc detected *** %s: %s: 0x%s ***\n",
                      __libc_argv[0] ?: "<unknown>", str, cp);
    }
  else if (action & 2)
    abort ();
}
/* Quoted from glibc (sysdeps/posix/libc_fatal.c): format and emit a fatal
   error message.  Destination: /dev/tty by default, stderr when
   LIBC_FATAL_STDERR_ is set, syslog as fallback when the write fails;
   optionally prints a backtrace + memory map, then aborts.
   NOTE(review): extraction collapsed the source onto single lines; code
   tokens are kept byte-identical, only the blogger's Chinese comments
   are translated to English. */
void __libc_message(int do_abort, const char* fmt, ...) { va_list ap; va_list ap_copy; int fd = -1;
va_start (ap, fmt); va_copy (ap_copy, ap);
#ifdef FATAL_PREPARE FATAL_PREPARE; #endif
// As the code below shows: if the environment variable LIBC_FATAL_STDERR_ is not set, the error is written to the controlling tty; // if it is set (any value works), the error is written to stderr instead. // When writing to stderr or the tty fails, the message is sent to the system log. // The test code called close(STDERR_FILENO), so the writev below fails and syslog is used. /* Open a descriptor for /dev/tty unless the user explicitly requests errors on standard error. */ const char *on_2 = __secure_getenv ("LIBC_FATAL_STDERR_"); if (on_2 == NULL || *on_2 == '\0') fd = open_not_cancel_2 (_PATH_TTY, O_RDWR | O_NOCTTY | O_NDELAY);
if (fd == -1) fd = STDERR_FILENO;
struct str_list *list = NULL; int nlist = 0;
const char *cp = fmt; while (*cp != '\0') { /* Find the next "%s" or the end of the string. */ const char *next = cp; while (next[0] != '%' || next[1] != 's') { next = __strchrnul (next + 1, '%');
if (next[0] == '\0') break; }
/* Determine what to print. */ const char *str; size_t len; if (cp[0] == '%' && cp[1] == 's') { str = va_arg (ap, const char *); len = strlen (str); cp += 2; } else { str = cp; len = next - cp; cp = next; }
struct str_list *newp = alloca (sizeof (struct str_list)); newp->str = str; newp->len = len; newp->next = list; list = newp; ++nlist; }
bool written = false; if (nlist > 0) { struct iovec *iov = alloca (nlist * sizeof (struct iovec)); ssize_t total = 0;
for (int cnt = nlist - 1; cnt >= 0; --cnt) { iov[cnt].iov_base = (void *) list->str; iov[cnt].iov_len = list->len; total += list->len; list = list->next; }
INTERNAL_SYSCALL_DECL (err); ssize_t cnt; do cnt = INTERNAL_SYSCALL (writev, err, 3, fd, iov, nlist); while (INTERNAL_SYSCALL_ERROR_P (cnt, err) && INTERNAL_SYSCALL_ERRNO (cnt, err) == EINTR);
if (cnt == total) written = true; }
va_end (ap);
/* If we had no success writing the message, use syslog. */ if (! written) vsyslog (LOG_ERR, fmt, ap_copy); // fallback: the stderr/tty write failed, so log to syslog // (the test program closed STDERR_FILENO, which forces this path)
va_end (ap_copy);
if (do_abort) { if (do_abort > 1 && written) { void *addrs[64]; #define naddrs (sizeof (addrs) / sizeof (addrs[0])) int n = __backtrace (addrs, naddrs); if (n > 2) { #define strnsize(str) str, strlen (str) #define writestr(str) write_not_cancel (fd, str) writestr (strnsize ("======= Backtrace: =========\n")); __backtrace_symbols_fd (addrs + 1, n - 1, fd);
writestr (strnsize ("======= Memory map: ========\n")); int fd2 = open_not_cancel_2 ("/proc/self/maps", O_RDONLY); char buf[1024]; ssize_t n2; while ((n2 = read_not_cancel (fd2, buf, sizeof (buf))) > 0) if (write_not_cancel (fd, buf, n2) != n2) break; close_not_cancel_no_status (fd2); } }
/* Terminate the process. */ abort (); } } |
/* Quoted from glibc malloc.c: the core free() implementation.  All of the
   "free(): ..." and "double free or corruption (...)" diagnostics in the
   article's logs originate from the errstr checks below, reported via
   malloc_printerr.  Small chunks go onto lock-free fastbins; others are
   coalesced with neighbors and placed on the unsorted list, possibly
   consolidating into the top chunk and trimming the heap.
   NOTE(review): extraction collapsed the source onto single lines; all
   tokens are kept byte-identical to the quote. */
static void #ifdef ATOMIC_FASTBINS _int_free(mstate av, mchunkptr p, int have_lock) #else _int_free(mstate av, mchunkptr p) #endif { INTERNAL_SIZE_T size; /* its size */ mfastbinptr* fb; /* associated fastbin */ mchunkptr nextchunk; /* next contiguous chunk */ INTERNAL_SIZE_T nextsize; /* its size */ int nextinuse; /* true if nextchunk is used */ INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ mchunkptr bck; /* misc temp for linking */ mchunkptr fwd; /* misc temp for linking */
const char *errstr = NULL; #ifdef ATOMIC_FASTBINS int locked = 0; #endif
size = chunksize(p);
/* Little security check which won't hurt performance: the allocator never wrapps around at the end of the address space. Therefore we can exclude some size values which might appear here by accident or by "design" from some intruder. */ if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) || __builtin_expect (misaligned_chunk (p), 0)) { errstr = "free(): invalid pointer"; errout: #ifdef ATOMIC_FASTBINS if (! have_lock && locked) (void)mutex_unlock(&av->mutex); #endif malloc_printerr (check_action, errstr, chunk2mem(p)); return; } /* We know that each chunk is at least MINSIZE bytes in size. */ if (__builtin_expect (size < MINSIZE, 0)) { errstr = "free(): invalid size"; goto errout; }
check_inuse_chunk(av, p);
/* If eligible, place chunk on a fastbin so it can be found and used quickly in malloc. */
if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
#if TRIM_FASTBINS /* If TRIM_FASTBINS set, don't place chunks bordering top into fastbins */ && (chunk_at_offset(p, size) != av->top) #endif ) {
/* Fastbin path: the next chunk's size must look sane; a corrupted size
   field (as in the article's p1[1024 + sizeof(size_t)] = 1 demo) is
   caught here as "free(): invalid next size (fast)". */
if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0) || __builtin_expect (chunksize (chunk_at_offset (p, size)) >= av->system_mem, 0)) { #ifdef ATOMIC_FASTBINS /* We might not have a lock at this point and concurrent modifications of system_mem might have let to a false positive. Redo the test after getting the lock. */ if (have_lock || ({ assert (locked == 0); mutex_lock(&av->mutex); locked = 1; chunk_at_offset (p, size)->size <= 2 * SIZE_SZ || chunksize (chunk_at_offset (p, size)) >= av->system_mem; })) #endif { errstr = "free(): invalid next size (fast)"; goto errout; } #ifdef ATOMIC_FASTBINS if (! have_lock) { (void)mutex_unlock(&av->mutex); locked = 0; } #endif }
if (__builtin_expect (perturb_byte, 0)) free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
set_fastchunks(av); unsigned int idx = fastbin_index(size); fb = &fastbin (av, idx);
#ifdef ATOMIC_FASTBINS mchunkptr fd; mchunkptr old = *fb; unsigned int old_idx = ~0u; do { /* Another simple check: make sure the top of the bin is not the record we are going to add (i.e., double free). */ if (__builtin_expect (old == p, 0)) { errstr = "double free or corruption (fasttop)"; goto errout; } if (old != NULL) old_idx = fastbin_index(chunksize(old)); p->fd = fd = old; } while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
if (fd != NULL && __builtin_expect (old_idx != idx, 0)) { errstr = "invalid fastbin entry (free)"; goto errout; } #else /* Another simple check: make sure the top of the bin is not the record we are going to add (i.e., double free). */ if (__builtin_expect (*fb == p, 0)) { errstr = "double free or corruption (fasttop)"; goto errout; } if (*fb != NULL && __builtin_expect (fastbin_index(chunksize(*fb)) != idx, 0)) { errstr = "invalid fastbin entry (free)"; goto errout; }
p->fd = *fb; *fb = p; #endif }
/* Consolidate other non-mmapped chunks as they arrive. */
else if (!chunk_is_mmapped(p)) { #ifdef ATOMIC_FASTBINS if (! have_lock) { # if THREAD_STATS if(!mutex_trylock(&av->mutex)) ++(av->stat_lock_direct); else { (void)mutex_lock(&av->mutex); ++(av->stat_lock_wait); } # else (void)mutex_lock(&av->mutex); # endif locked = 1; } #endif
nextchunk = chunk_at_offset(p, size);
/* NOTE(review): the "double free or corruption (!prev)" message from the
   article's logs comes from the !prev_inuse(nextchunk) check below. */
/* Lightweight tests: check whether the block is already the top block. */ if (__builtin_expect (p == av->top, 0)) { errstr = "double free or corruption (top)"; goto errout; } /* Or whether the next chunk is beyond the boundaries of the arena. */ if (__builtin_expect (contiguous (av) && (char *) nextchunk >= ((char *) av->top + chunksize(av->top)), 0)) { errstr = "double free or corruption (out)"; goto errout; } /* Or whether the block is actually not marked used. */ if (__builtin_expect (!prev_inuse(nextchunk), 0)) { errstr = "double free or corruption (!prev)"; goto errout; }
nextsize = chunksize(nextchunk); if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0) || __builtin_expect (nextsize >= av->system_mem, 0)) { errstr = "free(): invalid next size (normal)"; goto errout; }
if (__builtin_expect (perturb_byte, 0)) free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
/* consolidate backward */ if (!prev_inuse(p)) { prevsize = p->prev_size; size += prevsize; p = chunk_at_offset(p, -((long) prevsize)); unlink(p, bck, fwd); }
if (nextchunk != av->top) { /* get and clear inuse bit */ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
/* consolidate forward */ if (!nextinuse) { unlink(nextchunk, bck, fwd); size += nextsize; } else clear_inuse_bit_at_offset(nextchunk, 0);
/* Place the chunk in unsorted chunk list. Chunks are not placed into regular bins until after they have been given one chance to be used in malloc. */
bck = unsorted_chunks(av); fwd = bck->fd; if (__builtin_expect (fwd->bk != bck, 0)) { errstr = "free(): corrupted unsorted chunks"; goto errout; } p->fd = fwd; p->bk = bck; if (!in_smallbin_range(size)) { p->fd_nextsize = NULL; p->bk_nextsize = NULL; } bck->fd = p; fwd->bk = p;
set_head(p, size | PREV_INUSE); set_foot(p, size);
check_free_chunk(av, p); }
/* If the chunk borders the current high end of memory, consolidate into top */
else { size += nextsize; set_head(p, size | PREV_INUSE); av->top = p; check_chunk(av, p); }
/* If freeing a large space, consolidate possibly-surrounding chunks. Then, if the total unused topmost memory exceeds trim threshold, ask malloc_trim to reduce top.
Unless max_fast is 0, we don't know if there are fastbins bordering top, so we cannot tell for sure whether threshold has been reached unless fastbins are consolidated. But we don't want to consolidate on each free. As a compromise, consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached. */
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { if (have_fastchunks(av)) malloc_consolidate(av);
if (av == &main_arena) { #ifndef MORECORE_CANNOT_TRIM if ((unsigned long)(chunksize(av->top)) >= (unsigned long)(mp_.trim_threshold)) sYSTRIm(mp_.top_pad, av); #endif } else { /* Always try heap_trim(), even if the top chunk is not large, because the corresponding heap might go away. */ heap_info *heap = heap_for_ptr(top(av));
assert(heap->ar_ptr == av); heap_trim(heap, mp_.top_pad); } }
#ifdef ATOMIC_FASTBINS if (! have_lock) { assert (locked); (void)mutex_unlock(&av->mutex); } #endif } /* If the chunk was allocated via mmap, release via munmap(). Note that if HAVE_MMAP is false but chunk_is_mmapped is true, then user must have overwritten memory. There's nothing we can do to catch this error unless MALLOC_DEBUG is set, in which case check_inuse_chunk (above) will have triggered error. */
else { #if HAVE_MMAP munmap_chunk (p); #endif } } |
// chunk2mem()等相关实现,请从一节摘取, // 或直接从malloc.c文件取: // void print_malloc_chunk(const char* tag, mchunkptr chunk) { printf("\n"); printf("[%s]p: %p\n", tag, chunk2mem(chunk)); printf("[%s]prev_size: %u, used(%d)\n", tag, chunk->prev_size, prev_inuse(chunk)); printf("[%s]size: %u/%u, used(%d)\n", tag, chunk->size, chunksize(chunk), inuse(chunk)); printf("[%s]Forward pointer: %p\n", tag, chunk->fd); printf("[%s]Back pointer: %p\n", tag, chunk->bk);
if (chunk->fd != NULL) { printf("[%s]Forward prev_size: %u, used(%d)\n", tag, chunk->fd->prev_size, inuse(chunk->fd)); printf("[%s]Forward size: %u, used(%d)\n", tag, chunksize(chunk->fd), inuse(chunk->fd)); } }
int main(int argc, char** argv) { char *p1 = new char[1024]; char *p2 = new char[4096]; char *p3 = new char[2048];
mchunkptr c1 = mem2chunk(p1); mchunkptr c2 = mem2chunk(p2); mchunkptr c3 = mem2chunk(p3); print_malloc_chunk("c11", c1); print_malloc_chunk("c21", c2); print_malloc_chunk("c31", c3);
printf("----------------------------------\n"); delete []p2; print_malloc_chunk("c12", c1); print_malloc_chunk("c22", c2); print_malloc_chunk("c32", c3);
delete []p3; delete []p1; return 0; } |
运行输出:
[c11]p: 0x501010 [c11]prev_size: 0, used(1) [c11]size: 1041/1040, used(1) [c11]Forward pointer: (nil) [c11]Back pointer: (nil)
[c21]p: 0x501420 [c21]prev_size: 0, used(1) [c21]size: 4113/4112, used(1) [c21]Forward pointer: (nil) [c21]Back pointer: (nil)
[c31]p: 0x502430 [c31]prev_size: 0, used(1) [c31]size: 2065/2064, used(1) [c31]Forward pointer: (nil) [c31]Back pointer: (nil) ---------------------------------- [c12]p: 0x501010 [c12]prev_size: 0, used(1) [c12]size: 1041/1040, used(1) [c12]Forward pointer: (nil) [c12]Back pointer: (nil)
[c22]p: 0x501420 [c22]prev_size: 0, used(1) [c22]size: 4113/4112, used(0) // 这里发生了变化,因为“delete []p2”将p2释放了,所以为free [c22]Forward pointer: 0x2ada9e7539f0 [c22]Back pointer: 0x2ada9e7539f0 [c22]Forward prev_size: 0, used(0) [c22]Forward size: 0, used(0)
[c32]p: 0x502430 [c32]prev_size: 4112, used(0) // 这里也发生了变化,为p2的chunk大小和状态 [c32]size: 2064/2064, used(1) [c32]Forward pointer: (nil) [c32]Back pointer: (nil) |
static Void_t* _int_malloc(mstate av, size_t bytes) { INTERNAL_SIZE_T nb; /* normalized request size */ unsigned int idx; /* associated bin index */ mbinptr bin; /* associated bin */
mchunkptr victim; /* inspected/selected chunk */ INTERNAL_SIZE_T size; /* its size */ int victim_index; /* its bin index */
mchunkptr remainder; /* remainder from a split */ unsigned long remainder_size; /* its size */
unsigned int block; /* bit map traverser */ unsigned int bit; /* bit map traverser */ unsigned int map; /* current word of binmap */
mchunkptr fwd; /* misc temp for linking */ mchunkptr bck; /* misc temp for linking */
const char *errstr = NULL;
/* Convert request size to internal form by adding SIZE_SZ bytes overhead plus possibly more to obtain necessary alignment and/or to obtain a size of at least MINSIZE, the smallest allocatable size. Also, checked_request2size traps (returning 0) request sizes that are so large that they wrap around zero when padded and aligned. */
checked_request2size(bytes, nb);
/* If the size qualifies as a fastbin, first check corresponding bin. This code is safe to execute even if av is not yet initialized, so we can try it without checking, which saves some time on this fast path. */
if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) { idx = fastbin_index(nb); mfastbinptr* fb = &fastbin (av, idx); #ifdef ATOMIC_FASTBINS mchunkptr pp = *fb; do { victim = pp; if (victim == NULL) break; } while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) != victim); #else victim = *fb; #endif if (victim != 0) { if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0)) { errstr = "malloc(): memory corruption (fast)"; errout: malloc_printerr (check_action, errstr, chunk2mem (victim)); return NULL; } #ifndef ATOMIC_FASTBINS *fb = victim->fd; #endif check_remalloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; } }
/* If a small request, check regular bin. Since these "smallbins" hold one size each, no searching within bins is necessary. (For a large request, we need to wait until unsorted chunks are processed to find best fit. But for small ones, fits are exact anyway, so we can check now, which is faster.) */
if (in_smallbin_range(nb)) { idx = smallbin_index(nb); bin = bin_at(av,idx);
if ( (victim = last(bin)) != bin) { if (victim == 0) /* initialization check */ malloc_consolidate(av); else { bck = victim->bk; if (__builtin_expect (bck->fd != victim, 0)) { errstr = "malloc(): smallbin double linked list corrupted"; goto errout; } set_inuse_bit_at_offset(victim, nb); bin->bk = bck; bck->fd = bin;
if (av != &main_arena) victim->size |= NON_MAIN_ARENA; check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; } } }
/* If this is a large request, consolidate fastbins before continuing. While it might look excessive to kill all fastbins before even seeing if there is space available, this avoids fragmentation problems normally associated with fastbins. Also, in practice, programs tend to have runs of either small or large requests, but less often mixtures, so consolidation is not invoked all that often in most programs. And the programs that it is called frequently in otherwise tend to fragment. */
else { idx = largebin_index(nb); if (have_fastchunks(av)) malloc_consolidate(av); }
/* Process recently freed or remaindered chunks, taking one only if it is exact fit, or, if this a small request, the chunk is remainder from the most recent non-exact fit. Place other traversed chunks in bins. Note that this step is the only place in any routine where chunks are placed in bins.
The outer loop here is needed because we might not realize until near the end of malloc that we should have consolidated, so must do so and retry. This happens at most once, and only when we would otherwise need to expand memory to service a "small" request. */
for(;;) {
int iters = 0; while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) { bck = victim->bk; if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0) || __builtin_expect (victim->size > av->system_mem, 0)) malloc_printerr (check_action, "malloc(): memory corruption", chunk2mem (victim)); size = chunksize(victim);
/* If a small request, try to use last remainder if it is the only chunk in unsorted bin. This helps promote locality for runs of consecutive small requests. This is the only exception to best-fit, and applies only when there is no exact fit for a small chunk. */
if (in_smallbin_range(nb) && bck == unsorted_chunks(av) && victim == av->last_remainder && (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
/* split and reattach remainder */ remainder_size = size - nb; remainder = chunk_at_offset(victim, nb); unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; av->last_remainder = remainder; remainder->bk = remainder->fd = unsorted_chunks(av); if (!in_smallbin_range(remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; }
set_head(victim, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); set_head(remainder, remainder_size | PREV_INUSE); set_foot(remainder, remainder_size);
check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; }
/* remove from unsorted list */ unsorted_chunks(av)->bk = bck; bck->fd = unsorted_chunks(av);
/* Take now instead of binning if exact fit */
if (size == nb) { set_inuse_bit_at_offset(victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; }
/* place chunk in bin */
if (in_smallbin_range(size)) { victim_index = smallbin_index(size); bck = bin_at(av, victim_index); fwd = bck->fd; } else { victim_index = largebin_index(size); bck = bin_at(av, victim_index); fwd = bck->fd;
/* maintain large bins in sorted order */ if (fwd != bck) { /* Or with inuse bit to speed comparisons */ size |= PREV_INUSE; /* if smaller than smallest, bypass loop below */ assert((bck->bk->size & NON_MAIN_ARENA) == 0); if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) { fwd = bck; bck = bck->bk;
victim->fd_nextsize = fwd->fd; victim->bk_nextsize = fwd->fd->bk_nextsize; fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; } else { assert((fwd->size & NON_MAIN_ARENA) == 0); while ((unsigned long) size < fwd->size) { fwd = fwd->fd_nextsize; assert((fwd->size & NON_MAIN_ARENA) == 0); }
if ((unsigned long) size == (unsigned long) fwd->size) /* Always insert in the second position. */ fwd = fwd->fd; else { victim->fd_nextsize = fwd; victim->bk_nextsize = fwd->bk_nextsize; fwd->bk_nextsize = victim; victim->bk_nextsize->fd_nextsize = victim; } bck = fwd->bk; } } else victim->fd_nextsize = victim->bk_nextsize = victim; }
mark_bin(av, victim_index); victim->bk = bck; victim->fd = fwd; fwd->bk = victim; bck->fd = victim;
#define MAX_ITERS 10000 if (++iters >= MAX_ITERS) break; }
/* If a large request, scan through the chunks of current bin in sorted order to find smallest that fits. Use the skip list for this. */
if (!in_smallbin_range(nb)) { bin = bin_at(av, idx);
/* skip scan if empty or largest chunk is too small */ if ((victim = first(bin)) != bin && (unsigned long)(victim->size) >= (unsigned long)(nb)) {
victim = victim->bk_nextsize; while (((unsigned long)(size = chunksize(victim)) < (unsigned long)(nb))) victim = victim->bk_nextsize;
/* Avoid removing the first entry for a size so that the skip list does not have to be rerouted. */ if (victim != last(bin) && victim->size == victim->fd->size) victim = victim->fd;
remainder_size = size - nb; unlink(victim, bck, fwd);
/* Exhaust */ if (remainder_size < MINSIZE) { set_inuse_bit_at_offset(victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; } /* Split */ else { remainder = chunk_at_offset(victim, nb); /* We cannot assume the unsorted list is empty and therefore have to perform a complete insert here. */ bck = unsorted_chunks(av); fwd = bck->fd; if (__builtin_expect (fwd->bk != bck, 0)) { errstr = "malloc(): corrupted unsorted chunks"; goto errout; } remainder->bk = bck; remainder->fd = fwd; bck->fd = remainder; fwd->bk = remainder; if (!in_smallbin_range(remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } set_head(victim, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); set_head(remainder, remainder_size | PREV_INUSE); set_foot(remainder, remainder_size); } check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; } }
/* Search for a chunk by scanning bins, starting with next largest bin. This search is strictly by best-fit; i.e., the smallest (with ties going to approximately the least recently used) chunk that fits is selected.
The bitmap avoids needing to check that most blocks are nonempty. The particular case of skipping all bins during warm-up phases when no chunks have been returned yet is faster than it might look. */
++idx; bin = bin_at(av,idx); block = idx2block(idx); map = av->binmap[block]; bit = idx2bit(idx);
for (;;) {
/* Skip rest of block if there are no more set bits in this block. */ if (bit > map || bit == 0) { do { if (++block >= BINMAPSIZE) /* out of bins */ goto use_top; } while ( (map = av->binmap[block]) == 0);
bin = bin_at(av, (block << BINMAPSHIFT)); bit = 1; }
/* Advance to bin with set bit. There must be one. */ while ((bit & map) == 0) { bin = next_bin(bin); bit <<= 1; assert(bit != 0); }
/* Inspect the bin. It is likely to be non-empty */ victim = last(bin);
/* If a false alarm (empty bin), clear the bit. */ if (victim == bin) { av->binmap[block] = map &= ~bit; /* Write through */ bin = next_bin(bin); bit <<= 1; }
else { size = chunksize(victim);
/* We know the first chunk in this bin is big enough to use. */ assert((unsigned long)(size) >= (unsigned long)(nb));
remainder_size = size - nb;
/* unlink */ unlink(victim, bck, fwd);
/* Exhaust */ if (remainder_size < MINSIZE) { set_inuse_bit_at_offset(victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; }
/* Split */ else { remainder = chunk_at_offset(victim, nb);
/* We cannot assume the unsorted list is empty and therefore have to perform a complete insert here. */ bck = unsorted_chunks(av); fwd = bck->fd; if (__builtin_expect (fwd->bk != bck, 0)) { errstr = "malloc(): corrupted unsorted chunks 2"; goto errout; } remainder->bk = bck; remainder->fd = fwd; bck->fd = remainder; fwd->bk = remainder;
/* advertise as last remainder */ if (in_smallbin_range(nb)) av->last_remainder = remainder; if (!in_smallbin_range(remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } set_head(victim, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); set_head(remainder, remainder_size | PREV_INUSE); set_foot(remainder, remainder_size); } check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; } }
use_top: /* If large enough, split off the chunk bordering the end of memory (held in av->top). Note that this is in accord with the best-fit search rule. In effect, av->top is treated as larger (and thus less well fitting) than any other available chunk since it can be extended to be as large as necessary (up to system limitations).
We require that av->top always exists (i.e., has size >= MINSIZE) after initialization, so if it would otherwise be exhausted by current request, it is replenished. (The main reason for ensuring it exists is that we may need MINSIZE space to put in fenceposts in sysmalloc.) */
victim = av->top; size = chunksize(victim);
if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { remainder_size = size - nb; remainder = chunk_at_offset(victim, nb); av->top = remainder; set_head(victim, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); set_head(remainder, remainder_size | PREV_INUSE);
check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; }
#ifdef ATOMIC_FASTBINS /* When we are using atomic ops to free fast chunks we can get here for all block sizes. */ else if (have_fastchunks(av)) { malloc_consolidate(av); /* restore original bin index */ if (in_smallbin_range(nb)) idx = smallbin_index(nb); else idx = largebin_index(nb); } #else /* If there is space available in fastbins, consolidate and retry, to possibly avoid expanding memory. This can occur only if nb is in smallbin range so we didn't consolidate upon entry. */
else if (have_fastchunks(av)) { assert(in_smallbin_range(nb)); malloc_consolidate(av); idx = smallbin_index(nb); /* restore original bin index */ } #endif
/* Otherwise, relay to handle system-dependent cases */ else { void *p = sYSMALLOc(nb, av); if (p != NULL && __builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); return p; } } }