同时,还能看到一系列glibc-ports文件,该文件是ARM等平台的libc库实现,但并不是说glibc-ports完全独立于glibc。事实上,前者(glibc-ports)只包含与平台相关的代码,而平台无关的代码则与glibc共用。glibc-ports根目录下的README文件可证明以上观点,并摘录如下:
This is the glibc ports repository, an
add-on for the GNU C Library (glibc).
It contains code that is not maintained in
the official glibc source tree.
This includes working ports to GNU/Linux on
some machine architectures that
are not maintained in the official glibc
source tree. It also includes
some code once used by old libc ports now
defunct, which has been abandoned
but may be useful for some future porter to
examine. It may also include
some optimized functions tailored for
specific CPU implementations of an
architecture, to be selected using
--with-cpu.
其实,glibc一开始是支持ARM平台的,后来因为某些原因将ARM等平台代码独立了出来。glibc-ports第一个版本为2.3.5。由于glibc-ports依赖于glibc,所以前者的版本号必须与后者保持一致。
这里以2.14版本为例进行讲解。
2.2 常用的宏
在glibc/glibc-ports代码库中,经常看到strong_alias等宏的定义,这里对其进行简单分析:
-
# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
-
# define _strong_alias(name, aliasname) \
-
extern __typeof (name) aliasname __attribute__ ((alias (#name)));
-
-
# define weak_alias(name, aliasname) _weak_alias (name, aliasname)
-
# define _weak_alias(name, aliasname) \
-
extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));
这里以glibc-2.14/sysdeps/unix/sysv/linux/connect.S为例:
-
#define __socket __libc_connect
-
#define NARGS 3
-
#define NEED_CANCELLATION
-
#include <socket.S>
-
strong_alias (__libc_connect, __connect_internal)
-
weak_alias (__libc_connect, __connect)
于是,strong_alias (__libc_connect, __connect_internal)展开如下:
extern __typeof (__libc_connect)
__connect_internal __attribute__ ((alias ("__libc_connect")));
这里涉及到了“alias”,官方的解释见GCC文档中关于函数属性(function attributes)的说明(原文此处链接缺失)。
简而言之就是,符号__connect_internal是符号__libc_connect的别称(二者指向同一实现)。
而weak_alias (__libc_connect, __connect)展开如下:
extern __typeof (__libc_connect) __connect
__attribute__ ((weak, alias ("__libc_connect")));
这里的weak表示当前符号是个弱类型符号(weak symbol),而非全局符号。weak修饰符的含义是让弱类型的函数可以被其它同名函数覆盖(即不会发生冲突);如果没有其它同名函数,就使用该weak函数,类似于默认函数。也就是说,符号__connect是符号__libc_connect的弱类型别称。
3 函数分析
3.1 recvmsg
3.1.1 分析
3.1.1.1 源码1
glibc-2.14/socket/recvmsg.c
-
ssize_t
-
__recvmsg (fd, message, flags)
-
int fd;
-
struct msghdr *message;
-
int flags;
-
{
-
__set_errno (ENOSYS);
-
return -1;
-
}
-
-
weak_alias (__recvmsg, recvmsg)
所以,若其它文件中定义了函数recvmsg,则优先使用该定义;否则就使用这里经weak_alias导出的__recvmsg(它仅设置ENOSYS并返回-1)。
glibc-2.14/sysdeps/unix/sysv/linux/recvmsg.S
-
#define socket recvmsg
-
#define __socket __libc_recvmsg
-
#define NARGS 3
-
#define NEED_CANCELLATION
-
对于ARM,相当于#include glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/socket.S
-
#include <socket.S>
-
weak_alias (__libc_recvmsg, __recvmsg)
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/socket.S
-
#include <sysdep-cancel.h>
-
#include <socketcall.h>
-
-
#define P(a, b) P2(a, b)
-
#define P2(a, b) a##b
-
-
#define PUSHARGS_1 str a1, [sp, $-4]!
-
#define PUSHARGS_2 stmfd sp!, {a1, a2}
-
#define PUSHARGS_3 stmfd sp!, {a1, a2, a3}
-
#define PUSHARGS_4 stmfd sp!, {a1, a2, a3, a4}
-
#define PUSHARGS_5 stmfd sp!, {a1, a2, a3, a4} /* Caller has already pushed arg 5 */
-
#define PUSHARGS_6 stmfd sp!, {a1, a2, a3, a4}
-
-
#define POPARGS_1 add sp, sp, #4
-
#define POPARGS_2 add sp, sp, #8
-
#define POPARGS_3 add sp, sp, #12
-
#define POPARGS_4 add sp, sp, #16
-
#define POPARGS_5 add sp, sp, #16
-
#define POPARGS_6 add sp, sp, #16
-
-
#ifndef NARGS
-
#define NARGS 3 /* If we were called with no wrapper, this is really socket() */
-
#endif
-
-
.globl __socket
-
ENTRY (__socket)
-
/* Push args onto the stack. */
-
NARGS等于3,展开后相当于stmfd sp!, {a1, a2, a3},也就是将3个参数入栈
-
P(PUSHARGS_,NARGS)
-
-
/* Do the system call trap. */
-
-
mov a1, $P(SOCKOP_,socket)
-
mov a2, sp
-
-
-
swi SYS_ify(socketcall)
-
-
/* Pop args off the stack */
-
P(POPARGS_,NARGS)
-
-
/* r0 is < 0 if there was an error. */
-
cmn r0, $124
-
RETINSTR(cc, r14)
-
b PLTJMP(SYSCALL_ERROR)
-
-
PSEUDO_END (__socket)
-
-
#ifndef NO_WEAK_ALIAS
-
weak_alias (__socket, socket)
-
#endif
glibc-2.14/sysdeps/unix/sysv/linux/socketcall.h
-
#define SOCKOP_socket 1
-
#define SOCKOP_bind 2
-
#define SOCKOP_connect 3
-
#define SOCKOP_listen 4
-
#define SOCKOP_accept 5
-
#define SOCKOP_getsockname 6
-
#define SOCKOP_getpeername 7
-
#define SOCKOP_socketpair 8
-
#define SOCKOP_send 9
-
#define SOCKOP_recv 10
-
#define SOCKOP_sendto 11
-
#define SOCKOP_recvfrom 12
-
#define SOCKOP_shutdown 13
-
#define SOCKOP_setsockopt 14
-
#define SOCKOP_getsockopt 15
-
#define SOCKOP_sendmsg 16
-
#define SOCKOP_recvmsg 17
-
#define SOCKOP_accept4 18
-
#define SOCKOP_recvmmsg 19
-
#define SOCKOP_sendmmsg 20
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/sysdep.h
-
#define SYS_ify(syscall_name) (__NR_##syscall_name)
glibc-ports-2.14/sysdeps/arm/sysdep.h
-
#define RETINSTR(cond, reg) \
-
bx##cond reg
-
#define PLTJMP(_x) _x
-
#define SYSCALL_ERROR __syscall_error
3.1.1.2 展开1
最终,glibc-2.14/sysdeps/unix/sysv/linux/recvmsg.S被展开成如下内容:
-
#include <sysdep-cancel.h>
-
#include <socketcall.h>
-
.text
-
.globl __libc_recvmsg
-
ENTRY (__libc_recvmsg)
-
-
/* Push args onto the stack. */
-
stmfd sp!, {a1, a2, a3}
-
-
/* Do the system call trap. */
-
mov a1, $17 将17存入r0(a1是r0寄存器的别名)中,17代表recvmsg请求
-
mov a2, sp 将栈地址存入r1寄存器中
-
系统陷入内核,并执行sys_socketcall,根据r0中的参数SOCKOP_recvmsg选择调用sys_recvmsg
-
swi __NR_socketcall
-
-
/* Pop args off the stack */
-
add sp, sp, #12 出栈
-
-
/* r0 is < 0 if there was an error. */
-
此时,r0中保存的是sys_recvmsg的返回值
-
cmn r0, $124 与-124比较,判断返回值是否落在错误码区间[-124, -1]内
-
bxcc r14 若不是则直接返回(等价于bxcc lr)
-
b __syscall_error 否则执行该函数
-
-
PSEUDO_END (__libc_recvmsg)
-
-
weak_alias (__libc_recvmsg, __recvmsg)
-
weak_alias (__libc_recvmsg, recvmsg)
3.1.2.1 源码2
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/sysdep.S
-
ENTRY (__syscall_error)
-
rsb r0, r0, $0
-
-
#define __syscall_error __syscall_error_1
-
#include <sysdeps/unix/arm/sysdep.S>
glibc-ports-2.14/sysdeps/arm/sysdep.h
-
#define DO_RET(_reg) \
-
bx _reg
glibc-2.14/include/libc-symbols.h
-
#define C_SYMBOL_NAME(name) name
glibc-ports-2.14/sysdeps/unix/arm/sysdep.S
-
syscall_error:
-
ldr r1, 1f
-
str r0, [r1]
-
mvn r0, $0
-
DO_RET (r14)
-
-
1: .long C_SYMBOL_NAME(errno)
-
#undef __syscall_error
-
END (__syscall_error)
3.1.2.2 展开2
-
ENTRY (__syscall_error)
-
-
rsb r0, r0, $0 将0 - r0赋值给r0,也即将r0中的值取反(相反数),因为内核总是将错误码取反返回
-
__syscall_error_1:
-
ldr r1, 1f 将errno地址存入r1寄存器中
-
str r0, [r1] 将r0中保存的返回值存入errno中
-
mvn r0, $0 将0按位取反(相当于-1)后存入r0中,这样recvmsg便返回了-1
-
bx lr 返回
-
1: .long errno
-
END (__syscall_error)
3.1.2 总结
由“weak_alias (__libc_recvmsg, recvmsg)”可知,recvmsg相当于__libc_recvmsg的别名,上面已经分析了__libc_recvmsg,而__libc_recvmsg最后几行代码如下:
-
/* r0 is < 0 if there was an error. */
-
cmn r0, $124
-
bxcc r14
-
b __syscall_error
意思是,若内核返回负值,则表示出错,此时执行__syscall_error,否则说明内核执行成功,则直接返回(r0中保存内核的返回值)。
而__syscall_error功能很简单:将内核的返回值的相反数赋值给errno,同时将r0中设置为-1,使得recvmsg返回-1,以告诉用户该函数执行失败。
所以,当recvmsg执行失败后,返回-1的同时,errno也被设置成了内核返回值的相反数(即正的错误码,代表出错原因)。
另外,这里要强调一下,很多socket系列的函数的实现方式都与recvmsg类似。例如:sendmsg、accept、connect等等。猜一下,下面的.S源文件实现的是哪个函数的功能:
-
#define socket accept
-
#define __socket __libc_accept
-
#define NARGS 3
-
#define NEED_CANCELLATION
-
#include <socket.S>
-
libc_hidden_def (accept)
3.2 pthread_mutex_lock
3.2.1 分析
3.2.1.1 源码
glibc-2.14/nptl/pthread_mutex_lock.c
-
#ifndef __pthread_mutex_lock
-
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
-
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
-
#endif
-
所以,pthread_mutex_lock是__pthread_mutex_lock的别称。
-
-
int
-
__pthread_mutex_lock (mutex)
-
pthread_mutex_t *mutex;
-
{
-
assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
-
-
unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
-
if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
-
return __pthread_mutex_lock_full (mutex);
-
-
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
-
-
if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
-
== PTHREAD_MUTEX_TIMED_NP)
-
{
-
simple:
-
/* Normal mutex. */
-
LLL_MUTEX_LOCK (mutex);
-
assert (mutex->__data.__owner == 0);
-
}
-
else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
-
{
-
/* Recursive mutex. */
-
-
/* Check whether we already hold the mutex. */
-
if (mutex->__data.__owner == id)
-
{
-
/* Just bump the counter. */
-
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-
/* Overflow of the counter. */
-
return EAGAIN;
-
-
++mutex->__data.__count;
-
-
return 0;
-
}
-
-
/* We have to get the mutex. */
-
LLL_MUTEX_LOCK (mutex);
-
-
assert (mutex->__data.__owner == 0);
-
mutex->__data.__count = 1;
-
}
-
else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
-
{
-
if (! __is_smp)
-
goto simple;
-
-
if (LLL_MUTEX_TRYLOCK (mutex) != 0)
-
{
-
int cnt = 0;
-
int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
-
mutex->__data.__spins * 2 + 10);
-
do
-
{
-
if (cnt++ >= max_cnt)
-
{
-
LLL_MUTEX_LOCK (mutex);
-
break;
-
}
-
-
#ifdef BUSY_WAIT_NOP
-
BUSY_WAIT_NOP;
-
#endif
-
}
-
while (LLL_MUTEX_TRYLOCK (mutex) != 0);
-
-
mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
-
}
-
assert (mutex->__data.__owner == 0);
-
}
-
else
-
{
-
assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
-
/* Check whether we already hold the mutex. */
-
if (__builtin_expect (mutex->__data.__owner == id, 0))
-
return EDEADLK;
-
goto simple;
-
}
-
-
/* Record the ownership. */
-
mutex->__data.__owner = id;
-
#ifndef NO_INCR
-
++mutex->__data.__nusers;
-
#endif
-
-
return 0;
-
}
这里又调用了LLL_MUTEX_LOCK。
-
#ifndef LLL_MUTEX_LOCK
-
# define LLL_MUTEX_LOCK(mutex) \
-
lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
-
# define LLL_MUTEX_TRYLOCK(mutex) \
-
lll_trylock ((mutex)->__data.__lock)
-
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
-
lll_robust_lock ((mutex)->__data.__lock, id, \
-
PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
-
#endif
继续跟踪lll_lock的实现。
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/nptl/lowlevellock.h
另一条线索,当__lll_lock加锁失败后,会调用__lll_lock_wait_private或__lll_lock_wait进行阻塞。这里只分析__lll_lock_wait_private。
-
#define lll_lock(futex, private) __lll_lock (&(futex), private)
-
-
#define __lll_lock(futex, private) \
-
((void) ({ \
-
int *__futex = (futex); \
-
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0), 0)) \
-
{ \
-
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
-
__lll_lock_wait_private (__futex); \
-
else \
-
__lll_lock_wait (__futex, private); \
-
} \
-
}))
glibc-2.14/include/atomic.h
-
#if !defined atomic_compare_and_exchange_val_acq \
-
&& defined __arch_compare_and_exchange_val_32_acq
-
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
-
__atomic_val_bysize (__arch_compare_and_exchange_val,acq, \
-
mem, newval, oldval)
-
#endif
-
-
#define __atomic_val_bysize(pre, post, mem, ...) \
-
({ \
-
__typeof (*mem) __atg1_result; \
-
if (sizeof (*mem) == 1) \
-
__atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
-
else if (sizeof (*mem) == 2) \
-
__atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
-
else if (sizeof (*mem) == 4) \
-
__atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
-
else if (sizeof (*mem) == 8) \
-
__atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
-
else \
-
abort (); \
-
__atg1_result; \
-
})
glibc-ports-2.14/sysdeps/arm/bits/atomic.h
-
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
-
({ __typeof (oldval) result, tmp; \
-
__asm__ ("\n" \
-
"0:\tldr\t%1,[%2]\n\t" \
-
"cmp\t%1,%4\n\t" \
-
"movne\t%0,%1\n\t" \
-
"bne\t1f\n\t" \
-
"swpb\t%0,%3,[%2]\n\t" \
-
"cmp\t%1,%0\n\t" \
-
"swpbne\t%1,%0,[%2]\n\t" \
-
"bne\t0b\n\t" \
-
"1:" \
-
: "=&r" (result), "=&r" (tmp) \
-
: "r" (mem), "r" (newval), "r" (oldval) \
-
: "cc", "memory"); \
-
result; })
-
#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
-
({ __arm_link_error (); oldval; })
-
-
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-
({ __typeof (oldval) result, tmp; \
-
__asm__ ("\n" \
-
"0:\tldr\t%1,[%2]\n\t" \
-
"cmp\t%1,%4\n\t" \
-
"movne\t%0,%1\n\t" \
-
"bne\t1f\n\t" \
-
"swp\t%0,%3,[%2]\n\t" \
-
"cmp\t%1,%0\n\t" \
-
"swpne\t%1,%0,[%2]\n\t" \
-
"bne\t0b\n\t" \
-
"1:" \
-
: "=&r" (result), "=&r" (tmp) \
-
: "r" (mem), "r" (newval), "r" (oldval) \
-
: "cc", "memory"); \
-
result; })
-
-
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-
({ __arm_link_error (); oldval; })
另一条线索,当__lll_lock加锁失败后,会调用__lll_lock_wait_private或__lll_lock_wait进行阻塞。这里只分析__lll_lock_wait_private。
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/nptl/lowlevellock.c
-
void __lll_lock_wait_private (int *futex)
-
{
-
do
-
{
-
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
-
if (oldval != 0)
-
lll_futex_wait (futex, 2, LLL_PRIVATE);
-
}
-
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
-
}
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/nptl/lowlevellock.h
-
#define lll_futex_wait(futexp, val, private) \
-
lll_futex_timed_wait(futexp, val, NULL, private)
-
-
#define lll_futex_timed_wait(futexp, val, timespec, private) \
-
({ \
-
INTERNAL_SYSCALL_DECL (__err); \
-
long int __ret; \
-
futex系统调用
-
__ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
-
__lll_private_flag (FUTEX_WAIT, private), \
-
(val), (timespec)); \
-
__ret; \
-
})
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/sysdep.h
-
#define INTERNAL_SYSCALL(name, err, nr, args...) \
-
INTERNAL_SYSCALL_RAW(SYS_ify(name), err, nr, args)
-
-
#define INTERNAL_SYSCALL_RAW(name, err, nr, args...) \
-
({ unsigned int _sys_result; \
-
{ \
-
register int _a1 asm ("a1"); \
-
LOAD_ARGS_##nr (args) \
-
asm volatile ("swi %1 @ syscall " #name \
-
: "=r" (_a1) \
-
: "i" (name) ASM_ARGS_##nr \
-
: "memory"); \
-
_sys_result = _a1; \
-
} \
-
(int) _sys_result; })
3.2.1.2 展开
将__atomic_val_bysize展开后如下:
-
#define __atomic_val_bysize(pre, post, mem, ...) \
-
({ \
-
__typeof (*mem) __atg1_result; \
-
if (sizeof (*mem) == 1) \
-
__atg1_result = __arch_compare_and_exchange_val_8_acq (mem, __VA_ARGS__); \
-
else if (sizeof (*mem) == 2) \
-
__atg1_result = __arch_compare_and_exchange_val_16_acq (mem, __VA_ARGS__); \
-
else if (sizeof (*mem) == 4) \
-
__atg1_result = __arch_compare_and_exchange_val_32_acq (mem, __VA_ARGS__); \
-
else if (sizeof (*mem) == 8) \
-
__atg1_result = __arch_compare_and_exchange_val_64_acq (mem, __VA_ARGS__); \
-
else \
-
abort (); \
-
__atg1_result; \
-
})
而pthread_mutex_lock最终调用了__arch_compare_and_exchange_val_32_acq。
最终,lll_futex_wait(futexp, val, private)展开如下:
-
({ \
-
INTERNAL_SYSCALL_DECL (__err); \
-
long int __ret; \
-
__ret = ({ unsigned int _sys_result; \
-
{ \
-
register int _a1 asm ("a1"); \
-
LOAD_ARGS_4 (futexp, FUTEX_WAIT, val, NULL) \
-
asm volatile ("swi %1 @ syscall futex" \
-
: "=r" (_a1) \
-
: "i" (__NR_futex) ASM_ARGS_4 \
-
: "memory"); \
-
_sys_result = _a1; \
-
} \
-
(int) _sys_result; })
-
__ret; \
-
})
这样,lll_futex_wait 完成系统调用futex,并传递参数FUTEX_WAIT给内核。内核执行futex_wait函数,使得当前线程睡眠。
3.2.2 总结
pthread_mutex_lock首先通过__arch_compare_and_exchange_val_32_acq进行加锁,若加锁失败,则通过__lll_lock_wait_private进行一系列操作后,执行futex系统调用陷入内核而阻塞,并等待被唤醒。
3.3 pthread_mutex_unlock
3.3.1 分析
3.3.1.1 源码
glibc-2.14/nptl/pthread_mutex_unlock.c
-
strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
-
所以,pthread_mutex_unlock是__pthread_mutex_unlock的别称。
-
-
int
-
__pthread_mutex_unlock (mutex)
-
pthread_mutex_t *mutex;
-
{
-
return __pthread_mutex_unlock_usercnt (mutex, 1);
-
}
-
-
int
-
internal_function attribute_hidden
-
__pthread_mutex_unlock_usercnt (mutex, decr)
-
pthread_mutex_t *mutex;
-
int decr;
-
{
-
int type = PTHREAD_MUTEX_TYPE (mutex);
-
if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
-
return __pthread_mutex_unlock_full (mutex, decr);
-
-
if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
-
== PTHREAD_MUTEX_TIMED_NP)
-
{
-
/* Always reset the owner field. */
-
normal:
-
mutex->__data.__owner = 0;
-
if (decr)
-
/* One less user. */
-
--mutex->__data.__nusers;
-
-
/* Unlock. */
-
lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
-
return 0;
-
}
-
else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
-
{
-
/* Recursive mutex. */
-
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
-
return EPERM;
-
-
if (--mutex->__data.__count != 0)
-
/* We still hold the mutex. */
-
return 0;
-
goto normal;
-
}
-
else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
-
goto normal;
-
else
-
{
-
/* Error checking mutex. */
-
assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
-
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
-
|| ! lll_islocked (mutex->__data.__lock))
-
return EPERM;
-
goto normal;
-
}
-
}
这里又调用了lll_unlock,继续跟踪该函数。
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/nptl/lowlevellock.h
-
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
-
-
#define __lll_unlock(futex, private) \
-
(void) \
-
({ int *__futex = (futex); \
-
int __oldval = atomic_exchange_rel (__futex, 0); \
-
if (__builtin_expect (__oldval > 1, 0)) \
-
lll_futex_wake (__futex, 1, private); \
-
})
glibc-2.14/include/atomic.h
-
#ifndef atomic_exchange_rel
-
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
-
#endif
glibc-ports-2.14/sysdeps/arm/bits/atomic.h
-
#define atomic_exchange_acq(mem, newvalue) \
-
({ __typeof (*mem) result; \
-
if (sizeof (*mem) == 1) \
-
__asm__ __volatile__ ("swpb %0, %1, [%2]" \
-
: "=&r,&r" (result) \
-
: "r,0" (newvalue), "r,r" (mem) : "memory"); \
-
else if (sizeof (*mem) == 4) \
-
__asm__ __volatile__ ("swp %0, %1, [%2]" \
-
: "=&r,&r" (result) \
-
: "r,0" (newvalue), "r,r" (mem) : "memory"); \
-
else \
-
{ \
-
result = 0; \
-
abort (); \
-
} \
-
result; })
glibc-ports-2.14/sysdeps/unix/sysv/linux/arm/nptl/lowlevellock.h
-
#define lll_futex_wake(futexp, nr, private) \
-
({ \
-
INTERNAL_SYSCALL_DECL (__err); \
-
long int __ret; \
-
__ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
-
__lll_private_flag (FUTEX_WAKE, private), \
-
(nr), 0); \
-
__ret; \
-
})
3.3.1.2 展开
最终,lll_futex_wake(futexp, nr, private)展开如下:
-
({ \
-
INTERNAL_SYSCALL_DECL (__err); \
-
long int __ret; \
-
__ret = ({ unsigned int _sys_result; \
-
{ \
-
register int _a1 asm ("a1"); \
-
LOAD_ARGS_4 (futexp, FUTEX_WAKE, nr, 0) \
-
asm volatile ("swi %1 @ syscall futex" \
-
: "=r" (_a1) \
-
: "i" (__NR_futex) ASM_ARGS_4 \
-
: "memory"); \
-
_sys_result = _a1; \
-
} \
-
(int) _sys_result; })
-
__ret; \
-
})
3.3.2 总结
尽管lll_futex_wait与lll_futex_wake均完成futex系统调用,但二者传递的参数不一样(前者:FUTEX_WAIT,后者:FUTEX_WAKE),导致实现的功能也不一样。
函数pthread_mutex_unlock最终调用lll_futex_wake,而后者通过futex系统调用,陷入内核执行内核futex_wake函数,实现唤醒因无法获取锁而睡眠的线程的功能。