Glibc-2.11
ports/sysdeps/unix/sysv/linux/arm/clone.S
66 #ifdef __ARM_EABI__
67 ldr r7, =SYS_ify(clone)
68 swi 0x0
69 #else
70 swi SYS_ify(clone)
71 #endif
|
The SWI instruction puts the ARM processor into SVC mode and sets pc to the base address of the vector table + 0x00000008 (the offset of the SWI entry). The banked lr (lr_svc) holds the return address, which is the address of the SWI instruction plus 4.
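To see this convention from the user side, here is a minimal sketch of an EABI system call made directly from C. This is not glibc's code; it assumes an ARM EABI toolchain and uses 20 (__NR_getpid on 32-bit ARM) as the example number. The syscall number goes in r7 and swi 0 traps into the kernel:

/* Minimal sketch of an ARM EABI syscall from user space (not glibc code).
 * Assumes an ARM EABI toolchain; 20 is __NR_getpid on 32-bit ARM. */
#include <stdio.h>

static long my_syscall0(long nr)
{
        register long r7 __asm__("r7") = nr;   /* syscall number in r7 */
        register long r0 __asm__("r0");        /* return value in r0   */
        __asm__ volatile("swi #0"              /* trap into SVC mode   */
                         : "=r"(r0)
                         : "r"(r7)
                         : "memory");
        return r0;
}

int main(void)
{
        printf("pid = %ld\n", my_syscall0(20)); /* 20 == __NR_getpid */
        return 0;
}
|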
The vector table can be placed at 0x00000000 or at 0xffff0000, depending on SCTLR.V.
arch/arm/mm/proc-v7.S
278 adr r5, v7_crval
279 ldmia r5, {r5, r6}
280 #ifdef CONFIG_CPU_ENDIAN_BE8
281 orr r6, r6, #1 << 25 @ big-endian page tables
282 #endif
283 mrc p15, 0, r0, c1, c0, 0 @ read control register
284 bic r0, r0, r5 @ clear bits them
285 orr r0, r0, r6 @ set them
286 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
287 mov pc, lr @ return to head.S:__ret
288 ENDPROC(__v7_setup)
289
290 /* AT
291 * TFR EV X F I D LR S
292 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
293 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
294 * 1 0 110 0011 1100 .111 1101 < we want
295 */
296 .type v7_crval, #object
297 v7_crval:
298 crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
|
From the code above, we can see that SCTLR.V (bit 13) is set to 1 by the mmuset value, so the base address of the vector table is 0xffff0000.
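As a quick sanity check, a trivial C snippet (illustrative only) confirms that bit 13 is set in the mmuset value above:

#include <stdio.h>

int main(void)
{
        unsigned long mmuset = 0x10c03c7dUL;   /* from v7_crval above       */
        unsigned long sctlr_v = 1UL << 13;     /* SCTLR.V: high vectors bit */

        /* prints "V=1": the kernel selects vectors at 0xffff0000 */
        printf("V=%d\n", (mmuset & sctlr_v) ? 1 : 0);
        return 0;
}
|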
arch/arm/kernel/traps.c
756 void __init early_trap_init(void)
757 {
758 unsigned long vectors = CONFIG_VECTORS_BASE;
759 extern char __stubs_start[], __stubs_end[];
760 extern char __vectors_start[], __vectors_end[];
761 extern char __kuser_helper_start[], __kuser_helper_end[];
762 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
763
764 /*
765 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
766 * into the vector page, mapped at 0xffff0000, and ensure these
767 * are visible to the instruction stream.
768 */
769 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
770 memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
771 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
772
773 /*
774 * Copy signal return handlers into the vector page, and
775 * set sigreturn to be a pointer to these.
776 */
777 memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
778 sizeof(sigreturn_codes));
779 memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
780 sizeof(syscall_restart_code));
781
782 flush_icache_range(vectors, vectors + PAGE_SIZE);
783 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
784 }
|
From the code above, we can see that the vector table is copied to 0xffff0000 and the stubs are copied to 0xffff0000 + 0x200.
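Putting early_trap_init() together, the vector page layout can be sketched in C (the addresses follow from the memcpy calls above; kuser_sz here is an assumed value, for illustration only):

/* Sketch of the vector page built by early_trap_init(). */
#include <stdio.h>

int main(void)
{
        unsigned long vectors = 0xffff0000UL;  /* CONFIG_VECTORS_BASE      */
        unsigned long kuser_sz = 0x300;        /* assumed size, for show   */

        printf("vector table:  0x%08lx\n", vectors);          /* 8 entries  */
        printf("stubs:         0x%08lx\n", vectors + 0x200);  /* vector_swi */
        printf("kuser helpers: 0x%08lx\n", vectors + 0x1000 - kuser_sz);
        return 0;
}
|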
The code below is the source of the vector table; the entry at offset 0x00000008 is the instruction "W(ldr) pc, .LCvswi + stubs_offset".
arch/arm/kernel/entry-armv.S
1216 /*
1217 * We group all the following data together to optimise
1218 * for CPUs with separate I & D caches.
1219 */
1220 .align 5
1221
1222 .LCvswi:
1223 .word vector_swi
1224
1225 .globl __stubs_end
1226 __stubs_end:
1227
1228 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
1229
1230 .globl __vectors_start
1231 __vectors_start:
1232 ARM( swi SYS_ERROR0 )
1233 THUMB( svc #0 )
1234 THUMB( nop )
1235 W(b) vector_und + stubs_offset
1236 W(ldr) pc, .LCvswi + stubs_offset
1237 W(b) vector_pabt + stubs_offset
1238 W(b) vector_dabt + stubs_offset
1239 W(b) vector_addrexcptn + stubs_offset
1240 W(b) vector_irq + stubs_offset
1241 W(b) vector_fiq + stubs_offset
1242
1243 .globl __vectors_end
1244 __vectors_end:
|
Why is there a stubs_offset? When the assembler encodes an ldr or b instruction, it stores the offset of the target (here .LCvswi) relative to pc, not the target's absolute address. As early_trap_init() showed, both the stubs and the vectors are copied, and their relative layout changes: in the source the stubs sit before the vectors, but at run time they sit 0x200 bytes after them. The distance from a vector-table instruction to its stub therefore grows by the size of the stubs plus 0x200, i.e. (__vectors_start - __stubs_start) + 0x200, which is exactly how stubs_offset is defined. Since .LCvswi lives in the stubs, references to it must be adjusted by stubs_offset as well.
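To make the fixup concrete, here is a small C model of the address arithmetic. All the link-time addresses are made up for illustration; only the relationships between them matter:

/* C model of the stubs_offset fixup (all addresses are made up).
 * At link time the stubs sit BEFORE the vectors; at run time the
 * vectors are copied to 0xffff0000 and the stubs to 0xffff0200. */
#include <stdio.h>

int main(void)
{
        unsigned long stubs_start   = 0xc0020000UL;  /* link-time __stubs_start   */
        unsigned long vectors_start = 0xc0020300UL;  /* link-time __vectors_start */
        unsigned long lcvswi        = 0xc0020100UL;  /* link-time .LCvswi         */

        unsigned long stubs_offset = vectors_start + 0x200 - stubs_start;

        /* The ldr at vector offset 8 encodes a pc-relative distance to
         * (.LCvswi + stubs_offset). After the copy, pc-relative math is
         * done from the copied table, so the load resolves correctly. */
        unsigned long link_target = lcvswi + stubs_offset;
        unsigned long delta       = link_target - vectors_start;
        unsigned long run_target  = 0xffff0000UL + delta;

        /* expected: 0xffff0200 + (.LCvswi - __stubs_start) = 0xffff0300 */
        printf("runtime .LCvswi copy at 0x%08lx\n", run_target);
        return 0;
}
|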
Next we enter the vector_swi handler.
arch/arm/kernel/entry-header.S
168 /*
169 * These are the registers used in the syscall handler, and allow us to
170 * have in theory up to 7 arguments to a function - r0 to r6.
171 *
172 * r7 is reserved for the system call number for thumb mode.
173 *
174 * Note that tbl == why is intentional.
175 *
176 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
177 */
178 scno .req r7 @ syscall number
179 tbl .req r8 @ syscall table pointer
180 why .req r8 @ Linux syscall (!= 0)
181 tsk .req r9 @ current thread_info
|
arch/arm/kernel/entry-common.S
169 /*=============================================================================
170 * SWI handler
171 *-----------------------------------------------------------------------------
172 */
188 .align 5
189 ENTRY(vector_swi)
190 sub sp, sp, #S_FRAME_SIZE
|
S_FRAME_SIZE is defined as sizeof(struct pt_regs) in asm-offsets.c.
arch/arm/kernel/asm-offsets.c
4 DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs))
|
struct pt_regs is defined in ptrace.h:
arch/arm/include/asm/ptrace.h
107 struct pt_regs {
108 unsigned long uregs[18];
109 };
111
112 #define ARM_cpsr uregs[16]
113 #define ARM_pc uregs[15]
114 #define ARM_lr uregs[14]
115 #define ARM_sp uregs[13]
116 #define ARM_ip uregs[12]
117 #define ARM_fp uregs[11]
118 #define ARM_r10 uregs[10]
119 #define ARM_r9 uregs[9]
120 #define ARM_r8 uregs[8]
121 #define ARM_r7 uregs[7]
122 #define ARM_r6 uregs[6]
123 #define ARM_r5 uregs[5]
124 #define ARM_r4 uregs[4]
125 #define ARM_r3 uregs[3]
126 #define ARM_r2 uregs[2]
127 #define ARM_r1 uregs[1]
128 #define ARM_r0 uregs[0]
129 #define ARM_ORIG_r0 uregs[17]
|
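So the frame carved out at vector_swi is exactly one struct pt_regs, and the S_* constants used below are byte offsets into it. A C sketch of the values asm-offsets.c would generate, assuming a 32-bit build where unsigned long is 4 bytes:

/* Sketch of the S_* offsets (assumes 4-byte unsigned long). */
#include <stdio.h>
#include <stddef.h>

struct pt_regs { unsigned long uregs[18]; };

int main(void)
{
        printf("S_FRAME_SIZE = %zu\n", sizeof(struct pt_regs));          /* 72 */
        printf("S_R0     = %zu\n", offsetof(struct pt_regs, uregs[0]));  /* 0  */
        printf("S_SP     = %zu\n", offsetof(struct pt_regs, uregs[13])); /* 52 */
        printf("S_PC     = %zu\n", offsetof(struct pt_regs, uregs[15])); /* 60 */
        printf("S_PSR    = %zu\n", offsetof(struct pt_regs, uregs[16])); /* 64 */
        printf("S_OLD_R0 = %zu\n", offsetof(struct pt_regs, uregs[17])); /* 68 */
        return 0;
}
|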
191 stmia sp, {r0 - r12} @ Calling r0 - r12
192 ARM( add r8, sp, #S_PC ) @ S_PC is the offset of pc in pt_regs
193 ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr; ^ means user-mode registers
194 THUMB( mov r8, sp )
195 THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
196 mrs r8, spsr @ called from non-FIQ mode, so ok.
197 str lr, [sp, #S_PC] @ Save calling PC
198 str r8, [sp, #S_PSR] @ Save CPSR
199 str r0, [sp, #S_OLD_R0] @ Save OLD_R0
200 zero_fp @ set fp to zero, defined in entry-header.S
|
I skipped the “#if defined(CONFIG_OABI_COMPAT)” code, because in EABI mode the system call number is simply passed in r7. In OABI compat mode, the kernel recovers the number from the SWI instruction itself:
ldr r10, [lr, #-4] @ get SWI instruction
/*
* If the swi argument is zero, this is an EABI call and we do nothing.
*
* If this is an old ABI call, get the syscall number into scno and
* get the old ABI syscall table address.
*/
bics r10, r10, #0xff000000
eorne scno, r10, #__NR_OABI_SYSCALL_BASE
ldrne tbl, =sys_oabi_call_table
|
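The same decoding, modelled in C for clarity (the example instruction word is made up; __NR_OABI_SYSCALL_BASE is 0x900000 on ARM):

/* C model of the OABI-compat decode above. An OABI "swi __NR_xxx"
 * encodes the syscall number in the instruction's low 24 bits;
 * EABI uses "swi 0", so the masked value is zero. */
#include <stdio.h>

#define __NR_OABI_SYSCALL_BASE 0x900000UL

int main(void)
{
        unsigned long insn = 0xef900004UL;           /* "swi 0x900004": OABI write */
        unsigned long comment = insn & 0x00ffffffUL; /* bics r10, r10, #0xff000000 */

        if (comment != 0)                            /* old ABI call */
                printf("scno = %lu\n", comment ^ __NR_OABI_SYSCALL_BASE); /* 4 */
        else
                printf("EABI call: scno already in r7\n");
        return 0;
}
|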
253 #ifdef CONFIG_ALIGNMENT_TRAP
254 ldr ip, __cr_alignment
255 ldr ip, [ip]
256 mcr p15, 0, ip, c1, c0 @ update control register. See page B3-96 in armv7_arm.
257 #endif
258 enable_irq
259
260 get_thread_info tsk
|
get_thread_info is defined in entry-header.S
arch/arm/kernel/entry-header.S
111 .macro get_thread_info, rd
112 mov \rd, sp, lsr #13
113 mov \rd, \rd, lsl #13
114 .endm
|
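Shifting sp right and then left by 13 simply clears its low 13 bits: the kernel stack is 8KB (2^13 bytes) and struct thread_info sits at its base, so the masked sp is the current thread_info pointer. In C terms (the sp value is made up):

/* C equivalent of get_thread_info (assumes an 8KB, 8KB-aligned
 * kernel stack, i.e. THREAD_SIZE == 8192). */
#include <stdio.h>

int main(void)
{
        unsigned long sp = 0xc7a11f2cUL;        /* some kernel sp       */
        unsigned long ti = sp & ~(8192UL - 1);  /* == (sp >> 13) << 13  */

        printf("thread_info at 0x%08lx\n", ti); /* 0xc7a10000 */
        return 0;
}
|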
261 adr tbl, sys_call_table @ load syscall table pointer
262 ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing. TI_FLAGS is the offset of flags in thread_info.
279 stmdb sp!, {r4, r5} @ push fifth and sixth args
280 tst ip, #_TIF_SYSCALL_TRACE | _TIF_KERNEL_TRACE
281 bne __sys_trace @ are we tracing syscalls?
282
283 cmp scno, #NR_syscalls @ check upper syscall limit
284 adr lr, BSYM(ret_fast_syscall) @ return address
285 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
286
287 add r1, sp, #S_OFF
288 2: mov why, #0 @ no longer a real syscall
289 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
290 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
291 bcs arm_syscall
292 b sys_ni_syscall @ not private func
293 ENDPROC(vector_swi)
|
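The dispatch at lines 283-285 is in effect a bounds-checked indirect call through the syscall table. A C model (illustrative; the stub entries are stand-ins, and real sys_* routines take their arguments in r0-r3 and on the stack):

/* C model of "ldrcc pc, [tbl, scno, lsl #2]" (illustrative only;
 * the real out-of-range path also handles ARM private syscalls). */
#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_ni_syscall(void) { return -38; }    /* -ENOSYS      */
static long sys_getpid_stub(void) { return 1234; }  /* stand-in entry */

static syscall_fn sys_call_table[] = { sys_ni_syscall, sys_getpid_stub };
#define NR_syscalls (sizeof(sys_call_table) / sizeof(sys_call_table[0]))

static long dispatch(unsigned long scno)
{
        if (scno < NR_syscalls)                 /* cmp scno, #NR_syscalls        */
                return sys_call_table[scno]();  /* ldrcc pc, [tbl, scno, lsl #2] */
        return sys_ni_syscall();                /* out-of-range fallback         */
}

int main(void)
{
        printf("%ld\n", dispatch(1));   /* 1234 */
        printf("%ld\n", dispatch(99));  /* -38  */
        return 0;
}
|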
After the syscall routine returns, execution continues at ret_fast_syscall, the return address loaded into lr above.
19 .align 5
20 /*
21 * This is the fast syscall return path. We do as little as
22 * possible here, and this includes saving r0 back into the SVC
23 * stack.
24 */
25 ret_fast_syscall:
26 UNWIND(.fnstart )
27 UNWIND(.cantunwind )
28 disable_irq @ disable interrupts
29 ldr r1, [tsk, #TI_FLAGS]
30 tst r1, #_TIF_WORK_MASK
31 bne fast_work_pending
32
33 /* perform architecture specific actions before user return */
34 arch_ret_to_user r1, lr
35
36 restore_user_regs fast = 1, offset = S_OFF
|
If we don't have work pending, then we return to user mode.
arch/arm/kernel/entry-header.S
91 .macro restore_user_regs, fast = 0, offset = 0
92 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
93 ldr lr, [sp, #\offset + S_PC]! @ get pc
94 msr spsr_cxsf, r1 @ save in spsr_svc
95 #if defined(CONFIG_CPU_32v6K)
96 clrex @ clear the exclusive monitor
97 #elif defined (CONFIG_CPU_V6)
98 strex r1, r2, [sp] @ clear the exclusive monitor
99 #endif
100 .if \fast
101 ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
102 .else
103 ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
104 .endif
105 mov r0, r0 @ ARMv5T and earlier require a nop
106 @ after ldm {}^
107 add sp, sp, #S_FRAME_SIZE - S_PC
108 movs pc, lr @ return & move spsr_svc into cpsr
109 .endm
|
37 UNWIND(.fnend )
38
39 /*
40 * Ok, we need to do extra processing, enter the slow path.
41 */
42 fast_work_pending:
43 tst r1, #_TIF_KERNEL_TRACE @ flag can be set asynchronously
44 bne __sys_trace_return
45 str r0, [sp, #S_R0+S_OFF]! @ returned r0
46 work_pending:
47 tst r1, #_TIF_NEED_RESCHED
48 bne work_resched
49 tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
50 beq no_work_pending
51 mov r0, sp @ 'regs'
52 mov r2, why @ 'syscall'
53 bl do_notify_resume
54 b ret_slow_syscall @ Check work again
55
56 work_resched:
57 bl schedule
58 /*
59 * "slow" syscall return path. "why" tells us if this was a real syscall.
60 */
61 ENTRY(ret_to_user)
62 ret_slow_syscall:
63 disable_irq @ disable interrupts
64 ldr r1, [tsk, #TI_FLAGS]
65 tst r1, #_TIF_WORK_MASK
66 bne work_pending
67 no_work_pending:
68 /* perform architecture specific actions before user return */
69 arch_ret_to_user r1, lr
70
71 restore_user_regs fast = 0, offset = 0
72 ENDPROC(ret_to_user)
|
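The slow path is essentially a loop: keep handling reschedules and signal delivery until TI_FLAGS has no work bits left, then restore the user registers. A runnable C sketch of that loop (the helpers and flag values are stand-ins for the assembly above):

/* C-like sketch of the ret_to_user work loop. The helpers are
 * stand-ins and the flag bits illustrative; the real logic is
 * the assembly above. */
#include <stdio.h>

#define _TIF_SIGPENDING    (1 << 0)
#define _TIF_NEED_RESCHED  (1 << 1)
#define _TIF_WORK_MASK     (_TIF_SIGPENDING | _TIF_NEED_RESCHED)

static unsigned long ti_flags = _TIF_NEED_RESCHED | _TIF_SIGPENDING;

static void schedule(void)         /* work_resched path */
{
        puts("schedule()");
        ti_flags &= ~_TIF_NEED_RESCHED;
}

static void do_notify_resume(void) /* signal delivery path */
{
        puts("do_notify_resume()");
        ti_flags &= ~_TIF_SIGPENDING;
}

int main(void)
{
        /* ret_slow_syscall: loop until TI_FLAGS has no work bits left */
        while (ti_flags & _TIF_WORK_MASK) {
                if (ti_flags & _TIF_NEED_RESCHED)
                        schedule();
                else
                        do_notify_resume();
        }
        puts("restore_user_regs -> back to user mode");
        return 0;
}
|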