update offsets to new European GB kernel, set software booting from USB
diff --git a/entry-header.S b/entry-header.S
index 87ab4e157997f9f335d461c501964750a52296d2..051166c2a932cfed1620bb3a5612383ffff12149 100644
--- a/entry-header.S
+++ b/entry-header.S
 #endif
        .endm
 
-       .macro  get_thread_info, rd
-       mov     \rd, sp, lsr #13
-       mov     \rd, \rd, lsl #13
-       .endm
-
        .macro  alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
        ldr     \rtemp, .LCcralign
 #endif
        .endm
 
+       @
+       @ Store/load the USER SP and LR registers by switching to the SYS
+       @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+       @ available. Should only be called from SVC mode.
+       @
+       .macro  store_user_sp_lr, rd, rtemp, offset = 0
+       mrs     \rtemp, cpsr
+       eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+       msr     cpsr_c, \rtemp                  @ switch to the SYS mode
+
+       str     sp, [\rd, #\offset]             @ save sp_usr
+       str     lr, [\rd, #\offset + 4]         @ save lr_usr
+
+       eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+       msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
+       .endm
+
+       .macro  load_user_sp_lr, rd, rtemp, offset = 0
+       mrs     \rtemp, cpsr
+       eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+       msr     cpsr_c, \rtemp                  @ switch to the SYS mode
+
+       ldr     sp, [\rd, #\offset]             @ load sp_usr
+       ldr     lr, [\rd, #\offset + 4]         @ load lr_usr
+
+       eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+       msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
+       .endm
+
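Editor's note: the eor/msr pairs above work because XOR with a constant is its own inverse. The first eor flips exactly the mode bits that differ between SVC and SYS, and the second flips them back, leaving the rest of the CPSR untouched; SYS mode is used because it shares the banked sp and lr with User mode. A sketch (not part of the patch) with the architectural mode encodings SVC_MODE = 0x13, SYSTEM_MODE = 0x1f:

        @ rtemp = cpsr                       @ mode bits = 0x13 (SVC)
        @ rtemp ^= (0x13 ^ 0x1f)   (= 0x0c)  @ mode bits = 0x1f (SYS)
        @ msr cpsr_c, rtemp                  @ sp/lr are now the user's
        @ ... store or load sp/lr ...
        @ rtemp ^= 0x0c                      @ mode bits = 0x13 again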
+#ifndef CONFIG_THUMB2_KERNEL
+       .macro  svc_exit, rpsr
+       msr     spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_V6)
+       ldr     r0, [sp]
+       strex   r1, r2, [sp]                    @ clear the exclusive monitor
+       ldmib   sp, {r1 - pc}^                  @ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+       clrex                                   @ clear the exclusive monitor
+       ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
+#else
+       ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
+#endif
+       .endm
+
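Editor's note on the CONFIG_CPU_V6 branch: ARMv6 has no clrex, so the exclusive monitor is cleared with a dummy strex. That strex may genuinely store r2 to [sp] when the monitor happens to be open, which is why r0 is rescued with a plain ldr first and the ldmib (increment-before) then restores r1-pc starting one word above the possibly clobbered r0 slot. On 32v6K and later, clrex does the same job without touching memory.

        @ strex rd, rm, [rn]: if the monitor is exclusive, store rm
        @ to [rn] and set rd = 0; otherwise skip the store and set
        @ rd = 1.  Either way the monitor is left cleared.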
+       .macro  restore_user_regs, fast = 0, offset = 0
+       ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
+       ldr     lr, [sp, #\offset + S_PC]!      @ get pc
+       msr     spsr_cxsf, r1                   @ save in spsr_svc
+#if defined(CONFIG_CPU_V6)
+       strex   r1, r2, [sp]                    @ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+       clrex                                   @ clear the exclusive monitor
+#endif
+       .if     \fast
+       ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
+       .else
+       ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
+       .endif
+       mov     r0, r0                          @ ARMv5T and earlier require a nop
+                                               @ after ldm {}^
+       add     sp, sp, #S_FRAME_SIZE - S_PC
+       movs    pc, lr                          @ return & move spsr_svc into cpsr
+       .endm
+
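Editor's note: the writeback in "ldr lr, [sp, #\offset + S_PC]!" does double duty. It fetches the saved pc and leaves sp pointing at the S_PC slot, so the ldmdb that follows walks back down over exactly the registers below it. A worked sketch with the usual ARM pt_regs layout (S_PC = 60, S_PSR = 64, S_FRAME_SIZE = 72; the real values come from asm-offsets, so treat these as assumptions):

        @ frame: r0..r14 at 0..56, pc at 60, cpsr at 64, orig_r0 at 68
        @ ldr lr, [sp, #60]!     @ lr = user pc, sp = frame + 60
        @ ldmdb sp, {r0 - lr}^   @ user r0-r14 from frame+0 .. frame+56
        @ add sp, sp, #72 - 60   @ sp = frame + 72, frame fully popped
        @ movs pc, lr            @ return, spsr_svc copied into cpsr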
+       .macro  get_thread_info, rd
+       mov     \rd, sp, lsr #13
+       mov     \rd, \rd, lsl #13
+       .endm
+
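Editor's note: get_thread_info relies on struct thread_info living at the base of the 8 KiB (2^13 byte) kernel stack, so shifting sp right and then left by 13 simply clears the low 13 bits. For example:

        @ sp              = 0xc7001ef0
        @ sp >> 13 << 13  = 0xc7000000   @ base of the 8 KiB stack,
        @                                @ i.e. this task's thread_info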
+       @
+       @ 32-bit wide "mov pc, reg"
+       @
+       .macro  movw_pc, reg
+       mov     pc, \reg
+       .endm
+#else  /* CONFIG_THUMB2_KERNEL */
+       .macro  svc_exit, rpsr
+       clrex                                   @ clear the exclusive monitor
+       ldr     r0, [sp, #S_SP]                 @ top of the stack
+       ldr     r1, [sp, #S_PC]                 @ return address
+       tst     r0, #4                          @ orig stack 8-byte aligned?
+       stmdb   r0, {r1, \rpsr}                 @ rfe context
+       ldmia   sp, {r0 - r12}
+       ldr     lr, [sp, #S_LR]
+       addeq   sp, sp, #S_FRAME_SIZE - 8       @ aligned
+       addne   sp, sp, #S_FRAME_SIZE - 4       @ not aligned
+       rfeia   sp!
+       .endm
+
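Editor's note: Thumb-2 has no "ldm {..., pc}^", so this svc_exit builds an RFE frame instead. stmdb stores the return pc and \rpsr just below the saved stack pointer r0, and rfeia sp! later pops that pair into pc and cpsr in one operation. The addeq/addne pair compensates for the 4 bytes of padding the entry path inserts when the original sp was only 4-byte aligned, so sp lands exactly on the rfe frame in both cases. A sketch, assuming the entry code saved S_SP as frame + S_FRAME_SIZE (plus 4 when padded):

        @ stmdb r0, {r1, \rpsr}   @ [r0-8] = pc, [r0-4] = cpsr
        @ add{eq,ne} sp, ...      @ sp = r0 - 8 either way
        @ rfeia sp!               @ pc = [sp], cpsr = [sp+4], sp += 8,
        @                         @ leaving sp = the original saved sp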
+       .macro  restore_user_regs, fast = 0, offset = 0
+       clrex                                   @ clear the exclusive monitor
+       mov     r2, sp
+       load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
+       ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
+       ldr     lr, [sp, #\offset + S_PC]       @ get pc
+       add     sp, sp, #\offset + S_SP
+       msr     spsr_cxsf, r1                   @ save in spsr_svc
+       .if     \fast
+       ldmdb   sp, {r1 - r12}                  @ get calling r1 - r12
+       .else
+       ldmdb   sp, {r0 - r12}                  @ get calling r0 - r12
+       .endif
+       add     sp, sp, #S_FRAME_SIZE - S_SP
+       movs    pc, lr                          @ return & move spsr_svc into cpsr
+       .endm
+
+       .macro  get_thread_info, rd
+       mov     \rd, sp
+       lsr     \rd, \rd, #13
+       mov     \rd, \rd, lsl #13
+       .endm
+
+       @
+       @ 32-bit wide "mov pc, reg"
+       @
+       .macro  movw_pc, reg
+       mov     pc, \reg
+       nop
+       .endm
+#endif /* !CONFIG_THUMB2_KERNEL */
+
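Editor's note: the nop in the Thumb-2 movw_pc is what makes the "32-bit wide" comment true. "mov pc, \reg" assembles to a 16-bit Thumb instruction, so the pad keeps every movw_pc occupying 4 bytes under either instruction set, letting computed branches step through a table of these entries with a fixed stride:

        @ ARM:     mov pc, \reg           @ 4 bytes
        @ Thumb-2: mov pc, \reg ; nop     @ 2 + 2 = 4 bytes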
+       @
+       @ Debug exceptions are taken as prefetch or data aborts.
+       @ We must disable preemption during the handler so that
+       @ we can access the debug registers safely.
+       @
+       .macro  debug_entry, fsr
+#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
+       ldr     r4, =0x40f              @ mask out fsr.fs
+       and     r5, r4, \fsr
+       cmp     r5, #2                  @ debug exception
+       bne     1f
+       get_thread_info r10
+       ldr     r6, [r10, #TI_PREEMPT]  @ get preempt count
+       add     r11, r6, #1             @ increment it
+       str     r11, [r10, #TI_PREEMPT]
+1:
+#endif
+       .endm
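Editor's note: the 0x40f mask keeps DFSR bits [10] and [3:0], which together hold the fault status; a status of 0b00010 means a debug event. Only then is thread_info->preempt_count (at offset TI_PREEMPT, from asm-offsets) incremented, so the debug handler runs with preemption disabled. For example:

        @ r5 = \fsr & 0x40f     @ fault status bits [10] and [3:0]
        @ r5 == 2: debug event, bump preempt_count, fall through
        @ r5 != 2 (e.g. 0x005, a translation fault): branch to 1: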
 
 /*
  * These are the registers used in the syscall handler, and allow us to