/*
 * We need asm-offsets.h for:
 *  VMA_VM_MM
 *  VMA_VM_FLAGS
 *  VM_EXEC
 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
        .macro  vma_vm_mm, rd, rn
        ldr     \rd, [\rn, #VMA_VM_MM]
        .endm

/*
 * vma_vm_flags - get vma->vm_flags
 */
        .macro  vma_vm_flags, rd, rn
        ldr     \rd, [\rn, #VMA_VM_FLAGS]
        .endm

/*
 * tsk_mm - get the task's active_mm from a thread_info pointer
 */
        .macro  tsk_mm, rd, rn
        ldr     \rd, [\rn, #TI_TASK]
        ldr     \rd, [\rd, #TSK_ACTIVE_MM]
        .endm

/*
 * act_mm - get current->active_mm
 */
        .macro  act_mm, rd
        bic     \rd, sp, #8128                  @ round sp down to the 8K-aligned
        bic     \rd, \rd, #63                   @ thread_info (8128 + 63 = 8191)
        ldr     \rd, [\rd, #TI_TASK]
        ldr     \rd, [\rd, #TSK_ACTIVE_MM]
        .endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
        .macro  mmid, rd, rn
        ldr     \rd, [\rn, #MM_CONTEXT_ID]
        .endm

/*
 * asid - mask the ASID out of the context ID
 */
        .macro  asid, rd, rn
        and     \rd, \rn, #255
        .endm
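
/*
 * Illustrative only, guarded out with #if 0 (not part of the original
 * file): a minimal sketch of how the helpers above combine to fetch the
 * current ASID, assuming ARMv6-style 8-bit ASIDs.  The label
 * example_current_asid is hypothetical.
 */
#if 0
example_current_asid:
        act_mm  r0                              @ r0 = current->active_mm
        mmid    r0, r0                          @ r0 = mm->context.id
        asid    r0, r0                          @ r0 = low 8 bits, i.e. the ASID
        mov     pc, lr
#endif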

/*
 * crval - emit the control register value pair for this CPU: the bits
 * to clear, followed by the bits to set (MMU or uncached/no-MMU set).
 */
        .macro  crval, clear, mmuset, ucset
#ifdef CONFIG_MMU
        .word   \clear
        .word   \mmuset
#else
        .word   \clear
        .word   \ucset
#endif
        .endm
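
/*
 * Illustrative only, guarded out with #if 0 (not part of the original
 * file): a processor file would typically emit its control register
 * masks with crval as below.  The label and mask values here are
 * hypothetical placeholders, not taken from any real CPU.
 */
#if 0
        .type   example_crval, #object
example_crval:
        crval   clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001134
#endif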

/*
 * dcache_line_size - get the cache line size from the CCSIDR register
 * (available on ARMv7+).  It assumes that the CSSELR register was
 * configured to select the L1 data cache CCSIDR.
 */
        .macro  dcache_line_size, reg, tmp
        mrc     p15, 1, \tmp, c0, c0, 0         @ read CCSIDR
        and     \tmp, \tmp, #7                  @ cache line size encoding
        mov     \reg, #16                       @ size offset
        mov     \reg, \reg, lsl \tmp            @ actual cache line size
        .endm
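
/*
 * Illustrative only, guarded out with #if 0 (not part of the original
 * file): a sketch of the usual clean-by-MVA loop built on
 * dcache_line_size, assuming r0/r1 hold the start/end addresses.  The
 * label example_clean_dcache_range is hypothetical.
 */
#if 0
example_clean_dcache_range:
        dcache_line_size r2, r3                 @ r2 = D-cache line size in bytes
        sub     r3, r2, #1
        bic     r0, r0, r3                      @ align start down to a line
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D line by MVA
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        mov     pc, lr
#endif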


/*
 * Sanity check the PTE configuration for the code below - which makes
 * certain assumptions about how these bits are laid out.
 */
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
#error PTE bufferable bit mismatch
#endif
#if L_PTE_CACHEABLE != PTE_CACHEABLE
#error PTE cacheable bit mismatch
#endif
#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
     L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif

/*
 * The ARMv6 and ARMv7 set_pte_ext translation function.
 *
 * Permission translation:
 *  YUWD  APX AP1 AP0   SVC     User
 *  0xxx   0   0   0    no acc  no acc
 *  100x   1   0   1    r/o     no acc
 *  10x0   1   0   1    r/o     no acc
 *  1011   0   0   1    r/w     no acc
 *  110x   0   1   0    r/w     r/o
 *  11x0   0   1   0    r/w     r/o
 *  1111   0   1   1    r/w     r/w
 */
        .macro  armv6_mt_table pfx
\pfx\()_mt_table:
        .long   0x00                                            @ L_PTE_MT_UNCACHED
        .long   PTE_EXT_TEX(1)                                  @ L_PTE_MT_BUFFERABLE
        .long   PTE_CACHEABLE                                   @ L_PTE_MT_WRITETHROUGH
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_WRITEBACK
        .long   PTE_BUFFERABLE                                  @ L_PTE_MT_DEV_SHARED
        .long   0x00                                            @ unused
        .long   0x00                                            @ L_PTE_MT_MINICACHE (not present)
        .long   PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
        .long   0x00                                            @ unused
        .long   PTE_EXT_TEX(1)                                  @ L_PTE_MT_DEV_WC
        .long   0x00                                            @ unused
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_DEV_CACHED
        .long   PTE_EXT_TEX(2)                                  @ L_PTE_MT_DEV_NONSHARED
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused
        .endm

        .macro  armv6_set_pte_ext pfx
        str     r1, [r0], #-2048                @ linux version

        bic     r3, r1, #0x000003fc             @ drop Linux status bits [9:2]
        bic     r3, r3, #PTE_TYPE_MASK
        orr     r3, r3, r2                      @ merge caller-supplied ext bits
        orr     r3, r3, #PTE_EXT_AP0 | 2        @ small page, AP0 set

        adr     ip, \pfx\()_mt_table
        and     r2, r1, #L_PTE_MT_MASK
        ldr     r2, [ip, r2]                    @ look up memory type bits

        tst     r1, #L_PTE_WRITE
        tstne   r1, #L_PTE_DIRTY
        orreq   r3, r3, #PTE_EXT_APX            @ read-only unless writable and dirty

        tst     r1, #L_PTE_USER
        orrne   r3, r3, #PTE_EXT_AP1            @ user accessible
        tstne   r3, #PTE_EXT_APX
        bicne   r3, r3, #PTE_EXT_APX | PTE_EXT_AP0      @ kernel r/w for user r/o pages

        tst     r1, #L_PTE_EXEC
        orreq   r3, r3, #PTE_EXT_XN             @ execute never unless executable

        orr     r3, r3, r2                      @ add the memory type bits

        tst     r1, #L_PTE_YOUNG
        tstne   r1, #L_PTE_PRESENT
        moveq   r3, #0                          @ fault unless young and present

        str     r3, [r0]                        @ hardware version
        mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
        .endm
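
/*
 * Illustrative only, guarded out with #if 0 (not part of the original
 * file): the pattern a v6/v7 processor file typically follows - emit a
 * memory type table with armv6_mt_table and expand armv6_set_pte_ext
 * with the same prefix.  The example_v6 prefix and label are
 * hypothetical.
 */
#if 0
example_v6_set_pte_ext:
        armv6_set_pte_ext example_v6
        mov     pc, lr

        armv6_mt_table example_v6
#endif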


/*
 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
 * covering most CPUs except Xscale and Xscale 3.
 *
 * Permission translation:
 *  YUWD   AP   SVC     User
 *  0xxx  0x00  no acc  no acc
 *  100x  0x00  r/o     no acc
 *  10x0  0x00  r/o     no acc
 *  1011  0x55  r/w     no acc
 *  110x  0xaa  r/w     r/o
 *  11x0  0xaa  r/w     r/o
 *  1111  0xff  r/w     r/w
 */
        .macro  armv3_set_pte_ext wc_disable=1
        str     r1, [r0], #-2048                @ linux version

        eor     r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

        bic     r2, r1, #PTE_SMALL_AP_MASK      @ keep C, B bits
        bic     r2, r2, #PTE_TYPE_MASK
        orr     r2, r2, #PTE_TYPE_SMALL

        tst     r3, #L_PTE_USER                 @ user?
        orrne   r2, r2, #PTE_SMALL_AP_URO_SRW

        tst     r3, #L_PTE_WRITE | L_PTE_DIRTY  @ write and dirty?
        orreq   r2, r2, #PTE_SMALL_AP_UNO_SRW

        tst     r3, #L_PTE_PRESENT | L_PTE_YOUNG        @ present and young?
        movne   r2, #0

        .if     \wc_disable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        tst     r2, #PTE_CACHEABLE
        bicne   r2, r2, #PTE_BUFFERABLE
#endif
        .endif
        str     r2, [r0]                        @ hardware version
        .endm
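
/*
 * Illustrative only, guarded out with #if 0 (not part of the original
 * file): an ARMv4/v5 processor file would normally expand this macro in
 * its cpu_*_set_pte_ext entry.  wc_disable controls whether the
 * CONFIG_CPU_DCACHE_WRITETHROUGH fixup above is applied; the label and
 * the choice of wc_disable=0 here are hypothetical.
 */
#if 0
example_v4_set_pte_ext:
        armv3_set_pte_ext wc_disable=0
        mov     pc, lr
#endif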


/*
 * Xscale set_pte_ext translation, split into two halves to cope
 * with work-arounds.  r3 must be preserved by code between these
 * two macros.
 *
 * Permission translation:
 *  YUWD  AP    SVC     User
 *  0xxx  00    no acc  no acc
 *  100x  00    r/o     no acc
 *  10x0  00    r/o     no acc
 *  1011  01    r/w     no acc
 *  110x  10    r/w     r/o
 *  11x0  10    r/w     r/o
 *  1111  11    r/w     r/w
 */
        .macro  xscale_set_pte_ext_prologue
        str     r1, [r0], #-2048                @ linux version

        eor     r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

        bic     r2, r1, #PTE_SMALL_AP_MASK      @ keep C, B bits
        orr     r2, r2, #PTE_TYPE_EXT           @ extended page

        tst     r3, #L_PTE_USER                 @ user?
        orrne   r2, r2, #PTE_EXT_AP_URO_SRW     @ yes -> user r/o, system r/w

        tst     r3, #L_PTE_WRITE | L_PTE_DIRTY  @ write and dirty?
        orreq   r2, r2, #PTE_EXT_AP_UNO_SRW     @ yes -> user n/a, system r/w
                                                @ combined with user -> user r/w
        .endm

        .macro  xscale_set_pte_ext_epilogue
        tst     r3, #L_PTE_PRESENT | L_PTE_YOUNG        @ present and young?
        movne   r2, #0                          @ no -> fault

        str     r2, [r0]                        @ hardware version
        mov     ip, #0
        mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
        mcr     p15, 0, ip, c7, c10, 4          @ data write barrier
        .endm
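
/*
 * Illustrative only, guarded out with #if 0 (not part of the original
 * file): the two halves are meant to bracket any CPU-specific PTE
 * work-around, which typically adjusts the hardware PTE being built in
 * r2 but must leave r3 untouched.  The label and the placeholder
 * comment below are hypothetical.
 */
#if 0
example_xscale_set_pte_ext:
        xscale_set_pte_ext_prologue
        @ ... erratum work-around goes here; r3 must be preserved ...
        xscale_set_pte_ext_epilogue
        mov     pc, lr
#endif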