#include <linux/config.h>
/*
 * For ivt.s we want to access the stack virtually so we don't have to disable translation
 * on interrupts.
 *
 * On entry:
 *	r1:	pointer to current task (ar.k6)
 */
#define MINSTATE_START_SAVE_MIN_VIRT \
dep r1=-1,r1,61,3; /* r1 = current (virtual) */ \
(pUser) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUser) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pUser) mov r24=ar.rnat; \
(pKern) mov r1=sp; /* get sp */ \
;; \
(pUser) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUser) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUser) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUser) mov r18=ar.bsp; \
(pUser) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
#define MINSTATE_END_SAVE_MIN_VIRT \
or r13=r13,r14; /* make `current' a kernel virtual address */ \
bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
;;
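
/*
 * A sketch of the per-task kernel stack layout the address computations
 * above assume (the names are the real IA64_* constants; the picture
 * itself is illustrative):
 *
 *	current + IA64_STK_OFFSET:	highest address
 *		pt_regs (IA64_PT_REGS_SIZE bytes)
 *		memory stack (sp/r12), growing down
 *		...
 *		register backing store, growing up
 *	current + IA64_RBS_OFFSET:	base of kernel RBS
 *	current:			struct task_struct
 *
 * This is why the user-mode path derives both r22 (RBS base) and r1
 * (memory-stack base) from the same task pointer.
 */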
/*
 * For mca_asm.S we want to access the stack physically since the state is saved before we
 * go virtual and we don't want to destroy the iip or ipsr.
 */
#define MINSTATE_START_SAVE_MIN_PHYS \
(pKern) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE; \
(pUser) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUser) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
;; \
(pUser) mov r24=ar.rnat; \
(pKern) dep r1=0,sp,61,3; /* compute physical addr of sp */ \
(pUser) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUser) mov r23=ar.bspstore; /* save ar.bspstore */ \
(pUser) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
;; \
(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUser) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUser) mov r18=ar.bsp; \
(pUser) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
#define MINSTATE_END_SAVE_MIN_PHYS \
or r12=r12,r14; /* make sp a kernel virtual address */ \
or r13=r13,r14; /* make `current' a kernel virtual address */ \
;;
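
/*
 * Note on the physical flavor: sp is deliberately left physical (MCA
 * handling runs with translation off), while the RBS base in r22 is
 * converted to a region-7 kernel virtual address before being loaded
 * into ar.bspstore, presumably so the backing store is addressed
 * correctly once the handler goes virtual.
 */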
#ifdef MINSTATE_VIRT
# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
#endif

#ifdef MINSTATE_PHYS
# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
#endif
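
/*
 * Expected selection pattern (a sketch; the actual includer lines live
 * in ivt.S and mca_asm.S and may differ between kernel versions):
 *
 *	#define MINSTATE_VIRT		// ivt.S: handlers run on the
 *	#include "minstate.h"		// virtually-mapped kernel stack
 *
 *	#define MINSTATE_PHYS		// mca_asm.S: state is saved before
 *	#include "minstate.h"		// going virtual
 */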
/*
 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *	psr.ic: off
 *	r31: contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *	psr.ic: off
 *	 r2 = points to &pt_regs.r16
 *	 r8 = contents of ar.ccv
 *	 r9 = contents of ar.csd
 *	r10 = contents of ar.ssd
 *	r11 = FPSR_DEFAULT
 *	r12 = kernel sp (kernel virtual address)
 *	r13 = points to current task_struct (kernel virtual address)
 *	p15 = TRUE if psr.i is set in cr.ipsr
 *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *		preserved
 *
 * Note that psr.ic is NOT turned on by this macro. This is so that
 * we can pass interruption state as arguments to a handler.
 */
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
mov r17=IA64_KR(CURRENT); /* r17 = current task (physical) */ \
mov r27=ar.rsc; /* save ar.rsc */ \
mov r20=r1; /* save original r1 */ \
mov r25=ar.unat; /* save ar.unat */ \
mov r29=cr.ipsr; /* save cr.ipsr */ \
mov r26=ar.pfs; /* save ar.pfs */ \
mov r28=cr.iip; /* save cr.iip */ \
mov r21=ar.fpsr; /* save ar.fpsr */ \
COVER; /* B;; (or nothing) */ \
;; \
extr.u r16=r29,32,2; /* extract psr.cpl */ \
;; \
cmp.eq pKern,pUser=r0,r16; /* are we in kernel mode already? (psr.cpl==0) */ \
/* switch from user to kernel RBS: */ \
;; \
SAVE_IFS; \
MINSTATE_START_SAVE_MIN \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
;; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,32; \
;; \
st8 [r16]=r28,8; /* save cr.iip */ \
mov r28=b0; /* reuse r28 (rCRIIP) to hold b0 */ \
(pKern) mov r18=r0; /* make sure r18 isn't NaT */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=r30,16; /* save cr.ifs */ \
st8 [r17]=r25,16; /* save ar.unat */ \
(pUser) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
;; \
st8 [r16]=r26,16; /* save ar.pfs */ \
st8 [r17]=r27,16; /* save ar.rsc */ \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT \
;; /* avoid RAW on r16 & r17 */ \
(pKern) adds r16=16,r16; /* skip over ar_rnat field */ \
(pKern) adds r17=16,r17; /* skip over ar_bspstore field */ \
(pUser) st8 [r16]=r24,16; /* save ar.rnat */ \
(pUser) st8 [r17]=r23,16; /* save ar.bspstore */ \
;; \
st8 [r16]=r31,16; /* save predicates */ \
st8 [r17]=r28,16; /* save b0 */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
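/* (loadrs sketch: r18 holds RSE.ndirty*8 from the sub above; the \
    ar.rsc.loadrs field occupies bits 29:16, so shifting the dirty \
    byte count left by 16 yields the value "loadrs" expects) */ \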
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
st8.spill [r17]=r20,16; /* save original r1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r12,16; \
.mem.offset 8,0; st8.spill [r17]=r13,16; \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8 [r16]=r21,PT(R14)-PT(AR_FPSR); /* ar.fpsr */ \
.mem.offset 8,0; st8.spill [r17]=r15,PT(R3)-PT(R15); \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
.mem.offset 0,0; st8.spill [r16]=r14,8; \
dep r14=-1,r0,61,3; /* r14 = 0xe000000000000000 (region-7 mask) */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
MINSTATE_END_SAVE_MIN
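
/*
 * Usage sketch (a hypothetical handler modeled on the usual
 * interruption-entry pattern, not a verbatim ivt.S excerpt): after the
 * minimal state is saved, psr.ic can be switched back on and the rest
 * of pt_regs captured:
 *
 *	SAVE_MIN_WITH_COVER
 *	;;
 *	ssm psr.ic | psr.dt	// collection and data translation back on
 *	;;
 *	srlz.i			// serialize so psr.ic takes effect
 *	;;
 *	SAVE_REST
 */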
/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *	psr.ic: on
 *	r2: points to &pt_regs.r16
 *	r3: points to &pt_regs.r17
 *	r8: contents of ar.ccv
 *	r9: contents of ar.csd
 *	r10: contents of ar.ssd
 *	r11: FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
mov r18=b6; /* r18's original value is spilled above */ \
;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
mov r19=b7; /* r19's original value is spilled above */ \
;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,32; \
;; \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar.ccv */ \
adds r24=PT(B6)-PT(F7),r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
;; \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
stf.spill [r2]=f10; \
stf.spill [r3]=f11; \
adds r25=PT(B7)-PT(F11),r3; \
;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
st8 [r25]=r10; /* ar.ssd */ \
;;
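
/*
 * Background on the spill pattern above: st8.spill records the NaT bit
 * of the spilled register in ar.unat, in the bit selected by bits 8:3
 * of the store address. The .mem.offset annotations tell the assembler
 * which slot each store targets so it can track those ar.unat updates,
 * and the paired pointers (r2/r3, then r24/r25) let the two stores of
 * each pair sit in the same instruction group without register
 * dependences.
 */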
#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define SAVE_MIN		DO_SAVE_MIN( , mov r30=r0, )
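
/*
 * Expansion summary (follows directly from DO_SAVE_MIN's parameters):
 * SAVE_MIN_WITH_COVER issues `cover' so that cr.ifs describes the
 * interrupted frame, and captures it with `mov r30=cr.ifs'; plain
 * SAVE_MIN issues no cover and stores 0 (r30=r0) in the cr.ifs slot;
 * the _R19 variant additionally uses EXTRA to hand r19 to the handler
 * in r15, after the original r15 has already been spilled.
 */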