X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fkernel%2Ffpu.S;h=b780b42c95fc0f57c42dbb4b12ed58876742e4d3;hb=6bac953fa424519f784ba2589fefd8f040ce54f0;hp=04e95e810b21f3b5557904749f44cf9f25e1a2eb;hpb=b85a046af3a260e079505e8023ccd10e01cf4f2b;p=powerpc.git

diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 04e95e810b..b780b42c95 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -10,7 +10,7 @@
  */
 
 #include <linux/config.h>
-#include <asm/processor.h>
+#include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
@@ -40,21 +40,21 @@ _GLOBAL(load_up_fpu)
  */
 #ifndef CONFIG_SMP
 	LOADBASE(r3, last_task_used_math)
-	tophys(r3,r3)
-	LDL	r4,OFF(last_task_used_math)(r3)
-	CMPI	0,r4,0
+	toreal(r3)
+	PPC_LL	r4,OFF(last_task_used_math)(r3)
+	PPC_LCMPI	0,r4,0
 	beq	1f
-	tophys(r4,r4)
+	toreal(r4)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
-	LDL	r5,PT_REGS(r4)
-	tophys(r5,r5)
-	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	stfd	fr0,THREAD_FPSCR(r4)
+	PPC_LL	r5,PT_REGS(r4)
+	toreal(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r10,MSR_FP|MSR_FE0|MSR_FE1
 	andc	r4,r4,r10		/* disable FP for previous task */
-	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #endif /* CONFIG_SMP */
 	/* enable use of FP after return */
@@ -71,13 +71,13 @@ _GLOBAL(load_up_fpu)
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	lfd	fr0,THREAD_FPSCR-4(r5)
+	lfd	fr0,THREAD_FPSCR(r5)
 	mtfsf	0xff,fr0
 	REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
-	tovirt(r4,r4)
-	STL	r4,OFF(last_task_used_math)(r3)
+	fromreal(r4)
+	PPC_STL	r4,OFF(last_task_used_math)(r3)
 #endif /* CONFIG_SMP */
 	/* restore registers and return */
 	/* we haven't used ctr or xer or lr */
@@ -97,23 +97,48 @@ _GLOBAL(giveup_fpu)
 	MTMSRD(r5)			/* enable use of fpu now */
 	SYNC_601
 	isync
-	CMPI	0,r3,0
+	PPC_LCMPI	0,r3,0
 	beqlr-				/* if no previous owner, done */
 	addi	r3,r3,THREAD		/* want THREAD of task */
-	LDL	r5,PT_REGS(r3)
-	CMPI	0,r5,0
+	PPC_LL	r5,PT_REGS(r3)
+	PPC_LCMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
+	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
-	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
 	andc	r4,r4,r3		/* disable FP for previous task */
-	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #ifndef CONFIG_SMP
 	li	r5,0
 	LOADBASE(r4,last_task_used_math)
-	STL	r5,OFF(last_task_used_math)(r4)
+	PPC_STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+_GLOBAL(cvt_fd)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
+
+_GLOBAL(cvt_df)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
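
Note on the new cvt_fd/cvt_df helpers: the alignment exception handler uses
them to emulate single-precision lfs/stfs at addresses the hardware faults
on. Each helper makes the task's saved FPSCR live before the conversion and
captures it again afterwards, so the task sees the same rounding mode and
exception bits as if its own instruction had executed. The C sketch below
models only that bracketing; it is illustrative rather than kernel code, and
the struct and function names in it are made up for the example.

#include <fenv.h>

struct fp_thread {
	fenv_t fpscr_image;	/* hypothetical stand-in for THREAD_FPSCR */
};

/* Model of cvt_fd: emulate lfs, widening single to double precision. */
static double emulate_cvt_fd(const float *src, struct fp_thread *t)
{
	double d;

	fesetenv(&t->fpscr_image);	/* like mtfsf 0xff,0: restore FPSCR */
	d = (double)*src;		/* like lfs+stfd: exact widening    */
	fegetenv(&t->fpscr_image);	/* like mffs+stfd: save FPSCR back  */
	return d;
}

/* Model of cvt_df: emulate stfs, rounding double to single precision. */
static float emulate_cvt_df(const double *src, struct fp_thread *t)
{
	float f;

	fesetenv(&t->fpscr_image);	/* restore the task's FP environment */
	f = (float)*src;		/* rounding may set sticky flags      */
	fegetenv(&t->fpscr_image);	/* hand the new flags back to task   */
	return f;
}

The double-to-single direction is the interesting case: narrowing honours
the task's current rounding mode and can raise inexact or overflow, which is
why the kernel performs it through the real FPU with the task's FPSCR loaded
instead of converting the bits in integer code.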