2 * include/asm-x86_64/i387.h
4 * Copyright (C) 1994 Linus Torvalds
6 * Pentium III FXSR, SSE support
7 * General FPU state handling cleanups
8 * Gareth Hughes <gareth@valinux.com>, May 2000
9 * x86-64 work by Andi Kleen 2002
12 #ifndef __ASM_X86_64_I387_H
13 #define __ASM_X86_64_I387_H
15 #include <linux/sched.h>
16 #include <asm/processor.h>
17 #include <asm/sigcontext.h>
19 #include <asm/uaccess.h>
/* Lazily set up the initial FPU state of @child (called on first FPU use). */
extern void init_fpu(struct task_struct *child);
/* Save the current task's FPU state into the user-space signal frame
 * buffer @buf.  NOTE(review): exact return convention is defined
 * out-of-line -- confirm against the arch i387.c implementation. */
extern int save_i387(struct _fpstate *buf);
/*
 * FPU lazy state save handling...
 */
/* End a kernel_fpu_begin() section: stts() re-arms the lazy-restore
 * trap (presumably by setting CR0.TS) so the next user FPU use faults
 * and reloads state. */
#define kernel_fpu_end() stts()
/* If @tsk has used the FPU (PF_USEDFPU set in its flags), save its
 * state with save_init_fpu() so the hardware FPU can safely be handed
 * to another context. */
#define unlazy_fpu( tsk ) do { \
	if ( tsk->flags & PF_USEDFPU ) \
		save_init_fpu( tsk ); \
/* Throw away @tsk's live FPU state: clear pending x87 exceptions
 * ("fnclex ; fwait") and drop the FPU-ownership flag.  Unlike
 * unlazy_fpu(), nothing is saved. */
#define clear_fpu( tsk ) do { \
	if ( tsk->flags & PF_USEDFPU ) { \
		asm volatile("fnclex ; fwait"); \
		tsk->flags &= ~PF_USEDFPU; \
/* Load the SSE control/status register MXCSR with @val.  The value is
 * first masked with 0xffbf, i.e. bit 6 is forced clear -- presumably
 * because that bit (DAZ) is not implemented on all CPUs and setting an
 * unsupported MXCSR bit would fault.  TODO(review): confirm mask
 * rationale. */
#define load_mxcsr( val ) do { \
	unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
	asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
/*
 * ptrace request handlers...
 */
/* Copy @tsk's saved FPU registers out into @buf. */
extern int get_fpregs( struct user_i387_struct *buf,
		       struct task_struct *tsk );
/* Overwrite @tsk's saved FPU registers from @buf. */
extern int set_fpregs( struct task_struct *tsk,
		       struct user_i387_struct *buf );
/*
 * FPU state for core dumps...
 */
/* Fill @fpu with the FPU state belonging to @regs for a core dump.
 * NOTE(review): the return convention (whether non-zero means "state
 * present") is defined out-of-line -- confirm there. */
extern int dump_fpu( struct pt_regs *regs,
		     struct user_i387_struct *fpu );
/*
 * i387 state interaction: accessors for the fxsave image kept in the
 * task's thread struct (thread.i387.fxsave).  These stay macros so
 * the set_* forms expand to plain assignments on the saved image.
 */
#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
/* mxcsr stores are masked with 0xffbf, matching load_mxcsr(). */
#define set_fpu_mxcsr(t,val) ((t)->thread.i387.fxsave.mxcsr = (val)&0xffbf)
/*
 * Restore FPU state from the 64-bit fxsave image at @fx
 * ("rex64 ; fxrstor").  A fault on the fxrstor (e.g. a bad user
 * pointer) is caught through the __ex_table/.fixup machinery, which
 * stores -1 in the err output; the "0" (0) input seeds err with 0
 * for the success path.
 */
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
	asm volatile("1: rex64 ; fxrstor (%[fx])\n\t"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     ".section __ex_table,\"a\"\n"
		     : [fx] "r" (fx), "0" (0));
/*
 * Save the current FPU state to @fx ("rex64 ; fxsave") with fault
 * checking: a fault during the store is caught via __ex_table/.fixup
 * and reports -1 through the err output ("0" (0) seeds err with 0).
 * The __clear_user() wipes the (user-space) buffer so no stale data
 * leaks -- presumably only on the failure path; confirm the guarding
 * condition in the full source.
 */
static inline int save_i387_checking(struct i387_fxsave_struct *fx)
	asm volatile("1: rex64 ; fxsave (%[fx])\n\t"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     ".section __ex_table,\"a\"\n"
		     : [fx] "r" (fx), "0" (0));
	__clear_user(fx, sizeof(struct i387_fxsave_struct));
/*
 * Begin a section of kernel FPU/SSE use.  If the current task owns
 * the FPU (PF_USEDFPU), its state is saved into the thread struct
 * ("rex64 ; fxsave"), pending x87 exceptions are cleared (fnclex),
 * and the ownership flag is dropped so user state reloads later.
 */
static inline void kernel_fpu_begin(void)
	struct task_struct *tsk = current;
	if (tsk->flags & PF_USEDFPU) {
		asm volatile("rex64 ; fxsave %0 ; fnclex"
			     : "=m" (tsk->thread.i387.fxsave));
		tsk->flags &= ~PF_USEDFPU;
/*
 * Save @tsk's FPU state into its thread struct and leave the FPU
 * clean for the next owner; drops the PF_USEDFPU ownership flag.
 */
static inline void save_init_fpu( struct task_struct *tsk )
	asm volatile( "fxsave %0"
		      : "=m" (tsk->thread.i387.fxsave));
	/* Status-word bit 7 set (the x87 error-summary bit -- TODO
	   confirm): clear the pending exception with fnclex so it
	   cannot fire in another context. */
	if (tsk->thread.i387.fxsave.swd & (1<<7))
		asm volatile("fnclex");
	/* AMD CPUs leak F?P through FXSAVE. Clear it here with a dummy
	   x87 load (ffree + fildl). */
	asm volatile("ffree %st(7) ; fildl %gs:0");
	tsk->flags &= ~PF_USEDFPU;
/*
 * Restore the current task's FPU state directly from the user-space
 * signal frame @buf.  Exceptions (faults on the user access) are
 * handled via the fixup table in restore_fpu_checking(); returns 0
 * on success, -1 if the restore faulted.
 */
static inline int restore_i387(struct _fpstate *buf)
	return restore_fpu_checking((struct i387_fxsave_struct *)buf);
146 #endif /* __ASM_X86_64_I387_H */