 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 * "current" is in register %ebx during any slow entries.
/*
 * Kernel-internal headers: config options, ENTRY()/linkage macros,
 * thread_info field offsets, errno values, segment selectors, and the
 * interrupt-vector layout shared with the irq code.
 */
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include "irq_vectors.h"

/* Number of entries in sys_call_table; each entry is a 4-byte pointer. */
#define nr_syscalls ((syscall_table_size)/4)
/*
 * Non-CONFIG_PREEMPT variants: preempt_stop just masks interrupts,
 * and returning to kernel mode needs no preemption check.
 * NOTE(review): this region is a fragment — several macro bodies are
 * incomplete here; confirm against the full file before assembling.
 */
#define preempt_stop		cli
#define resume_kernel		restore_all

/* NOTE(review): orphan SAVE_ALL fragment — reloads %ds/%es via __USER_DS. */
	movl $(__USER_DS), %edx; \

/* Pop the general-purpose registers pushed on entry (fragment). */
#define RESTORE_INT_REGS \

/*
 * RESTORE_INT_REGS plus popping %ds/%es.  The .fixup/.__ex_table pair
 * turns a faulting segment-register pop into a controlled recovery
 * instead of an unhandled kernel fault (fragment).
 */
#define RESTORE_REGS	\
	.section .fixup,"ax"; \
	.section __ex_table,"a";\

/*
 * Full register restore + iret back to the interrupted context.  The
 * fixup path forces sane user data segments if the restore faults
 * (fragment).
 */
#define RESTORE_ALL	\
	.section .fixup,"ax"; \
	movl $(__USER_DS), %edx; \
	.section __ex_table,"a";\
145 pushfl # We get a different stack layout with call
146 # gates, which has to be cleaned up later..
153 movl EIP(%ebp), %eax # due to call gates, this is eflags, not eip..
154 movl CS(%ebp), %edx # this is eip..
155 movl EFLAGS(%ebp), %ecx # and this is cs..
156 movl %eax,EFLAGS(%ebp) #
157 movl %edx,EIP(%ebp) # Now we move them to their "normal" places
159 GET_THREAD_INFO_WITH_ESP(%ebp) # GET_THREAD_INFO
160 movl TI_exec_domain(%ebp), %edx # Get the execution domain
161 call *EXEC_DOMAIN_handler(%edx) # Call the handler for the domain
167 pushfl # We get a different stack layout with call
168 # gates, which has to be cleaned up later..
180 GET_THREAD_INFO(%ebp)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	GET_THREAD_INFO(%ebp)
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	testl $(VM_MASK | 3), %eax	# CPL 0 and not vm86 => kernel return
	jz resume_kernel		# returning to kernel or vm86-space
ENTRY(resume_userspace)
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?

#ifdef CONFIG_PREEMPT
	# NOTE(review): fragment of the preemptible resume_kernel path.
	# Preempt only when preempt_count is zero, need_resched is set,
	# and the interrupted context had interrupts enabled; the
	# surrounding conditional jumps are missing from this fragment.
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
	movl $0,TI_preempt_count(%ebp)
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	# On sysenter the CPU loads esp0 from the TSS; switch to the real
	# per-task kernel stack stored there.
	movl TSS_sysenter_esp0(%esp),%esp
	pushl $SYSENTER_RETURN

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp	# user pointer must stay below kernel space
.section __ex_table,"a"
	.long 1b,syscall_fault		# faulting user read -> syscall_fault

	GET_THREAD_INFO(%ebp)
	cmpl $(nr_syscalls), %eax	# bounds-check the syscall number
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	call *sys_call_table(,%eax,4)
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# any exit work pending?
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl OLDESP(%esp), %ecx		# user esp for the sysexit fast path
276 # system call handler stub
278 pushl %eax # save orig_eax
280 GET_THREAD_INFO(%ebp)
281 cmpl $(nr_syscalls), %eax
283 # system call tracing in operation
284 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
285 jnz syscall_trace_entry
287 call *sys_call_table(,%eax,4)
288 movl %eax,EAX(%esp) # store the return value
290 cli # make sure we don't miss an interrupt
291 # setting need_resched or sigpending
292 # between sampling and the iret
293 movl TI_flags(%ebp), %ecx
294 testw $_TIF_ALLWORK_MASK, %cx # current->work
295 jne syscall_exit_work
299 # perform work that needs to be done immediately before resumption
302 testb $_TIF_NEED_RESCHED, %cl
306 cli # make sure we don't miss an interrupt
307 # setting need_resched or sigpending
308 # between sampling and the iret
309 movl TI_flags(%ebp), %ecx
310 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
311 # than syscall tracing?
313 testb $_TIF_NEED_RESCHED, %cl
316 work_notifysig: # deal with pending signals and
317 # notify-resume requests
318 testl $VM_MASK, EFLAGS(%esp)
320 jne work_notifysig_v86 # returning to kernel-space or
323 call do_notify_resume
333 call do_notify_resume
336 # perform syscall exit tracing
339 movl $-ENOSYS,EAX(%esp)
342 call do_syscall_trace
343 movl ORIG_EAX(%esp), %eax
344 cmpl $(nr_syscalls), %eax
348 # perform syscall exit tracing
351 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
353 sti # could let do_syscall_trace() call
357 call do_syscall_trace
362 pushl %eax # save orig_eax
364 GET_THREAD_INFO(%ebp)
365 movl $-EFAULT,EAX(%esp)
370 movl $-ENOSYS,EAX(%esp)
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
ENTRY(irq_entries_start)

#define BUILD_INTERRUPT(name, nr) \

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

	# divide_error stub: hardware pushes no error code, fake one
	pushl $0			# no error code
	pushl $do_divide_error

	# common error_code path (fragment): pull the error code and the
	# handler address out of the frame, then call handler(regs, code)
	movl ORIG_EAX(%esp), %esi	# get the error code
	movl ES(%esp), %edi		# get the function address
	movl %eax, ORIG_EAX(%esp)
	pushl %esi			# push the error code
	pushl %edx			# push the pt_regs pointer
	movl $(__USER_DS), %edx		# reload kernel-visible data segments
	jmp ret_from_exception

ENTRY(coprocessor_error)
	pushl $do_coprocessor_error

ENTRY(simd_coprocessor_error)
	pushl $do_simd_coprocessor_error

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	call math_state_restore		# lazily restore FPU state
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	jmp ret_from_exception
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label) \
	cmpw $__KERNEL_CS,4(%esp); \
	movl TSS_sysenter_esp0+offset(%esp),%esp; \
	pushl $__KERNEL_CS; \
	pushl $sysenter_past_esp

	# debug-trap check (fragment): did we fault on sysenter itself?
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
	cmpl $sysenter_entry,(%esp)
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax	# offset within the stack page
	cmpl $(THREAD_SIZE-20),%eax	# room for the 5 words we inspect?
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	# Was the NMI taken inside the debug handler's sysenter fixup?
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug - 1,(%esp)
	jle nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	jle nmi_debug_stack_fixup
nmi_debug_stack_fixup:
	# 24 = the debug handler's 3 extra words on top of the NMI's 3
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct
562 ENTRY(coprocessor_segment_overrun)
564 pushl $do_coprocessor_segment_overrun
568 pushl $do_invalid_TSS
571 ENTRY(segment_not_present)
572 pushl $do_segment_not_present
576 pushl $do_stack_segment
579 ENTRY(general_protection)
580 pushl $do_general_protection
583 ENTRY(alignment_check)
584 pushl $do_alignment_check
591 #ifdef CONFIG_X86_MCE
594 pushl machine_check_vector
598 ENTRY(spurious_interrupt_bug)
600 pushl $do_spurious_interrupt_bug
/*
 * System-call dispatch table, indexed by syscall number (see
 * nr_syscalls above; each entry is one 4-byte pointer).
 * NOTE(review): this fragment elides many entries — the /* N */
 * comments mark every 5th slot in the full table; do not renumber.
 */
ENTRY(sys_call_table)
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_open			/* 5 */
	.long sys_unlink		/* 10 */
	.long sys_chmod			/* 15 */
	.long sys_ni_syscall		/* old break syscall holder */
	.long sys_getpid		/* 20 */
	.long sys_stime			/* 25 */
	.long sys_utime			/* 30 */
	.long sys_ni_syscall		/* old stty syscall holder */
	.long sys_ni_syscall		/* old gtty syscall holder */
	.long sys_ni_syscall		/* 35 - old ftime syscall holder */
	.long sys_rmdir			/* 40 */
	.long sys_ni_syscall		/* old prof syscall holder */
	.long sys_brk			/* 45 */
	.long sys_getegid16		/* 50 */
	.long sys_umount		/* recycled never used phys() */
	.long sys_ni_syscall		/* old lock syscall holder */
	.long sys_fcntl			/* 55 */
	.long sys_ni_syscall		/* old mpx syscall holder */
	.long sys_ni_syscall		/* old ulimit syscall holder */
	.long sys_umask			/* 60 */
	.long sys_getpgrp		/* 65 */
	.long sys_setreuid16		/* 70 */
	.long sys_sethostname
	.long sys_setrlimit		/* 75 */
	.long sys_old_getrlimit
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16		/* 80 */
	.long sys_setgroups16
	.long sys_readlink		/* 85 */
	.long old_mmap			/* 90 */
	.long sys_fchown16		/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall		/* old profil syscall holder */
	.long sys_fstatfs		/* 100 */
	.long sys_getitimer		/* 105 */
	.long sys_iopl			/* 110 */
	.long sys_ni_syscall		/* old "idle" system call */
	.long sys_swapoff		/* 115 */
	.long sys_clone			/* 120 */
	.long sys_setdomainname
	.long sys_mprotect		/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall		/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall		/* 130: old "get_kernel_syms" */
	.long sys_sysfs			/* 135 */
	.long sys_personality
	.long sys_ni_syscall		/* reserved for afs_syscall */
	.long sys_llseek		/* 140 */
	.long sys_readv			/* 145 */
	.long sys_mlock			/* 150 */
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_setresuid16
	.long sys_getresuid16		/* 165 */
	.long sys_ni_syscall		/* Old sys_query_module */
	.long sys_setresgid16		/* 170 */
	.long sys_getresgid16
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64		/* 180 */
	.long sys_capset		/* 185 */
	.long sys_sigaltstack
	.long sys_ni_syscall		/* reserved for streams1 */
	.long sys_ni_syscall		/* reserved for streams2 */
	.long sys_vfork			/* 190 */
	.long sys_ftruncate64
	.long sys_stat64		/* 195 */
	.long sys_getgid		/* 200 */
	.long sys_getgroups		/* 205 */
	.long sys_setresgid		/* 210 */
	.long sys_setfsuid		/* 215 */
	.long sys_getdents64		/* 220 */
	.long sys_ni_syscall		/* reserved for TUX */
	.long sys_readahead		/* 225 */
	.long sys_lgetxattr		/* 230 */
	.long sys_removexattr		/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_futex			/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_set_thread_area
	.long sys_get_thread_area
	.long sys_io_setup		/* 245 */
	.long sys_io_getevents
	.long sys_fadvise64		/* 250 */
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl		/* 255 */
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime		/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime		/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_tgkill		/* 270 */
	.long sys_fadvise64_64
	.long sys_ni_syscall		/* sys_vserver */
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_getsetattr
	.long sys_ni_syscall		/* reserved for kexec */

/* Size in bytes; nr_syscalls divides this by 4 for the entry count. */
syscall_table_size=(.-sys_call_table)