1 #ifndef __I386_MMU_CONTEXT_H
2 #define __I386_MMU_CONTEXT_H
4 #include <linux/config.h>
6 #include <asm/atomic.h>
7 #include <asm/pgalloc.h>
10 * hooks to add arch specific data into the mm struct.
11 * Note that destroy_context is called even if init_new_context
14 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
15 void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SMP
/*
 * Called when a CPU stops using its current mm (e.g. goes idle or
 * switches to a kernel thread).  Downgrade this CPU's TLB state to
 * LAZY so that leave_mm can drop it from the mm's cpu_vm_mask and
 * spare it subsequent TLB-flush IPIs.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
}
#else
/* UP: there are no cross-CPU flush IPIs to avoid, so this is a no-op. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
#endif
30 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
33 /* stop flush ipis for the previous mm */
34 clear_bit(cpu, &prev->cpu_vm_mask);
36 cpu_tlbstate[cpu].state = TLBSTATE_OK;
37 cpu_tlbstate[cpu].active_mm = next;
39 set_bit(cpu, &next->cpu_vm_mask);
40 /* Re-load page tables */
42 /* load_LDT, if either the previous or next thread
43 * has a non-default LDT.
45 if (next->context.size+prev->context.size)
46 load_LDT(&next->context);
50 cpu_tlbstate[cpu].state = TLBSTATE_OK;
51 if(cpu_tlbstate[cpu].active_mm != next)
53 if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
54 /* We were in lazy tlb mode and leave_mm disabled
55 * tlb flush IPI delivery. We must reload %cr3.
58 load_LDT(&next->context);
64 #define activate_mm(prev, next) \
65 switch_mm((prev),(next),NULL,smp_processor_id())