1 #ifndef __X86_64_MMU_CONTEXT_H
2 #define __X86_64_MMU_CONTEXT_H
4 #include <linux/config.h>
6 #include <asm/atomic.h>
7 #include <asm/pgalloc.h>
9 #include <asm/pgtable.h>
10 #include <linux/spinlock.h>
13 /* possibly do the LDT unload here? */
/* Tear down arch-specific mm state. Currently a no-op: nothing is freed
 * here (in particular, no LDT unload — see the open question above). */
15 #define destroy_context(mm) do { } while(0)
/* Initialize arch-specific state for a fresh mm: set up the LDT rwlock.
 * The statement expression evaluates to 0, i.e. always reports success. */
16 #define init_new_context(tsk,mm) ({ rwlock_init(&(mm)->context.ldtlock); 0; })
/*
 * Mark this CPU's TLB state as lazy when it is still holding a borrowed mm
 * (e.g. while running a kernel thread): only downgrade TLBSTATE_OK to
 * TLBSTATE_LAZY, leaving other states untouched. Presumably this lets the
 * flush-IPI path skip this CPU until the mm is switched back in — confirm
 * against the TLB flush IPI handler.
 * NOTE(review): the function's braces and the enclosing #ifdef CONFIG_SMP
 * lines are not visible in this chunk of the file.
 */
20 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
22 if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
23 cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
/* Second enter_lazy_tlb definition — presumably the !CONFIG_SMP variant
 * with an empty body; the body and the #else guard are not visible in
 * this chunk, so verify against the full file. */
26 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
/* Activate `next` as this CPU's address space by delegating to switch_mm
 * on the current CPU, with the task argument passed as NULL. */
31 #define activate_mm(prev, next) \
32 switch_mm((prev),(next),NULL,smp_processor_id())
/*
 * Switch CPU `cpu` from address space `prev` to `next`: stop flush IPIs
 * for prev, update the per-CPU tlbstate bookkeeping, and point this CPU's
 * top-level page table slot (level4_pgt in the PDA) at next->pgd.
 * NOTE(review): this function continues past the end of the visible chunk
 * (closing braces, the LDT reload body, and the lazy-TLB else-branch
 * structure are not shown here) — the comments below are best-effort.
 */
35 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
36 struct task_struct *tsk, unsigned cpu)
39 /* stop flush ipis for the previous mm */
40 clear_bit(cpu, &prev->cpu_vm_mask);
42 * Re-load LDT if necessary
44 if (prev->context.segments != next->context.segments)
/* Record that this CPU now actively runs `next` and may receive its
 * flush IPIs again. */
47 cpu_tlbstate[cpu].state = TLBSTATE_OK;
48 cpu_tlbstate[cpu].active_mm = next;
50 set_bit(cpu, &next->cpu_vm_mask);
51 set_bit(cpu, &next->context.cpuvalid);
52 /* Re-load page tables */
53 *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE;
/* Presumably the prev == next path: re-validate state after lazy TLB
 * mode — confirm against the full function body. */
58 cpu_tlbstate[cpu].state = TLBSTATE_OK;
59 if(cpu_tlbstate[cpu].active_mm != next)
61 if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
62 /* We were in lazy tlb mode and leave_mm disabled
63 * tlb flush IPI delivery. We must reload the page
66 *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE;
69 if (!test_and_set_bit(cpu, &next->context.cpuvalid))