#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 *
 * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))

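/*
 * For example, a user address in region 2 (i.e., (addr >> 61) == 2) under
 * context number 5 yields region id (5 << 3) | 2 == 0x2a, while region-0
 * addresses of the same context yield 0x28: the low three bits identify the
 * region, the remaining bits carry the per-address-space context number.
 */
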
# ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};

extern struct ia64_ctx ia64_ctx;

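/*
 * wrap_mmu_context() (in the ia64 tlb code) is called by get_mmu_context()
 * below once ia64_ctx.next reaches ia64_ctx.limit; it recycles the
 * context-number space, which in turn requires flushing TLB entries still
 * tagged with the old region ids.
 */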
extern void wrap_mmu_context (struct mm_struct *mm);

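/*
 * Nothing needs to be done when a kernel thread temporarily borrows an
 * address space ("lazy TLB" mode), hence the empty body below.
 */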
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

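/*
 * Return the context number of MM, allocating one on first use.  The fast
 * path reads mm->context without taking the lock; the slow path re-checks
 * under ia64_ctx.lock so that two threads racing on a fresh mm end up with
 * the same context number.
 */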
static inline mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	mm_context_t context = mm->context;

	if (context)
		return context;

	spin_lock(&ia64_ctx.lock);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		if (ia64_ctx.next >= ia64_ctx.limit)
			wrap_mmu_context(mm);
		mm->context = context = ia64_ctx.next++;
	}
	spin_unlock(&ia64_ctx.lock);
	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
 * address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

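/*
 * reload_context() below rebuilds the five user region registers for CONTEXT.
 * As the code spells out, each region-register value carries the VHPT-enable
 * bit in bit 0, the preferred page size (PAGE_SHIFT) in bits 2-7, and the
 * region id starting at bit 8; rid_incr == (1 << 8) therefore bumps the rid
 * field by one for each successive 2^61-byte region.
 */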
static inline void
reload_context (mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4;

	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
	ia64_set_rr(0x0000000000000000, rr0);
	ia64_set_rr(0x2000000000000000, rr1);
	ia64_set_rr(0x4000000000000000, rr2);
	ia64_set_rr(0x6000000000000000, rr3);
	ia64_set_rr(0x8000000000000000, rr4);
	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
	ia64_insn_group_barrier();
}

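/*
 * Load MM's context into the region registers, retrying in the unlikely
 * event that a TLB flush by another thread changes mm->context while the
 * registers are being loaded.
 */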
static inline void
activate_context (struct mm_struct *mm)
{
	mm_context_t context;

	do {
		context = get_mmu_context(mm);
		reload_context(context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt handlers cannot
	 * touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

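/*
 * On ia64, switching to an address space and activating it for the first
 * time are the same operation, so switch_mm() is simply an alias for
 * activate_mm().
 */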
#define switch_mm(prev_mm,next_mm,next_task,cpu)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */