/*
 * BK Id: SCCS/s.mmu_context.h 1.26 04/05/02 11:33:38 mporter
 */
#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H

#include <linux/config.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>

/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
 * (virtual segment identifiers) for each context.  Although the
 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
 * we only use 32,768 of them.  That is ample, since there can be
 * at most around 30,000 tasks in the system anyway, and it means
 * that we can use a bitmap to indicate which contexts are in use.
 * Using a bitmap means that we entirely avoid all of the problems
 * that we used to have when the context number overflowed,
 * particularly on SMP systems.
 *  -- paulus.
 */

/*
 * This function defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * function is changed then arch/ppc/mm/hashtable.S will have to be
 * changed to correspond.
 */
#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)
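
/*
 * Worked example (an illustrative sketch, not part of the kernel
 * build): plugging small values into CTX_TO_VSID shows how the skew
 * constants scatter nearby contexts and segments across the 24-bit
 * VSID space rather than letting them cluster in the hash table.
 */
#if 0
static void ctx_to_vsid_example(void)
{
	/* context 1, segment 1 (va = 0x10000000):
	 * 1 * 14352 + 1 * 0x111 = 14352 + 273 = 14625 = 0x3921 */
	unsigned long v0 = CTX_TO_VSID(1, 0x10000000);
	/* context 2, segment 0: 2 * 14352 = 28704 = 0x7020 */
	unsigned long v1 = CTX_TO_VSID(2, 0x00000000);

	(void)v0;
	(void)v1;
}
#endif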

/*
   The MPC8xx has only 16 contexts.  We rotate through them on each
   task switch.  A better way would be to keep track of tasks that
   own contexts, and implement an LRU usage.  That way very active
   tasks don't always have to pay the TLB reload overhead.  The
   kernel pages are mapped shared, so the kernel can run on behalf
   of any task that makes a kernel entry.  Shared does not mean they
   are not protected, just that the ASID comparison is not performed.
	-- Dan

   The IBM4xx has 256 contexts, so we can just rotate through these
   as a way of "switching" contexts.  If the TID of the TLB is zero,
   the PID/TID comparison is disabled, so we can use a TID of zero
   to represent all kernel pages as shared among all contexts.
	-- Dan
 */
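
/*
 * Minimal sketch (an assumption for illustration, not the kernel's
 * code) of the plain round-robin rotation described above for a
 * 16-context part such as the MPC8xx: each task that needs a context
 * simply takes the next slot, evicting whichever task last held it.
 */
#if 0
static unsigned int next_slot;

static unsigned int rotate_context(void)
{
	unsigned int ctx = next_slot;

	next_slot = (next_slot + 1) & 15;	/* wrap after 16 contexts */
	return ctx;
}
#endif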

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

#ifdef CONFIG_8xx
#define NO_CONTEXT      	16
#define LAST_CONTEXT    	15
#define FIRST_CONTEXT    	0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT      	256
#define LAST_CONTEXT    	255
#define FIRST_CONTEXT    	1

#else

/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT      	((mm_context_t) -1)
#define LAST_CONTEXT    	32767
#define FIRST_CONTEXT    	1
#endif

/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * a BDI2000).
 */
extern void set_context(mm_context_t context, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];
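
/*
 * For reference, a definition matching the sizing described above
 * (a sketch; the actual definition lives in arch/ppc/mm code, not
 * in this header) packs LAST_CONTEXT + 1 bits into an array of longs:
 */
#if 0
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
#endif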

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only; we can't rely on this context
 * number to be free, but it usually will be.
 */
extern mm_context_t next_mmu_context;

/*
 * If we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
#if LAST_CONTEXT < 30000
#define FEW_CONTEXTS	1
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
#endif
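
/*
 * Sketch of what a context-stealing routine can look like (for
 * illustration only; the kernel's actual implementation lives in
 * arch/ppc/mm code): reclaim the context that next_mmu_context points
 * at by flushing the owning mm's translations and returning its
 * context number to the pool.
 */
#if 0
void steal_context(void)
{
	struct mm_struct *mm;

	/* Never steal a reserved context below FIRST_CONTEXT. */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];	/* current owner */
	flush_tlb_mm(mm);			/* drop its stale TLB entries */
	destroy_context(mm);			/* frees the context number */
}
#endif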

/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
#ifdef FEW_CONTEXTS
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
#endif
	/* Find a free context, starting the search at the cached hint. */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
#ifdef FEW_CONTEXTS
	context_mm[ctx] = mm;
#endif
}

/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk,mm)	(((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
#ifdef FEW_CONTEXTS
		atomic_inc(&nr_free_contexts);
#endif
	}
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk, int cpu)
{
	tsk->thread.pgdir = next->pgd;
	get_mmu_context(next);
	set_context(next->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}

extern void mmu_context_init(void);

#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */