1 #ifndef __PPC64_MMU_CONTEXT_H
2 #define __PPC64_MMU_CONTEXT_H
4 #include <linux/spinlock.h>
5 #include <linux/kernel.h>
8 #include <asm/ppcdebug.h>
11 * Copyright (C) 2001 PPC 64 Team, IBM Corp
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
20 #define FIRST_USER_CONTEXT 0x10 /* First 16 reserved for kernel */
21 #define LAST_USER_CONTEXT 0x8000 /* Same as PID_MAX for now... */
22 #define NUM_USER_CONTEXT (LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
24 /* Choose whether we want to implement our context
25 * number allocator as a LIFO or FIFO queue.
28 #define MMU_CONTEXT_LIFO
30 #define MMU_CONTEXT_FIFO
/*
 * Queue (ring buffer) of free MMU context numbers.
 * NOTE(review): this view is missing lines (the embedded numbering jumps
 * from 33 to 38), so the fields declared between them — presumably the
 * spinlock plus head/tail/size used by init_new_context()/destroy_context()
 * below — are not visible here.  Confirm against the full file.
 */
33 struct mmu_context_queue_t {
	/* Ring-buffer storage; indexed modulo LAST_USER_CONTEXT elsewhere. */
38 	mm_context_t elements[LAST_USER_CONTEXT];
/* Single global free-context queue; defined in exactly one .c file. */
41 extern struct mmu_context_queue_t mmu_context_queue;
/*
 * Hook called when a kernel thread temporarily adopts a user mm.
 * NOTE(review): the storage-class/return-type line and the body are on
 * lines missing from this view; presumably an empty inline stub on this
 * architecture — confirm against the full file.
 */
44 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
/* Flush the segment table; implementation lives in arch code elsewhere. */
48 extern void flush_stab(void);
51  * The context number queue has underflowed.
52  * Meaning: we tried to push a context number that was freed
53  * back onto the context queue and the queue was already full.
/*
 * Fatal error path for the condition described above: log, then panic.
 * Never returns.  NOTE(review): the declaration line above the function
 * name is missing from this view.
 */
56 mmu_context_underflow(void)
58 	printk(KERN_DEBUG "mmu_context_underflow\n");
	/* Unconditional: a full queue on free means context accounting is corrupt. */
59 	panic("mmu_context_underflow");
64  * Set up the context for a new address space.
/*
 * Dequeue a free context number from the head of mmu_context_queue and
 * install it in mm->context.  Runs under the queue spinlock with IRQs off.
 * NOTE(review): the declaration line, local variable declarations
 * (flags/size/head), the failure-path return value and the closing braces
 * are all on lines missing from this view.
 */
67 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
72 	spin_lock_irqsave(&mmu_context_queue.lock, flags);
	/* No free contexts left: unlock and bail out (return on a missing line). */
74 	if ( (size = mmu_context_queue.size) <= 0 ) {
75 		spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
	/* Take the context number at the head of the ring. */
79 	head = mmu_context_queue.head;
80 	mm->context = mmu_context_queue.elements[head];
	/* Advance head with wrap-around at LAST_USER_CONTEXT. */
82 	head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
83 	mmu_context_queue.head = head;
84 	mmu_context_queue.size = size-1;
86 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
92  * We're finished using the context for an address space.
/*
 * Return mm->context to the free-context queue, either LIFO (push at head)
 * or FIFO (append at tail) depending on the MMU_CONTEXT_LIFO/FIFO choice.
 * Runs under the queue spinlock with IRQs off.
 * NOTE(review): the declaration line, the #else/#endif matching the #ifdef
 * below, and the closing brace are on lines missing from this view.
 */
95 destroy_context(struct mm_struct *mm)
	/* NOTE(review): this unlocked read of .size is redundant — the value is
	 * re-read under the lock below — and racy as written; the initializer
	 * could be dropped. Left as-is since the block is incomplete here. */
97 	long index, size = mmu_context_queue.size;
100 	spin_lock_irqsave(&mmu_context_queue.lock, flags);
	/* Queue already full: freeing another context means double-free /
	 * accounting corruption; report and panic (never returns). */
102 	if ( (size = mmu_context_queue.size) >= NUM_USER_CONTEXT ) {
103 		spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
104 		mmu_context_underflow();
107 #ifdef MMU_CONTEXT_LIFO
	/* LIFO: step head back one slot (wrapping) and reuse it for the push. */
108 	index = mmu_context_queue.head;
109 	index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
110 	mmu_context_queue.head = index;
	/* FIFO branch (the #else is on a missing line): advance tail, wrapping. */
112 	index = mmu_context_queue.tail;
113 	index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
114 	mmu_context_queue.tail = index;
	/* Common path (after a missing #endif): store the freed context number. */
117 	mmu_context_queue.size = size+1;
118 	mmu_context_queue.elements[index] = mm->context;
120 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
/* NOTE(review): duplicate declaration — flush_stab() is already declared
 * earlier in this header (embedded line 48); harmless but removable. */
123 extern void flush_stab(void);
126  * switch_mm is the entry point called from the architecture independent
127  * code in kernel/sched.c
/*
 * Switch the MMU from 'prev' to 'next' on CPU 'cpu'.
 * NOTE(review): the declaration line and the body are on lines missing
 * from this view.
 */
130 switch_mm(struct mm_struct *prev, struct mm_struct *next,
131 	  struct task_struct *tsk, int cpu)
137  * After we have set current->mm to a new value, this activates
138  * the context for the new mm so we see the new mappings.
/* NOTE(review): the trailing semicolon inside this macro expansion is a
 * hazard if the macro is ever used in an unbraced if/else; a do { } while (0)
 * wrapper would be safer.  Left as-is to preserve the original bytes. */
140 #define activate_mm(active_mm, mm) \
141 	switch_mm(active_mm, mm, current, smp_processor_id());
/* Multiplier used to scatter (context, segment) ordinals across the VSID
 * space.  NOTE(review): 42470972311 needs more than 32 bits — fine where
 * 'long' is 64-bit (ppc64), but an explicit UL/ULL suffix would be clearer. */
143 #define VSID_RANDOMIZER 42470972311
/* Keep only the low 36 bits: VSIDs are 36 bits wide. */
144 #define VSID_MASK 0xfffffffff
/*
 * Compute the virtual segment ID for a kernel effective address.
 * Builds an ordinal from the segment number (ea >> 28) and the region
 * bits (ea >> 60), then scrambles it with VSID_RANDOMIZER and masks to
 * 36 bits.  NOTE(review): the 'return vsid;' and closing braces are on
 * lines missing from this view.
 */
147 /* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
149 static inline unsigned long
150 get_kernel_vsid( unsigned long ea )
152 	unsigned long ordinal, vsid;
	/* Mix segment index with the top nibble so distinct kernel regions get
	 * distinct ordinals before randomization. */
154 	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | (ea >> 60);
155 	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
	/* Debug-only override: deliberately poor VSID distribution to stress the
	 * hash page table (comment below; its closing delimiter is on a missing
	 * line). */
157 	ifppcdebug(PPCDBG_HTABSTRESS) {
158 	/* For debug, this path creates a very poor vsid distribution.
159 	 * A user program can access virtual addresses in the form
160 	 * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
161 	 * to hash to the same page table group.
163 	ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
164 	vsid = ordinal & VSID_MASK;
/*
 * Compute the virtual segment ID for a user effective address under the
 * given context number.  Same randomizer/mask scheme as get_kernel_vsid(),
 * but the low bits of the ordinal come from 'context' instead of the EA's
 * region bits.  NOTE(review): the 'return vsid;' and closing braces are on
 * lines missing from this view.
 */
170 /* This is only valid for user EA's (user EA's do not exceed 2^41 (EADDR_SIZE))
172 static inline unsigned long
173 get_vsid( unsigned long context, unsigned long ea )
175 	unsigned long ordinal, vsid;
	/* Segment index scaled past the context space, then OR'd with the
	 * per-address-space context number. */
177 	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
178 	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
	/* Debug-only override: intentionally poor distribution (see the stress
	 * comment in get_kernel_vsid above). */
180 	ifppcdebug(PPCDBG_HTABSTRESS) {
181 	/* See comment above. */
182 	ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
183 	vsid = ordinal & VSID_MASK;
189 #endif /* __PPC64_MMU_CONTEXT_H */