/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
        vaddr >>= PAGE_SHIFT;
        return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
        return (tag == (vaddr >> 22));
}

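/* Worked example (editor's addition, not in the original source): with the
 * default 8K base page size (PAGE_SHIFT == 13) and assuming the usual
 * 16-byte tag/PTE pair per entry, an 8K TSB holds 512 entries, so a virtual
 * address V indexes entry (V >> 13) & 511 while bits 63:22 of V are stored
 * as the tag.  Addresses that collide on the same entry are told apart by
 * the tag comparison above.
 */
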
/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v)) {
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
                        membar_storeload_storestore();
                }
        }
}

void flush_tsb_user(struct mmu_gather *mp)
{
        struct mm_struct *mm = mp->mm;
        unsigned long nentries, base, flags;
        struct tsb *tsb;
        int i;

        spin_lock_irqsave(&mm->context.lock, flags);

        tsb = mm->context.tsb;
        nentries = mm->context.tsb_nentries;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                base = __pa(tsb);
        else
                base = (unsigned long) tsb;

        for (i = 0; i < mp->tlb_nr; i++) {
                unsigned long v = mp->vaddrs[i];
                unsigned long tag, ent, hash;

                v &= ~0x1UL;

                hash = tsb_hash(v, nentries);
                ent = base + (hash * sizeof(struct tsb));
                tag = (v >> 22UL);

                tsb_flush(ent, tag);
        }

        spin_unlock_irqrestore(&mm->context.lock, flags);
}

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

        base = TSBMAP_BASE;
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                page_sz = 8192;
                break;
        case 8192 << 1:
                tsb_reg = 0x1UL;
                page_sz = 64 * 1024;
                break;
        case 8192 << 2:
                tsb_reg = 0x2UL;
                page_sz = 64 * 1024;
                break;
        case 8192 << 3:
                tsb_reg = 0x3UL;
                page_sz = 64 * 1024;
                break;
        case 8192 << 4:
                tsb_reg = 0x4UL;
                page_sz = 512 * 1024;
                break;
        case 8192 << 5:
                tsb_reg = 0x5UL;
                page_sz = 512 * 1024;
                break;
        case 8192 << 6:
                tsb_reg = 0x6UL;
                page_sz = 512 * 1024;
                break;
        case 8192 << 7:
                tsb_reg = 0x7UL;
                page_sz = 4 * 1024 * 1024;
                break;
        default:
                BUG();
        };
        tte |= pte_sz_bits(page_sz);

        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                /* Physical mapping, no locked TLB entry for TSB.  */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_reg_val = tsb_reg;
                mm->context.tsb_map_vaddr = 0;
                mm->context.tsb_map_pte = 0;
        } else {
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_reg_val = tsb_reg;
                mm->context.tsb_map_vaddr = base;
                mm->context.tsb_map_pte = tte;
        }

        /* Setup the Hypervisor TSB descriptor.  */
        if (tlb_type == hypervisor) {
                struct hv_tsb_descr *hp = &mm->context.tsb_descr;

                switch (PAGE_SIZE) {
                case 8192:
                default:
                        hp->pgsz_idx = HV_PGSZ_IDX_8K;
                        break;
                case 64 * 1024:
                        hp->pgsz_idx = HV_PGSZ_IDX_64K;
                        break;
                case 512 * 1024:
                        hp->pgsz_idx = HV_PGSZ_IDX_512K;
                        break;
                case 4 * 1024 * 1024:
                        hp->pgsz_idx = HV_PGSZ_IDX_4MB;
                        break;
                };
                hp->assoc = 1;
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
                switch (PAGE_SIZE) {
                case 8192:
                default:
                        hp->pgsz_mask = HV_PGSZ_MASK_8K;
                        break;
                case 64 * 1024:
                        hp->pgsz_mask = HV_PGSZ_MASK_64K;
                        break;
                case 512 * 1024:
                        hp->pgsz_mask = HV_PGSZ_MASK_512K;
                        break;
                case 4 * 1024 * 1024:
                        hp->pgsz_mask = HV_PGSZ_MASK_4MB;
                        break;
                };
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * do_sparc64_fault() invokes this routine to try and grow the TSB.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
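/* Worked example (editor's addition, not in the original source): assuming
 * each TSB entry is a 16-byte tag/PTE pair, as the num_ttes computation in
 * setup_tsb_params() implies, an 8K TSB holds 512 translations and is grown
 * once RSS reaches 3/4 of that (384 pages); the next size, 16K, holds 1024
 * and grows at 768, and so on up to the 1MB maximum, after which
 * tsb_rss_limit is pinned at ~0UL and no further growth is attempted.
 */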
void tsb_grow(struct mm_struct *mm, unsigned long rss)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long size, old_size, flags;
        struct page *page;
        struct tsb *old_tsb, *new_tsb;
        unsigned long order, new_rss_limit;
        gfp_t gfp_flags;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);

        for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
                unsigned long n_entries = size / sizeof(struct tsb);

                n_entries = (n_entries * 3) / 4;
                if (n_entries > rss)
                        break;
        }

        if (size == max_tsb_size)
                new_rss_limit = ~0UL;
        else
                new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4;

retry_page_alloc:
        order = get_order(size);
        gfp_flags = GFP_KERNEL;
        if (order > 1)
                gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

        page = alloc_pages(gfp_flags, order);
        if (unlikely(!page)) {
                /* Not being able to fork due to a high-order TSB
                 * allocation failure is very bad behavior.  Just back
                 * down to a 0-order allocation and force no TSB
                 * growing for this address space.
                 */
                if (mm->context.tsb == NULL && order > 0) {
                        size = PAGE_SIZE;
                        new_rss_limit = ~0UL;
                        goto retry_page_alloc;
                }

                /* If we failed on a TSB grow, we are under serious
                 * memory pressure so don't try to grow any more.
                 */
                if (mm->context.tsb != NULL)
                        mm->context.tsb_rss_limit = ~0UL;
                return;
        }

        /* Mark all tags as invalid.  */
        new_tsb = page_address(page);
        memset(new_tsb, 0x40, size);
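        /* Note (editor's addition): filling with 0x40 puts 0x40 in every
         * byte of each entry's tag word, which sets bit 46; assuming
         * TSB_TAG_INVALID_BIT is 46, every entry of the new table therefore
         * starts out invalid, matching what flush_tsb_kernel_range() writes
         * when it invalidates a single entry.
         */
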
        /* Ok, we are about to commit the changes.  If we are
         * growing an existing TSB the locking is very tricky,
         * so WATCH OUT!
         *
         * We have to hold mm->context.lock while committing to the
         * new TSB, this synchronizes us with processors in
         * flush_tsb_user() and switch_mm() for this address space.
         *
         * But even with that lock held, processors run asynchronously
         * accessing the old TSB via TLB miss handling.  This is OK
         * because those actions are just propagating state from the
         * Linux page tables into the TSB, page table mappings are not
         * being changed.  If a real fault occurs, the processor will
         * synchronize with us when it hits flush_tsb_user(), this is
         * also true for the case where vmscan is modifying the page
         * tables.  The only thing we need to be careful with is to
         * skip any locked TSB entries during copy_tsb().
         *
         * When we finish committing to the new TSB, we have to drop
         * the lock and ask all other cpus running this address space
         * to run tsb_context_switch() to see the new TSB table.
         */
        spin_lock_irqsave(&mm->context.lock, flags);

        old_tsb = mm->context.tsb;
        old_size = mm->context.tsb_nentries * sizeof(struct tsb);

        /* Handle multiple threads trying to grow the TSB at the same time.
         * One will get in here first, and bump the size and the RSS limit.
         * The others will get in here next and hit this check.
         */
        if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
                spin_unlock_irqrestore(&mm->context.lock, flags);

                free_pages((unsigned long) new_tsb, get_order(size));
                return;
        }

        mm->context.tsb_rss_limit = new_rss_limit;

        if (old_tsb) {
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
                                     unsigned long new_tsb_size);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;

                if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
                copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
        }

        mm->context.tsb = new_tsb;
        setup_tsb_params(mm, size);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Reload it on the local cpu.  */
                tsb_context_switch(mm);

                /* Now force other processors to do the same.  */
                smp_tsb_sync(mm);

                /* Now it is safe to free the old tsb.  */
                free_pages((unsigned long) old_tsb, get_order(old_size));
        }
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        spin_lock_init(&mm->context.lock);

        mm->context.sparc64_ctx_val = 0UL;

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        mm->context.tsb = NULL;

        /* If this is fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyways.
         */
        tsb_grow(mm, get_mm_rss(mm));

        if (unlikely(!mm->context.tsb))
                return -ENOMEM;

        return 0;
}

void destroy_context(struct mm_struct *mm)
{
        unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
        unsigned long flags;

        free_pages((unsigned long) mm->context.tsb, get_order(size));

        /* We can remove these later, but for now it's useful
         * to catch any bogus post-destroy_context() references
         * to the TSB.
         */
        mm->context.tsb = NULL;
        mm->context.tsb_reg_val = 0UL;

        spin_lock_irqsave(&ctx_alloc_lock, flags);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
                mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
        }

        spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}