1 /* $Id: ultra.S,v 1.70.2.1 2002/03/03 10:31:56 davem Exp $
2 * ultra.S: Don't expand these all over the place...
4 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
7 #include <linux/config.h>
9 #include <asm/pgtable.h>
11 #include <asm/spitfire.h>
12 #include <asm/mmu_context.h>
16 /* Basically, most of the Spitfire vs. Cheetah madness
17 * has to do with the fact that Cheetah does not support
18 * IMMU flushes out of the secondary context. Someone needs
19 * to throw a south lake birthday party for the folks
20 * in Microelectronics who refused to fix this shit.
23 /* This file is meant to be read efficiently by the CPU, not humans.
 * Try hard not to break this for anyone...  (translated from Polish)
 */
	.globl	__flush_tlb_page, __flush_tlb_mm, __flush_tlb_range
__flush_tlb_page: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=page&PAGE_MASK, %o2=SECONDARY_CONTEXT */
	/* Flush one page mapping.  Fast path: if the secondary context
	 * register already holds the target ctx, demap directly; otherwise
	 * branch to the slow path which swaps contexts around the demap.
	 * NOTE(review): the compare that sets %icc and the %g3 demap-address
	 * setup are elided in this excerpt.
	 */
	ldxa		[%o2] ASI_DMMU, %g2		! %g2 = current secondary ctx
	bne,pn		%icc, __spitfire_flush_tlb_page_slow
	stxa		%g0, [%g3] ASI_DMMU_DEMAP	! demap page from D-TLB
	stxa		%g0, [%g3] ASI_IMMU_DEMAP	! demap page from I-TLB
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	/* Flush all TLB entries for one context.  Fast path requires the
	 * secondary context register to already hold %o0; otherwise take
	 * the slow path.  NOTE(review): the compare and the %g3
	 * demap-address setup are elided in this excerpt.
	 */
	ldxa		[%o1] ASI_DMMU, %g2		! %g2 = current secondary ctx
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	stxa		%g0, [%g3] ASI_DMMU_DEMAP	! context demap, D-TLB
	stxa		%g0, [%g3] ASI_IMMU_DEMAP	! context demap, I-TLB
__flush_tlb_range: /* %o0=(ctx&TAG_CONTEXT_BITS), %o1=start&PAGE_MASK, %o2=SECONDARY_CONTEXT,
		    * %o3=end&PAGE_MASK, %o4=PAGE_SIZE, %o5=(end - start)
		    */
#define TLB_MAGIC	207	/* Students, do you know how I calculated this?  -DaveM */
	/* Dispatch on range size: tiny ranges go to the single-page flush,
	 * huge ranges (>= TLB_MAGIC pages) to the constant-time TLB scan,
	 * everything else falls through to page-by-page demap below.
	 * NOTE(review): the compares feeding these branches are elided.
	 */
	bleu,pt		%xcc, __flush_tlb_page
	 srlx		%o5, PAGE_SHIFT, %g5		! delay slot: %g5 = page count
	bgeu,pn		%icc, __spitfire_flush_tlb_range_constant_time
	ldxa		[%o2] ASI_DMMU, %g2		! %g2 = current secondary ctx
__spitfire_flush_tlb_range_page_by_page:
	bne,pn		%icc, __spitfire_flush_tlb_range_pbp_slow
1:	stxa		%g0, [%g5 + %o5] ASI_DMMU_DEMAP	! demap one page, D-TLB
	stxa		%g0, [%g5 + %o5] ASI_IMMU_DEMAP	! demap one page, I-TLB
__spitfire_flush_tlb_range_constant_time: /* %o0=ctx, %o1=start, %o3=end */
	/* For large ranges: walk every unlocked TLB entry once instead of
	 * demapping page by page.  Interrupts are masked for the scan.
	 * NOTE(review): many instructions (rdpr of %pstate into %g1,
	 * compares, branches, loop closers) are elided in this excerpt;
	 * comments describe only the visible lines.
	 */
	wrpr		%g1, PSTATE_IE, %pstate		! mask interrupts (assumes %g1 = saved %pstate)
	mov		TLB_TAG_ACCESS, %g3
	mov		((SPITFIRE_HIGHEST_LOCKED_TLBENT-1) << 3), %g2	! start below locked entries (8-byte stride)

	/* Spitfire Errata #32 workaround. */
	stxa		%g0, [%o4] ASI_DMMU

	/* I-TLB scan: read a tag, split it into context and VA parts. */
1:	ldxa		[%g2] ASI_ITLB_TAG_READ, %o4
	and		%o4, TAG_CONTEXT_BITS, %o5	! %o5 = tag's context
	andn		%o4, TAG_CONTEXT_BITS, %o4	! %o4 = tag's virtual address

	/* D-TLB scan, same tag decomposition. */
2:	ldxa		[%g2] ASI_DTLB_TAG_READ, %o4
	and		%o4, TAG_CONTEXT_BITS, %o5
	andn		%o4, TAG_CONTEXT_BITS, %o4

	sub		%g2, (1 << 3), %g2		! previous TLB entry
	wrpr		%g1, 0x0, %pstate		! restore saved %pstate
	/* Matched I-TLB entry: clear its tag, then its data. */
4:	stxa		%g0, [%g3] ASI_IMMU
	stxa		%g0, [%g2] ASI_ITLB_DATA_ACCESS

	/* Spitfire Errata #32 workaround. */
	stxa		%g0, [%o4] ASI_DMMU

	/* Matched D-TLB entry: clear its tag, then its data. */
5:	stxa		%g0, [%g3] ASI_DMMU
	stxa		%g0, [%g2] ASI_DTLB_DATA_ACCESS

	/* Spitfire Errata #32 workaround. */
	stxa		%g0, [%o4] ASI_DMMU
__spitfire_flush_tlb_mm_slow:
	/* Slow path: secondary context does not hold the target ctx yet.
	 * Install %o0 into SECONDARY_CONTEXT (%o1), demap, then restore
	 * the previous context from %g2.  Interrupts masked throughout
	 * (rdpr of %pstate into %g1 elided in this excerpt).
	 */
	wrpr		%g1, PSTATE_IE, %pstate		! mask interrupts
	stxa		%o0, [%o1] ASI_DMMU		! secondary ctx = target
	stxa		%g0, [%g3] ASI_DMMU_DEMAP	! demap via %g3 (setup elided)
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o1] ASI_DMMU		! restore previous secondary ctx
__spitfire_flush_tlb_page_slow:
	/* Slow path for the single-page flush: same context-swap dance as
	 * the mm slow path, but SECONDARY_CONTEXT is in %o2 here.
	 * (rdpr of %pstate into %g1 elided in this excerpt.)
	 */
	wrpr		%g1, PSTATE_IE, %pstate		! mask interrupts
	stxa		%o0, [%o2] ASI_DMMU		! secondary ctx = target
	stxa		%g0, [%g3] ASI_DMMU_DEMAP	! demap via %g3 (setup elided)
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU		! restore previous secondary ctx
__spitfire_flush_tlb_range_pbp_slow:
	/* Slow path for the page-by-page range flush: install the target
	 * ctx in SECONDARY_CONTEXT (%o2), demap each page of the range,
	 * then restore the previous context (%g2) and %pstate (%g1).
	 * NOTE(review): the loop-control instructions are elided here.
	 */
	wrpr		%g1, PSTATE_IE, %pstate		! mask interrupts
	stxa		%o0, [%o2] ASI_DMMU		! secondary ctx = target
2:	stxa		%g0, [%g5 + %o5] ASI_DMMU_DEMAP	! demap one page, D-TLB
	stxa		%g0, [%g5 + %o5] ASI_IMMU_DEMAP	! demap one page, I-TLB
	stxa		%g2, [%o2] ASI_DMMU		! restore previous secondary ctx
	wrpr		%g1, 0x0, %pstate		! restore saved %pstate
184 * The following code flushes one page_size worth.
186 #if (PAGE_SHIFT == 13)
187 #define ITAG_MASK 0xfe
188 #elif (PAGE_SHIFT == 16)
189 #define ITAG_MASK 0x7fe
191 #error unsupported PAGE_SIZE
	.globl	__flush_icache_page
__flush_icache_page: /* %o0 = phys_page */
	/* Invalidate I-cache lines whose tag matches physical page %o0.
	 * NOTE(review): most of the setup and all loop control are elided
	 * in this excerpt; comments describe only the visible lines.
	 */
	sethi		%hi(1 << 13), %o2	! IC_set bit
	ldda		[%o1] ASI_IC_TAG, %o4	! read a tag pair
	or		%o0, %g1, %o0		! VALID+phys-addr comparator
	andn		%g2, ITAG_MASK, %g2	! IC_tag mask
1:	addx		%g0, %g0, %g0		! effectively a no-op (writes %g0)
	ldda		[%o1 + %o2] ASI_IC_TAG, %g4	! tag from the other set
	ldda		[%o1] ASI_IC_TAG, %o4
	sethi		%uhi(PAGE_OFFSET), %g4
	/* Branch targets: clear the tag of a matched line. */
iflush1:sub	%o1, 0x20, %g3
	stxa		%g0, [%g3] ASI_IC_TAG
iflush2:sub	%o1, 0x20, %g3
	stxa		%g0, [%o1 + %o2] ASI_IC_TAG
/* DTAG_MASK: low D-cache tag bits stripped (andn) before comparing a tag
 * against a page address in __flush_dcache_page; width depends on
 * PAGE_SIZE.  NOTE(review): the closing #else/#endif of this chain is
 * elided in this excerpt.
 */
#if (PAGE_SHIFT == 13)
#define DTAG_MASK 0x3
#elif (PAGE_SHIFT == 16)
#define DTAG_MASK 0x1f
#elif (PAGE_SHIFT == 19)
#define DTAG_MASK 0xff
#elif (PAGE_SHIFT == 22)
#define DTAG_MASK 0x3ff
	.globl	__flush_dcache_page
__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
	/* Scan D-cache tags four lines (4 x 32 bytes) per iteration,
	 * invalidating any line whose masked tag equals %o0.  The `!`
	 * annotations are the original author's pipeline-unit scheduling
	 * notes (LSU/IEU0/IEU1/CTI).  NOTE(review): some setup and
	 * loop-closing instructions are elided in this excerpt.
	 */
	sethi		%hi(1 << 14), %o2	! loop terminator (16K of tag space)
1:	ldxa		[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
	add		%o4, (1 << 5), %o4	! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
	add		%o4, (1 << 5), %o4	! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
	add		%o4, (1 << 5), %o4	! IEU0
	andn		%o3, DTAG_MASK, %o3	! IEU1
	ldxa		[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
	add		%o4, (1 << 5), %o4	! IEU0
	andn		%g1, DTAG_MASK, %g1	! IEU1
	cmp		%o0, %o3		! IEU1	Group
	be,a,pn		%xcc, dflush1		! CTI
	 sub		%o4, (4 << 5), %o4	! IEU0	(Group)
	cmp		%o0, %g1		! IEU1	Group
	andn		%g2, DTAG_MASK, %g2	! IEU0
	be,a,pn		%xcc, dflush2		! CTI
	 sub		%o4, (3 << 5), %o4	! IEU0	(Group)
	cmp		%o0, %g2		! IEU1	Group
	andn		%g3, DTAG_MASK, %g3	! IEU0
	be,a,pn		%xcc, dflush3		! CTI
	 sub		%o4, (2 << 5), %o4	! IEU0	(Group)
	cmp		%o0, %g3		! IEU1	Group
	be,a,pn		%xcc, dflush4		! CTI
	 sub		%o4, (1 << 5), %o4	! IEU0
2:	cmp		%o4, %o2		! IEU1	Group
	bne,pt		%xcc, 1b		! CTI

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page

	/* Matched-line handlers: invalidate the line, step past it.
	 * NOTE(review): the branch back to the scan loop is elided here.
	 */
dflush1:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush2:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush3:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush4:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
	/* TLB prefill helpers.  NOTE(review): the __prefill_dtlb and
	 * __prefill_itlb labels (and the rdpr that loads %g7 with the
	 * saved %pstate) appear elided in this excerpt.  Each sequence
	 * writes the tag, then installs the PTE, with interrupts masked.
	 */
	wrpr		%g7, PSTATE_IE, %pstate		! mask interrupts
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o0, [%g1] ASI_DMMU		! set D-TLB tag (vaddr|ctx)
	stxa		%o1, [%g0] ASI_DTLB_DATA_IN	! install PTE into D-TLB
	wrpr		%g7, PSTATE_IE, %pstate		! mask interrupts
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o0, [%g1] ASI_IMMU		! set I-TLB tag
	stxa		%o1, [%g0] ASI_ITLB_DATA_IN	! install PTE into I-TLB
	.globl	__update_mmu_cache
__update_mmu_cache: /* %o0=vma, %o1=address, %o2=pte */
	/* After a fault, pre-load the faulting translation into the right
	 * TLB: D-TLB if FAULT_CODE_DTLB is set in the thread's fault code,
	 * I-TLB otherwise.  %g6 = current task (per sparc64 convention —
	 * TODO confirm against callers).
	 */
	ldub		[%g6 + AOFF_task_thread + AOFF_thread_fault_code], %o3
	srlx		%o1, PAGE_SHIFT, %o1	! page-align the address...
	ldx		[%o0 + 0x0], %o4 /* XXX vma->vm_mm */
	sllx		%o1, PAGE_SHIFT, %o0	! ...by shifting down and back up
	ldx		[%o4 + AOFF_mm_context], %o5
	andcc		%o3, FAULT_CODE_DTLB, %g0	! D-TLB fault?
	and		%o5, TAG_CONTEXT_BITS, %o5	! %o5 = mm's hardware ctx
	bne,pt		%xcc, __prefill_dtlb
	ba,a,pt		%xcc, __prefill_itlb
	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_page: /* 14 insns */
	/* Cheetah cannot demap out of the secondary context (see the
	 * comment at the top of this file), so temporarily switch
	 * PRIMARY_CONTEXT to the target ctx around the demap.
	 * NOTE(review): the rdpr that loads %g5 with %pstate is elided;
	 * the "14 insns" count presumably matters to cheetah_patch_one.
	 */
	andn		%g5, PSTATE_IE, %g2	! %g2 = pstate with IE cleared
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	mov		PRIMARY_CONTEXT, %o2
	ldxa		[%o2] ASI_DMMU, %g2	! save current primary ctx
	stxa		%o0, [%o2] ASI_DMMU	! primary ctx = target
	stxa		%g0, [%o1] ASI_DMMU_DEMAP	! demap page, D-TLB
	stxa		%g0, [%o1] ASI_IMMU_DEMAP	! demap page, I-TLB
	stxa		%g2, [%o2] ASI_DMMU	! restore primary ctx
	wrpr		%g5, 0x0, %pstate	! restore saved %pstate
__cheetah_flush_tlb_mm: /* 15 insns */
	/* Context flush, Cheetah style: swap PRIMARY_CONTEXT to the target
	 * ctx, context-demap via %g3 (setup elided), restore.  Interrupts
	 * disabled throughout (rdpr of %pstate into %g5 elided).
	 */
	andn		%g5, PSTATE_IE, %g2	! %g2 = pstate with IE cleared
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	mov		PRIMARY_CONTEXT, %o2
	ldxa		[%o2] ASI_DMMU, %g2	! save current primary ctx
	stxa		%o0, [%o2] ASI_DMMU	! primary ctx = target
	stxa		%g0, [%g3] ASI_DMMU_DEMAP	! context demap, D-TLB
	stxa		%g0, [%g3] ASI_IMMU_DEMAP	! context demap, I-TLB
	stxa		%g2, [%o2] ASI_DMMU	! restore primary ctx
	wrpr		%g5, 0x0, %pstate	! restore saved %pstate
__cheetah_flush_tlb_range: /* 20 insns */
	/* Range flush, Cheetah style: swap PRIMARY_CONTEXT to the target
	 * ctx and demap page by page at [%o1 + %o5].  Loop control and
	 * the rdpr of %pstate into %g5 are elided in this excerpt.
	 */
	andn		%g5, PSTATE_IE, %g2	! %g2 = pstate with IE cleared
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	mov		PRIMARY_CONTEXT, %o2
	ldxa		[%o2] ASI_DMMU, %g2	! save current primary ctx
	stxa		%o0, [%o2] ASI_DMMU	! primary ctx = target
1:	stxa		%g0, [%o1 + %o5] ASI_DMMU_DEMAP	! demap one page, D-TLB
	stxa		%g0, [%o1 + %o5] ASI_IMMU_DEMAP	! demap one page, I-TLB
	stxa		%g2, [%o2] ASI_DMMU	! restore primary ctx
	wrpr		%g5, 0x0, %pstate	! restore saved %pstate
flush_dcpage_cheetah: /* 9 insns */
	/* Invalidate one page's worth of D-cache lines for the page at
	 * %o0, walking downward 32 bytes (one line) at a time.
	 * NOTE(review): the loop-closing branch is elided here.
	 */
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4	! step to the next lower line
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	retl	 /* I-cache flush never needed on Cheetah, see callers. */
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Boot-time patching on Cheetah: each sethi/or pair builds a full
	 * symbol address, then cheetah_patch_one (defined elsewhere)
	 * presumably copies the Cheetah routine (%o1) over the generic
	 * one (%o0).  NOTE(review): the delay-slot instruction after each
	 * call (likely the insn-count argument) is elided in this excerpt.
	 */
	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0		! %o0 = patch destination
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1	! %o1 = patch source
	call		cheetah_patch_one

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		cheetah_patch_one

	sethi		%hi(__flush_tlb_range), %o0
	or		%o0, %lo(__flush_tlb_range), %o0
	sethi		%hi(__cheetah_flush_tlb_range), %o1
	or		%o1, %lo(__cheetah_flush_tlb_range), %o1
	call		cheetah_patch_one

	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(flush_dcpage_cheetah), %o1
	or		%o1, %lo(flush_dcpage_cheetah), %o1
	call		cheetah_patch_one
461 /* These are all called by the slaves of a cross call, at
462 * trap level 1, with interrupts fully disabled.
465 * %g5 mm->context (all tlb flushes)
466 * %g1 address arg 1 (tlb page and range flushes)
467 * %g7 address arg 2 (tlb range flush only)
469 * %g6 ivector table, don't touch
474 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
	.globl	xcall_flush_tlb_page, xcall_flush_tlb_mm, xcall_flush_tlb_range
xcall_flush_tlb_page:
	/* Cross-call slave: demap one page (%g1) in context %g5, per the
	 * register conventions documented above.  Switches PRIMARY_CONTEXT
	 * to the target ctx around the demap.
	 */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3		! save current primary ctx
	stxa		%g5, [%g2] ASI_DMMU		! primary ctx = target
	stxa		%g0, [%g1] ASI_DMMU_DEMAP	! demap page, D-TLB
	stxa		%g0, [%g1] ASI_IMMU_DEMAP	! demap page, I-TLB
	stxa		%g3, [%g2] ASI_DMMU		! restore primary ctx
	/* NOTE(review): the xcall_flush_tlb_mm label appears elided just
	 * above this sequence.  Cross-call slave: context-demap ctx %g5
	 * via the demap address in %g4 (setup elided).
	 */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3		! save current primary ctx
	stxa		%g5, [%g2] ASI_DMMU		! primary ctx = target
	stxa		%g0, [%g4] ASI_DMMU_DEMAP	! context demap, D-TLB
	stxa		%g0, [%g4] ASI_IMMU_DEMAP	! context demap, I-TLB
	stxa		%g3, [%g2] ASI_DMMU		! restore primary ctx
xcall_flush_tlb_range:
	/* Cross-call slave: flush a range (%g5=ctx, %g1=start, %g7=end per
	 * the conventions above).  Large ranges fall back to a whole-mm
	 * flush.  NOTE(review): several compare/loop instructions are
	 * elided in this excerpt.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2	! %g2 = page-offset mask
	srlx		%g3, PAGE_SHIFT, %g4		! %g4 = pages in range
	bgu,pn		%icc, xcall_flush_tlb_mm	! too big: flush whole mm
	 mov		PRIMARY_CONTEXT, %g4		! delay slot
	ldxa		[%g4] ASI_DMMU, %g7		! save primary ctx (reuses %g7)
	stxa		%g5, [%g4] ASI_DMMU		! primary ctx = target
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP	! demap one page, D-TLB
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP	! demap one page, I-TLB
	stxa		%g7, [%g4] ASI_DMMU		! restore primary ctx
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
	/* NOTE(review): the xcall_sync_tick label and the start of the
	 * etrap-style entry sequence appear elided in this excerpt.
	 */
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate	! toggle IG/AG global-register sets
109:	or		%g7, %lo(109b), %g7	! %g7 = trap return address (sethi elided)
	call		smp_synchronize_tick_client
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	/* NOTE: This is SPECIAL!! We do etrap/rtrap however
	 * we choose to deal with the "BH's run with
	 * %pil==15" problem (described in asm/pil.h)
	 * by just invoking rtrap directly past where
	 * BH's are checked for.
	 *
	 * We do it like this because we do not want %pil==15
	 * lockups to prevent regs being reported.
	 */
	.globl	xcall_report_regs
	/* NOTE(review): the xcall_report_regs label and parts of the
	 * etrap/call sequence appear elided in this excerpt.
	 */
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate	! toggle IG/AG global-register sets
109:	or		%g7, %lo(109b), %g7	! %g7 = trap return address (sethi elided)
	add		%sp, PTREGS_OFF, %o0	! argument: pt_regs pointer
	/* Has to be a non-v9 branch due to the large distance. */
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Invalidate one page's worth of D-cache lines, one 32-byte line
	 * per iteration.  NOTE(review): the loop-closing branch is elided.
	 */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3	! step to the next lower line
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
	/* Spitfire D-cache flush by tag compare (when the cache is larger
	 * than a page), followed by a per-page walk.  NOTE(review): the
	 * compare/branch instructions of both loops are elided here.
	 */
#if (L1DCACHE_SIZE > PAGE_SIZE)
	srlx		%g1, (13 - 2), %g1	! Form tag comparator
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2	! read a tag
	stxa		%g0, [%g3] ASI_DCACHE_TAG	! clear it on match
	sub		%g3, (1 << 5), %g3	! previous line
#endif /* L1DCACHE_SIZE > PAGE_SIZE */
	sethi		%hi(PAGE_SIZE), %g3
	subcc		%g3, (1 << 5), %g3	! walk the page a line at a time
	add		%g7, (1 << 5), %g7	! advance kernel vaddr
	.globl	xcall_promstop
	/* NOTE(review): the xcall_promstop label and the etrap-style
	 * entry/call sequence appear elided in this excerpt.
	 */
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate	! toggle IG/AG global-register sets
109:	or		%g7, %lo(109b), %g7	! %g7 = trap return address (sethi elided)
	/* We should not return, just spin if we do... */
	/* These two are not performance critical... */
	.globl	xcall_flush_tlb_all_spitfire
xcall_flush_tlb_all_spitfire:
	/* Invalidate every TLB entry on both MMUs, skipping locked
	 * (_PAGE_L) entries.  Each DATA_ACCESS store is bracketed by the
	 * Errata #32 dummy store.  NOTE(review): the branches that test
	 * %g5 (locked bit) and close the loops are elided in this excerpt.
	 */
	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]
1:	ldxa		[%g3] ASI_DTLB_DATA_ACCESS, %g4	! read D-TLB data
	and		%g4, _PAGE_L, %g5	! %g5 != 0 => locked entry
	mov		TLB_TAG_ACCESS, %g7
	stxa		%g0, [%g7] ASI_DMMU	! clear the tag
	stxa		%g0, [%g3] ASI_DTLB_DATA_ACCESS	! clear the data

	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]
2:	ldxa		[%g3] ASI_ITLB_DATA_ACCESS, %g4	! same dance for the I-TLB
	and		%g4, _PAGE_L, %g5
	mov		TLB_TAG_ACCESS, %g7
	stxa		%g0, [%g7] ASI_IMMU
	stxa		%g0, [%g3] ASI_ITLB_DATA_ACCESS

	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]
	cmp		%g2, SPITFIRE_HIGHEST_LOCKED_TLBENT	! reached the locked entries?
	.globl	xcall_flush_tlb_all_cheetah
xcall_flush_tlb_all_cheetah:
	/* Demap everything on both MMUs via the demap address in %g2
	 * (setup elided in this excerpt).
	 */
	stxa		%g0, [%g2] ASI_DMMU_DEMAP
	stxa		%g0, [%g2] ASI_IMMU_DEMAP
	.globl	xcall_flush_cache_all_spitfire
xcall_flush_cache_all_spitfire:
	/* Flush the whole I-cache by clearing every tag.  %g2 = 16383,
	 * presumably the tag-space limit; loop control is elided here.
	 */
	sethi		%hi(16383), %g2
	or		%g2, %lo(16383), %g2	! %g2 = 16383
1:	stxa		%g0, [%g3] ASI_IC_TAG	! clear one I-cache tag
	/* These just get rescheduled to PIL vectors. */
	.globl	xcall_call_function
	/* NOTE(review): the xcall_call_function label appears elided. */
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint	! raise the call-function softint

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint	! raise the receive-signal softint

	/* NOTE(review): the xcall_capture label appears elided. */
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint	! raise the capture softint
721 #endif /* CONFIG_SMP */