diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 883180be3d..d4024ac0d6 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -6,7 +6,6 @@
  * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
-#include <linux/config.h>
 #include <asm/head.h>
 #include <asm/asi.h>
 #include <asm/page.h>
@@ -121,6 +120,12 @@ kvmap_dtlb_obp:
 	 nop
 
 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
@@ -133,12 +138,51 @@ kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
-	sethi		%hi(kern_linear_pte_xor), %g2
-	ldx		[%g2 + %lo(kern_linear_pte_xor)], %g2
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/* Index through the base page size TSB even for linear
+	 * mappings when using page allocation debugging.
+	 */
+	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+#else
+	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+#endif
+	/* TSB entry address left in %g1, lookup linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
+	sethi		%hi(kpte_linear_bitmap), %g2
+	or		%g2, %lo(kpte_linear_bitmap), %g2
+
+	/* Clear the PAGE_OFFSET top virtual bits, then shift
+	 * down to get a 256MB physical address index.
+	 */
+	sllx		%g4, 21, %g5
+	mov		1, %g7
+	srlx		%g5, 21 + 28, %g5
+
+	/* Don't try this at home kids... this depends upon srlx
+	 * only taking the low 6 bits of the shift count in %g5.
+	 */
+	sllx		%g7, %g5, %g7
+
+	/* Divide by 64 to get the offset into the bitmask.  */
+	srlx		%g5, 6, %g5
+	sllx		%g5, 3, %g5
+
+	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)]  */
+	ldx		[%g2 + %g5], %g2
+	andcc		%g2, %g7, %g0
+	sethi		%hi(kern_linear_pte_xor), %g5
+	or		%g5, %lo(kern_linear_pte_xor), %g5
+	bne,a,pt	%xcc, 1f
+	 add		%g5, 8, %g5
+
+1:	ldx		[%g5], %g2
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	xor		%g2, %g4, %g5
 
 kvmap_dtlb_vmalloc_addr:
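
Note: a minimal C sketch of the lookup that the new kvmap_dtlb_tsb4m_miss path performs, for readers following the shift arithmetic. This is an illustration, not kernel code: the helper name linear_pte() and the plain-array declarations are assumptions; only kpte_linear_bitmap and kern_linear_pte_xor are real kernel symbols, and the shift constants are taken directly from the assembly above.

	#include <stdint.h>

	/* One bit per 256MB region of the linear mapping (assumed layout). */
	extern uint64_t kpte_linear_bitmap[];
	/* PTE xor values: [0] when the region bit is clear, [1] when set. */
	extern uint64_t kern_linear_pte_xor[2];

	/* Hypothetical helper mirroring kvmap_dtlb_tsb4m_miss. */
	static uint64_t linear_pte(uint64_t vaddr)
	{
		/* sllx %g4, 21 then srlx %g5, 21 + 28: clear the
		 * PAGE_OFFSET top virtual bits, leaving a 256MB index.
		 */
		uint64_t idx = (vaddr << 21) >> (21 + 28);

		/* sllx %g7, %g5, %g7: the hardware uses only the low 6
		 * bits of the shift count, so this selects the bit
		 * within one 64-bit bitmap word.
		 */
		uint64_t bit = 1ULL << (idx & 63);

		/* srlx %g5, 6 then sllx %g5, 3: divide by 64 to find
		 * the word, scale to a byte offset for the ldx.
		 */
		uint64_t word = kpte_linear_bitmap[idx >> 6];

		/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
		uint64_t x = (word & bit) ? kern_linear_pte_xor[1]
		                          : kern_linear_pte_xor[0];

		/* The "xor %g2, %g4, %g5" at kvmap_linear_patch. */
		return x ^ vaddr;
	}

The ?: above is what the annulled-branch idiom "bne,a,pt %xcc, 1f; add %g5, 8, %g5" implements: the delay-slot add executes only when the branch is taken (bitmap bit set), advancing the pointer from kern_linear_pte_xor[0] to kern_linear_pte_xor[1] before the final ldx.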