powerpc: Use 64k pages without needing cache-inhibited large pages
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 3e18241..8548dcf 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -1,6 +1,4 @@
 /*
- * arch/ppc64/mm/slb_low.S
- *
  * Low-level SLB routines
  *
  * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
@@ -37,9 +35,9 @@ _GLOBAL(slb_allocate_realmode)
 
        srdi    r9,r3,60                /* get region */
        srdi    r10,r3,28               /* get esid */
-       cmpldi  cr7,r9,0xc              /* cmp KERNELBASE for later use */
+       cmpldi  cr7,r9,0xc              /* cmp PAGE_OFFSET for later use */
 
-       /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+       /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
        blt     cr7,0f                  /* user or kernel? */
 
        /* kernel address: proto-VSID = ESID */
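In C terms, the address decode in this hunk is roughly the sketch below. SID_SHIFT = 28 and the 0xc top nibble follow the standard ppc64 layout (PAGE_OFFSET = 0xc000000000000000); the helper name is illustrative, not a kernel symbol.

    #include <stdint.h>

    #define SID_SHIFT 28

    /* Decode an effective address the way slb_allocate_realmode does:
     * the region is the top nibble, the ESID is everything above
     * SID_SHIFT. Returns nonzero for kernel addresses. */
    static int ea_is_kernel(uint64_t ea, uint64_t *esid)
    {
            uint64_t region = ea >> 60;     /* srdi r9,r3,60 */

            *esid = ea >> SID_SHIFT;        /* srdi r10,r3,28 */
            return region >= 0xc;           /* cmpldi cr7,r9,0xc ; blt -> user */
    }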
@@ -61,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
        li      r11,0
        b       slb_finish_load
 
-1:     /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1:     /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
         * will be patched by the kernel at boot
         */
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+       /* check whether this is in vmalloc or ioremap space */
+       clrldi  r11,r10,48
+       cmpldi  r11,(VMALLOC_SIZE >> 28) - 1
+       bgt     5f
+       lhz     r11,PACAVMALLOCSLLP(r13)
+       b       slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
        li      r11,0
        b       slb_finish_load
 
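The new BEGIN_FTR_SECTION code picks between vmalloc and ioremap mappings by segment offset within the region and fetches the vmalloc page-size encoding (SLLP) from the PACA; the ioremap path keeps a boot-patched "li". A C sketch of the decision, with stand-in names for the PACA fields and an illustrative VMALLOC_SIZE:

    #include <stdint.h>

    #define VMALLOC_SIZE 0x10000000000UL    /* illustrative; real value in pgtable.h */

    /* Stand-ins for the PACA fields the asm reads via r13. */
    struct paca_stub {
            uint16_t vmalloc_sllp;  /* PACAVMALLOCSLLP */
            uint16_t io_sllp;       /* value boot-patched into the "li" at
                                       slb_miss_kernel_load_io */
    };

    static uint64_t kernel_virtual_flags(uint64_t esid, const struct paca_stub *paca)
    {
            uint64_t seg = esid & 0xffff;           /* clrldi r11,r10,48 */

            if (seg <= (VMALLOC_SIZE >> 28) - 1)    /* still inside vmalloc space */
                    return paca->vmalloc_sllp;      /* lhz r11,PACAVMALLOCSLLP(r13) */
            return paca->io_sllp;                   /* bgt 5f -> ...load_io */
    }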
@@ -80,12 +87,17 @@ _GLOBAL(slb_miss_kernel_load_virtual)
 BEGIN_FTR_SECTION
        b       1f
 END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+       cmpldi  r10,16
+
+       lhz     r9,PACALOWHTLBAREAS(r13)
+       mr      r11,r10
+       blt     5f
+
        lhz     r9,PACAHIGHHTLBAREAS(r13)
        srdi    r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
-       srd     r9,r9,r11
-       lhz     r11,PACALOWHTLBAREAS(r13)
-       srd     r11,r11,r10
-       or.     r9,r9,r11
+
+5:     srd     r9,r9,r11
+       andi.   r9,r9,1
        beq     1f
 _GLOBAL(slb_miss_user_load_huge)
        li      r11,0
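The hugepage rework replaces the old two-probe shift-and-OR with a single select-then-test: ESIDs below 16 index the low-areas halfword directly, everything else indexes the high-areas halfword after shifting by HTLB_AREA_SHIFT - SID_SHIFT. Roughly, in C (constants assumed from that era's headers):

    #include <stdint.h>

    #define SID_SHIFT       28
    #define HTLB_AREA_SHIFT 40      /* assumed value for the high hugepage areas */

    static int esid_is_huge(uint64_t esid, uint16_t low_areas, uint16_t high_areas)
    {
            uint16_t map;
            uint64_t bit;

            if (esid < 16) {                /* cmpldi r10,16 ; blt 5f */
                    map = low_areas;        /* lhz r9,PACALOWHTLBAREAS(r13) */
                    bit = esid;             /* mr r11,r10 */
            } else {
                    map = high_areas;       /* lhz r9,PACAHIGHHTLBAREAS(r13) */
                    bit = esid >> (HTLB_AREA_SHIFT - SID_SHIFT);
            }
            return (map >> bit) & 1;        /* 5: srd r9,r9,r11 ; andi. r9,r9,1 */
    }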
@@ -93,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
 1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-_GLOBAL(slb_miss_user_load_normal)
-       li      r11,0
-
+       lhz     r11,PACACONTEXTSLLP(r13)
 2:
        ld      r9,PACACONTEXTID(r13)
        rldimi  r10,r9,USER_ESID_BITS,0
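For ordinary user segments the hard-coded zero flags are gone: the SLLP encoding now comes from PACACONTEXTSLLP, so running a context with 64k base pages just means storing a different encoding in the PACA. A sketch of the resulting path, including the proto-VSID construction the rldimi performs (USER_ESID_BITS = 16 is assumed from that era's mmu.h; names are illustrative):

    #include <stdint.h>

    #define USER_ESID_BITS  16      /* assumed value */

    struct user_slb {
            uint64_t proto_vsid;
            uint16_t flags;
    };

    static struct user_slb user_segment(uint64_t esid, uint64_t context,
                                        uint16_t context_sllp)
    {
            struct user_slb s;

            s.flags = context_sllp;         /* lhz r11,PACACONTEXTSLLP(r13) */
            /* rldimi r10,r9,USER_ESID_BITS,0: context lands above the ESID */
            s.proto_vsid = (context << USER_ESID_BITS) |
                           (esid & ((1UL << USER_ESID_BITS) - 1));
            return s;
    }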
@@ -161,7 +171,7 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
        ASM_VSID_SCRAMBLE(r10,r9)
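ASM_VSID_SCRAMBLE turns the proto-VSID into the final VSID with a multiplicative hash reduced mod 2^36 - 1. A C equivalent of what the macro computes; the multiplier is quoted from memory of that era's asm/mmu.h and should be treated as illustrative:

    #include <stdint.h>

    #define VSID_MULTIPLIER 200730139UL     /* 28-bit prime (illustrative) */
    #define VSID_BITS       36
    #define VSID_MODULUS    ((1UL << VSID_BITS) - 1)

    static uint64_t vsid_scramble(uint64_t proto_vsid)
    {
            /* The asm folds the product mod 2^36 - 1 without dividing;
             * plain C can widen to 128 bits and use the remainder
             * operator (unsigned __int128 is a GCC/Clang extension). */
            return (uint64_t)(((unsigned __int128)proto_vsid * VSID_MULTIPLIER)
                              % VSID_MODULUS);
    }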