[POWERPC] Fix spu SLB invalidations
[powerpc.git] / arch / powerpc / mm / hash_utils_64.c
index 9cefe6a..3c7fe2c 100644 (file)
@@ -21,7 +21,6 @@
 #undef DEBUG
 #undef DEBUG_LOW
 
-#include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -167,34 +166,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                hash = hpt_hash(va, shift);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
-               /* The crap below can be cleaned once ppd_md.probe() can
-                * set up the hash callbacks, thus we can just used the
-                * normal insert callback here.
-                */
-#ifdef CONFIG_PPC_ISERIES
-               if (machine_is(iseries))
-                       ret = iSeries_hpte_insert(hpteg, va,
-                                                 paddr,
-                                                 tmp_mode,
-                                                 HPTE_V_BOLTED,
-                                                 psize);
-               else
-#endif
-#ifdef CONFIG_PPC_PSERIES
-               if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
-                       ret = pSeries_lpar_hpte_insert(hpteg, va,
-                                                      paddr,
-                                                      tmp_mode,
-                                                      HPTE_V_BOLTED,
-                                                      psize);
-               else
-#endif
-#ifdef CONFIG_PPC_MULTIPLATFORM
-                       ret = native_hpte_insert(hpteg, va,
-                                                paddr,
-                                                tmp_mode, HPTE_V_BOLTED,
-                                                psize);
-#endif
+               DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
+
+               BUG_ON(!ppc_md.hpte_insert);
+               ret = ppc_md.hpte_insert(hpteg, va, paddr,
+                               tmp_mode, HPTE_V_BOLTED, psize);
+
                if (ret < 0)
                        break;
        }
@@ -300,7 +277,7 @@ static void __init htab_init_page_sizes(void)
         * Not in the device-tree, let's fallback on known size
         * list for 16M capable GP & GR
         */
-       if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+       if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
  found:
@@ -708,6 +685,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                                       "non-cacheable mapping\n");
                                psize = mmu_vmalloc_psize = MMU_PAGE_4K;
                        }
+#ifdef CONFIG_SPU_BASE
+                       spu_flush_all_slbs(mm);
+#endif
                }
                if (user_region) {
                        if (psize != get_paca()->context.user_psize) {
@@ -782,6 +762,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
                                mmu_psize_defs[MMU_PAGE_4K].sllp;
                        get_paca()->context = mm->context;
                        slb_flush_and_rebolt();
+#ifdef CONFIG_SPU_BASE
+                       spu_flush_all_slbs(mm);
+#endif
                }
        }
        if (mm->context.user_psize == MMU_PAGE_64K)