diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 585dfb8..b19f00e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -37,6 +37,8 @@ pgprot_t pgprot_kernel;
 
 EXPORT_SYMBOL(pgprot_kernel);
 
+pmd_t *top_pmd;
+
 struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
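
[Note: top_pmd is only declared here; it is initialized at the very end of
this diff, in memtable_init(), to pmd_off_k(0xffff0000), the pmd covering
the ARM vectors page. See the note at that hunk.]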
@@ -142,6 +144,16 @@ __setup("noalign", noalign_setup);
 
 #define FIRST_KERNEL_PGD_NR    (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+       return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+       return pmd_off(pgd_offset_k(virt), virt);
+}
+
 /*
  * need to get a 16k page for level 1
  */
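
[Note: the two helpers above fold the recurring pgd_offset_k()/pmd_offset()
pairing into one call; the hunks below (free_pgd_slow, alloc_init_section,
alloc_init_page, clear_mapping, setup_mm_for_reboot) are converted to use
them. On ARM's two-level page tables the pmd level is folded into the pgd,
so pmd_offset() is essentially a cast and the helpers cost nothing. A
minimal sketch of the equivalence, assuming the folded two-level layout:

    /* Sketch, not part of the patch: with a folded pmd level,
     * pmd_offset(pgd, va) just reinterprets the pgd slot, so: */
    pmd_t *a = pmd_off_k(virt);                       /* new spelling */
    pmd_t *b = pmd_offset(pgd_offset_k(virt), virt);  /* old spelling */
    /* a == b for any kernel virtual address virt */
]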
@@ -157,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+       /*
+        * Copy over the kernel and IO PGD entries
+        */
        init_pgd = pgd_offset_k(0);
+       memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+                      (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+       clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
        if (!vectors_high()) {
                /*
@@ -186,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                spin_unlock(&mm->page_table_lock);
        }
 
-       /*
-        * Copy over the kernel and IO PGD entries
-        */
-       memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-                      (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-       clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
        return new_pgd;
 
 no_pte:
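
[Note: this hunk and the previous one move the copy of the kernel/IO pgd
entries, together with the clean_dcache_area() that pushes them out of the
D-cache, from the end of get_pgd_slow() to just after the memzero(). The
reasoning: the ARM table walker does not snoop the D-cache on many cores,
so the new pgd must be cleaned before any of its entries can be relied on;
doing it early also means the low-vectors pte wired up below lands in an
already-coherent table, and the no_pte/no_pgd error paths never exit with
a half-initialized pgd. The resulting order, condensed from the diff:

    /* Condensed flow of get_pgd_slow() after this change (sketch): */
    memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
    memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
           (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
    clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
    /* ...then, for low vectors only, allocate and install the
     * first-page pte under mm->page_table_lock. */
]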
@@ -220,7 +231,7 @@ void free_pgd_slow(pgd_t *pgd)
                return;
 
        /* pgd is always present and good */
-       pmd = (pmd_t *)pgd;
+       pmd = pmd_off(pgd, 0);
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
@@ -246,9 +257,8 @@ free:
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 {
-       pmd_t *pmdp;
+       pmd_t *pmdp = pmd_off_k(virt);
 
-       pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (virt & (1 << 20))
                pmdp++;
 
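[Note: context for the pmdp++ above: each ARM pgd/pmd slot spans 2MB but is
implemented as a pair of 1MB hardware section entries, and bit 20 of the
virtual address selects the second entry of the pair. A worked example:

    /* Example: virt = 0xc0300000 (bit 20 set).
     * pmd_off_k(virt) returns the pair covering 0xc0200000..0xc03fffff;
     * pmdp++ then selects the second entry, the 1MB section
     * at 0xc0300000. */
]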
@@ -283,11 +293,9 @@ alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
 static inline void
 alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
 {
-       pmd_t *pmdp;
+       pmd_t *pmdp = pmd_off_k(virt);
        pte_t *ptep;
 
-       pmdp = pmd_offset(pgd_offset_k(virt), virt);
-
        if (pmd_none(*pmdp)) {
                unsigned long pmdval;
                ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
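
[Note on the surrounding context, unchanged by this patch: the pte table is
sized at 2 * PTRS_PER_PTE entries because the ARM port stores each pte
twice:

    /* The factor of two: a hardware table walked by the MMU, plus a
     * shadow "Linux" table carrying status bits (young, dirty, ...)
     * that the hardware pte format lacks, hence
     * 2 * PTRS_PER_PTE * sizeof(pte_t) per page-table allocation. */
]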
@@ -310,7 +318,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
  */
 static inline void clear_mapping(unsigned long virt)
 {
-       pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
+       pmd_clear(pmd_off_k(virt));
 }
 
 struct mem_types {
@@ -391,7 +399,7 @@ static void __init build_mem_type_table(void)
                ecc_mask = 0;
        }
 
-       if (cpu_arch <= CPU_ARCH_ARMv5) {
+       if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
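
[Note: the comparison is widened from CPU_ARCH_ARMv5 to CPU_ARCH_ARMv5TEJ:
bit 4 of a level-1 descriptor must be set on pre-ARMv6 cores, and the old
test missed the v5T/v5TE/v5TEJ variants. The test relies on the numeric
ordering of the constants; a sketch of the assumed ordering (it matches the
ARM port's definitions):

    /* Assumed ordering of the cpu_arch constants: */
    enum {
            CPU_ARCH_ARMv3 = 1, CPU_ARCH_ARMv4, CPU_ARCH_ARMv4T,
            CPU_ARCH_ARMv5, CPU_ARCH_ARMv5T, CPU_ARCH_ARMv5TE,
            CPU_ARCH_ARMv5TEJ, CPU_ARCH_ARMv6,
    };
    /* so cpu_arch <= CPU_ARCH_ARMv5TEJ covers every pre-v6 core */
]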
@@ -576,9 +584,9 @@ void setup_mm_for_reboot(char mode)
                pmdval = (i << PGDIR_SHIFT) |
                         PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
                         PMD_TYPE_SECT;
-               if (cpu_arch <= CPU_ARCH_ARMv5)
+               if (cpu_arch <= CPU_ARCH_ARMv5TEJ)
                        pmdval |= PMD_BIT4;
-               pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+               pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
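
[Note: setup_mm_for_reboot() builds a flat 1:1 section mapping so the
reboot code keeps running across the MMU transition; the same ARMv5TEJ fix
as above is applied here. A worked example of the values, assuming ARM's
PGDIR_SHIFT of 21 (2MB per pgd slot, i.e. two 1MB sections):

    /* i = 3:
     * pmdval = 3 << 21 = 0x00600000, plus AP/section bits;
     * pmd[0]: VA 0x00600000 -> PA 0x00600000 (first 1MB section)
     * pmd[1] = pmdval + (1 << 20):
     *          VA 0x00700000 -> PA 0x00700000 (second section) */
]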
@@ -674,7 +682,9 @@ void __init memtable_init(struct meminfo *mi)
        }
 
        flush_cache_all();
-       flush_tlb_all();
+       local_flush_tlb_all();
+
+       top_pmd = pmd_off_k(0xffff0000);
 }
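
[Note: two things change at the end of memtable_init(). The TLB flush
becomes local_flush_tlb_all(), since this runs on the boot CPU before any
others are up, so a cross-CPU shootdown is neither needed nor safe here.
And top_pmd is finally initialized to the pmd covering 0xffff0000, the 2MB
window holding the ARM vectors page, so later vector-page manipulation can
skip the table walk. A hypothetical use (top_pte() is illustrative only,
not part of this patch):

    /* Hypothetical helper built on the cached top_pmd: */
    static inline pte_t *top_pte(unsigned long va)
    {
            return pte_offset_kernel(top_pmd, va);  /* no pgd walk */
    }
]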
 
 /*
@@ -687,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-       struct page *start_pg, *end_pg;
-       unsigned long pg, pgend;
-
-       /*
-        * Convert start_pfn/end_pfn to a struct page pointer.
-        */
-       start_pg = pfn_to_page(start_pfn);
-       end_pg = pfn_to_page(end_pfn);
-
-       /*
-        * Convert to physical addresses, and
-        * round start upwards and end downwards.
-        */
-       pg = PAGE_ALIGN(__pa(start_pg));
-       pgend = __pa(end_pg) & PAGE_MASK;
-
-       /*
-        * If there are free pages between these,
-        * free the section of the memmap array.
-        */
-       if (pg < pgend)
-               free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-       unsigned long bank_start, prev_bank_end = 0;
-       unsigned int i;
-
-       /*
-        * [FIXME] This relies on each bank being in address order.  This
-        * may not be the case, especially if the user has provided the
-        * information on the command line.
-        */
-       for (i = 0; i < mi->nr_banks; i++) {
-               if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-                       continue;
-
-               bank_start = mi->bank[i].start >> PAGE_SHIFT;
-               if (bank_start < prev_bank_end) {
-                       printk(KERN_ERR "MEM: unordered memory banks.  "
-                               "Not freeing memmap.\n");
-                       break;
-               }
-
-               /*
-                * If we had a previous bank, and there is a space
-                * between the current bank and the previous, free it.
-                */
-               if (prev_bank_end && prev_bank_end != bank_start)
-                       free_memmap(node, prev_bank_end, bank_start);
-
-               prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-                                          mi->bank[i].size) >> PAGE_SHIFT;
-       }
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-       int node;
-
-       for_each_online_node(node)
-               free_unused_memmap_node(node, mi);
-}
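
[Note: the diff ends by dropping free_memmap(), free_unused_memmap_node()
and create_memmap_holes() from mm-armv.c; this mem_map-trimming logic is
presumably relocated rather than lost, as later trees carry it in
arch/arm/mm/init.c. The subtlety worth keeping from the removed code is the
rounding: start is rounded up and end down so a mem_map page shared with a
neighbouring bank's struct pages is never freed:

    /* Recap of the removed rounding logic:
     * pg    = PAGE_ALIGN(__pa(start_pg));   -- round start UP
     * pgend = __pa(end_pg) & PAGE_MASK;     -- round end DOWN
     * Only whole pages strictly inside [pg, pgend) are returned to
     * bootmem, preserving partially-used mem_map pages at each edge. */
]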