diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22..981b040 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
  */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
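The header changes above track the two functional changes further down: the P3 window is now set up through the generic ioremap_page_range() helper (declared via <linux/io.h>), and the per-alias semaphores become mutexes (<linux/mutex.h>). For reference, a sketch of the prototype that the later call relies on; this is assumed from the generic header of this era rather than taken from this tree:

	/* assumed prototype, declared via <linux/io.h>:
	 * establish kernel mappings for [addr, end) onto phys_addr */
	int ioremap_page_range(unsigned long addr, unsigned long end,
			       unsigned long phys_addr, pgprot_t prot);
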
@@ -58,21 +54,21 @@ static void __init emit_cache_params(void)
                ctrl_inl(CCN_CVR),
                ctrl_inl(CCN_PRR));
        printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-               cpu_data->icache.ways,
-               cpu_data->icache.sets,
-               cpu_data->icache.way_incr);
+               current_cpu_data.icache.ways,
+               current_cpu_data.icache.sets,
+               current_cpu_data.icache.way_incr);
        printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-               cpu_data->icache.entry_mask,
-               cpu_data->icache.alias_mask,
-               cpu_data->icache.n_aliases);
+               current_cpu_data.icache.entry_mask,
+               current_cpu_data.icache.alias_mask,
+               current_cpu_data.icache.n_aliases);
        printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-               cpu_data->dcache.ways,
-               cpu_data->dcache.sets,
-               cpu_data->dcache.way_incr);
+               current_cpu_data.dcache.ways,
+               current_cpu_data.dcache.sets,
+               current_cpu_data.dcache.way_incr);
        printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-               cpu_data->dcache.entry_mask,
-               cpu_data->dcache.alias_mask,
-               cpu_data->dcache.n_aliases);
+               current_cpu_data.dcache.entry_mask,
+               current_cpu_data.dcache.alias_mask,
+               current_cpu_data.dcache.n_aliases);
 
        if (!__flush_dcache_segment_fn)
                panic("unknown number of cache ways\n");
@@ -83,18 +79,18 @@ static void __init emit_cache_params(void)
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES 16
+#define MAX_P3_MUTEXES 16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
        int i;
 
-       compute_alias(&cpu_data->icache);
-       compute_alias(&cpu_data->dcache);
+       compute_alias(&current_cpu_data.icache);
+       compute_alias(&current_cpu_data.dcache);
 
-       switch (cpu_data->dcache.ways) {
+       switch (current_cpu_data.dcache.ways) {
        case 1:
                __flush_dcache_segment_fn = __flush_dcache_segment_1way;
                break;
@@ -111,11 +107,11 @@ void __init p3_cache_init(void)
 
        emit_cache_params();
 
-       if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
+       if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
                panic("%s failed.", __FUNCTION__);
 
-       for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-               sema_init(&p3map_sem[i], 1);
+       for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
+               mutex_init(&p3map_mutex[i]);
 }
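
p3_cache_init() now initializes one struct mutex per D-cache alias instead of one semaphore. The call sites that actually take these locks live outside this file (the SH-4 user page copy/clear paths), so the caller-side conversion below is only a hedged sketch; the index expression and the CACHE_ALIAS mask are assumed to follow the existing pattern there:

	/* sketch only: down()/up() on p3map_sem[] would become */
	mutex_lock(&p3map_mutex[(address & CACHE_ALIAS) >> PAGE_SHIFT]);
	/* ... touch the page through its P3 mapping ... */
	mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS) >> PAGE_SHIFT]);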
 
 /*
@@ -204,13 +200,14 @@ void flush_cache_sigtramp(unsigned long addr)
                     : /* no output */
                     : "m" (__m(v)));
 
-       index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);
+       index = CACHE_IC_ADDRESS_ARRAY |
+                       (v & current_cpu_data.icache.entry_mask);
 
        local_irq_save(flags);
        jump_to_P2();
 
-       for (i = 0; i < cpu_data->icache.ways;
-            i++, index += cpu_data->icache.way_incr)
+       for (i = 0; i < current_cpu_data.icache.ways;
+            i++, index += current_cpu_data.icache.way_incr)
                ctrl_outl(0, index);    /* Clear out Valid-bit */
 
        back_to_P1();
@@ -227,9 +224,9 @@ static inline void flush_cache_4096(unsigned long start,
         * All types of SH-4 require PC to be in P2 to operate on the I-cache.
         * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
         */
-       if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
+       if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
-               exec_offset = 0x20000000;
+               exec_offset = 0x20000000;
 
        local_irq_save(flags);
        __flush_cache_4096(start | SH_CACHE_ASSOC,
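
The exec_offset of 0x20000000 is the distance between the cached P1 segment and its uncached P2 mirror on SH-4, so adding it moves the flushing code's program counter out of the cacheable window. A minimal helper illustrating the translation; the name is hypothetical and only the constant comes from the code above:

	/* hypothetical helper: return the uncached P2 alias of a P1 address
	 * (P1 starts at 0x80000000, P2 at 0xa0000000 on SH-4) */
	static inline unsigned long p1_to_p2(unsigned long p1addr)
	{
		return p1addr + 0x20000000;
	}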
@@ -249,8 +246,8 @@ void flush_dcache_page(struct page *page)
                int i, n;
 
                /* Loop all the D-cache */
-               n = cpu_data->dcache.n_aliases;
-               for (i = 0; i < n; i++, addr += PAGE_SIZE)
+               n = current_cpu_data.dcache.n_aliases;
+               for (i = 0; i < n; i++, addr += 4096)
                        flush_cache_4096(addr, phys);
        }
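
flush_cache_4096() always operates on one 4 KiB window of the operand-cache address array, one window per possible synonym, which appears to be why the literal 4096 replaces PAGE_SIZE as the loop stride. Spelled out for a part with n_aliases == 4, and assuming addr starts at CACHE_OC_ADDRESS_ARRAY as in the surrounding function, the loop is equivalent to:

	/* illustrative expansion of the loop above for n_aliases == 4 */
	flush_cache_4096(CACHE_OC_ADDRESS_ARRAY + 0x0000, phys);
	flush_cache_4096(CACHE_OC_ADDRESS_ARRAY + 0x1000, phys);
	flush_cache_4096(CACHE_OC_ADDRESS_ARRAY + 0x2000, phys);
	flush_cache_4096(CACHE_OC_ADDRESS_ARRAY + 0x3000, phys);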
 
@@ -281,7 +278,7 @@ static inline void flush_icache_all(void)
 
 void flush_dcache_all(void)
 {
-       (*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
+       (*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
        wmb();
 }
 
@@ -295,8 +292,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
                             unsigned long end)
 {
        unsigned long d = 0, p = start & PAGE_MASK;
-       unsigned long alias_mask = cpu_data->dcache.alias_mask;
-       unsigned long n_aliases = cpu_data->dcache.n_aliases;
+       unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
+       unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
        unsigned long select_bit;
        unsigned long all_aliases_mask;
        unsigned long addr_offset;
@@ -383,7 +380,7 @@ void flush_cache_mm(struct mm_struct *mm)
         * If cache is only 4k-per-way, there are never any 'aliases'.  Since
         * the cache is physically tagged, the data can just be left in there.
         */
-       if (cpu_data->dcache.n_aliases == 0)
+       if (current_cpu_data.dcache.n_aliases == 0)
                return;
 
        /*
@@ -420,7 +417,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
        unsigned long phys = pfn << PAGE_SHIFT;
        unsigned int alias_mask;
 
-       alias_mask = cpu_data->dcache.alias_mask;
+       alias_mask = current_cpu_data.dcache.alias_mask;
 
        /* We only need to flush D-cache when we have alias */
        if ((address^phys) & alias_mask) {
@@ -434,7 +431,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
                        phys);
        }
 
-       alias_mask = cpu_data->icache.alias_mask;
+       alias_mask = current_cpu_data.icache.alias_mask;
        if (vma->vm_flags & VM_EXEC) {
                /*
                 * Evict entries from the portion of the cache from which code
@@ -466,7 +463,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
         * If cache is only 4k-per-way, there are never any 'aliases'.  Since
         * the cache is physically tagged, the data can just be left in there.
         */
-       if (cpu_data->dcache.n_aliases == 0)
+       if (current_cpu_data.dcache.n_aliases == 0)
                return;
 
        /*
@@ -527,7 +524,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
        unsigned long a, ea, p;
        unsigned long temp_pc;
 
-       dcache = &cpu_data->dcache;
+       dcache = &current_cpu_data.dcache;
        /* Write this way for better assembly. */
        way_count = dcache->ways;
        way_incr = dcache->way_incr;
@@ -602,7 +599,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
        base_addr = ((base_addr >> 16) << 16);
        base_addr |= start;
 
-       dcache = &cpu_data->dcache;
+       dcache = &current_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;
@@ -644,7 +641,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
        base_addr = ((base_addr >> 16) << 16);
        base_addr |= start;
 
-       dcache = &cpu_data->dcache;
+       dcache = &current_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;
@@ -703,7 +700,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
        base_addr = ((base_addr >> 16) << 16);
        base_addr |= start;
 
-       dcache = &cpu_data->dcache;
+       dcache = &current_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;
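
Every remaining hunk is the same mechanical substitution: the old pointer-style cpu_data-> accessor becomes current_cpu_data.*, so cache parameters are read for the running CPU rather than through the single boot-CPU pointer. A sketch of the accessor side of this conversion, assumed from the matching <asm/processor.h> change in the same series and not taken from this diff:

	/* assumed definitions backing the conversion (asm/processor.h) */
	extern struct sh_cpuinfo cpu_data[];
	#define boot_cpu_data		cpu_data[0]
	#define current_cpu_data	cpu_data[smp_processor_id()]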