/*  $Id: init.c,v 1.207 2001/11/30 06:55:39 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blk.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>

mmu_gather_t mmu_gathers[NR_CPUS];

extern void device_scan(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base, kern_base, kern_size;

/* This is even uglier. We have a problem where the kernel may not be
 * located at phys_base. However, initial __alloc_bootmem() calls need to
 * be adjusted to be within the 4-8MB that the kernel is mapped to, else
 * those page mappings won't work. Things are ok after inherit_prom_mappings
 * is called though. Dave says he'll clean this up some other time.
 * -- BenC
 */
static unsigned long bootmap_base;

/* get_new_mmu_context() uses "cache + 1".  */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
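
/* One bit per hardware context: 2^CTX_VERSION_SHIFT contexts packed
 * 64 to an unsigned long, hence the "- 6" in the slot count below.
 */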
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero;

int bigkernel = 0;

int do_check_pgt_cache(int low, int high)
{
        int freed = 0;

        if (pgtable_cache_size > high) {
                do {
#ifdef CONFIG_SMP
                        if (pgd_quicklist)
                                free_pgd_slow(get_pgd_fast()), freed++;
#endif
                        if (pte_quicklist[0])
                                free_pte_slow(pte_alloc_one_fast(NULL, 0)), freed++;
                        if (pte_quicklist[1])
                                free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))), freed++;
                } while (pgtable_cache_size > low);
        }
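
        /* On UP, pgds are cached two to a page; the low bits of
         * page->pprev_hash appear to track which halves are free, so
         * "== 3" means both halves are unused and the whole page can
         * go back to the page allocator (hence pgd_cache_size -= 2).
         */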
#ifndef CONFIG_SMP
        if (pgd_cache_size > high / 4) {
                struct page *page, *page2;
                for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
                        if ((unsigned long)page->pprev_hash == 3) {
                                if (page2)
                                        page2->next_hash = page->next_hash;
                                else
                                        pgd_quicklist = (unsigned long *)page->next_hash;
                                page->next_hash = NULL;
                                page->pprev_hash = NULL;
                                pgd_cache_size -= 2;
                                __free_page(page);
                                freed++;
                                if (page2)
                                        page = page2->next_hash;
                                else
                                        page = (struct page *)pgd_quicklist;
                                if (pgd_cache_size <= low / 4)
                                        break;
                                continue;
                        }
                        page2 = page;
                        page = page->next_hash;
                }
        }
#endif
        return freed;
}

extern void __update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

#if (L1DCACHE_SIZE > PAGE_SIZE)
        __flush_dcache_page(page->virtual,
                            ((tlb_type == spitfire) &&
                             page->mapping != NULL));
#else
        if (page->mapping != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page->virtual));
#endif
}

#define PG_dcache_dirty         PG_arch_1

#define dcache_dirty_cpu(page) \
        (((page)->flags >> 24) & (NR_CPUS - 1UL))

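/* Bits 24 and up of page->flags hold the cpu which last dirtied the
 * page in its D-cache, and PG_dcache_dirty marks a flush as pending.
 * The casx loop below is an atomic read-modify-write; roughly, as a
 * C sketch (illustrative only):
 *
 *      do {
 *              old = page->flags;
 *              new = (old & non_cpu_bits) | mask;
 *      } while (cmpxchg(&page->flags, old, new) != old);
 */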
static __inline__ void set_dcache_dirty(struct page *page)
{
        unsigned long mask = smp_processor_id();
        unsigned long non_cpu_bits = (1UL << 24UL) - 1UL;
        mask = (mask << 24) | (1UL << PG_dcache_dirty);
        __asm__ __volatile__("1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "and       %%g7, %1, %%g5\n\t"
                             "or        %%g5, %0, %%g5\n\t"
                             "casx      [%2], %%g7, %%g5\n\t"
                             "cmp       %%g7, %%g5\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " membar   #StoreLoad | #StoreStore"
                             : /* no outputs */
                             : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
                             : "g5", "g7");
}

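/* Inverse of set_dcache_dirty(): atomically clear PG_dcache_dirty, but
 * only while the cpu field in page->flags still matches 'cpu'; if some
 * other cpu has re-dirtied the page in the meantime we branch to 2f and
 * leave the flags alone.
 */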
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
        unsigned long mask = (1UL << PG_dcache_dirty);

        __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
                             "1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "srlx      %%g7, 24, %%g5\n\t"
                             "cmp       %%g5, %0\n\t"
                             "bne,pn    %%icc, 2f\n\t"
                             " andn     %%g7, %1, %%g5\n\t"
                             "casx      [%2], %%g7, %%g5\n\t"
                             "cmp       %%g7, %%g5\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " membar   #StoreLoad | #StoreStore\n"
                             "2:"
                             : /* no outputs */
                             : "r" (cpu), "r" (mask), "r" (&page->flags)
                             : "g5", "g7");
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        struct page *page = pte_page(pte);
        unsigned long pg_flags;

        if (VALID_PAGE(page) &&
            page->mapping &&
            ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
                int cpu = (pg_flags >> 24);

                /* This is just to optimize away some function calls
                 * in the SMP case.
                 */
                if (cpu == smp_processor_id())
                        flush_dcache_page_impl(page);
                else
                        smp_flush_dcache_page_impl(page, cpu);

                clear_dcache_dirty_cpu(page, cpu);
        }
        __update_mmu_cache(vma, address, pte);
}

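/* Lazy D-cache flushing: when a page is not yet mapped by any user vma
 * (both i_mmap lists empty), we only tag it dirty with our cpu number
 * and let update_mmu_cache() above perform the real flush once the
 * first user mapping faults the page in.
 */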
void flush_dcache_page(struct page *page)
{
        int dirty = test_bit(PG_dcache_dirty, &page->flags);
        int dirty_cpu = dcache_dirty_cpu(page);

        if (page->mapping &&
            page->mapping->i_mmap == NULL &&
            page->mapping->i_mmap_shared == NULL) {
                if (dirty) {
                        if (dirty_cpu == smp_processor_id())
                                return;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
                }
                set_dcache_dirty(page);
        } else {
                /* We could delay the flush for the !page->mapping
                 * case too.  But that case is for exec env/arg
                 * pages and those are 99% certainly going to get
                 * faulted into the tlb (and thus flushed) anyways.
                 */
                flush_dcache_page_impl(page);
        }
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        /* Cheetah has coherent I-cache. */
        if (tlb_type == spitfire) {
                unsigned long kaddr;

                for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
                        __flush_icache_page(__get_phys(kaddr));
        }
}

void show_mem(void)
{
        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
        printk("%ld pages of RAM\n", num_physpages);
        printk("%d free pages\n", nr_free_pages());
        printk("%d pages in page table cache\n", pgtable_cache_size);
#ifndef CONFIG_SMP
        printk("%d entries in page dir cache\n", pgd_cache_size);
#endif
        show_buffers();
}

void mmu_info(struct seq_file *m)
{
        if (tlb_type == cheetah)
                seq_printf(m, "MMU Type\t: Cheetah\n");
        else if (tlb_type == cheetah_plus)
                seq_printf(m, "MMU Type\t: Cheetah+\n");
        else if (tlb_type == spitfire)
                seq_printf(m, "MMU Type\t: Spitfire\n");
        else
                seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
                   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
        seq_printf(m, "DCPageFlushesXC\t: %d\n",
                   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
        unsigned long virt;
        unsigned long size;
        unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

void __init early_pgtable_allocfail(char *type)
{
        prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
        prom_halt();
}

#define BASE_PAGE_SIZE 8192
static pmd_t *prompmd;

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
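/* The walk below assumes BASE_PAGE_SIZE (8K) pages: bits 13-22 of the
 * address pick one of 1024 ptes in a pte page, bits 23 and up index the
 * pmd, so each pmd entry spans 8MB of the OBP address range.
 */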
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
        pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
        pte_t *ptep;
        unsigned long base;

        if (pmd_none(*pmdp)) {
                if (error)
                        *error = 1;
                return(0);
        }
        ptep = (pte_t *)pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
        if (!pte_present(*ptep)) {
                if (error)
                        *error = 1;
                return(0);
        }
        if (error) {
                *error = 0;
                return(pte_val(*ptep));
        }
        base = pte_val(*ptep) & _PAGE_PADDR;
        return(base + (promva & (BASE_PAGE_SIZE - 1)));
}

static void inherit_prom_mappings(void)
{
        struct linux_prom_translation *trans;
        unsigned long phys_page, tte_vaddr, tte_data;
        void (*remap_func)(unsigned long, unsigned long, int);
        pmd_t *pmdp;
        pte_t *ptep;
        int node, n, i, tsz;
        extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];

        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
        if (n == 0 || n == -1) {
                prom_printf("Couldn't get translation property\n");
                prom_halt();
        }
        n += 5 * sizeof(struct linux_prom_translation);
        for (tsz = 1; tsz < n; tsz <<= 1)
                /* empty */;
        trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
        if (trans == NULL) {
                prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
                prom_halt();
        }
        memset(trans, 0, tsz);

        if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
                prom_printf("Couldn't get translation property\n");
                prom_halt();
        }
        n = n / sizeof(*trans);

        /*
         * The obp translations are saved based on 8k pagesize, since obp
         * can use a mixture of pagesizes. Misses to the 0xf0000000 -
         * 0x100000000 range, i.e. the obp range, are handled in entry.S
         * and do not use the vpte scheme (see rant in
         * inherit_locked_prom_mappings()).
         */
#define OBP_PMD_SIZE 2048
        prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
        if (prompmd == NULL)
                early_pgtable_allocfail("pmd");
        memset(prompmd, 0, OBP_PMD_SIZE);
        for (i = 0; i < n; i++) {
                unsigned long vaddr;

                if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
                        for (vaddr = trans[i].virt;
                             ((vaddr < trans[i].virt + trans[i].size) &&
                              (vaddr < HI_OBP_ADDRESS));
                             vaddr += BASE_PAGE_SIZE) {
                                unsigned long val;

                                pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
                                if (pmd_none(*pmdp)) {
                                        ptep = __alloc_bootmem(BASE_PAGE_SIZE,
                                                               BASE_PAGE_SIZE,
                                                               bootmap_base);
                                        if (ptep == NULL)
                                                early_pgtable_allocfail("pte");
                                        memset(ptep, 0, BASE_PAGE_SIZE);
                                        pmd_set(pmdp, ptep);
                                }
                                ptep = (pte_t *)pmd_page(*pmdp) +
                                                ((vaddr >> 13) & 0x3ff);

                                val = trans[i].data;

                                /* Clear diag TTE bits. */
                                if (tlb_type == spitfire)
                                        val &= ~0x0003fe0000000000UL;

                                set_pte(ptep, __pte(val | _PAGE_MODIFIED));
                                trans[i].data += BASE_PAGE_SIZE;
                        }
                }
        }
        phys_page = __pa(prompmd);
        obp_iaddr_patch[0] |= (phys_page >> 10);
        obp_iaddr_patch[1] |= (phys_page & 0x3ff);
        flushi((long)&obp_iaddr_patch[0]);
        obp_daddr_patch[0] |= (phys_page >> 10);
        obp_daddr_patch[1] |= (phys_page & 0x3ff);
        flushi((long)&obp_daddr_patch[0]);

        /* Now fixup OBP's idea about where we really are mapped. */
        prom_printf("Remapping the kernel... ");

        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "flush     %%g6"
                             : /* No outputs */
                             : "r" (0),
                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

        switch (tlb_type) {
        default:
        case spitfire:
                phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
                break;

        case cheetah:
        case cheetah_plus:
                phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
                break;
        }

        phys_page &= _PAGE_PADDR;
        phys_page += ((unsigned long)&prom_boot_page -
                      (unsigned long)KERNBASE);

        if (tlb_type == spitfire) {
                /* Lock this into i/d tlb entry 59 */
                __asm__ __volatile__(
                        "stxa   %%g0, [%2] %3\n\t"
                        "stxa   %0, [%1] %4\n\t"
                        "membar #Sync\n\t"
                        "flush  %%g6\n\t"
                        "stxa   %%g0, [%2] %5\n\t"
                        "stxa   %0, [%1] %6\n\t"
                        "membar #Sync\n\t"
                        "flush  %%g6"
                        : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
                                 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
                        "r" (59 << 3), "r" (TLB_TAG_ACCESS),
                        "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
                        "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
                        : "memory");
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                /* Lock this into i/d tlb-0 entry 11 */
                __asm__ __volatile__(
                        "stxa   %%g0, [%2] %3\n\t"
                        "stxa   %0, [%1] %4\n\t"
                        "membar #Sync\n\t"
                        "flush  %%g6\n\t"
                        "stxa   %%g0, [%2] %5\n\t"
                        "stxa   %0, [%1] %6\n\t"
                        "membar #Sync\n\t"
                        "flush  %%g6"
                        : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
                                 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
                        "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
                        "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
                        "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
                        : "memory");
        } else {
                /* Implement me :-) */
                BUG();
        }

        tte_vaddr = (unsigned long) KERNBASE;

        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "flush     %%g6"
                             : /* No outputs */
                             : "r" (0),
                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

        if (tlb_type == spitfire)
                tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
        else
                tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());

        kern_locked_tte_data = tte_data;

        remap_func = (void *) ((unsigned long) &prom_remap -
                               (unsigned long) &prom_boot_page);

        /* Spitfire Errata #32 workaround */
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                             "flush     %%g6"
                             : /* No outputs */
                             : "r" (0),
                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

        remap_func((tlb_type == spitfire ?
                    (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
                    (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
                   (unsigned long) KERNBASE,
                   prom_get_mmu_ihandle());

        if (bigkernel)
                remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
                        (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());

        /* Flush out that temporary mapping. */
        spitfire_flush_dtlb_nucleus_page(0x0);
        spitfire_flush_itlb_nucleus_page(0x0);

        /* Now lock us back into the TLBs via OBP. */
        prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
        prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
        if (bigkernel) {
                prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
                                                                tte_vaddr + 0x400000);
                prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
                                                                tte_vaddr + 0x400000);
        }

        /* Re-read translations property. */
        if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
                prom_printf("Couldn't get translation property\n");
                prom_halt();
        }
        n = n / sizeof(*trans);

        for (i = 0; i < n; i++) {
                unsigned long vaddr = trans[i].virt;
                unsigned long size = trans[i].size;

                if (vaddr < 0xf0000000UL) {
                        unsigned long avoid_start = (unsigned long) KERNBASE;
                        unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);

                        if (bigkernel)
                                avoid_end += (4 * 1024 * 1024);
                        if (vaddr < avoid_start) {
                                unsigned long top = vaddr + size;

                                if (top > avoid_start)
                                        top = avoid_start;
                                prom_unmap(top - vaddr, vaddr);
                        }
                        if ((vaddr + size) > avoid_end) {
                                unsigned long bottom = vaddr;

                                if (bottom < avoid_end)
                                        bottom = avoid_end;
                                prom_unmap((vaddr + size) - bottom, bottom);
                        }
                }
        }

        prom_printf("done.\n");

        register_prom_callbacks();
}

/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
        unsigned long prom_reserved_base = 0xfffffffc00000000UL;
        int i;

        /* Only DTLB must be checked for VPTE entries. */
        if (tlb_type == spitfire) {
                for (i = 0; i < 63; i++) {
                        unsigned long tag;

                        /* Spitfire Errata #32 workaround */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
                                             : "r" (0),
                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                        tag = spitfire_get_dtlb_tag(i);
                        if (((tag & ~(PAGE_MASK)) == 0) &&
                            ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : /* no outputs */
                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                spitfire_put_dtlb_data(i, 0x0UL);
                        }
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                for (i = 0; i < 512; i++) {
                        unsigned long tag = cheetah_get_dtlb_tag(i, 2);

                        if ((tag & ~PAGE_MASK) == 0 &&
                            (tag & PAGE_MASK) >= prom_reserved_base) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : /* no outputs */
                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                cheetah_put_dtlb_data(i, 0x0UL, 2);
                        }

                        if (tlb_type != cheetah_plus)
                                continue;

                        tag = cheetah_get_dtlb_tag(i, 3);

                        if ((tag & ~PAGE_MASK) == 0 &&
                            (tag & PAGE_MASK) >= prom_reserved_base) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : /* no outputs */
                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                cheetah_put_dtlb_data(i, 0x0UL, 3);
                        }
                }
        } else {
                /* Implement me :-) */
                BUG();
        }
}

static int prom_ditlb_set;
struct prom_tlb_entry {
        int             tlb_ent;
        unsigned long   tlb_tag;
        unsigned long   tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];

void prom_world(int enter)
{
        unsigned long pstate;
        int i;

        if (!enter)
                set_fs(current->thread.current_ds);

        if (!prom_ditlb_set)
                return;

        /* Make sure the following runs atomically. */
        __asm__ __volatile__("flushw\n\t"
                             "rdpr      %%pstate, %0\n\t"
                             "wrpr      %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));

        if (enter) {
                /* Kick out nucleus VPTEs. */
                __flush_nucleus_vptes();

                /* Install PROM world. */
                for (i = 0; i < 16; i++) {
                        if (prom_dtlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                                     "membar #Sync"
                                        : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
                                        "i" (ASI_DMMU));
                                if (tlb_type == spitfire)
                                        spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
                                                               prom_dtlb[i].tlb_data);
                                else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                                        cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
                                                               prom_dtlb[i].tlb_data);
                        }
                        if (prom_itlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                                     "membar #Sync"
                                                     : : "r" (prom_itlb[i].tlb_tag),
                                                     "r" (TLB_TAG_ACCESS),
                                                     "i" (ASI_IMMU));
                                if (tlb_type == spitfire)
                                        spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
                                                               prom_itlb[i].tlb_data);
                                else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                                        cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
                                                               prom_itlb[i].tlb_data);
                        }
                }
        } else {
                for (i = 0; i < 16; i++) {
                        if (prom_dtlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                        : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                if (tlb_type == spitfire)
                                        spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
                                else
                                        cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
                        }
                        if (prom_itlb[i].tlb_ent != -1) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : : "r" (TLB_TAG_ACCESS),
                                                     "i" (ASI_IMMU));
                                if (tlb_type == spitfire)
                                        spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
                                else
                                        cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
                        }
                }
        }
        __asm__ __volatile__("wrpr      %0, 0, %%pstate"
                             : : "r" (pstate));
}

void inherit_locked_prom_mappings(int save_p)
{
        int i;
        int dtlb_seen = 0;
        int itlb_seen = 0;

        /* Fucking losing PROM has more mappings in the TLB, but
         * it (conveniently) fails to mention any of these in the
         * translations property.  The only ones that matter are
         * the locked PROM tlb entries, so we impose the following
         * irrevocable rule on the PROM: it is allowed 8 locked
         * entries in the ITLB and 8 in the DTLB.
         *
         * Supposedly the upper 16GB of the address space is
         * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
         * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
         * used between the client program and the firmware on sun5
         * systems to coordinate mmu mappings is also COMPLETELY
         * UNDOCUMENTED!!!!!! Thanks S(t)un!
         */
        if (save_p) {
                for (i = 0; i < 16; i++) {
                        prom_itlb[i].tlb_ent = -1;
                        prom_dtlb[i].tlb_ent = -1;
                }
        }
        if (tlb_type == spitfire) {
                int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
                for (i = 0; i < high; i++) {
                        unsigned long data;

                        /* Spitfire Errata #32 workaround */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
                                             : "r" (0),
                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                        data = spitfire_get_dtlb_data(i);
                        if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
                                unsigned long tag;

                                /* Spitfire Errata #32 workaround */
                                __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                                     "flush     %%g6"
                                                     : /* No outputs */
                                                     : "r" (0),
                                                     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                                tag = spitfire_get_dtlb_tag(i);
                                if (save_p) {
                                        prom_dtlb[dtlb_seen].tlb_ent = i;
                                        prom_dtlb[dtlb_seen].tlb_tag = tag;
                                        prom_dtlb[dtlb_seen].tlb_data = data;
                                }
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                spitfire_put_dtlb_data(i, 0x0UL);

                                dtlb_seen++;
                                if (dtlb_seen > 15)
                                        break;
                        }
                }

                for (i = 0; i < high; i++) {
                        unsigned long data;

                        /* Spitfire Errata #32 workaround */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
                                             : "r" (0),
                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                        data = spitfire_get_itlb_data(i);
                        if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
                                unsigned long tag;

                                /* Spitfire Errata #32 workaround */
                                __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                                     "flush     %%g6"
                                                     : /* No outputs */
                                                     : "r" (0),
                                                     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                                tag = spitfire_get_itlb_tag(i);
                                if (save_p) {
                                        prom_itlb[itlb_seen].tlb_ent = i;
                                        prom_itlb[itlb_seen].tlb_tag = tag;
                                        prom_itlb[itlb_seen].tlb_data = data;
                                }
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                                spitfire_put_itlb_data(i, 0x0UL);

                                itlb_seen++;
                                if (itlb_seen > 15)
                                        break;
                        }
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;

                for (i = 0; i < high; i++) {
                        unsigned long data;

                        data = cheetah_get_ldtlb_data(i);
                        if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
                                unsigned long tag;

                                tag = cheetah_get_ldtlb_tag(i);
                                if (save_p) {
                                        prom_dtlb[dtlb_seen].tlb_ent = i;
                                        prom_dtlb[dtlb_seen].tlb_tag = tag;
                                        prom_dtlb[dtlb_seen].tlb_data = data;
                                }
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                cheetah_put_ldtlb_data(i, 0x0UL);

                                dtlb_seen++;
                                if (dtlb_seen > 15)
                                        break;
                        }
                }

                for (i = 0; i < high; i++) {
                        unsigned long data;

                        data = cheetah_get_litlb_data(i);
                        if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
                                unsigned long tag;

                                tag = cheetah_get_litlb_tag(i);
                                if (save_p) {
                                        prom_itlb[itlb_seen].tlb_ent = i;
                                        prom_itlb[itlb_seen].tlb_tag = tag;
                                        prom_itlb[itlb_seen].tlb_data = data;
                                }
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                                cheetah_put_litlb_data(i, 0x0UL);

                                itlb_seen++;
                                if (itlb_seen > 15)
                                        break;
                        }
                }
        } else {
                /* Implement me :-) */
                BUG();
        }
        if (save_p)
                prom_ditlb_set = 1;
}

/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
        int i;

        for (i = 0; i < 16; i++) {
                if (prom_dtlb[i].tlb_ent != -1) {
                        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                             "membar #Sync"
                                : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
                                "i" (ASI_DMMU));
                        if (tlb_type == spitfire)
                                spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
                                                       prom_dtlb[i].tlb_data);
                        else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                                cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
                                                       prom_dtlb[i].tlb_data);
                }

                if (prom_itlb[i].tlb_ent != -1) {
                        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                             "membar #Sync"
                                             : : "r" (prom_itlb[i].tlb_tag),
                                             "r" (TLB_TAG_ACCESS),
                                             "i" (ASI_IMMU));
                        if (tlb_type == spitfire)
                                spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
                                                       prom_itlb[i].tlb_data);
                        else
                                cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
                                                       prom_itlb[i].tlb_data);
                }
        }
}

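/* Spitfire's L1 D-cache is 16K, direct mapped with 32-byte lines, so
 * writing 512 tags (index mask 0x3fe0, 512 * 32 = 16K) invalidates the
 * whole cache and we can stop early.  Cheetah is flushed through
 * ASI_DCACHE_INVALIDATE, which takes physical addresses, hence the
 * __pa() conversions below.
 */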
void __flush_dcache_range(unsigned long start, unsigned long end)
{
        unsigned long va;

        if (tlb_type == spitfire) {
                int n = 0;

                for (va = start; va < end; va += 32) {
                        spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
                        if (++n >= 512)
                                break;
                }
        } else {
                start = __pa(start);
                end = __pa(end);
                for (va = start; va < end; va += 32)
                        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                             "membar #Sync"
                                             : /* no outputs */
                                             : "r" (va),
                                               "i" (ASI_DCACHE_INVALIDATE));
        }
}

void __flush_cache_all(void)
{
        /* Cheetah should be fine here too. */
        if (tlb_type == spitfire) {
                unsigned long va;

                flushw_all();
                for (va = 0; va < (PAGE_SIZE << 1); va += 32)
                        spitfire_put_icache_tag(va, 0x0);
                __asm__ __volatile__("flush %g6");
        }
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
        unsigned long pstate;
        int i;

        __asm__ __volatile__("flushw\n\t"
                             "rdpr      %%pstate, %0\n\t"
                             "wrpr      %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));
        if (tlb_type == spitfire) {
                for (i = 0; i < 64; i++) {
                        /* Spitfire Errata #32 workaround */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
                                             : "r" (0),
                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                        if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : /* no outputs */
                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                                spitfire_put_dtlb_data(i, 0x0UL);
                        }

                        /* Spitfire Errata #32 workaround */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
                                             : "r" (0),
                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

                        if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                                     "membar #Sync"
                                                     : /* no outputs */
                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                                spitfire_put_itlb_data(i, 0x0UL);
                        }
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                cheetah_flush_dtlb_all();
                cheetah_flush_itlb_all();
        }
        __asm__ __volatile__("wrpr      %0, 0, %%pstate"
                             : : "r" (pstate));
}

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) and never use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
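/* Context word layout: the low CTX_VERSION_SHIFT bits are the hardware
 * context number (allocated out of mmu_context_bmap), the bits above
 * are a version stamp which is bumped each time the bitmap fills up
 * and has to be recycled.
 */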
void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx, new_ctx;

        spin_lock(&ctx_alloc_lock);
        ctx = CTX_HWBITS(tlb_context_cache + 1);
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
        if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        int i;
                        new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                                CTX_FIRST_VERSION;
                        if (new_ctx == 1)
                                new_ctx = CTX_FIRST_VERSION;

                        /* Don't call memset, for 16 entries that's just
                         * plain silly...
                         */
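                        /* Bit 0 keeps the nucleus context reserved;
                         * bit 1 presumably covers the context handed
                         * out just below via the version bump.
                         */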
                        mmu_context_bmap[0] = 3;
                        mmu_context_bmap[1] = 0;
                        mmu_context_bmap[2] = 0;
                        mmu_context_bmap[3] = 0;
                        for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                                mmu_context_bmap[i + 0] = 0;
                                mmu_context_bmap[i + 1] = 0;
                                mmu_context_bmap[i + 2] = 0;
                                mmu_context_bmap[i + 3] = 0;
                        }
                        goto out;
                }
        }
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
        tlb_context_cache = new_ctx;
        spin_unlock(&ctx_alloc_lock);

        mm->context = new_ctx;
}

#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
 * color is the same, then when the kernel initializes the pagetable
 * using the latter address range, accesses with the first address
 * range will see the newly initialized data rather than the garbage.
 */
#if (L1DCACHE_SIZE > PAGE_SIZE)                 /* is there D$ aliasing problem */
#define DC_ALIAS_SHIFT  1
#else
#define DC_ALIAS_SHIFT  0
#endif
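
/* When aliasing is possible we allocate an order-1 (two page) block so
 * that one half is guaranteed to have the D-cache color matching the
 * VPTE mapping; the spare half is pushed onto the opposite-color
 * quicklist below rather than freed.
 */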
pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *page = alloc_pages(GFP_KERNEL, DC_ALIAS_SHIFT);
        unsigned long color = VPTE_COLOR(address);

        if (page) {
                unsigned long *to_free;
                unsigned long paddr;
                pte_t *pte;

#if (L1DCACHE_SIZE > PAGE_SIZE)                 /* is there D$ aliasing problem */
                set_page_count((page + 1), 1);
#endif
                paddr = (unsigned long) page_address(page);
                memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

                if (!color) {
                        pte = (pte_t *) paddr;
                        to_free = (unsigned long *) (paddr + PAGE_SIZE);
                } else {
                        pte = (pte_t *) (paddr + PAGE_SIZE);
                        to_free = (unsigned long *) paddr;
                }

#if (L1DCACHE_SIZE > PAGE_SIZE)                 /* is there D$ aliasing problem */
                /* Now free the other one up, adjust cache size. */
                *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
                pte_quicklist[color ^ 0x1] = to_free;
                pgtable_cache_size++;
#endif

                return pte;
        }
        return NULL;
}

void sparc_ultra_dump_itlb(void)
{
        int slot;

        if (tlb_type == spitfire) {
                printk ("Contents of itlb: ");
                for (slot = 0; slot < 14; slot++) printk ("    ");
                printk ("%2x:%016lx,%016lx\n",
                        0,
                        spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
                for (slot = 1; slot < 64; slot+=3) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
                                slot+1,
                                spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
                                slot+2,
                                spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                printk ("Contents of itlb0:\n");
                for (slot = 0; slot < 16; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
                                slot+1,
                                cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
                }
                printk ("Contents of itlb2:\n");
                for (slot = 0; slot < 128; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
                                slot+1,
                                cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
                }
        }
}

void sparc_ultra_dump_dtlb(void)
{
        int slot;

        if (tlb_type == spitfire) {
                printk ("Contents of dtlb: ");
                for (slot = 0; slot < 14; slot++) printk ("    ");
                printk ("%2x:%016lx,%016lx\n", 0,
                        spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
                for (slot = 1; slot < 64; slot+=3) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
                                slot+1,
                                spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
                                slot+2,
                                spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                printk ("Contents of dtlb0:\n");
                for (slot = 0; slot < 16; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
                                slot+1,
                                cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
                }
                printk ("Contents of dtlb2:\n");
                for (slot = 0; slot < 512; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
                                slot+1,
                                cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
                }
                if (tlb_type == cheetah_plus) {
                        printk ("Contents of dtlb3:\n");
                        for (slot = 0; slot < 512; slot+=2) {
                                printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                        slot,
                                        cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
                                        slot+1,
                                        cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
                        }
                }
        }
}

extern unsigned long cmdline_memory_size;

unsigned long __init bootmem_init(unsigned long *pages_avail)
{
        unsigned long bootmap_size, start_pfn, end_pfn;
        unsigned long end_of_phys_memory = 0UL;
        unsigned long bootmap_pfn, bytes_avail, size;
        int i;

#ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("bootmem_init: Scan sp_banks, ");
#endif

        bytes_avail = 0UL;
        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                end_of_phys_memory = sp_banks[i].base_addr +
                        sp_banks[i].num_bytes;
                bytes_avail += sp_banks[i].num_bytes;
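                /* Honor "mem=" from the command line: trim the bank
                 * that crosses the limit and terminate the bank list
                 * right there.
                 */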
1222                 if (cmdline_memory_size) {
1223                         if (bytes_avail > cmdline_memory_size) {
1224                                 unsigned long slack = bytes_avail - cmdline_memory_size;
1225
1226                                 bytes_avail -= slack;
1227                                 end_of_phys_memory -= slack;
1228
1229                                 sp_banks[i].num_bytes -= slack;
1230                                 if (sp_banks[i].num_bytes == 0) {
1231                                         sp_banks[i].base_addr = 0xdeadbeef;
1232                                 } else {
1233                                         sp_banks[i+1].num_bytes = 0;
1234                                         sp_banks[i+1].base_addr = 0xdeadbeef;
1235                                 }
1236                                 break;
1237                         }
1238                 }
1239         }
1240
1241         *pages_avail = bytes_avail >> PAGE_SHIFT;
1242
1243         /* Start with page aligned address of last symbol in kernel
1244          * image.  The kernel is hard mapped below PAGE_OFFSET in a
1245          * 4MB locked TLB translation.
1246          */
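        /* Illustrative numbers only: with kern_base at 0x400000 and a
         * kern_size of 0x2b1234, the image ends at 0x6b1234; PAGE_ALIGN()
         * rounds that up to 0x6b2000, and >> PAGE_SHIFT (13, for the 8KB
         * pages used on sparc64) yields a first free pfn of 0x359.
         */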
1247         start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
1248
1249         bootmap_pfn = start_pfn;
1250
1251         end_pfn = end_of_phys_memory >> PAGE_SHIFT;
1252
1253 #ifdef CONFIG_BLK_DEV_INITRD
1254         /* Check the initial ramdisk now, so that the bootmap does not overwrite it. */
1255         if (sparc_ramdisk_image || sparc_ramdisk_image64) {
1256                 unsigned long ramdisk_image = sparc_ramdisk_image ?
1257                         sparc_ramdisk_image : sparc_ramdisk_image64;
1258                 if (ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
1259                         ramdisk_image -= KERNBASE;
1260                 initrd_start = ramdisk_image + phys_base;
1261                 initrd_end = initrd_start + sparc_ramdisk_size;
1262                 if (initrd_end > end_of_phys_memory) {
1263                         printk(KERN_CRIT "initrd extends beyond end of memory "
1264                                          "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
1265                                initrd_end, end_of_phys_memory);
1266                         initrd_start = 0;
1267                 }
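                /* If the ramdisk sits where the bootmap would go (in the
                 * first two pages above the kernel image), push the
                 * bootmap up past the end of the ramdisk.
                 */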
1268                 if (initrd_start) {
1269                         if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
1270                             initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
1271                                 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
1272                 }
1273         }
1274 #endif  
1275         /* Initialize the boot-time allocator. */
1276         max_pfn = max_low_pfn = end_pfn;
1277         min_low_pfn = phys_base >> PAGE_SHIFT;
1278
1279 #ifdef CONFIG_DEBUG_BOOTMEM
1280         prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
1281                     min_low_pfn, bootmap_pfn, max_low_pfn);
1282 #endif
1283         bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, min_low_pfn, end_pfn);
1284
1285         bootmap_base = bootmap_pfn << PAGE_SHIFT;
1286
1287         /* Now register the available physical memory with the
1288          * allocator.
1289          */
1290         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1291 #ifdef CONFIG_DEBUG_BOOTMEM
1292                 prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
1293                             i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
1294 #endif
1295                 free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
1296         }
1297
1298 #ifdef CONFIG_BLK_DEV_INITRD
1299         if (initrd_start) {
1300                 size = initrd_end - initrd_start;
1301                 /* Reserve the initrd image area. */
1302 #ifdef CONFIG_DEBUG_BOOTMEM
1303                 prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
1304                             initrd_start, size);
1305 #endif
1310                 reserve_bootmem(initrd_start, size);
1311                 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1312
1313                 initrd_start += PAGE_OFFSET;
1314                 initrd_end += PAGE_OFFSET;
1315         }
1316 #endif
1317         /* Reserve the kernel text/data/bss. */
1318 #ifdef CONFIG_DEBUG_BOOTMEM
1319         prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
1320 #endif
1321         reserve_bootmem(kern_base, kern_size);
1322         *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
1323
1324         /* Reserve the bootmem map.   We do not account for it
1325          * in pages_avail because we will release that memory
1326          * in free_all_bootmem.
1327          */
1328         size = bootmap_size;
1329 #ifdef CONFIG_DEBUG_BOOTMEM
1330         prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
1331                     (bootmap_pfn << PAGE_SHIFT), size);
1332 #endif
1333         reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
1334         *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1335
1336         return end_pfn;
1337 }
1338
1339 /* paging_init() sets up the page tables */
1340
1341 extern void sun_serial_setup(void);
1342 extern void cheetah_ecache_flush_init(void);
1343
1344 static unsigned long last_valid_pfn;
1345
1346 void __init paging_init(void)
1347 {
1348         extern pmd_t swapper_pmd_dir[1024];
1349         extern unsigned int sparc64_vpte_patchme1[1];
1350         extern unsigned int sparc64_vpte_patchme2[1];
1351         unsigned long alias_base = kern_base + PAGE_OFFSET;
1352         unsigned long second_alias_page = 0;
1353         unsigned long pt, flags, end_pfn, pages_avail;
1354         unsigned long shift = alias_base - ((unsigned long)KERNBASE);
1355         unsigned long real_end;
1356
1357         set_bit(0, mmu_context_bmap);
1358
1359         real_end = (unsigned long)&_end;
1360         if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
1361                 bigkernel = 1;
1362 #ifdef CONFIG_BLK_DEV_INITRD
1363         if (sparc_ramdisk_image || sparc_ramdisk_image64)
1364                 real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
1365 #endif
1366
1367         /* We assume physical memory starts at some 4MB multiple;
1368          * if this were not true we would not have booted to this
1369          * point anyway.
1370          */
1371         pt  = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
1372         pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
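        /* The stxa pairs below install a locked 4MB DTLB entry mapping
         * the kernel's PAGE_OFFSET alias: the first store sets the
         * virtual tag via the TLB_TAG_ACCESS register (ASI_DMMU), the
         * second writes the TTE data word (pt) directly into a fixed
         * TLB slot via ASI_DTLB_DATA_ACCESS -- entry 61 (and 60 for a
         * second 4MB page) on spitfire, entries 13 and 12 of TLB 0 on
         * cheetah.
         */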
1373         __save_and_cli(flags);
1374         if (tlb_type == spitfire) {
1375                 __asm__ __volatile__(
1376         "       stxa    %1, [%0] %3\n"
1377         "       stxa    %2, [%5] %4\n"
1378         "       membar  #Sync\n"
1379         "       flush   %%g6\n"
1380         "       nop\n"
1381         "       nop\n"
1382         "       nop\n"
1383                 : /* No outputs */
1384                 : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
1385                   "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
1386                 : "memory");
1387                 if (real_end >= KERNBASE + 0x340000) {
1388                         second_alias_page = alias_base + 0x400000;
1389                         __asm__ __volatile__(
1390                 "       stxa    %1, [%0] %3\n"
1391                 "       stxa    %2, [%5] %4\n"
1392                 "       membar  #Sync\n"
1393                 "       flush   %%g6\n"
1394                 "       nop\n"
1395                 "       nop\n"
1396                 "       nop\n"
1397                         : /* No outputs */
1398                         : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
1399                           "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
1400                         : "memory");
1401                 }
1402         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1403                 __asm__ __volatile__(
1404         "       stxa    %1, [%0] %3\n"
1405         "       stxa    %2, [%5] %4\n"
1406         "       membar  #Sync\n"
1407         "       flush   %%g6\n"
1408         "       nop\n"
1409         "       nop\n"
1410         "       nop\n"
1411                 : /* No outputs */
1412                 : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
1413                   "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
1414                 : "memory");
1415                 if (real_end >= KERNBASE + 0x340000) {
1416                         second_alias_page = alias_base + 0x400000;
1417                         __asm__ __volatile__(
1418                 "       stxa    %1, [%0] %3\n"
1419                 "       stxa    %2, [%5] %4\n"
1420                 "       membar  #Sync\n"
1421                 "       flush   %%g6\n"
1422                 "       nop\n"
1423                 "       nop\n"
1424                 "       nop\n"
1425                         : /* No outputs */
1426                         : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
1427                           "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
1428                         : "memory");
1429                 }
1430         }
1431         __restore_flags(flags);
1432         
1433         /* Now set kernel pgd to upper alias so physical page computations
1434          * work.
1435          */
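        /* init_mm.pgd is a pgd_t pointer, so advancing it by
         * shift / sizeof(pgd_t) entries moves it forward by exactly
         * "shift" bytes, i.e. from the KERNBASE-linked address of the
         * kernel pgd to its PAGE_OFFSET alias.
         */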
1436         init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1437         
1438         memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
1439
1440         /* Now can init the kernel/bad page tables. */
1441         pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));
1442         
1443         sparc64_vpte_patchme1[0] |=
1444                 (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
1445         sparc64_vpte_patchme2[0] |=
1446                 (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
1447         flushi((long)&sparc64_vpte_patchme1[0]);
1448         
1449         /* Setup bootmem... */
1450         pages_avail = 0;
1451         last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1452
1453         /* Inherit non-locked OBP mappings. */
1454         inherit_prom_mappings();
1455         
1456         /* Ok, we can use our TLB miss and window trap handlers safely.
1457          * We need to do a quick peek here to see if we are on StarFire
1458          * or not, so setup_tba can setup the IRQ globals correctly (it
1459          * needs to get the hard smp processor id correctly).
1460          */
1461         {
1462                 extern void setup_tba(int);
1463                 setup_tba(this_is_starfire);
1464         }
1465
1466         inherit_locked_prom_mappings(1);
1467
1468 #ifdef CONFIG_SUN_SERIAL
1469         /* This does not logically belong here, but we need to call it at
1470          * the moment we are able to use the bootmem allocator. This _has_
1471          * to be done after the prom_mappings above, since
1472          * __alloc_bootmem() doesn't work correctly until then.
1473          */
1474         sun_serial_setup();
1475 #endif
1476
1477         /* We only created DTLB mapping of this stuff. */
1478         spitfire_flush_dtlb_nucleus_page(alias_base);
1479         if (second_alias_page)
1480                 spitfire_flush_dtlb_nucleus_page(second_alias_page);
1481
1482         __flush_tlb_all();
1483
1484         {
1485                 unsigned long zones_size[MAX_NR_ZONES];
1486                 unsigned long zholes_size[MAX_NR_ZONES];
1487                 unsigned long npages;
1488                 int znum;
1489
1490                 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1491                         zones_size[znum] = zholes_size[znum] = 0;
1492
1493                 npages = end_pfn - (phys_base >> PAGE_SHIFT);
1494                 zones_size[ZONE_DMA] = npages;
1495                 zholes_size[ZONE_DMA] = npages - pages_avail;
1496
1497                 free_area_init_node(0, NULL, NULL, zones_size,
1498                                     phys_base, zholes_size);
1499         }
1500
1501         device_scan();
1502 }
1503
1504 /* Ok, it seems that the prom can allocate some more memory chunks
1505  * as a side effect of some prom calls we perform during the
1506  * boot sequence.  My most likely theory is that it is from the
1507  * prom_set_traptable() call, and OBP is allocating a scratchpad
1508  * for saving client program register state etc.
1509  */
1510 void __init sort_memlist(struct linux_mlist_p1275 *thislist)
1511 {
1512         int swapi = 0;
1513         int i, mitr;
1514         unsigned long tmpaddr, tmpsize;
1515         unsigned long lowest;
1516
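        /* A simple selection-style sort by ascending start_adr: find
         * the lowest remaining entry, then rotate it into position i by
         * shifting the intervening entries up one slot.  The PROM list
         * is at most 64 entries, so the O(n^2) cost is irrelevant.
         */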
1517         for (i = 0; thislist[i].theres_more != 0; i++) {
1518                 lowest = thislist[i].start_adr;
1519                 for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
1520                         if (thislist[mitr].start_adr < lowest) {
1521                                 lowest = thislist[mitr].start_adr;
1522                                 swapi = mitr;
1523                         }
1524                 if (lowest == thislist[i].start_adr)
1525                         continue;
1526                 tmpaddr = thislist[swapi].start_adr;
1527                 tmpsize = thislist[swapi].num_bytes;
1528                 for (mitr = swapi; mitr > i; mitr--) {
1529                         thislist[mitr].start_adr = thislist[mitr-1].start_adr;
1530                         thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
1531                 }
1532                 thislist[i].start_adr = tmpaddr;
1533                 thislist[i].num_bytes = tmpsize;
1534         }
1535 }
1536
1537 void __init rescan_sp_banks(void)
1538 {
1539         struct linux_prom64_registers memlist[64];
1540         struct linux_mlist_p1275 avail[64], *mlist;
1541         unsigned long bytes, base_paddr;
1542         int num_regs, node = prom_finddevice("/memory");
1543         int i;
1544
1545         num_regs = prom_getproperty(node, "available",
1546                                     (char *) memlist, sizeof(memlist));
1547         num_regs = (num_regs / sizeof(struct linux_prom64_registers));
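        /* Convert the flat PROM register list into the linked-list form
         * sort_memlist() expects: each entry's theres_more points at the
         * next entry, and the final entry is terminated after the loop.
         */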
1548         for (i = 0; i < num_regs; i++) {
1549                 avail[i].start_adr = memlist[i].phys_addr;
1550                 avail[i].num_bytes = memlist[i].reg_size;
1551                 avail[i].theres_more = &avail[i + 1];
1552         }
1553         avail[i - 1].theres_more = NULL;
1554         sort_memlist(avail);
1555
1556         mlist = &avail[0];
1557         i = 0;
1558         bytes = mlist->num_bytes;
1559         base_paddr = mlist->start_adr;
1560   
1561         sp_banks[0].base_addr = base_paddr;
1562         sp_banks[0].num_bytes = bytes;
1563
1564         while (mlist->theres_more != NULL) {
1565                 i++;
1566                 mlist = mlist->theres_more;
1567                 bytes = mlist->num_bytes;
1568                 if (i >= SPARC_PHYS_BANKS-1) {
1569                         printk ("The machine has more banks than "
1570                                 "this kernel can support\n"
1571                                 "Increase the SPARC_PHYS_BANKS "
1572                                 "setting (currently %d)\n",
1573                                 SPARC_PHYS_BANKS);
1574                         i = SPARC_PHYS_BANKS-1;
1575                         break;
1576                 }
1577     
1578                 sp_banks[i].base_addr = mlist->start_adr;
1579                 sp_banks[i].num_bytes = mlist->num_bytes;
1580         }
1581
1582         i++;
1583         sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
1584         sp_banks[i].num_bytes = 0;
1585
1586         for (i = 0; sp_banks[i].num_bytes != 0; i++)
1587                 sp_banks[i].num_bytes &= PAGE_MASK;
1588 }
1589
1590 static void __init taint_real_pages(void)
1591 {
1592         struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
1593         int i;
1594
1595         for (i = 0; i < SPARC_PHYS_BANKS; i++) {
1596                 saved_sp_banks[i].base_addr =
1597                         sp_banks[i].base_addr;
1598                 saved_sp_banks[i].num_bytes =
1599                         sp_banks[i].num_bytes;
1600         }
1601
1602         rescan_sp_banks();
1603
1604         /* Find changes discovered in the sp_bank rescan and
1605          * reserve the lost portions in the bootmem maps.
1606          */
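        /* Each bit of sparc64_valid_addr_bitmap covers one 4MB (1 << 22)
         * chunk of physical memory.  Pages still backed by a bank in the
         * rescanned list have their chunk marked valid; pages the PROM
         * has claimed in the meantime are reserved in bootmem so the
         * page allocator never hands them out.
         */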
1607         for (i = 0; saved_sp_banks[i].num_bytes; i++) {
1608                 unsigned long old_start, old_end;
1609
1610                 old_start = saved_sp_banks[i].base_addr;
1611                 old_end = old_start +
1612                         saved_sp_banks[i].num_bytes;
1613                 while (old_start < old_end) {
1614                         int n;
1615
1616                         for (n = 0; sp_banks[n].num_bytes; n++) {
1617                                 unsigned long new_start, new_end;
1618
1619                                 new_start = sp_banks[n].base_addr;
1620                                 new_end = new_start + sp_banks[n].num_bytes;
1621
1622                                 if (new_start <= old_start &&
1623                                     new_end >= (old_start + PAGE_SIZE)) {
1624                                         set_bit (old_start >> 22,
1625                                                  sparc64_valid_addr_bitmap);
1626                                         goto do_next_page;
1627                                 }
1628                         }
1629                         reserve_bootmem(old_start, PAGE_SIZE);
1630
1631                 do_next_page:
1632                         old_start += PAGE_SIZE;
1633                 }
1634         }
1635 }
1636
1637 void __init mem_init(void)
1638 {
1639         unsigned long codepages, datapages, initpages;
1640         unsigned long addr, last;
1641         int i;
1642
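        /* Size the valid-address bitmap: one bit per 4MB chunk means
         * shifting the last pfn right by (22 - PAGE_SHIFT) to count
         * chunks, then by 6 more to count 64-bit longs; "i << 3" below
         * converts longs to bytes for __alloc_bootmem().
         */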
1643         i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
1644         i += 1;
1645         sparc64_valid_addr_bitmap = (unsigned long *)
1646                 __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
1647         if (sparc64_valid_addr_bitmap == NULL) {
1648                 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1649                 prom_halt();
1650         }
1651         memset(sparc64_valid_addr_bitmap, 0, i << 3);
1652
1653         addr = PAGE_OFFSET + kern_base;
1654         last = PAGE_ALIGN(kern_size) + addr;
1655         while (addr < last) {
1656                 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
1657                 addr += PAGE_SIZE;
1658         }
1659
1660         taint_real_pages();
1661
1662         max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
1663         high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1664
1665 #ifdef CONFIG_DEBUG_BOOTMEM
1666         prom_printf("mem_init: Calling free_all_bootmem().\n");
1667 #endif
1668         num_physpages = free_all_bootmem() - 1;
1669
1670         /*
1671          * Set up the zero page, mark it reserved, so that page count
1672          * is not manipulated when freeing the page from user ptes.
1673          */
1674         mem_map_zero = _alloc_pages(GFP_KERNEL, 0);
1675         if (mem_map_zero == NULL) {
1676                 prom_printf("paging_init: Cannot alloc zero page.\n");
1677                 prom_halt();
1678         }
1679         SetPageReserved(mem_map_zero);
1680         clear_page(page_address(mem_map_zero));
1681
1682         codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
1683         codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
1684         datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
1685         datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
1686         initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
1687         initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
1688
1689 #ifndef CONFIG_SMP
1690         {
1691                 /* Put empty_pg_dir on pgd_quicklist */
1692                 extern pgd_t empty_pg_dir[1024];
1693                 unsigned long addr = (unsigned long)empty_pg_dir;
1694                 unsigned long alias_base = kern_base + PAGE_OFFSET -
1695                         (long)(KERNBASE);
1696                 
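                /* empty_pg_dir was linked at a KERNBASE-relative
                 * address; rebase it to the PAGE_OFFSET alias before
                 * handing it to the pgd quicklist.
                 */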
1697                 memset(empty_pg_dir, 0, sizeof(empty_pg_dir));
1698                 addr += alias_base;
1699                 free_pgd_fast((pgd_t *)addr);
1700                 num_physpages++;
1701         }
1702 #endif
1703
1704         printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
1705                nr_free_pages() << (PAGE_SHIFT-10),
1706                codepages << (PAGE_SHIFT-10),
1707                datapages << (PAGE_SHIFT-10), 
1708                initpages << (PAGE_SHIFT-10), 
1709                PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
1710
1711         if (tlb_type == cheetah || tlb_type == cheetah_plus)
1712                 cheetah_ecache_flush_init();
1713 }
1714
1715 void free_initmem (void)
1716 {
1717         unsigned long addr, initend;
1718
1719         /*
1720          * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
1721          */
1722         addr = PAGE_ALIGN((unsigned long)(&__init_begin));
1723         initend = (unsigned long)(&__init_end) & PAGE_MASK;
1724         for (; addr < initend; addr += PAGE_SIZE) {
1725                 unsigned long page;
1726                 struct page *p;
1727
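                /* Translate the KERNBASE-linked init-section address to
                 * its PAGE_OFFSET alias so that virt_to_page() sees an
                 * address the mem_map actually describes.
                 */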
1728                 page = (addr +
1729                         ((unsigned long) __va(kern_base)) -
1730                         ((unsigned long) KERNBASE));
1731                 p = virt_to_page(page);
1732
1733                 ClearPageReserved(p);
1734                 set_page_count(p, 1);
1735                 __free_page(p);
1736                 num_physpages++;
1737         }
1738 }
1739
1740 #ifdef CONFIG_BLK_DEV_INITRD
1741 void free_initrd_mem(unsigned long start, unsigned long end)
1742 {
1743         if (start < end)
1744                 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1745         for (; start < end; start += PAGE_SIZE) {
1746                 struct page *p = virt_to_page(start);
1747
1748                 ClearPageReserved(p);
1749                 set_page_count(p, 1);
1750                 __free_page(p);
1751                 num_physpages++;
1752         }
1753 }
1754 #endif
1755
1756 void si_meminfo(struct sysinfo *val)
1757 {
1758         val->totalram = num_physpages;
1759         val->sharedram = 0;
1760         val->freeram = nr_free_pages();
1761         val->bufferram = atomic_read(&buffermem_pages);
1762
1763         /* These are always zero on Sparc64. */
1764         val->totalhigh = 0;
1765         val->freehigh = 0;
1766
1767         val->mem_unit = PAGE_SIZE;
1768 }
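
/* A minimal sketch (hypothetical caller, not part of this file) of how
 * si_meminfo() is typically consumed, e.g. on the sysinfo(2) path:
 *
 *	struct sysinfo val;
 *
 *	si_meminfo(&val);
 *	printk("ram: %lu pages of %u bytes, %lu free\n",
 *	       val.totalram, val.mem_unit, val.freeram);
 */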