http://downloads.netgear.com/files/GPL/DM111PSP_v3.61d_GPL.tar.gz
[bcm963xx.git] / kernel / linux / arch / mips / kernel / setup.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002  Maciej W. Rozycki
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/utsname.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS];

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Despite its name this variable is used even if we don't have PCI
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

extern void * __rd_start, * __rd_end;

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype = MACH_UNKNOWN;
unsigned long mips_machgroup = MACH_GROUP_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);
EXPORT_SYMBOL(mips_machgroup);

struct boot_mem_map boot_mem_map;

static char command_line[CL_SIZE];
       char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE;

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */

const unsigned long mips_io_port_base = KSEG1;

EXPORT_SYMBOL(mips_io_port_base);

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.
 */
unsigned long isa_slot_offset;
EXPORT_SYMBOL(isa_slot_offset);

static struct resource code_resource = { "Kernel code" };
static struct resource data_resource = { "Kernel data" };

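/*
 * add_memory_region() records a physical memory range in boot_mem_map,
 * merging it with the previous entry when the two are contiguous and of
 * the same type.  Entries beyond BOOT_MEM_MAP_MAX are dropped with a
 * warning.
 */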
void __init add_memory_region(phys_t start, phys_t size, long type)
{
        int x = boot_mem_map.nr_map;
        struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;

        /*
         * Try to merge with previous entry if any.  This is far less than
         * perfect but is sufficient for most real world cases.
         */
        if (x && prev->addr + prev->size == start && prev->type == type) {
                prev->size += size;
                return;
        }

        if (x == BOOT_MEM_MAP_MAX) {
                printk("Ooops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}

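/* Dump the memory map assembled so far, one printk line per region. */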
static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(" memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk("(usable)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk("(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk("(reserved)\n");
                        break;
                default:
                        printk("type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}

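/*
 * parse_cmdline_early() copies saved_command_line into command_line while
 * consuming "mem=" options.  The first "mem=" option discards the memory
 * map set up by the firmware/board code, e.g.:
 *
 *   mem=16M             RAM from 0 to 16 MB
 *   mem=32M@0x10000000  RAM from 0x10000000 to 0x10000000 + 32 MB
 */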
static inline void parse_cmdline_early(void)
{
        char c = ' ', *to = command_line, *from = saved_command_line;
        unsigned long start_at, mem_size;
        int len = 0;
        int usermem = 0;

        printk("Determined physical RAM map:\n");
        print_memory_map();

        for (;;) {
                /*
                 * "mem=XXX[kKmM]" defines a memory region from
                 * 0 to <XXX>, overriding the determined size.
                 * "mem=XXX[KkmM]@YYY[KkmM]" defines a memory region from
                 * <YYY> to <YYY>+<XXX>, overriding the determined size.
                 */
                if (c == ' ' && !memcmp(from, "mem=", 4)) {
                        if (to != command_line)
                                to--;
                        /*
                         * If a user specifies memory size, we
                         * blow away any automatically generated
                         * size.
                         */
                        if (usermem == 0) {
                                boot_mem_map.nr_map = 0;
                                usermem = 1;
                        }
                        mem_size = memparse(from + 4, &from);
                        if (*from == '@')
                                start_at = memparse(from + 1, &from);
                        else
                                start_at = 0;
                        add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
                }
                c = *(from++);
                if (!c)
                        break;
                if (CL_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';

        if (usermem) {
                printk("User-defined physical RAM map:\n");
                print_memory_map();
        }
}


#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)

#define MAXMEM          HIGHMEM_START
#define MAXMEM_PFN      PFN_DOWN(MAXMEM)

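/*
 * bootmem_init() locates an appended initrd (if any), finds the highest
 * usable page frame, initializes the boot-time allocator over the low
 * memory range, hands all fully usable lowmem pages from boot_mem_map to
 * it, and reserves the bootmap itself.
 */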
static inline void bootmem_init(void)
{
        unsigned long start_pfn;
#ifndef CONFIG_SGI_IP27
        unsigned long bootmap_size, max_low_pfn, first_usable_pfn;
        int i;
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long tmp;
        unsigned long *initrd_header;

        tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
        if (tmp < (unsigned long)&_end)
                tmp += PAGE_SIZE;
        initrd_header = (unsigned long *)tmp;
        /* 0x494E5244 is "INRD" in ASCII, marking an initrd appended to the kernel image */
        if (initrd_header[0] == 0x494E5244) {
                initrd_start = (unsigned long)&initrd_header[2];
                initrd_end = initrd_start + initrd_header[1];
        }
        start_pfn = PFN_UP(CPHYSADDR((&_end)+(initrd_end - initrd_start) + PAGE_SIZE));
#else
        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards.
         */
        start_pfn = PFN_UP(CPHYSADDR(&_end));
#endif  /* CONFIG_BLK_DEV_INITRD */

#ifndef CONFIG_SGI_IP27
        /* Find the highest page frame number we have available.  */
        max_pfn = 0;
        first_usable_pfn = -1UL;
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                      + boot_mem_map.map[i].size);

                if (start >= end)
                        continue;
                if (end > max_pfn)
                        max_pfn = end;
                if (start < first_usable_pfn) {
                        if (start > start_pfn) {
                                first_usable_pfn = start;
                        } else if (end > start_pfn) {
                                first_usable_pfn = start_pfn;
                        }
                }
        }

        /*
         * Determine low and high memory ranges
         */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                       MAXMEM >> 20);
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
        }

#ifdef CONFIG_HIGHMEM
        /*
         * Crude, we really should make a better attempt at detecting
         * highstart_pfn
         */
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > MAXMEM_PFN) {
                highstart_pfn = MAXMEM_PFN;
                printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                       (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT));
        }
#endif

        /* Initialize the boot-time allocator with low memory only.  */
        bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);

        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long curr_pfn, last_pfn, size;

                /*
                 * Reserve usable memory.
                 */
                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                /*
                 * We are rounding up the start address of usable memory:
                 */
                curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
                if (curr_pfn >= max_low_pfn)
                        continue;
                if (curr_pfn < start_pfn)
                        curr_pfn = start_pfn;

                /*
                 * ... and at the end of the usable range downwards:
                 */
                last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
                                    + boot_mem_map.map[i].size);

                if (last_pfn > max_low_pfn)
                        last_pfn = max_low_pfn;

                /*
                 * Only register lowmem part of lowmem segment with bootmem.
                 */
                size = last_pfn - curr_pfn;
                if (curr_pfn > PFN_DOWN(HIGHMEM_START))
                        continue;
                if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START))
                        size = PFN_DOWN(HIGHMEM_START) - curr_pfn;
                if (!size)
                        continue;

                /*
                 * ... finally, did all the rounding and playing
                 * around just make the area go away?
                 */
                if (last_pfn <= curr_pfn)
                        continue;

                /* Register lowmem ranges */
                free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
        }

        /* Reserve the bootmap memory.  */
        reserve_bootmem(PFN_PHYS(first_usable_pfn), bootmap_size);
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        /* Board specific code should have set up initrd_start and initrd_end */
        ROOT_DEV = Root_RAM0;
        if (&__rd_start != &__rd_end) {
                initrd_start = (unsigned long)&__rd_start;
                initrd_end = (unsigned long)&__rd_end;
        }
        initrd_below_start_ok = 1;
        if (initrd_start) {
                unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
                printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
                       (void *)initrd_start,
                       initrd_size);

                if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
                        printk("initrd extends beyond end of memory "
                               "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
                               sizeof(long) * 2, CPHYSADDR(initrd_end),
                               sizeof(long) * 2, PFN_PHYS(max_low_pfn));
                        initrd_start = initrd_end = 0;
                }
        }
#endif /* CONFIG_BLK_DEV_INITRD  */
}

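/*
 * resource_init() registers each RAM region from boot_mem_map with the
 * iomem resource tree and nests the kernel code and data resources inside
 * whichever region turns out to contain them.
 */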
static inline void resource_init(void)
{
        int i;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext) - 1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata) - 1;

        /*
         * Request address space for all standard RAM.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= MAXMEM)
                        continue;
                if (end >= MAXMEM)
                        end = MAXMEM - 1;

                res = alloc_bootmem(sizeof(struct resource));
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }

                res->start = start;
                res->end = end;

                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);

                /*
                 *  We don't know which RAM region contains kernel data,
                 *  so we try it repeatedly and let the resource manager
                 *  test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
}

#undef PFN_UP
#undef PFN_DOWN
#undef PFN_PHYS

#undef MAXMEM
#undef MAXMEM_PFN

static int __initdata earlyinit_debug;

static int __init earlyinit_debug_setup(char *str)
{
        earlyinit_debug = 1;
        return 1;
}
__setup("earlyinit_debug", earlyinit_debug_setup);

extern initcall_t __earlyinitcall_start, __earlyinitcall_end;

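/*
 * do_earlyinitcalls() walks the function pointers bounded by
 * __earlyinitcall_start and __earlyinitcall_end and calls each one;
 * booting with "earlyinit_debug" logs every call.
 */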
static void __init do_earlyinitcalls(void)
{
        initcall_t *call, *start, *end;

        start = &__earlyinitcall_start;
        end = &__earlyinitcall_end;

        for (call = start; call < end; call++) {
                if (earlyinit_debug)
                        printk("calling earlyinitcall 0x%p\n", *call);

                (*call)();
        }
}

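/*
 * setup_arch() is the MIPS architecture setup entry point called from
 * start_kernel(): it probes the CPU, runs prom_init() and the board's
 * early initcalls, adjusts the CP0 status register, then parses the
 * command line and initializes bootmem, paging and the resource tree.
 */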
void __init setup_arch(char **cmdline_p)
{
        unsigned int status;

        cpu_probe();
        prom_init();
        cpu_report();

#ifdef CONFIG_MIPS32
        /* Disable coprocessors and set FPU for 16/32 FPR register model */
        status = read_c0_status();
        status &= ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_FR);
        status |= ST0_CU0;
        write_c0_status(status);
#endif
#ifdef CONFIG_MIPS64
        /*
         * On IP27, I am seeing the TS bit set when the kernel is loaded.
         * Maybe because the kernel is in ckseg0 and not xkphys? Clear it
         * anyway ...
         */
        status = read_c0_status();
        status &= ~(ST0_BEV|ST0_TS|ST0_CU1|ST0_CU2|ST0_CU3);
        status |= (ST0_CU0|ST0_KX|ST0_SX|ST0_FR);
        write_c0_status(status);
#endif

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        /* call board setup routine */
        do_earlyinitcalls();

        strlcpy(command_line, arcs_cmdline, sizeof(command_line));
        strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_cmdline_early();
        bootmem_init();
        paging_init();
        resource_init();
}

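/*
 * "nofpu" on the kernel command line clears the MIPS_CPU_FPU option bit
 * for the boot CPU, so the rest of the kernel treats the FPU as absent.
 */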
int __init fpu_disable(char *s)
{
        cpu_data[0].options &= ~MIPS_CPU_FPU;

        return 1;
}

__setup("nofpu", fpu_disable);