/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 * $Id: e820.c,v 1.13 2004/03/22 00:31:08 ak Exp $
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/bootsetup.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
/* pfn range of the kernel direct-mapping page tables (see bad_addr below) */
extern unsigned long table_start, table_end;

#ifdef CONFIG_ACPI_BOOT
extern acpi_interrupt_flags acpi_sci_flags;
#endif

extern struct resource code_resource, data_resource, vram_resource;
31 /* Check for some hardcoded bad areas that early boot is not allowed to touch */
32 static inline int bad_addr(unsigned long *addrp, unsigned long size)
34 unsigned long addr = *addrp, last = addr + size;
36 /* various gunk below that needed for SMP startup */
37 if (addr < 7*PAGE_SIZE) {
41 /* direct mapping tables of the kernel */
42 if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
43 *addrp = table_end << PAGE_SHIFT;
47 #ifdef CONFIG_BLK_DEV_INITRD
48 if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
49 addr < INITRD_START+INITRD_SIZE) {
50 *addrp = INITRD_START + INITRD_SIZE;
54 /* kernel code + 640k memory hole (later should not be needed, but
55 be paranoid for now) */
56 if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
57 *addrp = __pa_symbol(&_end);
60 /* XXX ramdisk image here? */
64 int __init e820_mapped(unsigned long start, unsigned long end, int type)
67 for (i = 0; i < e820.nr_map; i++) {
68 struct e820entry *ei = &e820.map[i];
69 if (type && ei->type != type)
71 if (ei->addr >= end || ei->addr + ei->size < start)
79 * Find a free area in a specific range.
81 unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
84 for (i = 0; i < e820.nr_map; i++) {
85 struct e820entry *ei = &e820.map[i];
86 unsigned long addr = ei->addr, last;
87 if (ei->type != E820_RAM)
91 if (addr > ei->addr + ei->size)
93 while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
96 if (last > ei->addr + ei->size)
106 * Free bootmem based on the e820 table for a node.
108 void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
111 for (i = 0; i < e820.nr_map; i++) {
112 struct e820entry *ei = &e820.map[i];
113 unsigned long last, addr;
115 if (ei->type != E820_RAM ||
116 ei->addr+ei->size <= start ||
120 addr = round_up(ei->addr, PAGE_SIZE);
124 last = round_down(ei->addr + ei->size, PAGE_SIZE);
128 if (last > addr && last-addr >= PAGE_SIZE)
129 free_bootmem_node(pgdat, addr, last-addr);
/*
 * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
 * The direct mapping extends to end_pfn_map, so that we can directly access
 * ACPI and other tables without having to play with fixmaps.
 */
unsigned long end_pfn_map;

/*
 * Last pfn which the user wants to use (set via the mem= option,
 * see parse_mem_cmdline below).
 */
unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
146 * Find the highest page frame number we have available
149 void __init e820_end_of_ram(void)
154 for (i = 0; i < e820.nr_map; i++) {
155 struct e820entry *ei = &e820.map[i];
156 unsigned long start, end;
158 start = round_up(ei->addr, PAGE_SIZE);
159 end = round_down(ei->addr + ei->size, PAGE_SIZE);
162 if (ei->type == E820_RAM) {
163 if (end > end_pfn<<PAGE_SHIFT)
164 end_pfn = end>>PAGE_SHIFT;
166 if (end > end_pfn_map<<PAGE_SHIFT)
167 end_pfn_map = end>>PAGE_SHIFT;
171 if (end_pfn > end_pfn_map)
172 end_pfn_map = end_pfn;
173 if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
174 end_pfn_map = MAXMEM>>PAGE_SHIFT;
175 if (end_pfn > end_user_pfn)
176 end_pfn = end_user_pfn;
177 if (end_pfn > end_pfn_map)
178 end_pfn = end_pfn_map;
182 * Mark e820 reserved areas as busy for the resource manager.
184 void __init e820_reserve_resources(void)
187 for (i = 0; i < e820.nr_map; i++) {
188 struct resource *res;
189 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
191 res = alloc_bootmem_low(sizeof(struct resource));
192 switch (e820.map[i].type) {
193 case E820_RAM: res->name = "System RAM"; break;
194 case E820_ACPI: res->name = "ACPI Tables"; break;
195 case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
196 default: res->name = "reserved";
198 res->start = e820.map[i].addr;
199 res->end = res->start + e820.map[i].size - 1;
200 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
201 request_resource(&iomem_resource, res);
202 if (e820.map[i].type == E820_RAM) {
204 * We dont't know which RAM region contains kernel data,
205 * so we try it repeatedly and let the resource manager
208 request_resource(res, &code_resource);
209 request_resource(res, &data_resource);
215 * Add a memory region to the kernel e820 map.
217 void __init add_memory_region(unsigned long start, unsigned long size, int type)
222 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
226 e820.map[x].addr = start;
227 e820.map[x].size = size;
228 e820.map[x].type = type;
232 void __init e820_print_map(char *who)
236 for (i = 0; i < e820.nr_map; i++) {
237 printk(" %s: %016Lx - %016Lx ", who,
238 (unsigned long long) e820.map[i].addr,
239 (unsigned long long) (e820.map[i].addr + e820.map[i].size));
240 switch (e820.map[i].type) {
241 case E820_RAM: printk("(usable)\n");
244 printk("(reserved)\n");
247 printk("(ACPI data)\n");
250 printk("(ACPI NVS)\n");
252 default: printk("type %u\n", e820.map[i].type);
259 * Sanitize the BIOS e820 map.
261 * Some e820 responses include overlapping entries. The following
262 * replaces the original e820 map with a new one, removing overlaps.
265 static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
267 struct change_member {
268 struct e820entry *pbios; /* pointer to original bios entry */
269 unsigned long long addr; /* address for this change point */
271 static struct change_member change_point_list[2*E820MAX] __initdata;
272 static struct change_member *change_point[2*E820MAX] __initdata;
273 static struct e820entry *overlap_list[E820MAX] __initdata;
274 static struct e820entry new_bios[E820MAX] __initdata;
275 struct change_member *change_tmp;
276 unsigned long current_type, last_type;
277 unsigned long long last_addr;
278 int chgidx, still_changing;
285 Visually we're performing the following (1,2,3,4 = memory types)...
287 Sample memory map (w/overlaps):
288 ____22__________________
289 ______________________4_
290 ____1111________________
291 _44_____________________
292 11111111________________
293 ____________________33__
294 ___________44___________
295 __________33333_________
296 ______________22________
297 ___________________2222_
298 _________111111111______
299 _____________________11_
300 _________________4______
302 Sanitized equivalent (no overlap):
303 1_______________________
304 _44_____________________
305 ___1____________________
306 ____22__________________
307 ______11________________
308 _________1______________
309 __________3_____________
310 ___________44___________
311 _____________33_________
312 _______________2________
313 ________________1_______
314 _________________4______
315 ___________________2____
316 ____________________33__
317 ______________________4_
320 /* if there's only one memory region, don't bother */
326 /* bail out if we find any unreasonable addresses in bios map */
327 for (i=0; i<old_nr; i++)
328 if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
331 /* create pointers for initial change-point information (for sorting) */
332 for (i=0; i < 2*old_nr; i++)
333 change_point[i] = &change_point_list[i];
335 /* record all known change-points (starting and ending addresses) */
337 for (i=0; i < old_nr; i++) {
338 change_point[chgidx]->addr = biosmap[i].addr;
339 change_point[chgidx++]->pbios = &biosmap[i];
340 change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
341 change_point[chgidx++]->pbios = &biosmap[i];
344 /* sort change-point list by memory addresses (low -> high) */
346 while (still_changing) {
348 for (i=1; i < 2*old_nr; i++) {
349 /* if <current_addr> > <last_addr>, swap */
350 /* or, if current=<start_addr> & last=<end_addr>, swap */
351 if ((change_point[i]->addr < change_point[i-1]->addr) ||
352 ((change_point[i]->addr == change_point[i-1]->addr) &&
353 (change_point[i]->addr == change_point[i]->pbios->addr) &&
354 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
357 change_tmp = change_point[i];
358 change_point[i] = change_point[i-1];
359 change_point[i-1] = change_tmp;
365 /* create a new bios memory map, removing overlaps */
366 overlap_entries=0; /* number of entries in the overlap table */
367 new_bios_entry=0; /* index for creating new bios map entries */
368 last_type = 0; /* start with undefined memory type */
369 last_addr = 0; /* start with 0 as last starting address */
370 /* loop through change-points, determining affect on the new bios map */
371 for (chgidx=0; chgidx < 2*old_nr; chgidx++)
373 /* keep track of all overlapping bios entries */
374 if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
376 /* add map entry to overlap list (> 1 entry implies an overlap) */
377 overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
381 /* remove entry from list (order independent, so swap with last) */
382 for (i=0; i<overlap_entries; i++)
384 if (overlap_list[i] == change_point[chgidx]->pbios)
385 overlap_list[i] = overlap_list[overlap_entries-1];
389 /* if there are overlapping entries, decide which "type" to use */
390 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
392 for (i=0; i<overlap_entries; i++)
393 if (overlap_list[i]->type > current_type)
394 current_type = overlap_list[i]->type;
395 /* continue building up new bios map based on this information */
396 if (current_type != last_type) {
397 if (last_type != 0) {
398 new_bios[new_bios_entry].size =
399 change_point[chgidx]->addr - last_addr;
400 /* move forward only if the new size was non-zero */
401 if (new_bios[new_bios_entry].size != 0)
402 if (++new_bios_entry >= E820MAX)
403 break; /* no more space left for new bios entries */
405 if (current_type != 0) {
406 new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
407 new_bios[new_bios_entry].type = current_type;
408 last_addr=change_point[chgidx]->addr;
410 last_type = current_type;
413 new_nr = new_bios_entry; /* retain count for new bios entries */
415 /* copy new bios mapping into original location */
416 memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
423 * Copy the BIOS e820 map into a safe place.
425 * Sanity-check it while we're at it..
427 * If we're lucky and live on a modern system, the setup code
428 * will have given us a memory map that we can use to properly
429 * set up memory. If we aren't, we'll fake a memory map.
431 * We check to see that the memory map contains at least 2 elements
432 * before we'll use it, because the detection code in setup.S may
433 * not be perfect and most every PC known to man has two memory
434 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
435 * thinkpad 560x, for example, does not cooperate with the memory
438 static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
440 /* Only one memory region (or negative)? Ignore it */
445 unsigned long start = biosmap->addr;
446 unsigned long size = biosmap->size;
447 unsigned long end = start + size;
448 unsigned long type = biosmap->type;
450 /* Overflow in 64 bits? Ignore the memory map. */
455 * Some BIOSes claim RAM in the 640k - 1M region.
456 * Not right. Fix it up.
458 * This should be removed on Hammer which is supposed to not
459 * have non e820 covered ISA mappings there, but I had some strange
460 * problems so it stays for now. -AK
462 if (type == E820_RAM) {
463 if (start < 0x100000ULL && end > 0xA0000ULL) {
464 if (start < 0xA0000ULL)
465 add_memory_region(start, 0xA0000ULL-start, type);
466 if (end <= 0x100000ULL)
473 add_memory_region(start, size, type);
474 } while (biosmap++,--nr_map);
478 void __init setup_memory_region(void)
480 char *who = "BIOS-e820";
483 * Try to copy the BIOS-supplied E820-map.
485 * Otherwise fake a memory map; one section from 0k->640k,
486 * the next section from 1mb->appropriate_mem_k
488 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
489 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
490 unsigned long mem_size;
492 /* compare results from other methods and take the greater */
493 if (ALT_MEM_K < EXT_MEM_K) {
494 mem_size = EXT_MEM_K;
497 mem_size = ALT_MEM_K;
501 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
502 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
504 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
508 extern char command_line[], saved_command_line[];
509 extern int fallback_aper_order;
510 extern int iommu_setup(char *opt);
512 void __init parse_mem_cmdline (char ** cmdline_p)
514 char c = ' ', *to = command_line, *from = COMMAND_LINE;
517 /* Save unparsed command line copy for /proc/cmdline */
518 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
519 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
526 * mem=XXX[kKmM] limits kernel memory to XXX+1MB
528 * It would be more logical to count from 0 instead of from
529 * HIGH_MEMORY, but we keep that for now for i386 compatibility. -AK
531 if (!memcmp(from, "mem=", 4)) {
533 * No support for custom mapping like i386.
534 * The reason is that we need to read the e820 map
535 * anyways to handle the ACPI mappings in the
537 * Also on x86-64 there should be always a good e820
538 * map. This is only an upper limit, you cannot force
539 * usage of memory not in e820.
541 end_user_pfn = memparse(from+4, &from) + HIGH_MEMORY;
542 end_user_pfn >>= PAGE_SHIFT;
544 #ifdef CONFIG_GART_IOMMU
545 else if (!memcmp(from,"iommu=",6)) {
551 * If the BIOS enumerates physical processors before logical,
552 * maxcpus=N at enumeration-time can be used to disable HT.
554 else if (!memcmp(from, "maxcpus=", 8)) {
555 extern unsigned int max_cpus;
557 max_cpus = simple_strtoul(from + 8, NULL, 0);
561 #ifdef CONFIG_ACPI_BOOT
562 else if (!memcmp(from, "acpi=off", 8))
565 /* acpi=strict disables out-of-spec workarounds */
566 else if (!memcmp(from, "acpi=strict", 11)) {
570 else if (!memcmp(from, "pci=noacpi", 10))
572 else if (!memcmp(from, "acpi=noirq", 10))
574 else if (!memcmp(from, "acpi_sci=edge", 13))
575 acpi_sci_flags.trigger = 1;
576 else if (!memcmp(from, "acpi_sci=level", 14))
577 acpi_sci_flags.trigger = 3;
578 else if (!memcmp(from, "acpi_sci=high", 13))
579 acpi_sci_flags.polarity = 1;
580 else if (!memcmp(from, "acpi_sci=low", 12))
581 acpi_sci_flags.polarity = 3;
583 else if (!memcmp(from,"maxcpus=0",9)) {
584 disable_ioapic_setup();
588 else if (!memcmp(from, "noapic", 6))
589 disable_ioapic_setup();
590 else if (!memcmp(from, "nolocalapic", 11) || !memcmp(from,"nolapic",7))
592 else if (!memcmp(from,"apic",4)) {
593 extern int ioapic_force;
595 skip_ioapic_setup = 0;
597 else if (!memcmp(from, "noexec=", 7)) {
598 extern int nonx_setup(char *);
599 nonx_setup(from + 7);
605 if (COMMAND_LINE_SIZE <= ++len)
610 *cmdline_p = command_line;