2 * linux/arch/i386/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
7 * and Martin Mares, November 1997.
9 * Force Cyrix 6x86(MX) and M II processors to report MTRR capability
10 * and Cyrix "coma bug" recognition by
11 * Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999.
13 * Force Centaur C6 processors to report MTRR capability.
14 * Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
16 * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
18 * IDT Winchip tweaks, misc clean ups.
19 * Dave Jones <davej@suse.de>, August 1999
21 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
23 * Better detection of Centaur/IDT WinChip models.
24 * Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999.
26 * Memory region support
27 * David Parsons <orc@pell.chi.il.us>, July-August 1999
29 * Cleaned up cache-detection code
30 * Dave Jones <davej@suse.de>, October 1999
32 * Added proper L2 cache detection for Coppermine
33 * Dragan Stancevic <visitor@valinux.com>, October 1999
35 * Added the original array for capability flags but forgot to credit
36 * myself :) (~1998) Fixed/cleaned up some cpu_model_info and other stuff
37 * Jauder Ho <jauderho@carumba.com>, January 2000
39 * Detection for Celeron coppermine, identify_cpu() overhauled,
40 * and a few other clean ups.
41 * Dave Jones <davej@suse.de>, April 2000
43 * Pentium III FXSR, SSE support
44 * General FPU state handling cleanups
45 * Gareth Hughes <gareth@valinux.com>, May 2000
47 * Added proper Cascades CPU and L2 cache detection for Cascades
48 * and 8-way type cache happy bunch from Intel:^)
49 * Dragan Stancevic <visitor@valinux.com>, May 2000
51 * Forward port AMD Duron errata T13 from 2.2.17pre
52 * Dave Jones <davej@suse.de>, August 2000
54 * Forward port lots of fixes/improvements from 2.2.18pre
55 * Cyrix III, Pentium IV support.
56 * Dave Jones <davej@suse.de>, October 2000
58 * Massive cleanup of CPU detection and bug handling;
59 * Transmeta CPU detection,
60 * H. Peter Anvin <hpa@zytor.com>, November 2000
62 * Added E820 sanitization routine (removes overlapping memory regions);
63 * Brian Moyle <bmoyle@mvista.com>, February 2001
66 * Dave Jones <davej@suse.de>, March 2001
68 * AMD Athlon/Duron/Thunderbird bluesmoke support.
69 * Dave Jones <davej@suse.de>, April 2001.
71 * CacheSize bug workaround updates for AMD, Intel & VIA Cyrix.
72 * Dave Jones <davej@suse.de>, September, October 2001.
74 * Provisions for empty E820 memory regions (reported by certain BIOSes).
75 * Alex Achenbach <xela@slit.de>, December 2002.
80 * This file handles the architecture-dependent parts of initialization
83 #include <linux/errno.h>
84 #include <linux/sched.h>
85 #include <linux/kernel.h>
87 #include <linux/stddef.h>
88 #include <linux/unistd.h>
89 #include <linux/ptrace.h>
90 #include <linux/slab.h>
91 #include <linux/user.h>
92 #include <linux/a.out.h>
93 #include <linux/tty.h>
94 #include <linux/ioport.h>
95 #include <linux/delay.h>
96 #include <linux/config.h>
97 #include <linux/init.h>
98 #include <linux/acpi.h>
99 #include <linux/apm_bios.h>
100 #ifdef CONFIG_BLK_DEV_RAM
101 #include <linux/blk.h>
103 #include <linux/highmem.h>
104 #include <linux/bootmem.h>
105 #include <linux/pci.h>
106 #include <linux/pci_ids.h>
107 #include <linux/seq_file.h>
108 #include <asm/processor.h>
109 #include <linux/console.h>
110 #include <linux/module.h>
111 #include <asm/mtrr.h>
112 #include <asm/uaccess.h>
113 #include <asm/system.h>
116 #include <asm/cobalt.h>
118 #include <asm/desc.h>
119 #include <asm/e820.h>
121 #include <asm/mpspec.h>
122 #include <asm/mmu_context.h>
123 #include <asm/io_apic.h>
/*
 * Boot-time global state shared with the rest of the kernel.
 * NOTE(review): this view of the file is missing lines (embedded original
 * line numbers are non-contiguous); an #else/#endif pair around the
 * acpi_disabled definitions is presumably among the missing lines — confirm
 * against the full source.
 */
129 char ignore_irq13; /* set if exception 16 works */
/* CPU data for the boot processor; filled in by CPU identification. */
130 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
132 unsigned long mmu_cr4_features;
133 EXPORT_SYMBOL(mmu_cr4_features);
143 /* for MCA, but anyone else can use it if they want */
144 unsigned int machine_id;
145 unsigned int machine_submodel_id;
146 unsigned int BIOS_revision;
147 unsigned int mca_pentium_flag;
149 /* For PCI or other memory-mapped resources */
150 unsigned long pci_mem_start = 0x10000000;
152 /* user-defined highmem size */
/* -1 means "not set on the command line"; see find_max_low_pfn(). */
153 static unsigned int highmem_pages __initdata = -1;
158 struct drive_info_struct { char dummy[32]; } drive_info;
159 struct screen_info screen_info;
160 struct apm_info apm_info;
161 struct sys_desc_table_struct {
162 unsigned short length;
163 unsigned char table[0];
168 unsigned char aux_device_present;
170 extern void mcheck_init(struct cpuinfo_x86 *c);
171 extern void dmi_scan_machine(void);
172 extern int root_mountflags;
/* Linker-provided section boundary symbols (addresses, not chars). */
173 extern char _text, _etext, _edata, _end;
175 static int have_cpuid_p(void) __init;
177 static int disable_x86_serial_nr __initdata = 1;
/* Capability bits force-cleared by command-line options. */
178 static u32 disabled_x86_caps[NCAPINTS] __initdata = { 0 };
180 #ifdef CONFIG_ACPI_INTERPRETER
181 int acpi_disabled = 0;
183 int acpi_disabled = 1;
185 EXPORT_SYMBOL(acpi_disabled);
187 #ifdef CONFIG_ACPI_BOOT
188 extern int __initdata acpi_ht;
189 int acpi_force __initdata = 0;
190 extern acpi_interrupt_flags acpi_sci_flags;
193 extern int blk_nohighio;
/*
 * Accessors for the boot parameter block (empty_zero_page) filled in by
 * the real-mode setup code, followed by SGI VisWS PIIX4/Super-I/O
 * register definitions used by visws_get_board_type_and_rev() below.
 */
196 * This is set up by the setup-routine at boot-time
198 #define PARAM ((unsigned char *)empty_zero_page)
199 #define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
200 #define EXT_MEM_K (*(unsigned short *) (PARAM+2))
201 #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
202 #define E820_MAP_NR (*(char*) (PARAM+E820NR))
203 #define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
204 #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
205 #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
206 #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
207 #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
208 #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
209 #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
210 #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
211 #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
212 #define KERNEL_START (*(unsigned long *) (PARAM+0x214))
213 #define INITRD_START (*(unsigned long *) (PARAM+0x218))
214 #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
215 #define DISK80_SIGNATURE_BUFFER (*(unsigned int*) (PARAM+DISK80_SIG_BUFFER))
216 #define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
217 #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
218 #define COMMAND_LINE ((char *) (PARAM+2048))
219 #define COMMAND_LINE_SIZE 256
/* Bit layout of the RAMDISK_FLAGS boot-parameter word. */
221 #define RAMDISK_IMAGE_START_MASK 0x07FF
222 #define RAMDISK_PROMPT_FLAG 0x8000
223 #define RAMDISK_LOAD_FLAG 0x4000
/* VisWS board identification; -1 means "unknown/not a VisWS". */
226 char visws_board_type = -1;
227 char visws_board_rev = -1;
/* I/O port layout: PM base, then GPIO and PM Super-I/O windows after it. */
229 #define PIIX_PM_START 0x0F80
231 #define SIO_GPIO_START 0x0FC0
233 #define SIO_PM_START 0x0FC8
235 #define PMBASE PIIX_PM_START
236 #define GPIREG0 (PMBASE+0x30)
237 #define GPIREG(x) (GPIREG0+((x)/8))
238 #define PIIX_GPI_BD_ID1 18
239 #define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1)
241 #define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8)
/* Super-I/O (National 307-style) index/data configuration ports. */
243 #define SIO_INDEX 0x2e
244 #define SIO_DATA 0x2f
246 #define SIO_DEV_SEL 0x7
247 #define SIO_DEV_ENB 0x30
248 #define SIO_DEV_MSB 0x60
249 #define SIO_DEV_LSB 0x61
251 #define SIO_GP_DEV 0x7
253 #define SIO_GP_BASE SIO_GPIO_START
254 #define SIO_GP_MSB (SIO_GP_BASE>>8)
255 #define SIO_GP_LSB (SIO_GP_BASE&0xff)
257 #define SIO_GP_DATA1 (SIO_GP_BASE+0)
259 #define SIO_PM_DEV 0x8
261 #define SIO_PM_BASE SIO_PM_START
262 #define SIO_PM_MSB (SIO_PM_BASE>>8)
263 #define SIO_PM_LSB (SIO_PM_BASE&0xff)
264 #define SIO_PM_INDEX (SIO_PM_BASE+0)
265 #define SIO_PM_DATA (SIO_PM_BASE+1)
267 #define SIO_PM_FER2 0x1
269 #define SIO_PM_GP_EN 0x80
/*
 * Determine SGI Visual Workstation board type (320 vs 540) from PIIX4
 * GPIO pins, then program the Super-I/O chip so the board revision can
 * be read from a GPIO data register.
 * NOTE(review): several body lines (declarations, braces, VISWS_320/540
 * assignments) are missing from this view — do not modify without the
 * full source.
 */
271 static void __init visws_get_board_type_and_rev(void)
275 visws_board_type = (char)(inb_p(PIIX_GPI_BD_REG) & PIIX_GPI_BD_REG)
276 >> PIIX_GPI_BD_SHIFT;
279 * First, we have to initialize the 307 part to allow us access
280 * to the GPIO registers. Let's map them at 0x0fc0 which is right
281 * after the PIIX4 PM section.
283 outb_p(SIO_DEV_SEL, SIO_INDEX);
284 outb_p(SIO_GP_DEV, SIO_DATA); /* Talk to GPIO regs. */
286 outb_p(SIO_DEV_MSB, SIO_INDEX);
287 outb_p(SIO_GP_MSB, SIO_DATA); /* MSB of GPIO base address */
289 outb_p(SIO_DEV_LSB, SIO_INDEX);
290 outb_p(SIO_GP_LSB, SIO_DATA); /* LSB of GPIO base address */
292 outb_p(SIO_DEV_ENB, SIO_INDEX);
293 outb_p(1, SIO_DATA); /* Enable GPIO registers. */
296 * Now, we have to map the power management section to write
297 * a bit which enables access to the GPIO registers.
298 * What lunatic came up with this shit?
300 outb_p(SIO_DEV_SEL, SIO_INDEX);
301 outb_p(SIO_PM_DEV, SIO_DATA); /* Talk to GPIO regs. */
303 outb_p(SIO_DEV_MSB, SIO_INDEX);
304 outb_p(SIO_PM_MSB, SIO_DATA); /* MSB of PM base address */
306 outb_p(SIO_DEV_LSB, SIO_INDEX);
307 outb_p(SIO_PM_LSB, SIO_DATA); /* LSB of PM base address */
309 outb_p(SIO_DEV_ENB, SIO_INDEX);
310 outb_p(1, SIO_DATA); /* Enable PM registers. */
313 * Now, write the PM register which enables the GPIO registers.
315 outb_p(SIO_PM_FER2, SIO_PM_INDEX);
316 outb_p(SIO_PM_GP_EN, SIO_PM_DATA);
319 * Now, initialize the GPIO registers.
320 * We want them all to be inputs which is the
321 * power on default, so let's leave them alone.
322 * So, let's just read the board rev!
324 raw = inb_p(SIO_GP_DATA1);
325 raw &= 0x7f; /* 7 bits of valid board revision ID. */
327 if (visws_board_type == VISWS_320) {
330 } else if (raw < 0xc) {
336 } else if (visws_board_type == VISWS_540) {
339 visws_board_rev = raw;
342 printk(KERN_INFO "Silicon Graphics %s (rev %d)\n",
343 visws_board_type == VISWS_320 ? "320" :
344 (visws_board_type == VISWS_540 ? "540" :
/* Parsed and raw copies of the kernel command line. */
351 static char command_line[COMMAND_LINE_SIZE];
352 char saved_command_line[COMMAND_LINE_SIZE];
/* Legacy PC I/O port regions claimed unconditionally at boot. */
354 struct resource standard_io_resources[] = {
355 { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
356 { "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
357 { "timer0", 0x40, 0x43, IORESOURCE_BUSY },
358 { "timer1", 0x50, 0x53, IORESOURCE_BUSY },
359 { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
360 { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
361 { "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
362 { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
363 { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
366 #define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
/* Kernel text/data ranges; start/end filled in by setup_arch(). */
368 static struct resource code_resource = { "Kernel code", 0x100000, 0 };
369 static struct resource data_resource = { "Kernel data", 0, 0 };
370 static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
372 /* System ROM resources */
/* First two entries are fixed; the rest are filled in by probe_roms(). */
374 static struct resource rom_resources[MAXROMS] = {
375 { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
376 { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
/* Option ROMs start with the 0xAA55 signature word. */
379 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
/*
 * Scan the legacy ROM areas (video ROM at C0000, extension ROMs in
 * C8000-DFFFF in 2K steps, motherboard ROM at E0000) for the 0xAA55
 * signature and register each find with the iomem resource tree.
 * NOTE(review): loop braces, `roms` bookkeeping and checksum validation
 * lines are missing from this view — confirm against the full source.
 */
381 static void __init probe_roms(void)
385 unsigned char *romstart;
387 request_resource(&iomem_resource, rom_resources+0);
389 /* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
390 for (base = 0xC0000; base < 0xE0000; base += 2048) {
391 romstart = bus_to_virt(base);
392 if (!romsignature(romstart))
394 request_resource(&iomem_resource, rom_resources + roms);
399 /* Extension roms at C800:0000 - DFFF:0000 */
400 for (base = 0xC8000; base < 0xE0000; base += 2048) {
401 unsigned long length;
403 romstart = bus_to_virt(base);
404 if (!romsignature(romstart))
/* ROM header byte 2 gives the image length in 512-byte units. */
406 length = romstart[2] * 512;
409 unsigned char chksum;
412 for (i = 0; i < length; i++)
413 chksum += romstart[i];
417 rom_resources[roms].start = base;
418 rom_resources[roms].end = base + length - 1;
419 rom_resources[roms].name = "Extension ROM";
420 rom_resources[roms].flags = IORESOURCE_BUSY;
422 request_resource(&iomem_resource, rom_resources + roms);
430 /* Final check for motherboard extension rom at E000:0000 */
432 romstart = bus_to_virt(base);
434 if (romsignature(romstart)) {
435 rom_resources[roms].start = base;
436 rom_resources[roms].end = base + 65535;
437 rom_resources[roms].name = "Extension ROM";
438 rom_resources[roms].flags = IORESOURCE_BUSY;
440 request_resource(&iomem_resource, rom_resources + roms);
/*
 * Clip the e820 RAM map so that no usable RAM extends beyond `size`
 * bytes (used by the "mem=" command-line option).
 * NOTE(review): the tail of the loop (truncating nr_map past the cut
 * point) is missing from this view.
 */
444 static void __init limit_regions (unsigned long long size)
446 unsigned long long current_addr = 0;
449 for (i = 0; i < e820.nr_map; i++) {
450 if (e820.map[i].type == E820_RAM) {
/* End address of this RAM region. */
451 current_addr = e820.map[i].addr + e820.map[i].size;
452 if (current_addr >= size) {
/* Shrink the region so it ends exactly at `size`. */
453 e820.map[i].size -= current_addr-size;
/*
 * Append one entry (start/size/type) to the boot e820 map, complaining
 * if the fixed-size table is already full.
 */
460 static void __init add_memory_region(unsigned long long start,
461 unsigned long long size, int type)
466 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
470 e820.map[x].addr = start;
471 e820.map[x].size = size;
472 e820.map[x].type = type;
474 } /* add_memory_region */
/*
 * Print the current e820 map to the kernel log; `who` identifies the
 * source of the map (e.g. "BIOS-e820" or "user").
 */
478 static void __init print_memory_map(char *who)
482 for (i = 0; i < e820.nr_map; i++) {
483 printk(" %s: %016Lx - %016Lx ", who,
485 e820.map[i].addr + e820.map[i].size);
486 switch (e820.map[i].type) {
487 case E820_RAM: printk("(usable)\n");
490 printk("(reserved)\n");
493 printk("(ACPI data)\n");
496 printk("(ACPI NVS)\n");
498 default: printk("type %lu\n", e820.map[i].type);
505 * Sanitize the BIOS e820 map.
507 * Some e820 responses include overlapping entries. The following
508 * replaces the original e820 map with a new one, removing overlaps.
/*
 * Algorithm: turn every region into two "change points" (start and end),
 * sort them, then sweep the sorted list maintaining the set of regions
 * currently overlapping; at each transition the strictest (highest) type
 * wins. NOTE(review): this view is missing lines (return statements,
 * some braces); treat the visible code as read-only reference.
 */
511 static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
513 struct change_member {
514 struct e820entry *pbios; /* pointer to original bios entry */
515 unsigned long long addr; /* address for this change point */
517 struct change_member change_point_list[2*E820MAX];
518 struct change_member *change_point[2*E820MAX];
519 struct e820entry *overlap_list[E820MAX];
520 struct e820entry new_bios[E820MAX];
521 struct change_member *change_tmp;
522 unsigned long current_type, last_type;
523 unsigned long long last_addr;
524 int chgidx, still_changing;
527 int old_nr, new_nr, chg_nr;
531 Visually we're performing the following (1,2,3,4 = memory types)...
533 Sample memory map (w/overlaps):
534 ____22__________________
535 ______________________4_
536 ____1111________________
537 _44_____________________
538 11111111________________
539 ____________________33__
540 ___________44___________
541 __________33333_________
542 ______________22________
543 ___________________2222_
544 _________111111111______
545 _____________________11_
546 _________________4______
548 Sanitized equivalent (no overlap):
549 1_______________________
550 _44_____________________
551 ___1____________________
552 ____22__________________
553 ______11________________
554 _________1______________
555 __________3_____________
556 ___________44___________
557 _____________33_________
558 _______________2________
559 ________________1_______
560 _________________4______
561 ___________________2____
562 ____________________33__
563 ______________________4_
566 /* if there's only one memory region, don't bother */
572 /* bail out if we find any unreasonable addresses in bios map */
/* addr + size wrapping past 2^64 indicates a bogus BIOS entry. */
573 for (i=0; i<old_nr; i++)
574 if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
577 /* create pointers for initial change-point information (for sorting) */
578 for (i=0; i < 2*old_nr; i++)
579 change_point[i] = &change_point_list[i];
581 /* record all known change-points (starting and ending addresses),
582 omitting those that are for empty memory regions */
584 for (i=0; i < old_nr; i++) {
585 if (biosmap[i].size != 0) {
586 change_point[chgidx]->addr = biosmap[i].addr;
587 change_point[chgidx++]->pbios = &biosmap[i];
588 change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
589 change_point[chgidx++]->pbios = &biosmap[i];
592 chg_nr = chgidx; /* true number of change-points */
594 /* sort change-point list by memory addresses (low -> high) */
/* Bubble sort; fine for the small fixed-size e820 table. */
596 while (still_changing) {
598 for (i=1; i < chg_nr; i++) {
599 /* if <current_addr> > <last_addr>, swap */
600 /* or, if current=<start_addr> & last=<end_addr>, swap */
601 if ((change_point[i]->addr < change_point[i-1]->addr) ||
602 ((change_point[i]->addr == change_point[i-1]->addr) &&
603 (change_point[i]->addr == change_point[i]->pbios->addr) &&
604 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
607 change_tmp = change_point[i];
608 change_point[i] = change_point[i-1];
609 change_point[i-1] = change_tmp;
615 /* create a new bios memory map, removing overlaps */
616 overlap_entries=0; /* number of entries in the overlap table */
617 new_bios_entry=0; /* index for creating new bios map entries */
618 last_type = 0; /* start with undefined memory type */
619 last_addr = 0; /* start with 0 as last starting address */
620 /* loop through change-points, determining affect on the new bios map */
621 for (chgidx=0; chgidx < chg_nr; chgidx++)
623 /* keep track of all overlapping bios entries */
624 if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
626 /* add map entry to overlap list (> 1 entry implies an overlap) */
627 overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
631 /* remove entry from list (order independent, so swap with last) */
632 for (i=0; i<overlap_entries; i++)
634 if (overlap_list[i] == change_point[chgidx]->pbios)
635 overlap_list[i] = overlap_list[overlap_entries-1];
639 /* if there are overlapping entries, decide which "type" to use */
640 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
642 for (i=0; i<overlap_entries; i++)
643 if (overlap_list[i]->type > current_type)
644 current_type = overlap_list[i]->type;
645 /* continue building up new bios map based on this information */
646 if (current_type != last_type) {
647 if (last_type != 0) {
648 new_bios[new_bios_entry].size =
649 change_point[chgidx]->addr - last_addr;
650 /* move forward only if the new size was non-zero */
651 if (new_bios[new_bios_entry].size != 0)
652 if (++new_bios_entry >= E820MAX)
653 break; /* no more space left for new bios entries */
655 if (current_type != 0) {
656 new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
657 new_bios[new_bios_entry].type = current_type;
658 last_addr=change_point[chgidx]->addr;
660 last_type = current_type;
663 new_nr = new_bios_entry; /* retain count for new bios entries */
665 /* copy new bios mapping into original location */
666 memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
673 * Copy the BIOS e820 map into a safe place.
675 * Sanity-check it while we're at it..
677 * If we're lucky and live on a modern system, the setup code
678 * will have given us a memory map that we can use to properly
679 * set up memory. If we aren't, we'll fake a memory map.
681 * We check to see that the memory map contains at least 2 elements
682 * before we'll use it, because the detection code in setup.S may
683 * not be perfect and most every PC known to man has two memory
684 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
685 * thinkpad 560x, for example, does not cooperate with the memory
/* Returns < 0 when the map is rejected (see setup_memory_region). */
688 static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
690 /* Only one memory region (or negative)? Ignore it */
695 unsigned long long start = biosmap->addr;
696 unsigned long long size = biosmap->size;
697 unsigned long long end = start + size;
698 unsigned long type = biosmap->type;
700 /* Overflow in 64 bits? Ignore the memory map. */
705 * Some BIOSes claim RAM in the 640k - 1M region.
706 * Not right. Fix it up.
/* Split any RAM region that crosses the 640k-1M hole around it. */
708 if (type == E820_RAM) {
709 if (start < 0x100000ULL && end > 0xA0000ULL) {
710 if (start < 0xA0000ULL)
711 add_memory_region(start, 0xA0000ULL-start, type);
712 if (end <= 0x100000ULL)
718 add_memory_region(start, size, type);
719 } while (biosmap++,--nr_map);
723 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
725 struct edd_info edd[EDDMAXNR];
726 unsigned int edd_disk80_sig;
728 * copy_edd() - Copy the BIOS EDD information
729 * from empty_zero_page into a safe place.
/* Must run before empty_zero_page is reused/freed later in boot. */
732 static inline void copy_edd(void)
735 memcpy(edd, EDD_BUF, sizeof(edd));
736 edd_disk80_sig = DISK80_SIGNATURE_BUFFER;
/* No-op stub when EDD support is not configured. */
739 static inline void copy_edd(void) {}
743 * Do NOT EVER look at the BIOS memory size location.
744 * It does not work on many machines.
746 #define LOWMEMSIZE() (0x9f000)
/*
 * Build the boot memory map: sanitize and copy the BIOS e820 map, or —
 * if it is unusable — synthesize a two-region map (0-640k plus 1MB-up)
 * from the EXT_MEM_K/ALT_MEM_K size reports, taking the larger of the two.
 */
748 static void __init setup_memory_region(void)
750 char *who = "BIOS-e820";
753 * Try to copy the BIOS-supplied E820-map.
755 * Otherwise fake a memory map; one section from 0k->640k,
756 * the next section from 1mb->appropriate_mem_k
758 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
759 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
760 unsigned long mem_size;
762 /* compare results from other methods and take the greater */
763 if (ALT_MEM_K < EXT_MEM_K) {
764 mem_size = EXT_MEM_K;
767 mem_size = ALT_MEM_K;
/* mem_size is in kilobytes, hence the << 10. */
772 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
773 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
775 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
776 print_memory_map(who);
777 } /* setup_memory_region */
/*
 * Early command-line parsing: handles the options that must be known
 * before the bootmem allocator is up (mem=, maxcpus=, acpi=*, noapic,
 * acpi_sci=*, highmem=). Copies the raw line into saved_command_line
 * for /proc/cmdline and returns the parsed line via *cmdline_p.
 * NOTE(review): the character-copy loop framing and several flag
 * assignments are missing from this view.
 */
780 static void __init parse_cmdline_early (char ** cmdline_p)
782 char c = ' ', *to = command_line, *from = COMMAND_LINE;
786 /* Save unparsed command line copy for /proc/cmdline */
787 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
788 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
794 * "mem=nopentium" disables the 4MB page tables.
795 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
796 * to <mem>, overriding the bios size.
797 * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
798 * <start> to <start>+<mem>, overriding the bios size.
800 if (!memcmp(from, "mem=", 4)) {
801 if (to != command_line)
803 if (!memcmp(from+4, "nopentium", 9)) {
/* Clear PSE now and remember to keep it cleared for secondary CPUs. */
805 clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability);
806 set_bit(X86_FEATURE_PSE, &disabled_x86_caps);
807 } else if (!memcmp(from+4, "exactmap", 8)) {
812 /* If the user specifies memory size, we
813 * limit the BIOS-provided memory map to
814 * that size. exactmap can be used to specify
815 * the exact map. mem=number can be used to
816 * trim the existing memory map.
818 unsigned long long start_at, mem_size;
820 mem_size = memparse(from+4, &from);
/* '@' = RAM, '#' = ACPI data, '$' = reserved region. */
822 start_at = memparse(from+1, &from);
823 add_memory_region(start_at, mem_size, E820_RAM);
824 } else if (*from == '#') {
825 start_at = memparse(from+1, &from);
826 add_memory_region(start_at, mem_size, E820_ACPI);
827 } else if (*from == '$') {
828 start_at = memparse(from+1, &from);
829 add_memory_region(start_at, mem_size, E820_RESERVED);
831 limit_regions(mem_size);
838 * If the BIOS enumerates physical processors before logical,
839 * maxcpus=N at enumeration-time can be used to disable HT.
841 else if (!memcmp(from, "maxcpus=", 8)) {
842 extern unsigned int max_cpus;
844 max_cpus = simple_strtoul(from + 8, NULL, 0);
848 #ifdef CONFIG_ACPI_BOOT
849 /* "acpi=off" disables both ACPI table parsing and interpreter */
850 else if (!memcmp(from, "acpi=off", 8)) {
854 /* acpi=force to over-ride black-list */
855 else if (!memcmp(from, "acpi=force", 10)) {
861 /* Limit ACPI to boot-time only, still enabled HT */
862 else if (!memcmp(from, "acpi=ht", 7)) {
868 /* acpi=strict disables out-of-spec workarounds */
869 else if (!memcmp(from, "acpi=strict", 11)) {
873 else if (!memcmp(from, "pci=noacpi", 10)) {
877 /* disable IO-APIC */
878 else if (!memcmp(from, "noapic", 6))
879 disable_ioapic_setup();
881 else if (!memcmp(from, "acpi_sci=edge", 13))
882 acpi_sci_flags.trigger = 1;
883 else if (!memcmp(from, "acpi_sci=level", 14))
884 acpi_sci_flags.trigger = 3;
885 else if (!memcmp(from, "acpi_sci=high", 13))
886 acpi_sci_flags.polarity = 1;
887 else if (!memcmp(from, "acpi_sci=low", 12))
888 acpi_sci_flags.polarity = 3;
892 * highmem=size forces highmem to be exactly 'size' bytes.
893 * This works even on boxes that have no highmem otherwise.
894 * This also works to reduce highmem size on bigger boxes.
896 else if (!memcmp(from, "highmem=", 8))
897 highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
902 if (COMMAND_LINE_SIZE <= ++len)
907 *cmdline_p = command_line;
909 printk(KERN_INFO "user-defined physical RAM map:\n");
910 print_memory_map("user");
/* Byte address <-> page frame number conversions (round up / down). */
914 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
915 #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
916 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
919 * Reserved space for vmalloc and iomap - defined in asm/page.h
921 #define MAXMEM_PFN PFN_DOWN(MAXMEM)
/* 1M pages * 4K/page = the 4GB limit of non-PAE paging. */
922 #define MAX_NONPAE_PFN (1 << 20)
925 * Find the highest page frame number we have available
/* Scans e820 RAM entries; result is stored in max_pfn (assignment lines
 * are missing from this view — confirm against the full source). */
927 static void __init find_max_pfn(void)
932 for (i = 0; i < e820.nr_map; i++) {
933 unsigned long start, end;
935 if (e820.map[i].type != E820_RAM)
/* Round start up and end down so only whole usable pages count. */
937 start = PFN_UP(e820.map[i].addr);
938 end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
947 * Determine low and high memory ranges:
/*
 * Compute the boundary PFN between directly-mapped low memory and
 * highmem, honoring a user-supplied highmem_pages override and the
 * HIGHMEM/PAE configuration. Warns and clamps when the request cannot
 * be satisfied. NOTE(review): several branch bodies/braces are missing
 * from this view.
 */
949 static unsigned long __init find_max_low_pfn(void)
951 unsigned long max_low_pfn;
953 max_low_pfn = max_pfn;
954 if (max_low_pfn > MAXMEM_PFN) {
/* highmem_pages == -1 means "not specified": default to the excess. */
955 if (highmem_pages == -1)
956 highmem_pages = max_pfn - MAXMEM_PFN;
957 if (highmem_pages + MAXMEM_PFN < max_pfn)
958 max_pfn = MAXMEM_PFN + highmem_pages;
959 if (highmem_pages + MAXMEM_PFN > max_pfn) {
960 printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
963 max_low_pfn = MAXMEM_PFN;
964 #ifndef CONFIG_HIGHMEM
965 /* Maximum memory usable is what is directly addressable */
966 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
968 if (max_pfn > MAX_NONPAE_PFN)
969 printk(KERN_WARNING "Use a PAE enabled kernel.\n");
971 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
972 #else /* !CONFIG_HIGHMEM */
973 #ifndef CONFIG_X86_PAE
/* Without PAE, physical addresses above 4GB are unreachable. */
974 if (max_pfn > MAX_NONPAE_PFN) {
975 max_pfn = MAX_NONPAE_PFN;
976 printk(KERN_WARNING "Warning only 4GB will be used.\n");
977 printk(KERN_WARNING "Use a PAE enabled kernel.\n");
979 #endif /* !CONFIG_X86_PAE */
980 #endif /* !CONFIG_HIGHMEM */
982 if (highmem_pages == -1)
985 if (highmem_pages >= max_pfn) {
986 printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
/* Refuse a highmem carve-out that would leave < 64MB of lowmem. */
990 if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
991 printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
994 max_low_pfn -= highmem_pages;
998 printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
1006 * Register fully available low RAM pages with the bootmem allocator.
1008 static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
1012 for (i = 0; i < e820.nr_map; i++) {
1013 unsigned long curr_pfn, last_pfn, size;
1015 * Reserve usable low memory
/* Only E820_RAM regions are handed to bootmem. */
1017 if (e820.map[i].type != E820_RAM)
1020 * We are rounding up the start address of usable memory:
1022 curr_pfn = PFN_UP(e820.map[i].addr);
1023 if (curr_pfn >= max_low_pfn)
1026 * ... and at the end of the usable range downwards:
1028 last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
/* Clamp to lowmem; highmem pages are handled elsewhere. */
1030 if (last_pfn > max_low_pfn)
1031 last_pfn = max_low_pfn;
1034 * .. finally, did all the rounding and playing
1035 * around just make the area go away?
1037 if (last_pfn <= curr_pfn)
1040 size = last_pfn - curr_pfn;
1041 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
/*
 * Initialize the bootmem allocator over low memory, reserve the pages
 * the kernel already depends on (kernel image + bootmem bitmap, page 0,
 * trampoline page, ACPI sleep area, SMP config, initrd), and return
 * max_low_pfn. NOTE(review): find_max_pfn() call, highend_pfn
 * assignment and several closing braces are missing from this view.
 */
1045 static unsigned long __init setup_memory(void)
1047 unsigned long bootmap_size, start_pfn, max_low_pfn;
1050 * partially used pages are not usable - thus
1051 * we are rounding upwards:
/* First free PFN after the kernel image (_end). */
1053 start_pfn = PFN_UP(__pa(&_end));
1057 max_low_pfn = find_max_low_pfn();
1059 #ifdef CONFIG_HIGHMEM
1060 highstart_pfn = highend_pfn = max_pfn;
1061 if (max_pfn > max_low_pfn) {
1062 highstart_pfn = max_low_pfn;
1064 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
1065 pages_to_mb(highend_pfn - highstart_pfn));
1067 printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
1068 pages_to_mb(max_low_pfn));
1070 * Initialize the boot-time allocator (with low memory only):
1072 bootmap_size = init_bootmem(start_pfn, max_low_pfn);
1074 register_bootmem_low_pages(max_low_pfn);
1077 * Reserve the bootmem bitmap itself as well. We do this in two
1078 * steps (first step was init_bootmem()) because this catches
1079 * the (very unlikely) case of us accidentally initializing the
1080 * bootmem allocator with an invalid RAM area.
/* Reserve from 1MB up through the kernel image plus the bitmap. */
1082 reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
1083 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
1086 * reserve physical page 0 - it's a special BIOS page on many boxes,
1087 * enabling clean reboots, SMP operation, laptop functions.
1089 reserve_bootmem(0, PAGE_SIZE);
1093 * But first pinch a few for the stack/trampoline stuff
1094 * FIXME: Don't need the extra page at 4K, but need to fix
1095 * trampoline before removing it. (see the GDT stuff)
1097 reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
1099 #ifdef CONFIG_ACPI_SLEEP
1101 * Reserve low memory region for sleep support.
1103 acpi_reserve_bootmem();
1105 #ifdef CONFIG_X86_LOCAL_APIC
1107 * Find and reserve possible boot-time SMP configuration.
1111 #ifdef CONFIG_BLK_DEV_INITRD
1112 if (LOADER_TYPE && INITRD_START) {
/* Only accept an initrd that fits entirely in lowmem. */
1113 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
1114 reserve_bootmem(INITRD_START, INITRD_SIZE);
1116 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
1117 initrd_end = initrd_start+INITRD_SIZE;
1120 printk(KERN_ERR "initrd extends beyond end of memory "
1121 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
1122 INITRD_START + INITRD_SIZE,
1123 max_low_pfn << PAGE_SHIFT);
1133 * Request address space for all standard RAM and ROM resources
1134 * and also for regions reported as reserved by the e820.
1136 static void __init register_memory(unsigned long max_low_pfn)
1138 unsigned long low_mem_size;
1141 for (i = 0; i < e820.nr_map; i++) {
1142 struct resource *res;
/* Skip regions above 4GB: struct resource fields can't hold them here. */
1143 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
1145 res = alloc_bootmem_low(sizeof(struct resource));
1146 switch (e820.map[i].type) {
1147 case E820_RAM: res->name = "System RAM"; break;
1148 case E820_ACPI: res->name = "ACPI Tables"; break;
1149 case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
1150 default: res->name = "reserved";
1152 res->start = e820.map[i].addr;
1153 res->end = res->start + e820.map[i].size - 1;
1154 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
1155 request_resource(&iomem_resource, res);
1156 if (e820.map[i].type == E820_RAM) {
1158 * We dont't know which RAM region contains kernel data,
1159 * so we try it repeatedly and let the resource manager
1162 request_resource(res, &code_resource);
1163 request_resource(res, &data_resource);
1166 request_resource(&iomem_resource, &vram_resource);
1168 /* request I/O space for devices used on all i[345]86 PCs */
1169 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
1170 request_resource(&ioport_resource, standard_io_resources+i);
1172 /* Tell the PCI layer not to allocate too close to the RAM area.. */
/* Round end of lowmem up to a 1MB boundary. */
1173 low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
1174 if (low_mem_size > pci_mem_start)
1175 pci_mem_start = low_mem_size;
/*
 * Architecture-specific boot entry: copies boot parameters out of
 * empty_zero_page, builds the memory map, parses the early command
 * line, brings up bootmem, and registers resources and the console.
 * NOTE(review): many lines (CPU identification, paging_init, etc.) are
 * missing from this view — treat as read-only reference.
 */
1178 void __init setup_arch(char **cmdline_p)
1180 unsigned long max_low_pfn;
1183 visws_get_board_type_and_rev();
1186 #ifndef CONFIG_HIGHIO
/* Pull boot-loader-provided data out of the param block first. */
1190 ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
1191 drive_info = DRIVE_INFO;
1192 screen_info = SCREEN_INFO;
1193 apm_info.bios = APM_BIOS_INFO;
1194 if( SYS_DESC_TABLE.length != 0 ) {
1195 MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
1196 machine_id = SYS_DESC_TABLE.table[0];
1197 machine_submodel_id = SYS_DESC_TABLE.table[1];
1198 BIOS_revision = SYS_DESC_TABLE.table[2];
1200 aux_device_present = AUX_DEVICE_INFO;
1202 #ifdef CONFIG_BLK_DEV_RAM
1203 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
1204 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
1205 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
1207 setup_memory_region();
1210 if (!MOUNT_ROOT_RDONLY)
1211 root_mountflags &= ~MS_RDONLY;
/* Record kernel image layout for /proc and the resource tree. */
1212 init_mm.start_code = (unsigned long) &_text;
1213 init_mm.end_code = (unsigned long) &_etext;
1214 init_mm.end_data = (unsigned long) &_edata;
1215 init_mm.brk = (unsigned long) &_end;
1217 code_resource.start = virt_to_bus(&_text);
1218 code_resource.end = virt_to_bus(&_etext)-1;
1219 data_resource.start = virt_to_bus(&_etext);
1220 data_resource.end = virt_to_bus(&_edata)-1;
1222 parse_cmdline_early(cmdline_p);
1224 max_low_pfn = setup_memory();
1227 * NOTE: before this point _nobody_ is allowed to allocate
1228 * any memory using the bootmem allocator.
1232 smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
1239 * Parse the ACPI tables for possible boot-time SMP configuration.
1243 #ifdef CONFIG_X86_LOCAL_APIC
1245 * get boot-time SMP configuration:
1247 if (smp_found_config)
1251 register_memory(max_low_pfn);
1254 #if defined(CONFIG_VGA_CONSOLE)
1255 conswitchp = &vga_con;
1256 #elif defined(CONFIG_DUMMY_CONSOLE)
1257 conswitchp = &dummy_con;
/*
 * "cachesize=" boot parameter: lets the user override the detected
 * L2 cache size (used by display_cacheinfo/init_intel below).
 * -1 means "no override supplied".
 */
1262 static int cachesize_override __initdata = -1;
1263 static int __init cachesize_setup(char *str)
1265 	get_option (&str, &cachesize_override);
1268 __setup("cachesize=", cachesize_setup);
/*
 * "notsc" boot parameter: disables use of the TSC.  Only honoured
 * when the kernel was NOT built with CONFIG_X86_TSC; otherwise the
 * second variant just warns that the request cannot be satisfied.
 */
1271 #ifndef CONFIG_X86_TSC
1272 static int tsc_disable __initdata = 0;
1274 static int __init notsc_setup(char *str)
1280 static int __init notsc_setup(char *str)
1282 	printk("notsc: Kernel compiled with CONFIG_X86_TSC, cannot disable TSC.\n");
1286 __setup("notsc", notsc_setup);
/* "nohighio" boot parameter: turn off HIGHMEM block I/O bouncing */
1288 static int __init highio_setup(char *str)
1290 	printk("i386: disabling HIGHMEM block I/O\n");
1294 __setup("nohighio", highio_setup);
/*
 * get_model_name - fetch the 48-byte CPU marketing/brand string via
 * extended CPUID leaves 0x80000002..0x80000004 into c->x86_model_id.
 * Bails out if the CPU does not report support for leaf 0x80000004.
 * The string is written 16 bytes per leaf, then NUL-terminated and
 * left-justified (Intel parts pad it with leading spaces).
 */
1296 static int __init get_model_name(struct cpuinfo_x86 *c)
1301 	if (cpuid_eax(0x80000000) < 0x80000004)
	/* Treat the 48-char id buffer as 12 dwords: one cpuid leaf fills 4 */
1304 	v = (unsigned int *) c->x86_model_id;
1305 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
1306 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
1307 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
1308 	c->x86_model_id[48] = 0;
1310 	/* Intel chips right-justify this string for some dumb reason;
1311 	   undo that brain damage */
1312 	p = q = &c->x86_model_id[0];
1318 		while ( q <= &c->x86_model_id[48] )
1319 			*q++ = '\0';	/* Zero-pad the rest */
/*
 * display_cacheinfo - report L1/L2 cache geometry from the AMD-style
 * extended CPUID leaves (0x80000005 = L1, 0x80000006 = L2) and record
 * the total in c->x86_cache_size (in KB).  Contains model-specific
 * fixups for early AMD K7 (errata T13) and VIA C3/Nehemiah parts whose
 * reported L2 size is wrong, plus the "cachesize=" user override.
 */
1326 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
1328 	unsigned int n, dummy, ecx, edx, l2size;
1330 	n = cpuid_eax(0x80000000);
1332 	if (n >= 0x80000005) {
		/* ECX describes the L1 D-cache, EDX the L1 I-cache:
		   size in KB in bits 31-24, line size in bits 7-0 */
1333 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
1334 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
1335 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
1336 		c->x86_cache_size=(ecx>>24)+(edx>>24);
1339 	if (n < 0x80000006)	/* Some chips just has a large L1. */
1342 	ecx = cpuid_ecx(0x80000006);
1345 	/* AMD errata T13 (order #21922) */
1346 	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
1347 		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
1349 		if (c->x86_model == 4 &&
1350 			(c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
1354 	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
1355 		/* VIA C3 CPUs (670-68F) need further shifting. */
1356 		if ((c->x86 == 6) &&
1357 			((c->x86_model == 7) || (c->x86_model == 8))) {
1361 		/* VIA also screwed up Nehemiah stepping 1, and made
1362 		   it return '65KB' instead of '64KB'
1363 		   - Note, it seems this may only be in engineering samples. */
1364 		if ((c->x86==6) && (c->x86_model==9) &&
1365 			(c->x86_mask==1) && (l2size==65))
1369 	/* Allow user to override all this if necessary. */
1370 	if (cachesize_override != -1)
1371 		l2size = cachesize_override;
1374 		return;		/* Again, no L2 cache is possible */
1376 	c->x86_cache_size = l2size;
1378 	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
1379 	       l2size, ecx & 0xFF);
1383  *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
1384  *	misexecution of code under Linux. Owners of such processors should
1385  *	contact AMD for precise details and a CPU swap.
1387  *	See	http://www.multimania.com/poulot/k6bug.html
1388  *		http://www.amd.com/K6/k6docs/revgd.html
1390  *	The following test is erm.. interesting. AMD neglected to up
1391  *	the chip setting when fixing the bug but they also tweaked some
1392  *	performance at the same time..
/*
 * vide: a single aligned 'ret' used by init_amd() as the indirect-call
 * target when timing the K6 erratum test below.
 */
1395 extern void vide(void);
1396 __asm__(".align 4\nvide: ret");
/*
 * init_amd - AMD-specific CPU setup: clears the nonstandard 3DNow bit,
 * detects the K6 stepping-B erratum by timing indirect calls to vide(),
 * programs the K6 write-allocation MSR (WHCR, old and new layouts),
 * enables SSE on Palomino/Morgan K7s where the BIOS left it off, and
 * reprograms CLK_CTL per AMD technical note 27212.
 * NOTE(review): several lines (braces, timing loop, returns) are
 * elided in this excerpt.
 */
1398 static int __init init_amd(struct cpuinfo_x86 *c)
	/* Memory size in MB, for sizing the write-allocate range */
1401 	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
1405 	 * FIXME: We should handle the K5 here. Set up the write
1406 	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
1410 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
1411 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
1412 	clear_bit(0*32+31, &c->x86_capability);
1414 	r = get_model_name(c);
1419 			if( c->x86_model < 6 )
1421 			/* Based on AMD doc 20734R - June 2000 */
1422 			if ( c->x86_model == 0 ) {
1423 				clear_bit(X86_FEATURE_APIC, &c->x86_capability);
1424 				set_bit(X86_FEATURE_PGE, &c->x86_capability);
1429 			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
1430 				const int K6_BUG_LOOP = 1000000;
1432 				void (*f_vide)(void);
1433 				unsigned long d, d2;
1435 				printk(KERN_INFO "AMD K6 stepping B detected - ");
1438 				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
1439 				 * calls at the same time.
1450 				/* Knock these two lines out if it debugs out ok */
1451 				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
1452 				printk(KERN_INFO "AMD K6 stepping B detected - ");
1453 				/* -- cut here -- */
				/* Fixed parts run the loop markedly faster */
1454 				if (d > 20*K6_BUG_LOOP)
1455 					printk("system stability may be impaired when more than 32 MB are used.\n");
1457 					printk("probably OK (after B9730xxxx).\n");
1458 				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
1461 			/* K6 with old style WHCR */
1462 			if (c->x86_model < 8 ||
1463 			   (c->x86_model== 8 && c->x86_mask < 8)) {
1464 				/* We can only write allocate on the low 508Mb */
1468 				rdmsr(MSR_K6_WHCR, l, h);
1469 				if ((l&0x0000FFFF)==0) {
1470 					unsigned long flags;
					/* bit 0 = enable, bits 15-1 = range in 4MB units */
1471 					l=(1<<0)|((mbytes/4)<<1);
1472 					local_irq_save(flags);
1474 					wrmsr(MSR_K6_WHCR, l, h);
1475 					local_irq_restore(flags);
1476 					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
1482 			if ((c->x86_model == 8 && c->x86_mask >7) ||
1483 			     c->x86_model == 9 || c->x86_model == 13) {
1484 				/* The more serious chips .. */
1489 				rdmsr(MSR_K6_WHCR, l, h);
1490 				if ((l&0xFFFF0000)==0) {
1491 					unsigned long flags;
					/* new WHCR layout: range in high word, bit 16 = enable */
1492 					l=((mbytes>>2)<<22)|(1<<16);
1493 					local_irq_save(flags);
1495 					wrmsr(MSR_K6_WHCR, l, h);
1496 					local_irq_restore(flags);
1497 					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
1501 				/*  Set MTRR capability flag if appropriate */
1502 				if (c->x86_model == 13 || c->x86_model == 9 ||
1503 				   (c->x86_model == 8 && c->x86_mask >= 8))
1504 					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
1509 		case 6: /* An Athlon/Duron */
1511 			/* Bit 15 of Athlon specific MSR 15, needs to be 0
1512 			 * to enable SSE on Palomino/Morgan CPU's.
1513 			 * If the BIOS didn't enable it already, enable it
1516 			if (c->x86_model >= 6 && c->x86_model <= 10) {
1517 				if (!test_bit(X86_FEATURE_XMM,
1518 					      &c->x86_capability)) {
1520 					       "Enabling Disabled K7/SSE Support...\n");
1521 					rdmsr(MSR_K7_HWCR, l, h);
1523 					wrmsr(MSR_K7_HWCR, l, h);
1524 					set_bit(X86_FEATURE_XMM,
1525 						&c->x86_capability);
1529 			/* It's been determined by AMD that Athlons since model 8 stepping 1
1530 			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
1531 			 * As per AMD technical note 27212 0.2
1533 			if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
1534 				rdmsr(MSR_K7_CLK_CTL, l, h);
1535 				if ((l & 0xfff00000) != 0x20000000) {
1536 					printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
1537 						((l & 0x000fffff)|0x20000000));
1538 					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
1544 	display_cacheinfo(c);
1549  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
/*
 * do_cyrix_devid - probe for the Cyrix DEVID (DIR0/DIR1) registers.
 * DEVID presence is detected by toggling a bit in CCR3 and seeing
 * whether the write sticks; chips without DEVID are further classified
 * via a CCR2 toggle.  Runs with interrupts off since it frobs the
 * chip's configuration register window.  Returns the model byte in
 * *dir0 and the stepping/revision byte in *dir1.
 */
1551 static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
1553 	unsigned char ccr2, ccr3;
1554 	unsigned long flags;
1556 	/* we test for DEVID by checking whether CCR3 is writable */
1557 	local_irq_save(flags);
1558 	ccr3 = getCx86(CX86_CCR3);
1559 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
1560 	getCx86(0xc0);   /* dummy to change bus */
1562 	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
1563 		ccr2 = getCx86(CX86_CCR2);
1564 		setCx86(CX86_CCR2, ccr2 ^ 0x04);
1565 		getCx86(0xc0);  /* dummy */
1567 		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
1569 		else {                          /* Cx486S A step */
1570 			setCx86(CX86_CCR2, ccr2);
1575 		setCx86(CX86_CCR3, ccr3);       /* restore CCR3 */
1577 		/* read DIR0 and DIR1 CPU registers */
1578 		*dir0 = getCx86(CX86_DIR0);
1579 		*dir1 = getCx86(CX86_DIR1);
1581 	local_irq_restore(flags);
1585  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in
1586  * order to identify the Cyrix CPU model after we're out of the
/* Naming tables for init_cyrix(): indexed by fields of DIR0.
 * Cx86_cb is mutated in place ('?' replaced by the clock multiplier
 * digit) before being appended to the model string. */
1589 static unsigned char Cx86_dir0_msb __initdata = 0;
1591 static char Cx86_model[][9] __initdata = {
1592 	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
1595 static char Cx486_name[][5] __initdata = {
1596 	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
1599 static char Cx486S_name[][4] __initdata = {
1600 	"S", "S2", "Se", "S2e"
1602 static char Cx486D_name[][4] __initdata = {
1603 	"DX", "DX2", "?", "?", "?", "DX4"
1605 static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
1606 static char cyrix_model_mult1[] __initdata = "12??43";
1607 static char cyrix_model_mult2[] __initdata = "12233445";
1610  * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
1611  * BIOSes for compatability with DOS games.  This makes the udelay loop
1612  * work correctly, and improves performance.
1614  * FIXME: our newer udelay uses the tsc. We dont need to frob with SLOP
1617 extern void calibrate_delay(void) __init;
/*
 * check_cx686_slop - clear CCR5.SLOP on 6x86(L) parts (Cx86_dir0_msb
 * == 3) under the MAPEN window, and recalibrate the delay loop if SLOP
 * had been set (the earlier calibration would have been wrong).
 */
1619 static void __init check_cx686_slop(struct cpuinfo_x86 *c)
1621 	unsigned long flags;
1623 	if (Cx86_dir0_msb == 3) {
1624 		unsigned char ccr3, ccr5;
1626 		local_irq_save(flags);
1627 		ccr3 = getCx86(CX86_CCR3);
1628 		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
1629 		ccr5 = getCx86(CX86_CCR5);
1631 			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
1632 		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
1633 		local_irq_restore(flags);
1635 		if (ccr5 & 2) { /* possible wrong calibration done */
1636 			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
1638 			c->loops_per_jiffy = loops_per_jiffy;
/*
 * init_cyrix - Cyrix/NSC CPU identification and setup.  Decodes DIR0
 * (family in the high nibble, model/multiplier in the low nibble) and
 * DIR1 (stepping) into a marketing name built in c->x86_model_id, and
 * applies per-family quirks: ARR-as-MTRR emulation, the MediaGX
 * virtual-DMA workaround, 5510/5520 TSC breakage, 6x86MX MMX enable
 * and the "coma bug" flag.  NOTE(review): the switch framing and some
 * case bodies are elided in this excerpt.
 */
1643 static void __init init_cyrix(struct cpuinfo_x86 *c)
1645 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
1646 	char *buf = c->x86_model_id;
1647 	const char *p = NULL;
1649 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
1650 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
1651 	clear_bit(0*32+31, &c->x86_capability);
1653 	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
1654 	if ( test_bit(1*32+24, &c->x86_capability) ) {
1655 		clear_bit(1*32+24, &c->x86_capability);
1656 		set_bit(X86_FEATURE_CXMMX, &c->x86_capability);
1659 	do_cyrix_devid(&dir0, &dir1);
1661 	check_cx686_slop(c);
1663 	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
1664 	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
1666 	/* common case step number/rev -- exceptions handled below */
1667 	c->x86_model = (dir1 >> 4) + 1;
1668 	c->x86_mask = dir1 & 0xf;
1670 	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
1671 	 * We do the same thing for each generation: we work out
1672 	 * the model, multiplier and stepping.  Black magic included,
1673 	 * to make the silicon step/rev numbers match the printed ones.
1679 	case 0: /* Cx486SLC/DLC/SRx/DRx */
1680 		p = Cx486_name[dir0_lsn & 7];
1683 	case 1: /* Cx486S/DX/DX2/DX4 */
1684 		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
1685 			: Cx486S_name[dir0_lsn & 3];
		/* patch the multiplier digit into the "?.5x ..." template */
1689 		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
1693 	case 3: /* 6x86/6x86L */
1695 		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
1696 		if (dir1 > 0x21) { /* 686L */
1702 		/* Emulate MTRRs using Cyrix's ARRs. */
1703 		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
1704 		/* 6x86's contain this bug */
1708 	case 4: /* MediaGX/GXm */
1710 		/* It isnt really a PCI quirk directly, but the cure is the
1711 		   same. The MediaGX has deep magic SMM stuff that handles the
1712 		   SB emulation. It thows away the fifo on disable_dma() which
1713 		   is wrong and ruins the audio.
1715 		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
1716 		   causes bad things. According to NatSemi VSA2 has another
1717 		   bug to do with 'hlt'. I've not seen any boards using VSA2
1718 		   and X doesn't seem to support it either so who cares 8).
1719 		   VSA1 we work around however.
1722 		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
1723 		isa_dma_bridge_buggy = 2;
1725 		c->x86_cache_size=16;	/* Yep 16K integrated cache thats it */
1727 		/* GXm supports extended cpuid levels 'ala' AMD */
1728 		if (c->cpuid_level == 2) {
1729 			get_model_name(c);  /* get CPU marketing name */
1731 			 *  The 5510/5520 companion chips have a funky PIT
1732 			 *  that breaks the TSC synchronizing, so turn it off
1734 			if(pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
1735 			   pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
1736 				clear_bit(X86_FEATURE_TSC, c->x86_capability);
1739 		else {  /* MediaGX */
1740 			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
1742 			c->x86_model = (dir1 & 0x20) ? 1 : 2;
1743 			if(pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
1744 			   pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
1745 				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
1749 	case 5: /* 6x86MX/M II */
1752 			dir0_msn++;  /* M II */
1753 		/* Enable MMX extensions (App note 108) */
1754 		setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
1758 		c->coma_bug = 1;  /* 6x86MX, it has the bug. */
1760 		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
1761 		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
1763 		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
1765 		/* Emulate MTRRs using Cyrix's ARRs. */
1766 		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
1769 	case 0xf:  /* Cyrix 486 without DEVID registers */
1771 		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			/* FPU presence distinguishes DLC (has one) from SLC */
1773 			p = Cx486_name[(c->hard_math) ? 1 : 0];
1776 		case 0xe:  /* a 486S A step */
1783 	default:  /* unknown (shouldn't happen, we know everyone ;-) */
1787 	strcpy(buf, Cx86_model[dir0_msn & 7]);
1788 	if (p) strcat(buf, p);
1792 #ifdef CONFIG_X86_OOSTORE
/* power2 - presumably returns the largest power of two <= x; body is
 * elided in this excerpt — TODO confirm against the full source. */
1794 static u32 __init power2(u32 x)
1803  *	Set up an actual MCR
/*
 * winchip_mcr_insert - program one IDT/Centaur Memory Configuration
 * Register: mask for a power-of-two sized region, attribute bits in
 * the low 12 bits, and report it to the MTRR driver.
 */
1806 static void __init winchip_mcr_insert(int reg, u32 base, u32 size, int key)
1811 	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
1812 	lo &= ~0xFFF;		/* Remove the ctrl value bits */
1813 	lo |= key;		/* Attribute we wish to set */
1814 	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
1815 	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
1819  *	Figure what we can cover with MCR's
1821  *	Shortcut: We know you can't put 4Gig of RAM on a winchip
/*
 * ramtop - walk the e820 map to find the top of usable RAM (below
 * 4GB) for MCR coverage, clipping at the first reserved region at or
 * above 1MB so we never mark non-RAM as write-combining.
 */
1824 static u32 __init ramtop(void)		/* 16388 */
1828 	u32 clip = 0xFFFFFFFFUL;
1830 	for (i = 0; i < e820.nr_map; i++) {
1831 		unsigned long start, end;
1833 		if (e820.map[i].addr > 0xFFFFFFFFUL)
1836 		 *	Don't MCR over reserved space. Ignore the ISA hole
1837 		 *	we frob around that catastrophy already
1840 		if (e820.map[i].type == E820_RESERVED)
1842 			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
1843 				clip = e820.map[i].addr;
1846 		start = e820.map[i].addr;
1847 		end = e820.map[i].addr + e820.map[i].size;
1853 	/* Everything below 'top' should be RAM except for the ISA hole.
1854 	   Because of the limited MCR's we want to map NV/ACPI into our
1855 	   MCR range for gunk in RAM
1857 	   Clip might cause us to MCR insufficient RAM but that is an
1858 	   acceptable failure mode and should only bite obscure boxes with
1861 	   The second case Clip sometimes kicks in is when the EBDA is marked
1862 	   as reserved. Again we fail safe with reasonable results
1872  *	Compute a set of MCR's to give maximum coverage
/*
 * winchip_mcr_compute - greedily choose up to 'nr' power-of-two MCR
 * regions (with attribute 'key') that together cover as much RAM as
 * possible, filling the largest block upward or downward each step and
 * avoiding the sub-1MB ISA hole.  Returns the number of MCRs used.
 * NOTE(review): loop framing and several statements are elided here.
 */
1875 static int __init winchip_mcr_compute(int nr, int key)
1878 	u32 root = power2(mem);
1889 		 *	Find the largest block we will fill going upwards
1892 		u32 high = power2(mem-top);
1895 		 *	Find the largest block we will fill going downwards
1901 		 *	Don't fill below 1Mb going downwards as there
1902 		 *	is an ISA hole in the way.
1905 		if(base <= 1024*1024)
1909 		 *	See how much space we could cover by filling below
1915 		else if(floor ==512*1024)
1918 		/* And forget ROM space */
1921 		 *	Now install the largest coverage we get
1924 		if(fspace > high && fspace > low)
1926 			winchip_mcr_insert(ct, floor, fspace, key);
1931 			winchip_mcr_insert(ct, top, high, key);
1937 			winchip_mcr_insert(ct, base, low, key);
1943 	 *	We loaded ct values. We now need to set the mask. The caller
/*
 * winchip_create_optimal_mcr - WinChip 1: mark as much RAM as possible
 * as full write-combining with weak write ordering (MCR type 31), then
 * zero the unused MCR slots.
 */
1950 static void __init winchip_create_optimal_mcr(void)
1954 	 *	Allocate up to 6 mcrs to mark as much of ram as possible
1955 	 *	as write combining and weak write ordered.
1957 	 *	To experiment with: Linux never uses stack operations for
1958 	 *	mmio spaces so we could globally enable stack operation wc
1960 	 *	Load the registers with type 31 - full write combining, all
1961 	 *	writes weakly ordered.
1963 	int used = winchip_mcr_compute(6, 31);
1970 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
/*
 * winchip2_create_optimal_mcr - WinChip 2 variant: MCR type 25 (write
 * combining + weak read/write ordering), then flag the in-use MCR
 * slots in MSR_IDT_MCR_CTRL and clear the rest.
 */
1973 static void __init winchip2_create_optimal_mcr(void)
1979 	 *	Allocate up to 6 mcrs to mark as much of ram as possible
1980 	 *	as write combining, weak store ordered.
1982 	 *	Load the registers with type 25
1983 	 *		8	-	weak write ordering
1984 	 *		16	-	weak read ordering
1985 	 *		1	-	write combining
1988 	int used = winchip_mcr_compute(6, 25);
1991 	 *	Mark the registers we are using.
1994 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
1997 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2004 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
2008  *	Handle the MCR key on the Winchip 2.
/*
 * The WinChip 2 guards its MCRs with a key in bits 8-6 of MCR_CTRL:
 * unprotect writes the unlock key there, protect clears it again.
 */
2011 static void __init winchip2_unprotect_mcr(void)
2016 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2017 	lo&=~0x1C0;	/* blank bits 8-6 */
2019 	lo |= key<<6;	/* replace with unlock key */
2020 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2023 static void __init winchip2_protect_mcr(void)
2027 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2028 	lo&=~0x1C0;	/* blank bits 8-6 */
2029 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
/*
 * init_c3 - VIA C3 setup: read the Centaur extended feature flags
 * (CPUID leaf 0xC0000001) into capability word 5, and on the Cyrix III
 * family (models 6-8) enable CX8 and PGE reporting via the FCR MSR.
 */
2034 static void __init init_c3(struct cpuinfo_x86 *c)
2038 	/* Test for Centaur Extended Feature Flags presence */
2039 	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
2040 		/* store Centaur Extended Feature Flags as
2041 		 * word 5 of the CPU capability bit array
2043 		c->x86_capability[5] = cpuid_edx(0xC0000001);
2046 	switch (c->x86_model) {
2047 		case 6 ... 8:		/* Cyrix III family */
2048 			rdmsr (MSR_VIA_FCR, lo, hi);
2049 			lo |= (1<<1 | 1<<7);	/* Report CX8 & enable PGE */
2050 			wrmsr (MSR_VIA_FCR, lo, hi);
2052 			set_bit(X86_FEATURE_CX8, c->x86_capability);
2053 			set_bit(X86_FEATURE_3DNOW, c->x86_capability);
2057 		case 9:	/* Nehemiah */
2060 	display_cacheinfo(c);
/*
 * init_centaur - IDT/Centaur WinChip setup: per-model FCR bit
 * programming (set/clear masks built in the elided switch framing),
 * optional MCR write-combining setup under CONFIG_X86_OOSTORE,
 * MCR-as-MTRR reporting, CX8/3DNow capability fixups and L1 cache
 * size from CPUID 0x80000005.  NOTE(review): much of the switch
 * scaffolding and the model-name selection is elided in this excerpt.
 */
2065 static void __init init_centaur(struct cpuinfo_x86 *c)
2094 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
2095 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
2096 	clear_bit(0*32+31, &c->x86_capability);
2101 			switch(c->x86_model) {
2104 				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
2106 				printk(KERN_NOTICE "Disabling bugged TSC.\n");
2107 				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
2108 #ifdef CONFIG_X86_OOSTORE
2109 				winchip_create_optimal_mcr();
2111 				   write combining on non-stack, non-string
2112 				   write combining on string, all types
2115 				   The C6 original lacks weak read order
2117 				   Note 0x120 is write only on Winchip 1 */
2119 				wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
2123 				switch(c->x86_mask) {
2134 				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
2136 #ifdef CONFIG_X86_OOSTORE
				/* MCRs are key-protected on WinChip 2: unlock, program, relock */
2137 				winchip2_unprotect_mcr();
2138 				winchip2_create_optimal_mcr();
2139 				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2141 				   write combining on non-stack, non-string
2142 				   write combining on string, all types
2146 				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2147 				winchip2_protect_mcr();
2152 				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
2154 #ifdef CONFIG_X86_OOSTORE
2155 				winchip2_unprotect_mcr();
2156 				winchip2_create_optimal_mcr();
2157 				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
2159 				   write combining on non-stack, non-string
2160 				   write combining on string, all types
2164 				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
2165 				winchip2_protect_mcr();
2170 				/* no info on the WC4 yet */
			/* Apply the accumulated FCR set/clear masks, only writing
			   back if something actually changed */
2176 			rdmsr(MSR_IDT_FCR1, lo, hi);
2177 			newlo=(lo|fcr_set) & (~fcr_clr);
2180 				printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
2181 				wrmsr(MSR_IDT_FCR1, newlo, hi );
2183 				printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
2185 			/* Emulate MTRRs using Centaur's MCR. */
2186 			set_bit(X86_FEATURE_CENTAUR_MCR, &c->x86_capability);
2188 			set_bit(X86_FEATURE_CX8, &c->x86_capability);
2189 			/* Set 3DNow! on Winchip 2 and above. */
2190 			if (c->x86_model >=8)
2191 				set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
2192 			/* See if we can find out some more. */
2193 			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
2195 				cpuid(0x80000005,&aa,&bb,&cc,&dd);
2196 				/* Add L1 data and code cache sizes. */
2197 				c->x86_cache_size = (cc>>24)+(dd>>24);
2199 			sprintf( c->x86_model_id, "WinChip %s", name );
/*
 * init_transmeta - Transmeta Crusoe setup: prints CPU and Code
 * Morphing Software revisions from the 0x8086000x CPUID leaves,
 * extracts the 64-byte processor information string, temporarily
 * unmasks hidden feature flags via MSR 0x80860004 to capture the real
 * CPUID.1 EDX, and reclassifies family-5 parts that can run i686
 * user code.
 */
2209 static void __init init_transmeta(struct cpuinfo_x86 *c)
2211 	unsigned int cap_mask, uk, max, dummy;
2212 	unsigned int cms_rev1, cms_rev2;
2213 	unsigned int cpu_rev, cpu_freq, cpu_flags;
2216 	get_model_name(c);	/* Same as AMD/Cyrix */
2217 	display_cacheinfo(c);
2219 	/* Print CMS and CPU revision */
2220 	max = cpuid_eax(0x80860000);
2221 	if ( max >= 0x80860001 ) {
2222 		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
2223 		printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
2224 		       (cpu_rev >> 24) & 0xff,
2225 		       (cpu_rev >> 16) & 0xff,
2226 		       (cpu_rev >> 8) & 0xff,
2230 	if ( max >= 0x80860002 ) {
2231 		cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
2232 		printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
2233 		       (cms_rev1 >> 24) & 0xff,
2234 		       (cms_rev1 >> 16) & 0xff,
2235 		       (cms_rev1 >> 8) & 0xff,
	/* Leaves 0x80860006.. fill cpu_info[] 16 bytes per cpuid call
	   (the cpuid(...) wrappers are elided in this excerpt) */
2239 	if ( max >= 0x80860006 ) {
2241 		      (void *)&cpu_info[0],
2242 		      (void *)&cpu_info[4],
2243 		      (void *)&cpu_info[8],
2244 		      (void *)&cpu_info[12]);
2246 		      (void *)&cpu_info[16],
2247 		      (void *)&cpu_info[20],
2248 		      (void *)&cpu_info[24],
2249 		      (void *)&cpu_info[28]);
2251 		      (void *)&cpu_info[32],
2252 		      (void *)&cpu_info[36],
2253 		      (void *)&cpu_info[40],
2254 		      (void *)&cpu_info[44]);
2256 		      (void *)&cpu_info[48],
2257 		      (void *)&cpu_info[52],
2258 		      (void *)&cpu_info[56],
2259 		      (void *)&cpu_info[60]);
2260 		cpu_info[64] = '\0';
2261 		printk(KERN_INFO "CPU: %s\n", cpu_info);
2264 	/* Unhide possibly hidden capability flags */
2265 	rdmsr(0x80860004, cap_mask, uk);
2266 	wrmsr(0x80860004, ~0, uk);
2267 	c->x86_capability[0] = cpuid_edx(0x00000001);
2268 	wrmsr(0x80860004, cap_mask, uk);
2270 	/* If we can run i686 user-space code, call us an i686 */
2271 #define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV)
2272         if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
/*
 * init_rise - Rise iDragon setup.  The mp6 family has no MSRs, so
 * hidden features are unlocked by executing CPUID with magic values
 * loaded into eax/ecx/edx; afterwards CX8 is known to be present.
 */
2277 static void __init init_rise(struct cpuinfo_x86 *c)
2279 	printk("CPU: Rise iDragon");
2280 	if (c->x86_model > 2)
2284 	/* Unhide possibly hidden capability flags
2285 	   The mp6 iDragon family don't have MSRs.
2286 	   We switch on extra features with this cpuid weirdness: */
2288 		"movl $0x6363452a, %%eax\n\t"
2289 		"movl $0x3231206c, %%ecx\n\t"
2290 		"movl $0x2a32313a, %%edx\n\t"
2292 		"movl $0x63634523, %%eax\n\t"
2293 		"movl $0x32315f6c, %%ecx\n\t"
2294 		"movl $0x2333313a, %%edx\n\t"
2295 		"cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
2297 	set_bit(X86_FEATURE_CX8, &c->x86_capability);
2301 extern void trap_init_f00f_bug(void);
/* Cache-level classification codes used by cache_table / init_intel */
2303 #define LVL_1_INST	1
2304 #define LVL_1_DATA	2
2311 	unsigned char descriptor;
2316 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
/* Intel CPUID leaf 2 cache descriptors -> (level, size in KB).
 * Terminated by the elided all-zero sentinel entry; init_intel()
 * scans until descriptor == 0. */
2317 static struct _cache_table cache_table[] __initdata =
2319 	{ 0x06, LVL_1_INST, 8 },
2320 	{ 0x08, LVL_1_INST, 16 },
2321 	{ 0x0A, LVL_1_DATA, 8 },
2322 	{ 0x0C, LVL_1_DATA, 16 },
2323 	{ 0x22, LVL_3,      512 },
2324 	{ 0x23, LVL_3,      1024 },
2325 	{ 0x25, LVL_3,      2048 },
2326 	{ 0x29, LVL_3,      4096 },
2327 	{ 0x2c, LVL_1_DATA, 32 },
2328 	{ 0x30, LVL_1_INST, 32 },
2329 	{ 0x39, LVL_2,      128 },
2330 	{ 0x3b, LVL_2,      128 },
2331 	{ 0x3C, LVL_2,      256 },
2332 	{ 0x41, LVL_2,      128 },
2333 	{ 0x42, LVL_2,      256 },
2334 	{ 0x43, LVL_2,      512 },
2335 	{ 0x44, LVL_2,      1024 },
2336 	{ 0x45, LVL_2,      2048 },
2337 	{ 0x60, LVL_1_DATA, 16 },
2338 	{ 0x66, LVL_1_DATA, 8 },
2339 	{ 0x67, LVL_1_DATA, 16 },
2340 	{ 0x68, LVL_1_DATA, 32 },
2341 	{ 0x70, LVL_TRACE,  12 },
2342 	{ 0x71, LVL_TRACE,  16 },
2343 	{ 0x72, LVL_TRACE,  32 },
2344 	{ 0x79, LVL_2,      128 },
2345 	{ 0x7A, LVL_2,      256 },
2346 	{ 0x7B, LVL_2,      512 },
2347 	{ 0x7C, LVL_2,      1024 },
2348 	{ 0x82, LVL_2,      256 },
2349 	{ 0x83, LVL_2,      512 },
2350 	{ 0x84, LVL_2,      1024 },
2351 	{ 0x85, LVL_2,      2048 },
2352 	{ 0x86, LVL_2,      512 },
2353 	{ 0x87, LVL_2,      1024 },
/*
 * init_intel - Intel-specific setup: installs the F00F-bug workaround
 * on affected Pentiums, decodes CPUID leaf 2 cache descriptors into
 * L1/L2/L3/trace sizes (with Tualatin and "cachesize=" overrides),
 * clears the bogus SEP bit on early PPro steppings, names Celeron/
 * Dixon variants by cache size, and discovers Hyper-Threading sibling
 * counts to derive the physical processor ID.
 */
2357 static void __init init_intel(struct cpuinfo_x86 *c)
2359 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
2361 #ifndef CONFIG_X86_F00F_WORKS_OK
2362 	static int f00f_workaround_enabled = 0;
2365 	 * All current models of Pentium and Pentium with MMX technology CPUs
2366 	 * have the F0 0F bug, which lets nonpriviledged users lock up the system.
2367 	 * Note that the workaround only should be initialized once...
2372 		if (!f00f_workaround_enabled) {
2373 			trap_init_f00f_bug();
2374 			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
2375 			f00f_workaround_enabled = 1;
2378 #endif /* CONFIG_X86_F00F_WORKS_OK */
2380 	if (c->cpuid_level > 1) {
2381 		/* supports eax=2  call */
2384 		unsigned char *dp = (unsigned char *)regs;
2386 		/* Number of times to iterate */
2387 		n = cpuid_eax(2) & 0xFF;
2389 		for ( i = 0 ; i < n ; i++ ) {
			/* NOTE(review): '®s' below looks like an encoding artifact
			   of '&regs' — restore before compiling. */
2390 			cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
2392 			/* If bit 31 is set, this is an unknown format */
2393 			for ( j = 0 ; j < 3 ; j++ ) {
2394 				if ( regs[j] < 0 ) regs[j] = 0;
2397 			/* Byte 0 is level count, not a descriptor */
2398 			for ( j = 1 ; j < 16 ; j++ ) {
2399 				unsigned char des = dp[j];
2400 				unsigned char k = 0;
2402 				/* look up this descriptor in the table */
2403 				while (cache_table[k].descriptor != 0)
2405 					if (cache_table[k].descriptor == des) {
2406 						switch (cache_table[k].cache_type) {
2408 							l1i += cache_table[k].size;
2411 							l1d += cache_table[k].size;
2414 							l2 += cache_table[k].size;
2417 							l3 += cache_table[k].size;
2420 							trace += cache_table[k].size;
2431 		/* Intel PIII Tualatin. This comes in two flavours.
2432 		 * One has 256kb of cache, the other 512. We have no way
2433 		 * to determine which, so we use a boottime override
2434 		 * for the 512kb model, and assume 256 otherwise.
2436 		if ((c->x86 == 6) && (c->x86_model == 11) && (l2 == 0))
2438 		/* Allow user to override all this if necessary. */
2439 		if (cachesize_override != -1)
2440 			l2 = cachesize_override;
2443 			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
2445 			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
2447 			printk(", L1 D cache: %dK\n", l1d);
2452 			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
2454 			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
2457 		 * This assumes the L3 cache is shared; it typically lives in
2458 		 * the northbridge. The L1 caches are included by the L2
2459 		 * cache, and so should not be included for the purpose of
2460 		 * SMP switching weights.
2462 		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
2465 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
2466 	if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
2467 		clear_bit(X86_FEATURE_SEP, &c->x86_capability);
2469 	/* Names for the Pentium II/Celeron processors
2470 	   detectable only by also checking the cache size.
2471 	   Dixon is NOT a Celeron. */
2473 		switch (c->x86_model) {
2476 					p = "Celeron (Covington)";
2478 					p = "Mobile Pentium II (Dixon)";
2483 					p = "Celeron (Mendocino)";
2488 					p = "Celeron (Coppermine)";
2494 		strcpy(c->x86_model_id, p);
2497 	if (test_bit(X86_FEATURE_HT, &c->x86_capability)) {
2498 		extern	int phys_proc_id[NR_CPUS];
2500 		u32 	eax, ebx, ecx, edx;
2501 		int 	index_lsb, index_msb, tmp;
2502 		int	initial_apic_id;
2503 		int 	cpu = smp_processor_id();
2505 		cpuid(1, &eax, &ebx, &ecx, &edx);
		/* CPUID.1 EBX[23:16] = logical processors per package */
2506 		smp_num_siblings = (ebx & 0xff0000) >> 16;
2508 		if (smp_num_siblings == 1) {
2509 			printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
2510 		} else if (smp_num_siblings > 1 ) {
2514 			 * At this point we only support two siblings per
2515 			 * processor package.
2517 #define NR_SIBLINGS	2
2518 			if (smp_num_siblings != NR_SIBLINGS) {
2519 				printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
2520 				smp_num_siblings = 1;
			/* Find lowest and highest set bit of the sibling count */
2523 			tmp = smp_num_siblings;
2524 			while ((tmp & 1) == 0) {
2528 			tmp = smp_num_siblings;
2529 			while ((tmp & 0x80000000 ) == 0) {
2533 			if (index_lsb != index_msb )
			/* Physical package = APIC id with sibling bits shifted out */
2535 			initial_apic_id = ebx >> 24 & 0xff;
2536 			phys_proc_id[cpu] = initial_apic_id >> index_msb;
2538 			printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
/*
 * get_cpu_vendor - map the 12-character CPUID vendor-id string in
 * c->x86_vendor_id to the X86_VENDOR_* enumeration, defaulting to
 * X86_VENDOR_UNKNOWN for anything unrecognised.
 */
2546 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
2548 	char *v = c->x86_vendor_id;
2550 	if (!strcmp(v, "GenuineIntel"))
2551 		c->x86_vendor = X86_VENDOR_INTEL;
2552 	else if (!strcmp(v, "AuthenticAMD"))
2553 		c->x86_vendor = X86_VENDOR_AMD;
2554 	else if (!strcmp(v, "CyrixInstead"))
2555 		c->x86_vendor = X86_VENDOR_CYRIX;
2556 	else if (!strcmp(v, "Geode by NSC"))
2557 		c->x86_vendor = X86_VENDOR_NSC;
2558 	else if (!strcmp(v, "UMC UMC UMC "))
2559 		c->x86_vendor = X86_VENDOR_UMC;
2560 	else if (!strcmp(v, "CentaurHauls"))
2561 		c->x86_vendor = X86_VENDOR_CENTAUR;
2562 	else if (!strcmp(v, "NexGenDriven"))
2563 		c->x86_vendor = X86_VENDOR_NEXGEN;
2564 	else if (!strcmp(v, "RiseRiseRise"))
2565 		c->x86_vendor = X86_VENDOR_RISE;
2566 	else if (!strcmp(v, "GenuineTMx86") ||
2567 		 !strcmp(v, "TransmetaCPU"))
2568 		c->x86_vendor = X86_VENDOR_TRANSMETA;
2569 	else if (!strcmp(v, "SiS SiS SiS "))
2570 		c->x86_vendor = X86_VENDOR_SIS;
2572 		c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Fallback (vendor, family) -> model-name table; only consulted when
 * the init_<vendor>() routines did not set a name themselves. */
2575 struct cpu_model_info {
2578 	char *model_names[16];
2581 /* Naming convention should be: <Name> [(<Codename>)] */
2582 /* This table only is used unless init_<vendor>() below doesn't set it; */
2583 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
2584 static struct cpu_model_info cpu_models[] __initdata = {
2585 	{ X86_VENDOR_INTEL,	4,
2586 	  { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL",
2587 	    "486 SX/2", NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL,
2588 	    NULL, NULL, NULL, NULL, NULL }},
2589 	{ X86_VENDOR_INTEL,	5,
2590 	  { "Pentium 60/66 A-step", "Pentium 60/66", "Pentium 75 - 200",
2591 	    "OverDrive PODP5V83", "Pentium MMX", NULL, NULL,
2592 	    "Mobile Pentium 75 - 200", "Mobile Pentium MMX", NULL, NULL, NULL,
2593 	    NULL, NULL, NULL, NULL }},
2594 	{ X86_VENDOR_INTEL,	6,
2595 	  { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
2596 	    NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
2597 	    "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
2598 	    "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
2599 	{ X86_VENDOR_AMD,	4,
2600 	  { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
2601 	    "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
2603 	{ X86_VENDOR_AMD,	5, /* Is this this really necessary?? */
2605 	    "K5", "K5", NULL, NULL,
2607 	    "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
2608 	{ X86_VENDOR_AMD,	6, /* Is this this really necessary?? */
2609 	  { "Athlon", "Athlon",
2610 	    "Athlon", NULL, "Athlon", NULL,
2612 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
2613 	{ X86_VENDOR_UMC,	4,
2614 	  { NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2615 	    NULL, NULL, NULL, NULL, NULL, NULL }},
2616 	{ X86_VENDOR_NEXGEN,	5,
2617 	  { "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2618 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
2619 	{ X86_VENDOR_RISE,	5,
2620 	  { "iDragon", NULL, "iDragon", NULL, NULL, NULL, NULL,
2621 	    NULL, "iDragon II", "iDragon II", NULL, NULL, NULL, NULL, NULL, NULL }},
2622 	{ X86_VENDOR_SIS,	5,
2623 	  { NULL, NULL, NULL, NULL, "SiS55x", NULL, NULL,
2624 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
2627 /* Look up CPU names by table lookup. */
/*
 * table_lookup_model - linear scan of cpu_models[] for a (vendor,
 * family) match; returns the model-name string for c->x86_model, or
 * NULL when the model index is out of range or no entry matches.
 */
2628 static char __init *table_lookup_model(struct cpuinfo_x86 *c)
2630 	struct cpu_model_info *info = cpu_models;
2633 	if ( c->x86_model >= 16 )
2634 		return NULL;	/* Range check */
2636 	for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
2637 		if ( info->vendor == c->x86_vendor &&
2638 		     info->family == c->x86 ) {
2639 			return info->model_names[c->x86_model];
2643 	return NULL;		/* Not found */
2647  *	Detect a NexGen CPU running without BIOS hypercode new enough
2648  *	to have CPUID. (Thanks to Herbert Oppmann)
/* Inline-asm probe; most of the instruction sequence and the return
 * are elided in this excerpt. */
2651 static int __init deep_magic_nexgen_probe(void)
2655 	__asm__ __volatile__ (
2656 		"	movw	$0x5555, %%ax\n"
2664 		: "=a" (ret) : : "cx", "dx" );
/*
 * Disable the CPUID processor serial number (PSN) when the CPU
 * advertises it and the user has not asked to keep it (see the
 * "serialnumber" boot option).  Done through MSR_IA32_BBL_CR_CTL;
 * the PN capability bit is then cleared and the cpuid level re-read,
 * since hiding the serial number can change the maximum CPUID level.
 */
2668 static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
2670 if( test_bit(X86_FEATURE_PN, &c->x86_capability) &&
2671 disable_x86_serial_nr ) {
2672 /* Disable processor serial number */
2673 unsigned long lo,hi;
2674 rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
2676 wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
2677 printk(KERN_NOTICE "CPU serial number disabled.\n")
2678 clear_bit(X86_FEATURE_PN, &c->x86_capability);
2680 /* Disabling the serial number may affect the cpuid level */
2681 c->cpuid_level = cpuid_eax(0);
/*
 * "serialnumber" boot option: clear disable_x86_serial_nr so the
 * processor serial number is left enabled (the default is to squash it).
 */
2686 static int __init x86_serial_nr_setup(char *s)
2688 disable_x86_serial_nr = 0;
2691 __setup("serialnumber", x86_serial_nr_setup);
/*
 * "nofxsr" boot option: mark FXSR and SSE (XMM) as disabled.  The
 * bits are masked out of x86_capability later, in identify_cpu()'s
 * disabled_x86_caps pass.
 */
2693 static int __init x86_fxsr_setup(char * s)
2695 set_bit(X86_FEATURE_XMM, disabled_x86_caps);
2696 set_bit(X86_FEATURE_FXSR, disabled_x86_caps);
2699 __setup("nofxsr", x86_fxsr_setup);
2702 /* Standard macro to see if a specific flag is changeable */
/*
 * Returns nonzero when the given EFLAGS bit can be toggled, i.e.
 * the flag reads back differently (f1 vs f2) after trying to flip it.
 * Used below to probe for 486-class CPUs (AC flag) and CPUID (ID flag).
 */
2703 static inline int flag_is_changeable_p(u32 flag)
2717 : "=&r" (f1), "=&r" (f2)
2720 return ((f1^f2) & flag) != 0;
2724 /* Probe for the CPUID instruction */
/* CPUID exists iff the EFLAGS ID bit is toggleable. */
2725 static int __init have_cpuid_p(void)
2727 return flag_is_changeable_p(X86_EFLAGS_ID);
2731 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
2732 * by the fact that they preserve the flags across the division of 5/2.
2733 * PII and PPro exhibit this behavior too, but they have cpuid available.
2737 * Perform the Cyrix 5/2 test. A Cyrix won't change
2738 * the flags, while other 486 chips will.
2740 static inline int test_cyrix_52div(void)
2744 __asm__ __volatile__(
2745 "sahf\n\t" /* clear flags (%eax = 0x0005) */
2746 "div %b2\n\t" /* divide 5 by 2 */
2747 "lahf" /* store flags into %ah */
/* Nonzero (Cyrix) when AH still reads 0x02 after the divide. */
2752 /* AH is 0x02 on Cyrix after the divide.. */
2753 return (unsigned char) (test >> 8) == 0x02;
2756 /* Try to detect a CPU with disabled CPUID, and if so, enable. This routine
2757 may also be used to detect non-CPUID processors and fill in some of
2758 the information manually. */
/*
 * Returns the result of have_cpuid_p() after the enable attempts, so
 * nonzero means CPUID is now usable.  Fills in vendor info manually
 * for Cyrix and NexGen parts that lack (or hide) CPUID.
 */
2759 static int __init id_and_try_enable_cpuid(struct cpuinfo_x86 *c)
2761 /* First of all, decide if this is a 486 or higher */
2762 /* It's a 486 if we can modify the AC flag */
2763 if ( flag_is_changeable_p(X86_EFLAGS_AC) )
2768 /* Detect Cyrix with disabled CPUID */
2769 if ( c->x86 == 4 && test_cyrix_52div() ) {
2770 unsigned char dir0, dir1;
2772 strcpy(c->x86_vendor_id, "CyrixInstead");
2773 c->x86_vendor = X86_VENDOR_CYRIX;
2775 /* Actually enable cpuid on the older cyrix */
2777 /* Retrieve CPU revisions */
2779 do_cyrix_devid(&dir0, &dir1);
2783 /* Check it is an affected model */
2785 if (dir0 == 5 || dir0 == 3)
2787 unsigned char ccr3, ccr4;
2788 unsigned long flags;
2789 printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
/* The CCR register dance must not be interrupted; IRQs off. */
2790 local_irq_save(flags);
2791 ccr3 = getCx86(CX86_CCR3);
2792 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
2793 ccr4 = getCx86(CX86_CCR4);
2794 setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
2795 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
2796 local_irq_restore(flags);
2800 /* Detect NexGen with old hypercode */
2801 if ( deep_magic_nexgen_probe() ) {
2802 strcpy(c->x86_vendor_id, "NexGenDriven");
2805 return have_cpuid_p(); /* Check to see if CPUID now enabled? */
2809 * This does the hard work of actually picking apart the CPU stuff...
/*
 * identify_cpu - fill in *c: vendor, family/model/stepping, model name
 * and the x86_capability words, using CPUID when present and manual
 * probing otherwise.  On secondary CPUs the detected features are
 * ANDed into boot_cpu_data so only the common subset stays advertised.
 */
2811 void __init identify_cpu(struct cpuinfo_x86 *c)
/* Reset everything to "unknown" defaults; the code below refines them. */
2816 c->loops_per_jiffy = loops_per_jiffy;
2817 c->x86_cache_size = -1;
2818 c->x86_vendor = X86_VENDOR_UNKNOWN;
2819 c->cpuid_level = -1; /* CPUID not detected */
2820 c->x86_model = c->x86_mask = 0; /* So far unknown... */
2821 c->x86_vendor_id[0] = '\0'; /* Unset */
2822 c->x86_model_id[0] = '\0'; /* Unset */
2823 memset(&c->x86_capability, 0, sizeof c->x86_capability);
2825 if ( !have_cpuid_p() && !id_and_try_enable_cpuid(c) ) {
2826 /* CPU doesn't have CPUID */
2828 /* If there are any capabilities, they're vendor-specific */
2829 /* enable_cpuid() would have set c->x86 for us. */
2831 /* CPU does have CPUID */
2833 /* Get vendor name */
2834 cpuid(0x00000000, &c->cpuid_level,
2835 (int *)&c->x86_vendor_id[0],
2836 (int *)&c->x86_vendor_id[8],
2837 (int *)&c->x86_vendor_id[4]);
2840 /* Initialize the standard set of capabilities */
2841 /* Note that the vendor-specific code below might override */
2843 /* Intel-defined flags: level 0x00000001 */
2844 if ( c->cpuid_level >= 0x00000001 ) {
2845 u32 capability, excap;
2846 cpuid(0x00000001, &tfms, &junk, &excap, &capability);
2847 c->x86_capability[0] = capability;
2848 c->x86_capability[4] = excap;
2849 c->x86 = (tfms >> 8) & 15;
2850 c->x86_model = (tfms >> 4) & 15;
/* Family 0xf parts carry extra family/model bits in the extended fields. */
2851 if (c->x86 == 0xf) {
2852 c->x86 += (tfms >> 20) & 0xff;
2853 c->x86_model += ((tfms >> 16) & 0xF) << 4;
2855 c->x86_mask = tfms & 15;
2857 /* Have CPUID level 0 only - unheard of */
2861 /* AMD-defined flags: level 0x80000001 */
/* Sanity-check the extended level before trusting it. */
2862 xlvl = cpuid_eax(0x80000000);
2863 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
2864 if ( xlvl >= 0x80000001 )
2865 c->x86_capability[1] = cpuid_edx(0x80000001);
2866 if ( xlvl >= 0x80000004 )
2867 get_model_name(c); /* Default name */
2870 /* Transmeta-defined flags: level 0x80860001 */
2871 xlvl = cpuid_eax(0x80860000);
2872 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
2873 if ( xlvl >= 0x80860001 )
2874 c->x86_capability[2] = cpuid_edx(0x80860001);
2879 * Vendor-specific initialization. In this section we
2880 * canonicalize the feature flags, meaning if there are
2881 * features a certain CPU supports which CPUID doesn't
2882 * tell us, CPUID claiming incorrect flags, or other bugs,
2883 * we handle them here.
2885 * At the end of this section, c->x86_capability better
2886 * indicate the features this CPU genuinely supports!
2888 switch ( c->x86_vendor ) {
2889 case X86_VENDOR_UNKNOWN:
2891 /* Not much we can do here... */
2892 /* Check if at least it has cpuid */
2893 if (c->cpuid_level == -1)
2895 /* No cpuid. It must be an ancient CPU */
2897 strcpy(c->x86_model_id, "486");
2898 else if (c->x86 == 3)
2899 strcpy(c->x86_model_id, "386");
2903 case X86_VENDOR_CYRIX:
2907 case X86_VENDOR_NSC:
2911 case X86_VENDOR_AMD:
2915 case X86_VENDOR_CENTAUR:
2919 case X86_VENDOR_INTEL:
2923 case X86_VENDOR_NEXGEN:
2924 c->x86_cache_size = 256; /* A few had 1 MB... */
2927 case X86_VENDOR_TRANSMETA:
2931 case X86_VENDOR_RISE:
2937 * The vendor-specific functions might have changed features. Now
2938 * we do "generic changes."
2942 #ifndef CONFIG_X86_TSC
2944 clear_bit(X86_FEATURE_TSC, &c->x86_capability);
2947 /* check for caps that have been disabled earlier */
/* e.g. by the "nofxsr" boot option (see x86_fxsr_setup above). */
2948 for (i = 0; i < NCAPINTS; i++) {
2949 c->x86_capability[i] &= ~disabled_x86_caps[i];
2952 /* Disable the PN if appropriate */
2953 squash_the_stupid_serial_number(c);
2955 /* Init Machine Check Exception if available. */
2958 /* If the model name is still unset, do table lookup. */
2959 if ( !c->x86_model_id[0] ) {
2961 p = table_lookup_model(c);
2963 strcpy(c->x86_model_id, p);
2965 /* Last resort... */
2966 sprintf(c->x86_model_id, "%02x/%02x",
2967 c->x86_vendor, c->x86_model);
2970 /* Now the feature flags better reflect actual CPU features! */
2972 printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
2973 c->x86_capability[0],
2974 c->x86_capability[1],
2975 c->x86_capability[2],
2976 c->x86_capability[3]);
2979 * On SMP, boot_cpu_data holds the common feature set between
2980 * all CPUs; so make sure that we indicate which features are
2981 * common between the CPUs. The first time this routine gets
2982 * executed, c == &boot_cpu_data.
2984 if ( c != &boot_cpu_data ) {
2985 /* AND the already accumulated flags with these */
2986 for ( i = 0 ; i < NCAPINTS ; i++ )
2987 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
2990 printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n",
2991 boot_cpu_data.x86_capability[0],
2992 boot_cpu_data.x86_capability[1],
2993 boot_cpu_data.x86_capability[2],
2994 boot_cpu_data.x86_capability[3]);
2997 * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
3000 void __init dodgy_tsc(void)
/*
 * Only the vendor is probed here - full identify_cpu() has not run
 * yet.  Cyrix/NSC parts get their vendor-specific init run early so
 * the TSC code sees them in a sane state.
 */
3002 get_cpu_vendor(&boot_cpu_data);
3004 if ( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ||
3005 boot_cpu_data.x86_vendor == X86_VENDOR_NSC )
3006 init_cyrix(&boot_cpu_data);
3010 /* These need to match <asm/processor.h> */
/* Indexed by the X86_VENDOR_* constants; keep the order in sync. */
3011 static char *cpu_vendor_names[] __initdata = {
3012 "Intel", "Cyrix", "AMD", "UMC", "NexGen",
3013 "Centaur", "Rise", "Transmeta", "NSC"
/* Print a one-line boot-time description of the CPU in *c. */
3017 void __init print_cpu_info(struct cpuinfo_x86 *c)
3019 char *vendor = NULL;
/* Prefer our pretty vendor name; fall back to the raw CPUID string. */
3021 if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
3022 vendor = cpu_vendor_names[c->x86_vendor];
3023 else if (c->cpuid_level >= 0)
3024 vendor = c->x86_vendor_id;
/* Skip the vendor prefix when the model name already starts with it. */
3026 if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
3027 printk("%s ", vendor);
3029 if (!c->x86_model_id[0])
3030 printk("%d86", c->x86);
3032 printk("%s", c->x86_model_id);
3034 if (c->x86_mask || c->cpuid_level >= 0)
3035 printk(" stepping %02x\n", c->x86_mask);
3041 * Get CPU information for use by the procfs.
/*
 * show_cpuinfo - emit one /proc/cpuinfo record for the cpu_data[]
 * entry `v'.  Offline CPUs produce no output.
 */
3043 static int show_cpuinfo(struct seq_file *m, void *v)
3046 * These flag bits must match the definitions in <asm/cpufeature.h>.
3047 * NULL means this bit is undefined or reserved; either way it doesn't
3048 * have meaning as far as Linux is concerned. Note that it's important
3049 * to realize there is a difference between this table and CPUID -- if
3050 * applications want to get the raw CPUID data, they should access
3051 * /dev/cpu/<cpu_nr>/cpuid instead.
3053 static char *x86_cap_flags[] = {
3055 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
3056 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
3057 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
3058 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
3061 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3062 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
3063 NULL, NULL, NULL, "mp", NULL, NULL, "mmxext", NULL,
3064 NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
3066 /* Transmeta-defined */
3067 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
3068 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3069 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3070 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3072 /* Other (Linux-defined) */
3073 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
3074 NULL, NULL, NULL, NULL,
3075 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3076 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3077 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3079 /* Intel-defined (#2) */
3080 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
3081 "est", NULL, "cid", NULL, NULL, NULL, NULL, NULL,
3082 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3083 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3085 /* VIA/Cyrix/Centaur-defined */
3086 NULL, NULL, "xstore", NULL, NULL, NULL, NULL, NULL,
3087 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3088 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3089 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3091 struct cpuinfo_x86 *c = v;
/* n is this entry's index within cpu_data[]. */
3092 int i, n = c - cpu_data;
3096 if (!(cpu_online_map & (1<<n)))
3099 seq_printf(m, "processor\t: %d\n"
3101 "cpu family\t: %d\n"
3103 "model name\t: %s\n",
3105 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
3108 c->x86_model_id[0] ? c->x86_model_id : "unknown");
3110 if (c->x86_mask || c->cpuid_level >= 0)
3111 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
3113 seq_printf(m, "stepping\t: unknown\n");
/* Report the clock only when a TSC exists to have measured cpu_khz. */
3115 if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
3116 seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
3117 cpu_khz / 1000, (cpu_khz % 1000));
3121 if (c->x86_cache_size >= 0)
3122 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
3124 /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
3125 fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
3126 seq_printf(m, "fdiv_bug\t: %s\n"
3131 "fpu_exception\t: %s\n"
3132 "cpuid level\t: %d\n"
3135 c->fdiv_bug ? "yes" : "no",
/* hlt_bug is the inverse of hlt_works_ok, hence the swapped strings. */
3136 c->hlt_works_ok ? "no" : "yes",
3137 c->f00f_bug ? "yes" : "no",
3138 c->coma_bug ? "yes" : "no",
3139 c->hard_math ? "yes" : "no",
3140 fpu_exception ? "yes" : "no",
3142 c->wp_works_ok ? "yes" : "no");
/* Print only the capability bits that have a name in the table above. */
3144 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
3145 if ( test_bit(i, &c->x86_capability) &&
3146 x86_cap_flags[i] != NULL )
3147 seq_printf(m, " %s", x86_cap_flags[i]);
3149 seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
3150 c->loops_per_jiffy/(500000/HZ),
3151 (c->loops_per_jiffy/(5000/HZ)) % 100);
/* seq_file iterator: one position per cpu_data[] slot. */
3155 static void *c_start(struct seq_file *m, loff_t *pos)
3157 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* Advance to the next slot; c_start() re-validates the new *pos. */
3159 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
3162 return c_start(m, pos);
/* Nothing to release per iteration. */
3164 static void c_stop(struct seq_file *m, void *v)
/* seq_file operations backing /proc/cpuinfo. */
3167 struct seq_operations cpuinfo_op = {
/* Bitmap of CPUs that have completed cpu_init(). */
3174 unsigned long cpu_initialized __initdata = 0;
3177 * cpu_init() initializes state that is per-CPU. Some data is already
3178 * initialized (naturally) in the bootstrap process, such as the GDT
3179 * and IDT. We reload them nevertheless, this function acts as a
3180 * 'CPU state barrier', nothing should get across.
3182 void __init cpu_init (void)
3184 int nr = smp_processor_id();
3185 struct tss_struct * t = &init_tss[nr];
/* Guard against double initialization of the same CPU. */
3187 if (test_and_set_bit(nr, &cpu_initialized)) {
3188 printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
3191 printk(KERN_INFO "Initializing CPU#%d\n", nr);
3193 if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
3194 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
3195 #ifndef CONFIG_X86_TSC
3196 if (tsc_disable && cpu_has_tsc) {
3197 printk(KERN_NOTICE "Disabling TSC...\n");
3198 /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
3199 clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
3200 set_in_cr4(X86_CR4_TSD);
/* Reload descriptor tables even though the boot path set them up. */
3204 __asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
3205 __asm__ __volatile__("lidt %0": "=m" (idt_descr));
/* Clear EFLAGS.NT (bit 14; 0xffffbfff == ~0x4000). */
3210 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
3213 * set up and load the per-CPU TSS and LDT
/* Pin init_mm; this CPU starts out running lazily on the kernel mm. */
3215 atomic_inc(&init_mm.mm_count);
3216 current->active_mm = &init_mm;
3219 enter_lazy_tlb(&init_mm, current, nr);
3221 t->esp0 = current->thread.esp0;
/* NOTE(review): mask 0xfffffdff appears to clear the TSS descriptor's
   busy bit so the selector can be (re)loaded - confirm vs GDT layout. */
3223 gdt_table[__TSS(nr)].b &= 0xfffffdff;
3225 load_LDT(&init_mm.context);
3228 * Clear all 6 debug registers:
3231 #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
3233 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
3238 * Force FPU initialization:
3240 current->flags &= ~PF_USEDFPU;
3241 current->used_math = 0;
3246 * Early probe support logic for ppro memory erratum #50
3248 * This is called before we do cpu ident work
3251 int __init ppro_with_ram_bug(void)
3256 /* Must have CPUID */
3264 (int *)&vendor_id[0],
3265 (int *)&vendor_id[8],
3266 (int *)&vendor_id[4]);
3268 if(memcmp(vendor_id, "IntelInside", 12))
3271 ident = cpuid_eax(1);
3275 if(((ident>>8)&15)!=6)
3280 if(((ident>>4)&15)!=1)
3285 printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
3288 printk(KERN_INFO "Your Pentium Pro seems ok.\n");
3295 * c-file-style:"k&r"