/*
 * linux/arch/x86-64/kernel/setup.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Nov 2001 Dave Jones <davej@suse.de>
 * Forked from i386 setup code.
 *
 * This file handles the architecture-dependent parts of initialization
 */
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/ptrace.h>
21 #include <linux/slab.h>
22 #include <linux/user.h>
23 #include <linux/a.out.h>
24 #include <linux/tty.h>
25 #include <linux/ioport.h>
26 #include <linux/delay.h>
27 #include <linux/config.h>
28 #include <linux/init.h>
29 #include <linux/acpi.h>
30 #include <linux/blk.h>
31 #include <linux/highmem.h>
32 #include <linux/bootmem.h>
33 #include <linux/module.h>
34 #include <asm/processor.h>
35 #include <linux/console.h>
36 #include <linux/seq_file.h>
38 #include <asm/uaccess.h>
39 #include <asm/system.h>
46 #include <asm/mpspec.h>
47 #include <asm/mmu_context.h>
48 #include <asm/bootsetup.h>
49 #include <asm/proto.h>
/*
 * File-scope state for x86-64 boot-time setup.
 *
 * NOTE(review): this chunk is a lossy extraction — each line begins with a
 * residual original-source line number, and the skips in that numbering show
 * that lines (closing braces, initializer bodies) are missing. Code is left
 * byte-identical; only comments are added.
 */
52 EXPORT_SYMBOL(acpi_disabled);
55 EXPORT_SYMBOL(swiotlb);
/* Physical package id per logical CPU; filled in by HT detection below. */
57 extern int phys_proc_id[NR_CPUS];
/* Boot CPU data; initializer truncated in this extract. */
63 struct cpuinfo_x86 boot_cpu_data = {
67 unsigned long mmu_cr4_features;
68 EXPORT_SYMBOL(mmu_cr4_features);
70 /* For PCI or other memory-mapped resources */
71 unsigned long pci_mem_start = 0x10000000;
/* Legacy BIOS-provided data copied out of the zero page in setup_arch(). */
76 struct drive_info_struct { char dummy[32]; } drive_info;
77 struct screen_info screen_info;
78 struct sys_desc_table_struct {
79 unsigned short length;
80 unsigned char table[0];
85 unsigned char aux_device_present;
87 extern int root_mountflags;
/* Linker-provided kernel image section boundary symbols. */
88 extern char _text, _etext, _edata, _end;
90 char command_line[COMMAND_LINE_SIZE];
91 char saved_command_line[COMMAND_LINE_SIZE];
/* Legacy ISA I/O ports claimed unconditionally on every PC. */
93 struct resource standard_io_resources[] = {
94 { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
95 { "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
96 { "timer0", 0x40, 0x43, IORESOURCE_BUSY },
97 { "timer1", 0x50, 0x53, IORESOURCE_BUSY },
98 { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
99 { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
100 { "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
101 { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
102 { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
105 #define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
/* Kernel text/data iomem resources; start/end filled in by setup_arch(). */
107 struct resource code_resource = { "Kernel code", 0x100000, 0 };
108 struct resource data_resource = { "Kernel data", 0, 0 };
109 struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
121 static void __init probe_roms(void)
125 unsigned char *romstart;
127 request_resource(&iomem_resource, rom_resources+0);
129 /* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
130 for (base = 0xC0000; base < 0xE0000; base += 2048) {
131 romstart = bus_to_virt(base);
132 if (!romsignature(romstart))
134 request_resource(&iomem_resource, rom_resources + roms);
139 /* Extension roms at C800:0000 - DFFF:0000 */
140 for (base = 0xC8000; base < 0xE0000; base += 2048) {
141 unsigned long length;
143 romstart = bus_to_virt(base);
144 if (!romsignature(romstart))
146 length = romstart[2] * 512;
149 unsigned char chksum;
152 for (i = 0; i < length; i++)
153 chksum += romstart[i];
157 rom_resources[roms].start = base;
158 rom_resources[roms].end = base + length - 1;
159 rom_resources[roms].name = "Extension ROM";
160 rom_resources[roms].flags = IORESOURCE_BUSY;
162 request_resource(&iomem_resource, rom_resources + roms);
170 /* Final check for motherboard extension rom at E000:0000 */
172 romstart = bus_to_virt(base);
174 if (romsignature(romstart)) {
175 rom_resources[roms].start = base;
176 rom_resources[roms].end = base + 65535;
177 rom_resources[roms].name = "Extension ROM";
178 rom_resources[roms].flags = IORESOURCE_BUSY;
180 request_resource(&iomem_resource, rom_resources + roms);
/* First/last page frame of usable RAM, set up by e820 parsing. */
184 unsigned long start_pfn, end_pfn;
/* Page-table pages allocated by init_memory_mapping(). */
185 extern unsigned long table_start, table_end;
187 #ifndef CONFIG_DISCONTIGMEM
/*
 * Set up the bootmem allocator for a single contiguous memory node:
 * find room for the bootmem bitmap via e820, free all e820-usable RAM
 * into it, then reserve the bitmap itself.
 * NOTE(review): the `if` guarding the panic and the closing brace are
 * missing from this extract.
 */
188 static void __init contig_initmem_init(void)
190 unsigned long bootmap_size, bootmap;
191 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
192 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
194 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
195 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
196 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
197 reserve_bootmem(bootmap, bootmap_size);
/*
 * Architecture-specific boot-time initialization: copy boot parameters
 * out of the zero page, set up memory (e820, bootmem, reservations),
 * probe SMP/ACPI configuration and claim standard resources.
 * NOTE(review): many lines (#endif's, closing braces, several calls)
 * are missing from this extract; code left byte-identical.
 */
201 void __init setup_arch(char **cmdline_p)
204 unsigned long kernel_end;
/* Copy BIOS/bootloader-provided data out of the boot parameter page. */
206 ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
207 drive_info = DRIVE_INFO;
208 screen_info = SCREEN_INFO;
209 aux_device_present = AUX_DEVICE_INFO;
211 #ifdef CONFIG_BLK_DEV_RAM
212 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
213 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
214 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
216 setup_memory_region();
218 if (!MOUNT_ROOT_RDONLY)
219 root_mountflags &= ~MS_RDONLY;
/* Record kernel image layout in init_mm from linker symbols. */
220 init_mm.start_code = (unsigned long) &_text;
221 init_mm.end_code = (unsigned long) &_etext;
222 init_mm.end_data = (unsigned long) &_edata;
223 init_mm.brk = (unsigned long) &_end;
225 code_resource.start = virt_to_bus(&_text);
226 code_resource.end = virt_to_bus(&_etext)-1;
227 data_resource.start = virt_to_bus(&_etext);
228 data_resource.end = virt_to_bus(&_edata)-1;
230 parse_mem_cmdline(cmdline_p);
234 init_memory_mapping();
238 #ifdef CONFIG_BLK_DEV_INITRD
/* Accept the initrd only if it lies inside mapped RAM. */
239 if (LOADER_TYPE && INITRD_START) {
240 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
242 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
243 initrd_end = initrd_start+INITRD_SIZE;
246 printk(KERN_ERR "initrd extends beyond end of memory "
247 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
248 (unsigned long)INITRD_START + INITRD_SIZE,
249 (unsigned long)(end_pfn << PAGE_SHIFT));
255 #ifdef CONFIG_DISCONTIGMEM
256 numa_initmem_init(0, end_pfn);
258 contig_initmem_init();
261 /* Reserve direct mapping */
262 reserve_bootmem_generic(table_start << PAGE_SHIFT,
263 (table_end - table_start) << PAGE_SHIFT);
265 #ifdef CONFIG_BLK_DEV_INITRD
267 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
270 /* Reserve BIOS data page. Some things still need it */
271 reserve_bootmem_generic(0, PAGE_SIZE);
275 * But first pinch a few for the stack/trampoline stuff
276 * FIXME: Don't need the extra page at 4K, but need to fix
277 * trampoline before removing it. (see the GDT stuff)
279 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
281 /* Reserve SMP trampoline */
282 reserve_bootmem_generic(0x6000, PAGE_SIZE);
/* Reserve the kernel image itself (from HIGH_MEMORY up to _end). */
285 kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
286 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
288 #ifdef CONFIG_ACPI_SLEEP
290 * Reserve low memory region for sleep support.
292 acpi_reserve_bootmem();
294 #ifdef CONFIG_X86_LOCAL_APIC
296 * Find and reserve possible boot-time SMP configuration:
302 /* AP processor realmode stacks in low memory*/
307 #if defined(CONFIG_X86_IO_APIC)
308 extern void check_ioapic(void);
312 #ifdef CONFIG_ACPI_BOOT
314 * Parse the ACPI tables for possible boot-time SMP configuration.
318 #ifdef CONFIG_X86_LOCAL_APIC
320 * get boot-time SMP configuration:
322 if (smp_found_config)
324 init_apic_mappings();
328 * Request address space for all standard RAM and ROM resources
329 * and also for regions reported as reserved by the e820.
332 e820_reserve_resources();
333 request_resource(&iomem_resource, &vram_resource);
335 /* request I/O space for devices used on all i[345]86 PCs */
336 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
337 request_resource(&ioport_resource, standard_io_resources+i);
339 /* We put PCI memory up to make sure VALID_PAGE with DISCONTIGMEM
340 never returns true for it */
342 /* Tell the PCI layer not to allocate too close to the RAM area.. */
343 pci_mem_start = IOMAP_START;
345 #ifdef CONFIG_GART_IOMMU
/* Fall back to swiotlb when there is no AGP aperture and RAM > 4GB. */
348 #ifdef CONFIG_SWIOTLB
349 if (!iommu_aperture && end_pfn >= 0xffffffff>>PAGE_SHIFT) {
356 #if defined(CONFIG_VGA_CONSOLE)
357 conswitchp = &vga_con;
358 #elif defined(CONFIG_DUMMY_CONSOLE)
359 conswitchp = &dummy_con;
363 num_mappedpages = end_pfn;
/*
 * Read the 48-byte CPU brand string from extended CPUID leaves
 * 0x80000002-0x80000004 into c->x86_model_id; returns nonzero on
 * success (return statements missing from this extract).
 */
366 static int __init get_model_name(struct cpuinfo_x86 *c)
/* Brand string only exists if max extended leaf reaches 0x80000004. */
370 if (cpuid_eax(0x80000000) < 0x80000004)
/* Each leaf fills 16 bytes (4 registers) of the id buffer. */
373 v = (unsigned int *) c->x86_model_id;
374 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
375 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
376 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
/* Ensure NUL termination regardless of what the CPU returned. */
377 c->x86_model_id[48] = 0;
/*
 * Print AMD-style L1/L2 cache information from extended CPUID leaves
 * 0x80000005/0x80000006 and record cache/TLB sizes and address widths
 * in *c.  NOTE(review): several lines (#endif-style closers, braces)
 * are missing from this extract.
 */
382 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
384 unsigned int n, dummy, ecx, edx, eax, ebx, eax_2, ebx_2, ecx_2;
386 n = cpuid_eax(0x80000000);
388 if (n >= 0x80000005) {
/* Leaf 0x80000006: L2 cache/TLB info (the *_2 registers). */
390 cpuid(0x80000006, &eax_2, &ebx_2, &ecx_2, &dummy);
/* Leaf 0x80000005: L1 cache/TLB info. */
392 cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
393 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line/%d way), D cache %dK (%d bytes/line/%d way)\n",
394 edx>>24, edx&0xFF, (edx>>16)&0xff,
395 ecx>>24, ecx&0xFF, (ecx>>16)&0xff);
396 c->x86_cache_size=(ecx>>24)+(edx>>24);
397 if (n >= 0x80000006) {
398 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line/%d way)\n",
399 ecx_2>>16, ecx_2&0xFF,
400 /* use bits[15:13] as power of 2 for # of ways */
/* NOTE(review): this uses `ecx` (the L1 leaf's register) while the
 * surrounding L2 fields use `ecx_2` — presumably `ecx_2` was intended
 * here; verify against the AMD CPUID specification. */
401 1 << ((ecx>>13) & 0x7)
402 /* Direct and Full associative L2 are very unlikely */);
403 c->x86_cache_size = ecx_2 >> 16;
/* Sum of L1 and L2 4K data+instruction TLB entry counts. */
404 c->x86_tlbsize = ((ebx>>16)&0xff) + ((ebx_2>>16)&0xfff) +
405 (ebx&0xff) + ((ebx_2)&0xfff);
/* Leaf 0x80000007: advanced power management feature flags. */
408 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
409 if (n >= 0x80000008) {
/* Leaf 0x80000008: physical (bits 7:0) / virtual (15:8) address widths. */
410 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
411 c->x86_virt_bits = (eax >> 8) & 0xff;
412 c->x86_phys_bits = eax & 0xff;
/* Fields of struct _cache_table (struct header missing from extract):
 * descriptor = CPUID leaf-2 descriptor byte, plus a cache level tag and
 * a size in KB. */
425 unsigned char descriptor;
430 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
/* Maps Intel CPUID leaf-2 descriptor bytes to cache level and size (KB);
 * terminated by a 0 descriptor (terminator entry missing from extract). */
431 static struct _cache_table cache_table[] __initdata =
433 { 0x06, LVL_1_INST, 8 },
434 { 0x08, LVL_1_INST, 16 },
435 { 0x0A, LVL_1_DATA, 8 },
436 { 0x0C, LVL_1_DATA, 16 },
437 { 0x22, LVL_3, 512 },
438 { 0x23, LVL_3, 1024 },
439 { 0x25, LVL_3, 2048 },
440 { 0x29, LVL_3, 4096 },
441 { 0x39, LVL_2, 128 },
442 { 0x3C, LVL_2, 256 },
443 { 0x41, LVL_2, 128 },
444 { 0x42, LVL_2, 256 },
445 { 0x43, LVL_2, 512 },
446 { 0x44, LVL_2, 1024 },
447 { 0x45, LVL_2, 2048 },
448 { 0x66, LVL_1_DATA, 8 },
449 { 0x67, LVL_1_DATA, 16 },
450 { 0x68, LVL_1_DATA, 32 },
/* Trace-cache sizes are in K-uops, not KB. */
451 { 0x70, LVL_TRACE, 12 },
452 { 0x71, LVL_TRACE, 16 },
453 { 0x72, LVL_TRACE, 32 },
454 { 0x79, LVL_2, 128 },
455 { 0x7A, LVL_2, 256 },
456 { 0x7B, LVL_2, 512 },
457 { 0x7C, LVL_2, 1024 },
458 { 0x82, LVL_2, 256 },
459 { 0x83, LVL_2, 512 },
460 { 0x84, LVL_2, 1024 },
461 { 0x85, LVL_2, 2048 },
465 int select_idle_routine(struct cpuinfo_x86 *c);
/*
 * Intel-specific CPU setup: decode CPUID leaf 2 cache descriptors via
 * cache_table[], print cache sizes, set c->x86_cache_size, copy the
 * model name, and detect Hyper-Threading siblings / physical package id.
 * NOTE(review): heavily gapped extract — many braces, `break`s and the
 * #ifdef CONFIG_SMP region boundaries are missing.
 */
467 static void __init init_intel(struct cpuinfo_x86 *c)
469 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
476 select_idle_routine(c);
477 if (c->cpuid_level > 1) {
478 /* supports eax=2 call */
481 unsigned char *dp = (unsigned char *)regs;
483 /* Number of times to iterate */
/* Low byte of leaf-2 EAX says how many times leaf 2 must be queried. */
484 n = cpuid_eax(2) & 0xFF;
486 for ( i = 0 ; i < n ; i++ ) {
/* NOTE(review): `®s` is a mis-encoding of `&regs` (extraction damage). */
487 cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
489 /* If bit 31 is set, this is an unknown format */
490 for ( j = 0 ; j < 3 ; j++ ) {
/* regs[] is signed, so bit 31 set reads as negative. */
491 if ( regs[j] < 0 ) regs[j] = 0;
494 /* Byte 0 is level count, not a descriptor */
495 for ( j = 1 ; j < 16 ; j++ ) {
496 unsigned char des = dp[j];
499 /* look up this descriptor in the table */
500 while (cache_table[k].descriptor != 0)
502 if (cache_table[k].descriptor == des) {
503 switch (cache_table[k].cache_type) {
505 l1i += cache_table[k].size;
508 l1d += cache_table[k].size;
511 l2 += cache_table[k].size;
514 l3 += cache_table[k].size;
517 trace += cache_table[k].size;
529 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
531 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
533 printk(", L1 D cache: %dK\n", l1d);
537 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
539 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
542 * This assumes the L3 cache is shared; it typically lives in
543 * the northbridge. The L1 caches are included by the L2
544 * cache, and so should not be included for the purpose of
545 * SMP switching weights.
547 c->x86_cache_size = l2 ? l2 : (l1i+l1d);
551 strcpy(c->x86_model_id, p);
/* Hyper-Threading detection: sibling count is in CPUID(1) EBX[23:16]. */
554 if (test_bit(X86_FEATURE_HT, &c->x86_capability)) {
555 int index_lsb, index_msb, tmp;
557 int cpu = smp_processor_id();
560 cpuid(1, &eax, &ebx, &ecx, &edx);
561 smp_num_siblings = (ebx & 0xff0000) >> 16;
563 if (smp_num_siblings == 1) {
564 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
565 } else if (smp_num_siblings > 1 ) {
569 * At this point we only support two siblings per
572 #define NR_SIBLINGS 2
573 if (smp_num_siblings != NR_SIBLINGS) {
574 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
575 smp_num_siblings = 1;
/* Find lowest set bit index of the sibling count... */
578 tmp = smp_num_siblings;
579 while ((tmp & 1) == 0) {
/* ...and the highest set bit index (loop bodies missing in extract). */
583 tmp = smp_num_siblings;
584 while ((tmp & 0x80000000 ) == 0) {
588 if (index_lsb != index_msb )
/* Physical package id = initial APIC id shifted past the sibling bits. */
590 initial_apic_id = ebx >> 24 & 0xff;
591 phys_proc_id[cpu] = initial_apic_id >> index_msb;
593 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
/* Record address widths from extended leaf 0x80000008 if available. */
600 n = cpuid_eax(0x80000000);
601 if (n >= 0x80000008) {
602 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
603 c->x86_virt_bits = (eax >> 8) & 0xff;
604 c->x86_phys_bits = eax & 0xff;
/*
 * AMD-specific CPU setup: clear the nonstandard 3DNow bit, fetch the
 * brand string (falling back to "Hammer"), and print cache info.
 * Return-value lines are missing from this extract.
 */
609 static int __init init_amd(struct cpuinfo_x86 *c)
613 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
614 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
615 clear_bit(0*32+31, &c->x86_capability);
617 r = get_model_name(c);
621 /* Should distingush Models here, but this is only
622 a fallback anyways. */
623 strcpy(c->x86_model_id, "Hammer");
627 display_cacheinfo(c);
632 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
634 char *v = c->x86_vendor_id;
636 if (!strcmp(v, "AuthenticAMD"))
637 c->x86_vendor = X86_VENDOR_AMD;
638 else if (!strcmp(v, "GenuineIntel"))
639 c->x86_vendor = X86_VENDOR_INTEL;
641 c->x86_vendor = X86_VENDOR_UNKNOWN;
/* Per-family model-name lookup table; the vendor/family member lines
 * are missing from this extract. */
644 struct cpu_model_info {
/* One human-readable name per 4-bit model number. */
647 char *model_names[16];
651 * This does the hard work of actually picking apart the CPU stuff...
/*
 * Fill in *c from raw CPUID data: vendor string, family/model/stepping,
 * capability words, then dispatch to vendor-specific init and fold this
 * CPU's features into boot_cpu_data's common set.
 * NOTE(review): extraction gaps — braces, the vendor switch's AMD case
 * and several closing lines are missing.
 */
653 void __init identify_cpu(struct cpuinfo_x86 *c)
658 c->loops_per_jiffy = loops_per_jiffy;
659 c->x86_cache_size = -1;
660 c->x86_vendor = X86_VENDOR_UNKNOWN;
661 c->x86_model = c->x86_mask = 0; /* So far unknown... */
662 c->x86_vendor_id[0] = '\0'; /* Unset */
663 c->x86_model_id[0] = '\0'; /* Unset */
664 memset(&c->x86_capability, 0, sizeof c->x86_capability);
666 /* Get vendor name */
/* Leaf 0 returns the vendor string in EBX,EDX,ECX order — hence the
 * interleaved destination offsets 0, 8, 4. */
667 cpuid(0x00000000, &c->cpuid_level,
668 (int *)&c->x86_vendor_id[0],
669 (int *)&c->x86_vendor_id[8],
670 (int *)&c->x86_vendor_id[4]);
673 /* Initialize the standard set of capabilities */
674 /* Note that the vendor-specific code below might override */
676 /* Intel-defined flags: level 0x00000001 */
677 if ( c->cpuid_level >= 0x00000001 ) {
679 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
680 &c->x86_capability[0]);
681 c->x86 = (tfms >> 8) & 15;
682 c->x86_model = (tfms >> 4) & 15;
/* Family 0xf encodes extended family/model in higher tfms bits. */
683 if (c->x86 == 0xf) { /* extended */
684 c->x86 += (tfms >> 20) & 0xff;
685 c->x86_model += ((tfms >> 16) & 0xF) << 4;
687 c->x86_mask = tfms & 15;
/* Bit 19 = CLFLUSH supported; misc[15:8] is line size in 8-byte units. */
688 if (c->x86_capability[0] & (1<<19))
689 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
691 /* Have CPUID level 0 only - unheard of */
695 /* AMD-defined flags: level 0x80000001 */
696 xlvl = cpuid_eax(0x80000000);
697 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
698 if ( xlvl >= 0x80000001 )
699 c->x86_capability[1] = cpuid_edx(0x80000001);
700 if ( xlvl >= 0x80000004 )
701 get_model_name(c); /* Default name */
704 /* Transmeta-defined flags: level 0x80860001 */
705 xlvl = cpuid_eax(0x80860000);
706 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
707 if ( xlvl >= 0x80860001 )
708 c->x86_capability[2] = cpuid_edx(0x80860001);
713 * Vendor-specific initialization. In this section we
714 * canonicalize the feature flags, meaning if there are
715 * features a certain CPU supports which CPUID doesn't
716 * tell us, CPUID claiming incorrect flags, or other bugs,
717 * we handle them here.
719 * At the end of this section, c->x86_capability better
720 * indicate the features this CPU genuinely supports!
722 switch ( c->x86_vendor ) {
728 case X86_VENDOR_INTEL:
731 case X86_VENDOR_UNKNOWN:
733 display_cacheinfo(c);
738 * The vendor-specific functions might have changed features. Now
739 * we do "generic changes."
743 * On SMP, boot_cpu_data holds the common feature set between
744 * all CPUs; so make sure that we indicate which features are
745 * common between the CPUs. The first time this routine gets
746 * executed, c == &boot_cpu_data.
748 if ( c != &boot_cpu_data ) {
749 /* AND the already accumulated flags with these */
750 for ( i = 0 ; i < NCAPINTS ; i++ )
751 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
/*
 * Print the CPU model string and stepping to the console at boot.
 * NOTE(review): the else-branches (fallback when no model id / unknown
 * stepping) are missing from this extract.
 */
759 void __init print_cpu_info(struct cpuinfo_x86 *c)
761 if (c->x86_model_id[0])
762 printk("%s", c->x86_model_id);
764 if (c->x86_mask || c->cpuid_level >= 0)
765 printk(" stepping %02x\n", c->x86_mask);
771 * Get CPU information for use by the procfs.
/*
 * seq_file show() callback for /proc/cpuinfo: formats one cpuinfo_x86
 * record (vendor, model, caches, flags, bogomips, power management).
 * NOTE(review): gapped extract — several seq_printf format strings and
 * closing braces are missing.
 */
774 static int show_cpuinfo(struct seq_file *m, void *v)
776 struct cpuinfo_x86 *c = v;
779 * These flag bits must match the definitions in <asm/cpufeature.h>.
780 * NULL means this bit is undefined or reserved; either way it doesn't
781 * have meaning as far as Linux is concerned. Note that it's important
782 * to realize there is a difference between this table and CPUID -- if
783 * applications want to get the raw CPUID data, they should access
784 * /dev/cpu/<cpu_nr>/cpuid instead.
786 static char *x86_cap_flags[] = {
/* Intel-defined flags, CPUID leaf 1 EDX (capability word 0). */
788 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
789 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
790 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
791 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
/* AMD-defined flags, CPUID leaf 0x80000001 EDX (word 1). */
794 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
795 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
796 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
797 NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
799 /* Transmeta-defined */
800 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
803 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
805 /* Other (Linux-defined) */
806 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
807 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
808 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
809 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
811 /* Intel Defined (cpuid 1 and ecx) */
812 "pni", NULL, NULL, "monitor", "ds-cpl", NULL, NULL, "est",
813 "tm2", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
814 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
815 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Names for c->x86_power bits (CPUID 0x80000007 EDX). */
817 static char *x86_power_flags[] = {
818 "ts", /* temperature sensor */
819 "fid", /* frequency id control */
820 "vid", /* voltage id control */
821 "ttp", /* thermal trip */
/* Skip CPUs that are not online (return statement missing in extract). */
825 if (!(cpu_online_map & (1<<(c-cpu_data))))
829 seq_printf(m,"processor\t: %u\n"
833 "model name\t: %s\n",
834 (unsigned)(c-cpu_data),
835 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
838 c->x86_model_id[0] ? c->x86_model_id : "unknown");
840 if (c->x86_mask || c->cpuid_level >= 0)
841 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
843 seq_printf(m, "stepping\t: unknown\n");
845 if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
846 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
847 cpu_khz / 1000, (cpu_khz % 1000));
850 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
853 seq_printf(m, "physical id\t: %d\n",phys_proc_id[c - cpu_data]);
854 seq_printf(m, "siblings\t: %d\n",smp_num_siblings);
859 "fpu_exception\t: yes\n"
860 "cpuid level\t: %d\n"
/* Emit the name of every set capability bit that has one. */
867 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
868 if ( test_bit(i, &c->x86_capability) &&
869 x86_cap_flags[i] != NULL )
870 seq_printf(m, " %s", x86_cap_flags[i]);
873 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
874 c->loops_per_jiffy/(500000/HZ),
875 (c->loops_per_jiffy/(5000/HZ)) % 100);
877 if (c->x86_tlbsize > 0)
878 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
879 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
881 if (c->x86_phys_bits > 0)
882 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
883 c->x86_phys_bits, c->x86_virt_bits);
885 seq_printf(m, "power management:");
/* Known power flags by name, unknown set bits as numeric [i]. */
888 for (i = 0; i < 32; i++)
889 if (c->x86_power & (1 << i)) {
890 if (i < ARRAY_SIZE(x86_power_flags))
891 seq_printf(m, " %s", x86_power_flags[i]);
893 seq_printf(m, " [%d]", i);
897 seq_printf(m, "\n\n");
/* seq_file iterator: position *pos indexes directly into cpu_data[]. */
901 static void *c_start(struct seq_file *m, loff_t *pos)
903 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* Advance to the next CPU record.
 * NOTE(review): the line incrementing *pos is missing from this
 * extract — presumably `++*pos;` preceded the return; verify. */
906 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
909 return c_start(m, pos);
/* Nothing to release; body intentionally empty (braces missing in extract). */
912 static void c_stop(struct seq_file *m, void *v)
916 struct seq_operations cpuinfo_op = {