2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 #include <linux/init.h>
27 #include <linux/acpi.h>
28 #include <linux/efi.h>
29 #include <linux/module.h>
30 #include <linux/dmi.h>
31 #include <linux/irq.h>
33 #include <asm/pgtable.h>
34 #include <asm/io_apic.h>
37 #include <asm/mpspec.h>
/* Non-zero when "acpi=force" on the command line overrides the DMI blacklist
 * (see parse_acpi/dmi_disable_acpi below). */
39 int __initdata acpi_force = 0;
/* NOTE(review): extract is missing lines here — these externs are presumably
 * inside an #ifdef arch section not visible in this chunk. */
43 extern void __init clustered_apic_check(void);
45 extern int gsi_irq_sharing(int gsi);
46 #include <asm/proto.h>
/* Stub: no OEM-specific MADT quirk handling on this build; always reports "ok". */
48 static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
53 #ifdef CONFIG_X86_LOCAL_APIC
54 #include <mach_apic.h>
55 #include <mach_mpparse.h>
56 #endif /* CONFIG_X86_LOCAL_APIC */
/* Stub: GSI-to-IRQ sharing not implemented here — identity mapping.
 * NOTE(review): an extern gsi_irq_sharing() also appears above; presumably the
 * two live on opposite sides of an #ifdef dropped from this extract. */
58 static inline int gsi_irq_sharing(int gsi) { return gsi; }
/* Reject a MADT sub-table entry that is NULL, runs past the table end, or
 * reports a length smaller than its own structure. */
62 #define BAD_MADT_ENTRY(entry, end) ( \
63 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
64 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
66 #define PREFIX "ACPI: "
68 int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
69 int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
70 int acpi_ht __initdata = 1; /* enable HT */
75 EXPORT_SYMBOL(acpi_strict);
77 acpi_interrupt_flags acpi_sci_flags __initdata;
78 int acpi_sci_override_gsi __initdata;
79 int acpi_skip_timer_override __initdata;
81 #ifdef CONFIG_X86_LOCAL_APIC
/* Local APIC physical base; may be overridden by MADT / LAPIC_ADDR_OVR below. */
82 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
85 #ifndef __HAVE_ARCH_CMPXCHG
86 #warning ACPI uses CMPXCHG, i486 and later hardware
/* ACPI processor-id -> local APIC id map; 0xff marks "no APIC known". */
89 #define MAX_MADT_ENTRIES 256
90 u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
91 {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
92 EXPORT_SYMBOL(x86_acpiid_to_apicid)
94 /* --------------------------------------------------------------------------
95 Boot-time Configuration
96 -------------------------------------------------------------------------- */
99 * The default interrupt routing model is PIC (8259). This gets
100 * overridden if IOAPICs are enumerated (below).
102 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
106 /* rely on all ACPI tables being in the direct mapping */
/* x86_64 variant: returns the direct-mapped virtual address when the table
 * fits below the end of the mapped physical range.
 * NOTE(review): the failure-path return (NULL) is missing from this extract. */
107 char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
109 if (!phys_addr || !size)
112 if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
113 return __va(phys_addr);
121 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
122 * to map the target physical address. The problem is that set_fixmap()
123 * provides a single page, and it is possible that the page is not
125 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
126 * i.e. until the next __va_range() call.
128 * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
129 * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
130 * count idx down while incrementing the phys address.
/* i386 variant: map [phys, phys+size) via fixmap slots FIX_ACPI_END..FIX_ACPI_BEGIN.
 * NOTE(review): the declaration/initialisation of 'idx' and some early-return
 * lines are missing from this extract. */
132 char *__acpi_map_table(unsigned long phys, unsigned long size)
134 unsigned long base, offset, mapped_size;
/* below 8 MB the kernel direct mapping is assumed usable (path truncated here) */
137 if (phys + size < 8 * 1024 * 1024)
140 offset = phys & (PAGE_SIZE - 1);
141 mapped_size = PAGE_SIZE - offset;
142 set_fixmap(FIX_ACPI_END, phys);
143 base = fix_to_virt(FIX_ACPI_END);
146 * Most cases can be covered by the below.
/* map additional pages downward through the fixmap range until size is covered */
149 while (mapped_size < size) {
150 if (--idx < FIX_ACPI_BEGIN)
151 return NULL; /* cannot handle this */
153 set_fixmap(idx, phys);
154 mapped_size += PAGE_SIZE;
157 return ((unsigned char *)base + offset);
161 #ifdef CONFIG_PCI_MMCONFIG
162 /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
163 struct acpi_table_mcfg_config *pci_mmcfg_config;
164 int pci_mmcfg_config_num;
/* Parse the MCFG table: count config entries, copy them into a kmalloc'd
 * array, and reject apertures above 4GB (base_reserved set).
 * NOTE(review): declarations of 'i'/'config_size', several returns and closing
 * braces are missing from this extract. */
166 int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
168 struct acpi_table_mcfg *mcfg;
172 if (!phys_addr || !size)
175 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
177 printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
181 /* how many config structures do we have */
182 pci_mmcfg_config_num = 0;
183 i = size - sizeof(struct acpi_table_mcfg);
184 while (i >= sizeof(struct acpi_table_mcfg_config)) {
185 ++pci_mmcfg_config_num;
186 i -= sizeof(struct acpi_table_mcfg_config);
188 if (pci_mmcfg_config_num == 0) {
189 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
193 config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
194 pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
195 if (!pci_mmcfg_config) {
196 printk(KERN_WARNING PREFIX
197 "No memory for MCFG config tables\n");
201 memcpy(pci_mmcfg_config, &mcfg->config, config_size);
202 for (i = 0; i < pci_mmcfg_config_num; ++i) {
203 if (mcfg->config[i].base_reserved) {
204 printk(KERN_ERR PREFIX
205 "MMCONFIG not in low 4GB of memory\n");
206 kfree(pci_mmcfg_config);
207 pci_mmcfg_config_num = 0;
214 #endif /* CONFIG_PCI_MMCONFIG */
216 #ifdef CONFIG_X86_LOCAL_APIC
/* Map the MADT, record its 32-bit local APIC address (may later be
 * overridden by a 64-bit LAPIC_ADDR_OVR entry) and run the OEM quirk check. */
217 static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
219 struct acpi_table_madt *madt = NULL;
221 if (!phys_addr || !size || !cpu_has_apic)
224 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
226 printk(KERN_WARNING PREFIX "Unable to map MADT\n")
230 if (madt->lapic_address) {
231 acpi_lapic_addr = (u64) madt->lapic_address;
233 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
234 madt->lapic_address);
237 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
/* MADT LAPIC entry handler: record the ACPI-id -> APIC-id mapping for
 * enabled CPUs and register every (even disabled) local APIC with mpparse.
 * NOTE(review): the function's return type line is missing from this extract. */
243 acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
245 struct acpi_table_lapic *processor = NULL;
247 processor = (struct acpi_table_lapic *)header;
249 if (BAD_MADT_ENTRY(processor, end))
252 acpi_table_print_madt_entry(header);
254 /* Record local apic id only when enabled */
255 if (processor->flags.enabled)
256 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
259 * We need to register disabled CPU as well to permit
260 * counting disabled CPUs. This allows us to size
261 * cpus_possible_map more accurately, to permit
262 * to not preallocating memory for all NR_CPUS
263 * when we use CPU hotplug.
265 mp_register_lapic(processor->id, /* APIC ID */
266 processor->flags.enabled); /* Enabled? */
/* MADT LAPIC_ADDR_OVR handler: replace the 32-bit MADT LAPIC address with
 * the 64-bit override value. */
272 acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
273 const unsigned long end)
275 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
277 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
279 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
282 acpi_lapic_addr = lapic_addr_ovr->address;
/* MADT LAPIC_NMI handler: warn if the NMI is wired to a LINT pin other than 1. */
288 acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
290 struct acpi_table_lapic_nmi *lapic_nmi = NULL;
292 lapic_nmi = (struct acpi_table_lapic_nmi *)header;
294 if (BAD_MADT_ENTRY(lapic_nmi, end))
297 acpi_table_print_madt_entry(header);
299 if (lapic_nmi->lint != 1)
300 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
305 #endif /*CONFIG_X86_LOCAL_APIC */
307 #ifdef CONFIG_X86_IO_APIC
/* MADT IOAPIC entry handler: register the I/O APIC (id, MMIO address,
 * starting GSI) with the MP-table layer. */
310 acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
312 struct acpi_table_ioapic *ioapic = NULL;
314 ioapic = (struct acpi_table_ioapic *)header;
316 if (BAD_MADT_ENTRY(ioapic, end))
319 acpi_table_print_madt_entry(header);
321 mp_register_ioapic(ioapic->id,
322 ioapic->address, ioapic->global_irq_base);
328 * Parse Interrupt Source Override for the ACPI SCI
/* Apply SCI defaults (level/low per ACPI spec when flags say "conforms"),
 * honour any acpi_sci= command-line override, install the routing via
 * mp_override_legacy_irq(), and remember the GSI in acpi_sci_override_gsi. */
330 static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
332 if (trigger == 0) /* compatible SCI trigger is level */
335 if (polarity == 0) /* compatible SCI polarity is low */
338 /* Command-line over-ride via acpi_sci= */
339 if (acpi_sci_flags.trigger)
340 trigger = acpi_sci_flags.trigger;
342 if (acpi_sci_flags.polarity)
343 polarity = acpi_sci_flags.polarity;
346 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
347 * If GSI is < 16, this will update its flags,
348 * else it will create a new mp_irqs[] entry.
350 mp_override_legacy_irq(gsi, polarity, trigger, gsi);
353 * stash over-ride to indicate we've been here
354 * and for later update of acpi_fadt
356 acpi_sci_override_gsi = gsi;
/* MADT INT_SRC_OVR handler: the SCI override goes through
 * acpi_sci_ioapic_setup(); an IRQ0->GSI2 override may be skipped via
 * acpi_skip_timer_override; everything else becomes a legacy-IRQ override. */
361 acpi_parse_int_src_ovr(acpi_table_entry_header * header,
362 const unsigned long end)
364 struct acpi_table_int_src_ovr *intsrc = NULL;
366 intsrc = (struct acpi_table_int_src_ovr *)header;
368 if (BAD_MADT_ENTRY(intsrc, end))
371 acpi_table_print_madt_entry(header);
373 if (intsrc->bus_irq == acpi_fadt.sci_int) {
374 acpi_sci_ioapic_setup(intsrc->global_irq,
375 intsrc->flags.polarity,
376 intsrc->flags.trigger);
380 if (acpi_skip_timer_override &&
381 intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
382 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
386 mp_override_legacy_irq(intsrc->bus_irq,
387 intsrc->flags.polarity,
388 intsrc->flags.trigger, intsrc->global_irq);
/* MADT NMI_SRC handler: validated and printed only; no routing is programmed. */
394 acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
396 struct acpi_table_nmi_src *nmi_src = NULL;
398 nmi_src = (struct acpi_table_nmi_src *)header;
400 if (BAD_MADT_ENTRY(nmi_src, end))
403 acpi_table_print_madt_entry(header);
405 /* TBD: Support nimsrc entries? */
410 #endif /* CONFIG_X86_IO_APIC */
413 * acpi_pic_sci_set_trigger()
415 * use ELCR to set PIC-mode trigger type for SCI
417 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
418 * it may require Edge Trigger -- use "acpi_sci=edge"
420 * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
421 * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
422 * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
423 * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
/* NOTE(review): the lines that set/clear 'mask' in 'new' inside the switch,
 * the early-out when new==old, and the outb to 0x4d0 are missing from this
 * extract — trigger encoding is 1=edge, 3=level (matching acpi_sci_flags). */
426 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
428 unsigned int mask = 1 << irq;
429 unsigned int old, new;
431 /* Real old ELCR mask */
432 old = inb(0x4d0) | (inb(0x4d1) << 8);
435 * If we use ACPI to set PCI irq's, then we should clear ELCR
436 * since we will set it correctly as we enable the PCI irq
439 new = acpi_noirq ? old : 0;
442 * Update SCI information in the ELCR, it isn't in the PCI
446 case 1: /* Edge - clear */
449 case 3: /* Level - set */
457 printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
459 outb(new >> 8, 0x4d1);
/* Translate a GSI to a kernel IRQ number; uses the PCI vector when IO-APIC
 * vector sharing is in effect, otherwise gsi_irq_sharing(). */
462 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
464 #ifdef CONFIG_X86_IO_APIC
465 if (use_pci_vector() && !platform_legacy_irq(gsi))
466 *irq = IO_APIC_VECTOR(gsi);
469 *irq = gsi_irq_sharing(gsi);
474 * success: return IRQ number (>=0)
475 * failure: return < 0
/* Register a GSI: in PIC mode force level trigger via the ELCR (EISA helper);
 * in IO-APIC mode route it through mp_register_gsi(); then translate the
 * platform GSI to the IRQ number returned to the caller. */
477 int acpi_register_gsi(u32 gsi, int triggering, int polarity)
480 unsigned int plat_gsi = gsi;
484 * Make sure all (legacy) PCI IRQs are set as level-triggered.
486 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
487 extern void eisa_set_level_irq(unsigned int irq);
489 if (triggering == ACPI_LEVEL_SENSITIVE)
490 eisa_set_level_irq(gsi);
494 #ifdef CONFIG_X86_IO_APIC
495 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
496 plat_gsi = mp_register_gsi(gsi, triggering, polarity);
499 acpi_gsi_to_irq(plat_gsi, &irq);
503 EXPORT_SYMBOL(acpi_register_gsi);
506 * ACPI based hotplug support for CPU
/* Hotplug and IO-APIC (un)registration entry points.
 * NOTE(review): the bodies (visible elsewhere in the full file) are missing
 * from this extract — only the signatures and EXPORTs remain. */
508 #ifdef CONFIG_ACPI_HOTPLUG_CPU
509 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
515 EXPORT_SYMBOL(acpi_map_lsapic);
517 int acpi_unmap_lsapic(int cpu)
523 EXPORT_SYMBOL(acpi_unmap_lsapic);
524 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
526 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
532 EXPORT_SYMBOL(acpi_register_ioapic);
534 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
540 EXPORT_SYMBOL(acpi_unregister_ioapic);
/* Scan a physical range on 16-byte boundaries for the "RSD PTR " signature;
 * returns the physical address of a match.
 * NOTE(review): as extracted, the strncmp() result falls through to the
 * return — a 'continue'/inversion line appears to be missing. */
542 static unsigned long __init
543 acpi_scan_rsdp(unsigned long start, unsigned long length)
545 unsigned long offset = 0;
546 unsigned long sig_len = sizeof("RSD PTR ") - 1;
549 * Scan all 16-byte boundaries of the physical memory region for the
552 for (offset = 0; offset < length; offset += 16) {
553 if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
555 return (start + offset);
/* Simple Boot Flag table: map it and stash the CMOS port in sbf_port. */
561 static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
563 struct acpi_table_sbf *sb;
565 if (!phys_addr || !size)
568 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
570 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
574 sbf_port = sb->sbf_cmos; /* Save CMOS port */
579 #ifdef CONFIG_HPET_TIMER
/* HPET table: require a memory-mapped base, then record it — the vxtime
 * path (x86_64) takes the full 64-bit address, the hpet_address path (i386)
 * only the low 32 bits; the two paths are presumably separated by an arch
 * #ifdef missing from this extract. */
581 static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
583 struct acpi_table_hpet *hpet_tbl;
588 hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
590 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
594 if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
595 printk(KERN_WARNING PREFIX "HPET timers must be located in "
600 vxtime.hpet_address = hpet_tbl->addr.addrl |
601 ((long)hpet_tbl->addr.addrh << 32);
603 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
604 hpet_tbl->id, vxtime.hpet_address);
607 extern unsigned long hpet_address;
609 hpet_address = hpet_tbl->addr.addrl;
610 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
611 hpet_tbl->id, hpet_address);
618 #define acpi_parse_hpet NULL
621 #ifdef CONFIG_X86_PM_TIMER
622 extern u32 pmtmr_ioport;
/* FADT: capture sci_int early (needed by MADT INT_SRC_OVR parsing), the
 * revision/physical-destination fields, and locate the ACPI PM timer port —
 * preferring the extended X pm_tmr block on FADT2+, falling back to the
 * V1.0 field when the X field is absent/zero. */
625 static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
627 struct fadt_descriptor *fadt = NULL;
629 fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
631 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
634 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
635 acpi_fadt.sci_int = fadt->sci_int;
637 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
638 acpi_fadt.revision = fadt->revision;
639 acpi_fadt.force_apic_physical_destination_mode =
640 fadt->force_apic_physical_destination_mode;
642 #ifdef CONFIG_X86_PM_TIMER
643 /* detect the location of the ACPI PM Timer */
644 if (fadt->revision >= FADT2_REVISION_ID) {
646 if (fadt->xpm_tmr_blk.address_space_id !=
647 ACPI_ADR_SPACE_SYSTEM_IO)
650 pmtmr_ioport = fadt->xpm_tmr_blk.address;
652 * "X" fields are optional extensions to the original V1.0
653 * fields, so we must selectively expand V1.0 fields if the
654 * corresponding X field is zero.
657 pmtmr_ioport = fadt->V1_pm_tmr_blk;
660 pmtmr_ioport = fadt->V1_pm_tmr_blk;
663 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
/* Locate the RSDP: prefer the EFI-provided ACPI 2.0 (then 1.0) pointer,
 * otherwise scan the EBDA area and then the E0000-FFFFF BIOS region. */
669 unsigned long __init acpi_find_rsdp(void)
671 unsigned long rsdp_phys = 0;
674 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
676 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
680 * Scan memory looking for the RSDP signature. First search EBDA (low
681 * memory) paragraphs and then search upper memory (E0000-FFFFF).
683 rsdp_phys = acpi_scan_rsdp(0, 0x400);
685 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
690 #ifdef CONFIG_X86_LOCAL_APIC
692 * Parse LAPIC entries in MADT
693 * returns 0 on success, < 0 on error
/* Order matters: address override first, then register the LAPIC base,
 * then enumerate LAPIC entries, then LAPIC NMI entries. The 'count'
 * assignments/returns around each step are partially missing from this
 * extract. */
695 static int __init acpi_parse_madt_lapic_entries(void)
703 * Note that the LAPIC address is obtained from the MADT (32-bit value)
704 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
708 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
709 acpi_parse_lapic_addr_ovr, 0);
711 printk(KERN_ERR PREFIX
712 "Error parsing LAPIC address override entry\n");
716 mp_register_lapic_address(acpi_lapic_addr);
718 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
721 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
722 /* TBD: Cleanup to allow fallback to MPS */
724 } else if (count < 0) {
725 printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
726 /* TBD: Cleanup to allow fallback to MPS */
731 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
733 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
734 /* TBD: Cleanup to allow fallback to MPS */
739 #endif /* CONFIG_X86_LOCAL_APIC */
741 #ifdef CONFIG_X86_IO_APIC
743 * Parse IOAPIC related entries in MADT
744 * returns 0 on success, < 0 on error
/* Enumerate IO-APICs, apply interrupt source overrides (synthesising an SCI
 * override if the BIOS supplied none), set up identity legacy IRQ mappings,
 * then parse NMI sources. Bails out early when ACPI/IRQ handling is disabled
 * or "noapic" was given. */
746 static int __init acpi_parse_madt_ioapic_entries(void)
751 * ACPI interpreter is required to complete interrupt setup,
752 * so if it is off, don't enumerate the io-apics with ACPI.
753 * If MPS is present, it will handle them,
754 * otherwise the system will stay in PIC mode
756 if (acpi_disabled || acpi_noirq) {
764 * if "noapic" boot option, don't look for IO-APICs
766 if (skip_ioapic_setup) {
767 printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
768 "due to 'noapic' option.\n");
773 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
776 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
778 } else if (count < 0) {
779 printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
784 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
787 printk(KERN_ERR PREFIX
788 "Error parsing interrupt source overrides entry\n");
789 /* TBD: Cleanup to allow fallback to MPS */
794 * If BIOS did not supply an INT_SRC_OVR for the SCI
795 * pretend we got one so we can set the SCI flags.
797 if (!acpi_sci_override_gsi)
798 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
800 /* Fill in identity legacy mapings where no override */
801 mp_config_acpi_legacy_irqs();
804 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
807 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
808 /* TBD: Cleanup to allow fallback to MPS */
/* !CONFIG_X86_IO_APIC stub — body (return value) missing from this extract */
815 static inline int acpi_parse_madt_ioapic_entries(void)
819 #endif /* !CONFIG_X86_IO_APIC */
/* Drive MADT processing: parse the table, enumerate LAPICs, then IO-APICs;
 * on success switch the IRQ model to IOAPIC and mark SMP config found.
 * An -EINVAL from table parsing disables ACPI entirely (broken BIOS MADT). */
821 static void __init acpi_process_madt(void)
823 #ifdef CONFIG_X86_LOCAL_APIC
826 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
830 * Parse MADT LAPIC entries
832 error = acpi_parse_madt_lapic_entries();
836 #ifdef CONFIG_X86_GENERICARCH
837 generic_bigsmp_probe();
840 * Parse MADT IO-APIC entries
842 error = acpi_parse_madt_ioapic_entries();
844 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
845 acpi_irq_balance_set(NULL);
848 smp_found_config = 1;
849 clustered_apic_check();
852 if (error == -EINVAL) {
854 * Dell Precision Workstation 410, 610 come here.
856 printk(KERN_ERR PREFIX
857 "Invalid BIOS MADT, disabling ACPI\n");
/* DMI blacklist callbacks. Each logs the matched system and (in lines not
 * present in this extract) sets the corresponding disable/force flag;
 * acpi=force overrules the blacklist in dmi_disable_acpi/force_acpi_ht. */
867 static int __init disable_acpi_irq(struct dmi_system_id *d)
870 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
877 static int __init disable_acpi_pci(struct dmi_system_id *d)
880 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
887 static int __init dmi_disable_acpi(struct dmi_system_id *d)
890 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
894 "Warning: DMI blacklist says broken, but acpi forced\n");
900 * Limit ACPI to CPU enumeration for HT
902 static int __init force_acpi_ht(struct dmi_system_id *d)
905 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
911 "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
917 * If your system is blacklisted here, but you find that acpi=force
918 * works for you, please contact acpi-devel@sourceforge.net
/* DMI quirk table consumed by dmi_check_system() in acpi_boot_table_init().
 * NOTE(review): per-entry ".matches = {" lines and closing braces are
 * missing from this extract. */
920 static struct dmi_system_id __initdata acpi_dmi_table[] = {
922 * Boxes that need ACPI disabled
925 .callback = dmi_disable_acpi,
926 .ident = "IBM Thinkpad",
928 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
929 DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
934 * Boxes that need acpi=ht
937 .callback = force_acpi_ht,
938 .ident = "FSC Primergy T850",
940 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
941 DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
945 .callback = force_acpi_ht,
946 .ident = "DELL GX240",
948 DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
949 DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
953 .callback = force_acpi_ht,
954 .ident = "HP VISUALIZE NT Workstation",
956 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
957 DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
961 .callback = force_acpi_ht,
962 .ident = "Compaq Workstation W8000",
964 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
965 DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
969 .callback = force_acpi_ht,
970 .ident = "ASUS P4B266",
972 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
973 DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
977 .callback = force_acpi_ht,
978 .ident = "ASUS P2B-DS",
980 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
981 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
985 .callback = force_acpi_ht,
986 .ident = "ASUS CUR-DLS",
988 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
989 DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
993 .callback = force_acpi_ht,
994 .ident = "ABIT i440BX-W83977",
996 DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
997 DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
1001 .callback = force_acpi_ht,
1002 .ident = "IBM Bladecenter",
1004 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1005 DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
1009 .callback = force_acpi_ht,
1010 .ident = "IBM eServer xSeries 360",
1012 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1013 DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
1017 .callback = force_acpi_ht,
1018 .ident = "IBM eserver xSeries 330",
1020 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1021 DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1025 .callback = force_acpi_ht,
1026 .ident = "IBM eserver xSeries 440",
1028 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1029 DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1034 * Boxes that need ACPI PCI IRQ routing disabled
1037 .callback = disable_acpi_irq,
1038 .ident = "ASUS A7V",
1040 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1041 DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1042 /* newer BIOS, Revision 1011, does work */
1043 DMI_MATCH(DMI_BIOS_VERSION,
1044 "ASUS A7V ACPI BIOS Revision 1007"),
1049 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1052 .callback = disable_acpi_pci,
1053 .ident = "ASUS PR-DLS",
1055 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1056 DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1057 DMI_MATCH(DMI_BIOS_VERSION,
1058 "ASUS PR-DLS ACPI BIOS Revision 1010"),
1059 DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1063 .callback = disable_acpi_pci,
1064 .ident = "Acer TravelMate 36x Laptop",
1066 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1067 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1073 #endif /* __i386__ */
1076 * acpi_boot_table_init() and acpi_boot_init()
1077 * called from setup_arch(), always.
1078 * 1. checksums all tables
1079 * 2. enumerates lapics
1080 * 3. enumerates io-apics
1082 * acpi_table_init() is separate to allow reading SRAT without
1083 * other side effects.
1085 * side effects of acpi_boot_init:
1086 * acpi_lapic = 1 if LAPIC found
1087 * acpi_ioapic = 1 if IOAPIC found
1088 * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1089 * if acpi_blacklisted() acpi_disabled = 1;
1090 * acpi_irq_model=...
1093 * return value: (currently ignored)
/* Run the DMI quirk table, initialise the table parser, read the Simple
 * Boot Flag table, and apply the ACPI blacklist (unless acpi=force). */
1098 int __init acpi_boot_table_init(void)
1103 dmi_check_system(acpi_dmi_table);
1107 * If acpi_disabled, bail out
1108 * One exception: acpi=ht continues far enough to enumerate LAPICs
1110 if (acpi_disabled && !acpi_ht)
1114 * Initialize the ACPI boot-time table parser.
1116 error = acpi_table_init();
1122 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1125 * blacklist may disable ACPI entirely
1127 error = acpi_blacklisted();
1130 printk(KERN_WARNING PREFIX "acpi=force override\n");
1132 printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
/* Second-stage ACPI boot: parse SBF, FADT (sci_int + PM timer), the MADT
 * (LAPICs/IO-APICs via acpi_process_madt), and finally the HPET table. */
1141 int __init acpi_boot_init(void)
1144 * If acpi_disabled, bail out
1145 * One exception: acpi=ht continues far enough to enumerate LAPICs
1147 if (acpi_disabled && !acpi_ht)
1150 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1153 * set sci_int and PM timer address
1155 acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
1158 * Process the Multiple APIC Description Table (MADT), if present
1160 acpi_process_madt();
1162 acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
/* early_param handler for "acpi=": off/force/strict/ht/noirq.
 * NOTE(review): the flag assignments inside each branch are missing from
 * this extract; only the string matching remains visible. */
1167 static int __init parse_acpi(char *arg)
1172 /* "acpi=off" disables both ACPI table parsing and interpreter */
1173 if (strcmp(arg, "off") == 0) {
1176 /* acpi=force to over-ride black-list */
1177 else if (strcmp(arg, "force") == 0) {
1182 /* acpi=strict disables out-of-spec workarounds */
1183 else if (strcmp(arg, "strict") == 0) {
1186 /* Limit ACPI just to boot-time to enable HT */
1187 else if (strcmp(arg, "ht") == 0) {
1192 /* "acpi=noirq" disables ACPI interrupt routing */
1193 else if (strcmp(arg, "noirq") == 0) {
1196 /* Core will printk when we return error. */
1201 early_param("acpi", parse_acpi);
1204 static int __init parse_pci(char *arg)
1206 if (arg && strcmp(arg, "noacpi") == 0)
1210 early_param("pci", parse_pci);
1212 #ifdef CONFIG_X86_IO_APIC
1213 static int __init parse_acpi_skip_timer_override(char *arg)
1215 acpi_skip_timer_override = 1;
1218 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1219 #endif /* CONFIG_X86_IO_APIC */
/* "acpi_sci=edge|level|high|low" — pre-seed SCI trigger/polarity flags
 * consumed by acpi_sci_ioapic_setup(); encoding 1=edge/high, 3=level/low
 * matches the MPS INTIN flag encoding. */
1221 static int __init setup_acpi_sci(char *s)
1225 if (!strcmp(s, "edge"))
1226 acpi_sci_flags.trigger = 1;
1227 else if (!strcmp(s, "level"))
1228 acpi_sci_flags.trigger = 3;
1229 else if (!strcmp(s, "high"))
1230 acpi_sci_flags.polarity = 1;
1231 else if (!strcmp(s, "low"))
1232 acpi_sci_flags.polarity = 3;
1237 early_param("acpi_sci", setup_acpi_sci);