/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>
int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
static int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
		else
			return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
		else
			return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}
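/*
 * Illustration (added, not original comment text): the vendor's counter
 * MSRs are consecutive, so the reservation bit is simply the offset from
 * the base MSR.  On an AMD K7/K8, for example,
 *
 *	nmi_perfctr_msr_to_bit(MSR_K7_PERFCTR2)
 *		== MSR_K7_PERFCTR2 - MSR_K7_PERFCTR0 == 2
 *
 * so reserving that counter sets bit 2 in this CPU's perfctr_nmi_owner.
 */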
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	int cpu;

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	for_each_possible_cpu (cpu) {
		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
			return 0;
	}
	return 1;
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;
	int cpu;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	for_each_possible_cpu (cpu) {
		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
			return 0;
	}
	return 1;
}
static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;
	if (cpu < 0)
		cpu = smp_processor_id();
	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
		return 1;
	return 0;
}
static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;
	if (cpu < 0)
		cpu = smp_processor_id();
	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	int cpu, i;
	for_each_possible_cpu (cpu) {
		if (!__reserve_perfctr_nmi(cpu, msr)) {
			for_each_possible_cpu (i) {
				if (i >= cpu)
					break;
				__release_perfctr_nmi(i, msr);
			}
			return 0;
		}
	}
	return 1;
}

void release_perfctr_nmi(unsigned int msr)
{
	int cpu;
	for_each_possible_cpu (cpu)
		__release_perfctr_nmi(cpu, msr);
}
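/*
 * Usage sketch (illustrative only, not part of the original file): a
 * subsystem such as oprofile that wants a counter to itself would do
 * something like
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0) ||
 *	    !reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
 *		return -EBUSY;	// the NMI watchdog already owns them
 *	...program and use the counter...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 */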
int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;
	if (cpu < 0)
		cpu = smp_processor_id();
	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
		return 1;
	return 0;
}
static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
	unsigned int counter;
	if (cpu < 0)
		cpu = smp_processor_id();
	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
}
int reserve_evntsel_nmi(unsigned int msr)
{
	int cpu, i;
	for_each_possible_cpu (cpu) {
		if (!__reserve_evntsel_nmi(cpu, msr)) {
			for_each_possible_cpu (i) {
				if (i >= cpu)
					break;
				__release_evntsel_nmi(i, msr);
			}
			return 0;
		}
	}
	return 1;
}

void release_evntsel_nmi(unsigned int msr)
{
	int cpu;
	for_each_possible_cpu (cpu) {
		__release_evntsel_nmi(cpu, msr);
	}
}
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15 || boot_cpu_data.x86 == 16;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}
/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}

static int endflag __initdata = 0;
#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (endflag == 0)
		mb();
}
#endif
static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So, we can only program the counter with 31 bit values and
	 * 32nd bit should be 1, for 33.. to be 1.
	 * Find the appropriate nmi_hz
	 */
	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
	}
	return retval;
}
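/*
 * Worked example (added, not original comment text): on a 3 GHz CPU
 * cpu_khz = 3000000, so one NMI per second (hz = 1) needs a period of
 * 3,000,000,000 cycles.  That exceeds the 31-bit limit 0x7fffffff
 * (~2.1e9), so adjust_for_32bit_ctr() returns
 * 3000000000 / 0x7fffffff + 1 = 2: the watchdog ticks twice a second
 * instead, with a 1.5e9-cycle period that fits in the writable bits.
 */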
int __init check_nmi_watchdog (void)
{
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;

	mdelay((20*1000)/nmi_hz); // wait 20 ticks

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		endflag = 1;
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;
		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	}

	kfree(counts);
	return 0;
}
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}
static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}
static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}
static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if ( atomic_read(&nmi_active) < 0 )
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  //unused
	wd->check_bit = 1ULL<<63;
	return 1;
fail2:
	__release_evntsel_nmi(-1, evntsel_msr);
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
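/*
 * Note (added explanation, not original comment text): the counter is
 * seeded with -(cpu_khz * 1000 / nmi_hz), i.e. the number of cycles per
 * watchdog period below zero.  Counting unhalted cycles upward from that
 * value makes it overflow, and thus raise the NMI through the LVTPC
 * entry, roughly nmi_hz times per second on a busy CPU.  For example, a
 * 2.2 GHz CPU with nmi_hz == HZ == 1000 is programmed with
 * -(2200000 * 1000 / 1000) = -2200000, one NMI per ~1 ms of busy time.
 */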
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
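/*
 * Added explanation (my reading of the CCCR bits above, not original
 * comment text): with P4_CCCR_COMPARE and P4_CCCR_COMPLEMENT set and
 * P4_CCCR_THRESHOLD(15) (the maximum), the "event count <= threshold"
 * comparison is true on every cycle, so IQ_COUNTER0 increments once per
 * cycle regardless of which event CRU_ESCR0 selects -- which is what
 * lets it serve as a clock for the watchdog.
 */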
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		goto fail;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
		goto fail;

	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);

	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  //unused
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
fail1:
	__release_perfctr_nmi(-1, perfctr_msr);
fail:
	return 0;
}
static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);

	__release_evntsel_nmi(-1, wd->evntsel_msr);
	__release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}
void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
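/*
 * Worked example (added, not original comment text): after
 * check_nmi_watchdog() the local APIC watchdog typically runs at
 * nmi_hz = 1, so nmi_watchdog_tick() below fires about once a second.
 * If the per-cpu apic_timer_irqs count has not moved for 5*nmi_hz
 * consecutive ticks -- roughly five seconds -- alert_counter reaches
 * the threshold and die_nmi() reports a LOCKUP on that CPU.
 */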
void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit){
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* start the cycle over again */
				wrmsrl(wd->perfctr_msr,
				       -((u64)cpu_khz * 1000 / nmi_hz));
			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/*
				 * ArchPerfom/Core Duo needs to re-unmask
				 * the apic vector
				 */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* ARCH_PERFMON has 32 bit counter writes */
				wrmsr(wd->perfctr_msr,
				      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
			} else {
				/* start the cycle over again */
				wrmsrl(wd->perfctr_msr,
				       -((u64)cpu_khz * 1000 / nmi_hz));
			}
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}
asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk( KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif /* CONFIG_SYSCTL */
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
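/*
 * Added note: this works together with nmi_watchdog_tick() above --
 * every CPU whose bit is set in backtrace_mask prints its stack from
 * NMI context on its next watchdog tick and then clears its bit, so
 * the loop here just polls until all online CPUs have reported or ten
 * seconds have passed.
 */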
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);