x86_64 irq: use mask/unmask and proper locking in fixup_irqs()
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index e46c558..39cb3fa 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -18,6 +18,7 @@
 #include <asm/uaccess.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
+#include <asm/smp.h>
 
 atomic_t irq_err_count;
 
@@ -31,7 +32,7 @@ atomic_t irq_err_count;
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
-       u64 curbase = (u64) current->thread_info;
+       u64 curbase = (u64)task_stack_page(current);
        static unsigned long warned = -60*HZ;
 
        if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
@@ -120,9 +121,14 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 
        if (likely(irq < NR_IRQS))
                generic_handle_irq(irq);
-       else
-               printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
-                       __func__, smp_processor_id(), vector);
+       else {
+               if (!disable_apic)
+                       ack_APIC_irq();
+
+               if (printk_ratelimit())
+                       printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
+                               __func__, smp_processor_id(), vector);
+       }
 
        irq_exit();
 
@@ -138,17 +144,43 @@ void fixup_irqs(cpumask_t map)
 
        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
+               int break_affinity = 0;
+               int set_affinity = 1;
+
                if (irq == 2)
                        continue;
 
+               /* interrupts are disabled at this point */
+               spin_lock(&irq_desc[irq].lock);
+
+               if (!irq_has_action(irq) ||
+                   cpus_equal(irq_desc[irq].affinity, map)) {
+                       spin_unlock(&irq_desc[irq].lock);
+                       continue;
+               }
+
                cpus_and(mask, irq_desc[irq].affinity, map);
-               if (any_online_cpu(mask) == NR_CPUS) {
-                       printk("Breaking affinity for irq %i\n", irq);
+               if (cpus_empty(mask)) {
+                       break_affinity = 1;
                        mask = map;
                }
+
+               if (irq_desc[irq].chip->mask)
+                       irq_desc[irq].chip->mask(irq);
+
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
-               else if (irq_desc[irq].action && !(warned++))
+               else if (!(warned++))
+                       set_affinity = 0;
+
+               if (irq_desc[irq].chip->unmask)
+                       irq_desc[irq].chip->unmask(irq);
+
+               spin_unlock(&irq_desc[irq].lock);
+
+               if (break_affinity && set_affinity)
+                       printk("Broke affinity for irq %i\n", irq);
+               else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }
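
In outline, the per-IRQ step this hunk adds to fixup_irqs() is: take the
descriptor lock, mask the line, retarget it, unmask it, then drop the lock.
A minimal sketch of that pattern, using only the irq_desc[]/irq_chip fields
the hunk itself touches (illustration only, not a substitute for the patch):

	spin_lock(&irq_desc[irq].lock);		/* IRQs already disabled by the caller */

	if (irq_desc[irq].chip->mask)
		irq_desc[irq].chip->mask(irq);	/* quiesce the line while retargeting */

	if (irq_desc[irq].chip->set_affinity)
		irq_desc[irq].chip->set_affinity(irq, mask);

	if (irq_desc[irq].chip->unmask)
		irq_desc[irq].chip->unmask(irq);

	spin_unlock(&irq_desc[irq].lock);

The intent is that masking around set_affinity() keeps the interrupt from
firing on the old target while its destination is being rewritten, and
holding irq_desc[irq].lock serializes the update against other users of the
descriptor.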