6bdd03c524c763108dc7e956b2dd95f743c1b3a5
[powerpc.git] / kernel / irq / migration.c
1 #include <linux/irq.h>
2
3 #if defined(CONFIG_GENERIC_PENDING_IRQ)
4
5 void set_pending_irq(unsigned int irq, cpumask_t mask)
6 {
7         irq_desc_t *desc = irq_desc + irq;
8         unsigned long flags;
9
10         spin_lock_irqsave(&desc->lock, flags);
11         desc->move_irq = 1;
12         pending_irq_cpumask[irq] = mask;
13         spin_unlock_irqrestore(&desc->lock, flags);
14 }
15
/*
 * move_native_irq - carry out a migration previously requested via
 * set_pending_irq() for @irq.
 *
 * Re-programs the irq's affinity to the intersection of the pending
 * mask and the online CPUs, then clears the pending mask.
 * Called with desc->lock held (see comment below).
 */
void move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	/* Fast path: no migration was requested for this irq. */
	if (likely (!desc->move_irq))
		return;

	desc->move_irq = 0;

	/* An empty pending mask leaves nothing to migrate to. */
	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	/* The controller may not support re-targeting at all. */
	if (!desc->handler->set_affinity)
		return;

	/* note - we hold the desc->lock */
	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 */
	if (unlikely(!cpus_empty(tmp))) {
		desc->handler->disable(irq);
		desc->handler->set_affinity(irq,tmp);
		desc->handler->enable(irq);
	}
	/* Migration done (or mask was offline-only); drop the request. */
	cpus_clear(pending_irq_cpumask[irq]);
}
51
52 #endif