#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For 2.4.x compatibility, 2.4.x can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle it.
 * IRQ_HANDLED means that we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two: IRQ_HANDLED if x is non-zero,
 * IRQ_NONE otherwise.
 */
typedef int irqreturn_t;

#define IRQ_NONE	(0)
#define IRQ_HANDLED	(1)
#define IRQ_RETVAL(x)	((x) != 0)

struct irqaction {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
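
/*
 * Example usage (editorial sketch, not part of the original header; the
 * my_* names are hypothetical).  A driver's handler reports via
 * IRQ_RETVAL() whether the interrupt was really for its device:
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_device *dev = dev_id;
 *		int handled = my_device_ack_irq(dev);	// hypothetical helper
 *		return IRQ_RETVAL(handled);		// IRQ_HANDLED or IRQ_NONE
 *	}
 *
 *	// at init time; SA_SHIRQ allows sharing the line with other devices
 *	if (request_irq(dev->irq, my_handler, SA_SHIRQ, "my_device", dev))
 *		goto fail;
 *	// ... and at teardown, passing the same dev_id cookie:
 *	free_irq(dev->irq, dev);
 */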

#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
	local_irq_disable();
}
static inline void __deprecated sti(void)
{
	local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
	local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
	local_irq_restore(x);
}
static inline void __deprecated save_and_cli(unsigned long *x)
{
	local_irq_save(*x);
}
#define save_and_cli(x)	save_and_cli(&x)
#endif /* CONFIG_SMP */
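
/*
 * Editorial note: new code should use the local_irq_*() family directly
 * rather than the deprecated wrappers above, e.g.:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// instead of save_and_cli(flags)
 *	// ... critical section, local interrupts off ...
 *	local_irq_restore(flags);	// instead of restore_flags(flags)
 */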

/* SoftIRQ primitives.  */
#define local_bh_disable() \
		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
#define __local_bh_enable() \
		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)

extern void local_bh_enable(void);
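
/*
 * Example (editorial sketch): code that shares data with softirq context
 * on the local CPU brackets the critical section with the primitives
 * above; update_my_percpu_stats() is a made-up stand-in.
 *
 *	local_bh_disable();		// softirqs won't run on this CPU
 *	update_my_percpu_stats();	// safe against local softirqs
 *	local_bh_enable();		// re-enables and runs pending softirqs
 */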

/* PLEASE avoid allocating new softirqs unless you really need very
   high-frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
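
/*
 * Example (editorial sketch): installing and raising a softirq.
 * MY_SOFTIRQ stands for a slot in the softirq number space, and
 * my_action()/my_queue are invented; per the comment above, real code
 * should almost always use a tasklet instead of claiming a new slot.
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		struct my_queue *q = a->data;	// data from open_softirq()
 *		my_queue_drain(q);		// hypothetical work
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action, &my_queue);	// boot-time setup
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// mark pending; do_softirq() runs it
 */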

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: the same
   tasklet never runs on more than one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets.  If a client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
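
/*
 * Example (editorial sketch, hypothetical names): a statically declared
 * tasklet scheduled from a hard-irq handler.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_device *dev = (struct my_device *)data;
 *		// deferred work runs here in softirq context
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn,
 *			       (unsigned long)&my_device);
 *
 *	// from the interrupt handler; repeated calls before the tasklet
 *	// runs still yield a single execution (see properties above):
 *	tasklet_schedule(&my_tasklet);
 */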

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
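
/*
 * Example (editorial sketch): fencing a tasklet off while reconfiguring
 * the data it operates on (names hypothetical).
 *
 *	tasklet_disable(&my_tasklet);	// returns once it is not running
 *	my_device_reconfigure(dev);	// tasklet cannot execute here
 *	tasklet_enable(&my_tasklet);	// may run again from now on
 */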

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
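
/*
 * Example (editorial sketch): dynamic setup and teardown.  A module must
 * kill its tasklets before freeing the data (or unloading the code) they
 * use; the dev structure is invented.
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_kill(&dev->tasklet);	// wait out SCHED/RUN before freeing
 */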

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
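
/*
 * Concrete sketch of the recipe above (editorial; the my_device_*()
 * helpers and the 20ms delay are invented, mdelay() is <linux/delay.h>):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_device_mask_irq(dev);	// step 1
 *	// step 2: local interrupts are assumed already enabled here
 *	irqs = probe_irq_on();		// step 3
 *	my_device_trigger_irq(dev);	// step 4
 *	mdelay(20);			// step 5
 *	irq = probe_irq_off(irqs);	// step 6: 0=none, negative=multiple
 *	my_device_ack_irq(dev);		// step 7
 */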

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#endif /* _LINUX_INTERRUPT_H */