make oldconfig will rebuild these...
[linux-2.4.21-pre4.git] / arch / arm / kernel / irq.c
1 /*
2  *  linux/arch/arm/kernel/irq.c
3  *
4  *  Copyright (C) 1992 Linus Torvalds
5  *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  *  This file contains the code used by various IRQ handling routines:
12  *  asking for different IRQ's should be done through these routines
13  *  instead of just grabbing them. Thus setups with different IRQ numbers
14  *  shouldn't result in any weird surprises, and installing new handlers
15  *  should be easier.
16  *
17  *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
18  *  Naturally it's not a 1:1 relation, but there are similarities.
19  */
20 #include <linux/config.h>
21 #include <linux/ptrace.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/signal.h>
24 #include <linux/sched.h>
25 #include <linux/ioport.h>
26 #include <linux/interrupt.h>
27 #include <linux/slab.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/init.h>
31
32 #include <asm/irq.h>
33 #include <asm/system.h>
34 #include <asm/mach/irq.h>
35
36 #include <asm/arch/irq.h>       /* pick up fixup_irq definition */
37
38 /*
39  * Maximum IRQ count.  Currently, this is arbitary.  However, it should
40  * not be set too low to prevent false triggering.  Conversely, if it
41  * is set too high, then you could miss a stuck IRQ.
42  *
43  * Maybe we ought to set a timer and re-enable the IRQ at a later time?
44  */
45 #define MAX_IRQ_CNT     100000
46
47 static volatile unsigned long irq_err_count;
48 static spinlock_t irq_controller_lock;
49
50 struct irqdesc irq_desc[NR_IRQS];
51 void (*init_arch_irq)(void) __initdata = NULL;
52
53 /*
54  * Dummy mask/unmask handler
55  */
static void dummy_mask_unmask_irq(unsigned int irq)
{
	/*
	 * Intentionally empty: installed as the default mask/mask_ack/unmask
	 * handler in init_IRQ() so descriptors are always safe to call
	 * through before the architecture code claims an IRQ.
	 */
}
59
60 /**
61  *      disable_irq - disable an irq and wait for completion
62  *      @irq: Interrupt to disable
63  *
64  *      Disable the selected interrupt line.
65  *
66  *      This function may be called - with care - from IRQ context.
67  */
68 void disable_irq(unsigned int irq)
69 {
70         unsigned long flags;
71
72         spin_lock_irqsave(&irq_controller_lock, flags);
73         irq_desc[irq].enabled = 0;
74         irq_desc[irq].mask(irq);
75         spin_unlock_irqrestore(&irq_controller_lock, flags);
76 }
77
78 /**
79  *      enable_irq - enable interrupt handling on an irq
80  *      @irq: Interrupt to enable
81  *
82  *      Re-enables the processing of interrupts on this IRQ line
83  *
84  *      This function may be called from IRQ context.
85  */
86 void enable_irq(unsigned int irq)
87 {
88         unsigned long flags;
89
90         spin_lock_irqsave(&irq_controller_lock, flags);
91         irq_desc[irq].probing = 0;
92         irq_desc[irq].triggered = 0;
93         irq_desc[irq].enabled = 1;
94         irq_desc[irq].unmask(irq);
95         spin_unlock_irqrestore(&irq_controller_lock, flags);
96 }
97
98 int get_irq_list(char *buf)
99 {
100         int i;
101         struct irqaction * action;
102         char *p = buf;
103
104         for (i = 0 ; i < NR_IRQS ; i++) {
105                 action = irq_desc[i].action;
106                 if (!action)
107                         continue;
108                 p += sprintf(p, "%3d: %10u ", i, kstat_irqs(i));
109                 p += sprintf(p, "  %s", action->name);
110                 for (action = action->next; action; action = action->next) {
111                         p += sprintf(p, ", %s", action->name);
112                 }
113                 *p++ = '\n';
114         }
115
116 #ifdef CONFIG_ARCH_ACORN
117         p += get_fiq_list(p);
118 #endif
119         p += sprintf(p, "Err: %10lu\n", irq_err_count);
120         return p - buf;
121 }
122
123 /*
124  * IRQ lock detection.
125  *
126  * Hopefully, this should get us out of a few locked situations.
127  * However, it may take a while for this to happen, since we need
128  * a large number of IRQs to appear in the same jiffy with the
129  * same instruction pointer (or within 2 instructions).
130  */
131 static void check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
132 {
133         unsigned long instr_ptr = instruction_pointer(regs);
134
135         if (desc->lck_jif == jiffies &&
136             desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
137                 desc->lck_cnt += 1;
138
139                 if (desc->lck_cnt > MAX_IRQ_CNT) {
140                         printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
141                         disable_irq(irq);
142                 }
143         } else {
144                 desc->lck_cnt = 0;
145                 desc->lck_pc  = instruction_pointer(regs);
146                 desc->lck_jif = jiffies;
147         }
148 }
149
150 /*
151  * do_IRQ handles all normal device IRQ's
152  */
/*
 * Main interrupt dispatch.  Called from the low-level entry code with
 * IRQs disabled on the CPU.  Ordering here is deliberate: the line is
 * mask+acked under the controller lock BEFORE irq_enter()/handler
 * dispatch, and only unmasked again afterwards (or early, for "nomask"
 * descriptors).  Do not reorder.
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqdesc * desc;
	struct irqaction * action;
	int cpu;

	/* Architecture hook: translate/validate the raw IRQ number. */
	irq = fixup_irq(irq);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		goto bad_irq;

	desc = irq_desc + irq;

	/* Mask and acknowledge the source before running any handler. */
	spin_lock(&irq_controller_lock);
	desc->mask_ack(irq);
	spin_unlock(&irq_controller_lock);

	cpu = smp_processor_id();
	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;		/* seen by probe_irq_on()/off() */

	/* Return with this interrupt masked if no action */
	action = desc->action;

	if (action) {
		int status = 0;

		/* "nomask" lines are re-enabled before the handlers run. */
		if (desc->nomask) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}

		/* Fast handlers (SA_INTERRUPT) run with IRQs still off. */
		if (!(action->flags & SA_INTERRUPT))
			__sti();

		/* Walk the (possibly shared) handler chain. */
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);

		if (status & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();

		/* Unmask again, unless disabled while the handlers ran. */
		if (!desc->nomask && desc->enabled) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}
	}

	/*
	 * Debug measure - hopefully we can continue if an
	 * IRQ lockup problem occurs...
	 */
	check_irq_lock(desc, irq, regs);

	irq_exit(cpu, irq);

	/* Run pending softirqs on the way out, as arch IRQ exit must. */
	if (softirq_pending(cpu))
		do_softirq();
	return;

bad_irq:
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
	return;
}
228
229 #ifdef CONFIG_ARCH_ACORN
230 void do_ecard_IRQ(int irq, struct pt_regs *regs)
231 {
232         struct irqdesc * desc;
233         struct irqaction * action;
234         int cpu;
235
236         desc = irq_desc + irq;
237
238         cpu = smp_processor_id();
239         kstat.irqs[cpu][irq]++;
240         desc->triggered = 1;
241
242         action = desc->action;
243
244         if (action) {
245                 do {
246                         action->handler(irq, action->dev_id, regs);
247                         action = action->next;
248                 } while (action);
249         } else {
250                 spin_lock(&irq_controller_lock);
251                 desc->mask(irq);
252                 spin_unlock(&irq_controller_lock);
253         }
254 }
255 #endif
256
257 int setup_arm_irq(int irq, struct irqaction * new)
258 {
259         int shared = 0;
260         struct irqaction *old, **p;
261         unsigned long flags;
262         struct irqdesc *desc;
263
264         /*
265          * Some drivers like serial.c use request_irq() heavily,
266          * so we have to be careful not to interfere with a
267          * running system.
268          */
269         if (new->flags & SA_SAMPLE_RANDOM) {
270                 /*
271                  * This function might sleep, we want to call it first,
272                  * outside of the atomic block.
273                  * Yes, this might clear the entropy pool if the wrong
274                  * driver is attempted to be loaded, without actually
275                  * installing a new handler, but is this really a problem,
276                  * only the sysadmin is able to do this.
277                  */
278                 rand_initialize_irq(irq);
279         }
280
281         /*
282          * The following block of code has to be executed atomically
283          */
284         desc = irq_desc + irq;
285         spin_lock_irqsave(&irq_controller_lock, flags);
286         p = &desc->action;
287         if ((old = *p) != NULL) {
288                 /* Can't share interrupts unless both agree to */
289                 if (!(old->flags & new->flags & SA_SHIRQ)) {
290                         spin_unlock_irqrestore(&irq_controller_lock, flags);
291                         return -EBUSY;
292                 }
293
294                 /* add new interrupt at end of irq queue */
295                 do {
296                         p = &old->next;
297                         old = *p;
298                 } while (old);
299                 shared = 1;
300         }
301
302         *p = new;
303
304         if (!shared) {
305                 desc->nomask = (new->flags & SA_IRQNOMASK) ? 1 : 0;
306                 desc->probing = 0;
307                 if (!desc->noautoenable) {
308                         desc->enabled = 1;
309                         desc->unmask(irq);
310                 }
311         }
312
313         spin_unlock_irqrestore(&irq_controller_lock, flags);
314         return 0;
315 }
316
317 /**
318  *      request_irq - allocate an interrupt line
319  *      @irq: Interrupt line to allocate
320  *      @handler: Function to be called when the IRQ occurs
321  *      @irqflags: Interrupt type flags
322  *      @devname: An ascii name for the claiming device
323  *      @dev_id: A cookie passed back to the handler function
324  *
325  *      This call allocates interrupt resources and enables the
326  *      interrupt line and IRQ handling. From the point this
327  *      call is made your handler function may be invoked. Since
328  *      your handler function must clear any interrupt the board
329  *      raises, you must take care both to initialise your hardware
330  *      and to set up the interrupt handler in the right order.
331  *
332  *      Dev_id must be globally unique. Normally the address of the
333  *      device data structure is used as the cookie. Since the handler
334  *      receives this value it makes sense to use it.
335  *
336  *      If your interrupt is shared you must pass a non NULL dev_id
337  *      as this is required when freeing the interrupt.
338  *
339  *      Flags:
340  *
341  *      SA_SHIRQ                Interrupt is shared
342  *
343  *      SA_INTERRUPT            Disable local interrupts while processing
344  *
345  *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
346  *
347  */
348 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
349                  unsigned long irq_flags, const char * devname, void *dev_id)
350 {
351         unsigned long retval;
352         struct irqaction *action;
353
354         if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
355             (irq_flags & SA_SHIRQ && !dev_id))
356                 return -EINVAL;
357
358         action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
359         if (!action)
360                 return -ENOMEM;
361
362         action->handler = handler;
363         action->flags = irq_flags;
364         action->mask = 0;
365         action->name = devname;
366         action->next = NULL;
367         action->dev_id = dev_id;
368
369         retval = setup_arm_irq(irq, action);
370
371         if (retval)
372                 kfree(action);
373         return retval;
374 }
375
376 /**
377  *      free_irq - free an interrupt
378  *      @irq: Interrupt line to free
379  *      @dev_id: Device identity to free
380  *
381  *      Remove an interrupt handler. The handler is removed and if the
382  *      interrupt line is no longer in use by any driver it is disabled.
383  *      On a shared IRQ the caller must ensure the interrupt is disabled
384  *      on the card it drives before calling this function.
385  *
386  *      This function may be called from interrupt context.
387  */
388 void free_irq(unsigned int irq, void *dev_id)
389 {
390         struct irqaction * action, **p;
391         unsigned long flags;
392
393         if (irq >= NR_IRQS || !irq_desc[irq].valid) {
394                 printk(KERN_ERR "Trying to free IRQ%d\n",irq);
395 #ifdef CONFIG_DEBUG_ERRORS
396                 __backtrace();
397 #endif
398                 return;
399         }
400
401         spin_lock_irqsave(&irq_controller_lock, flags);
402         for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
403                 if (action->dev_id != dev_id)
404                         continue;
405
406                 /* Found it - now free it */
407                 *p = action->next;
408                 kfree(action);
409                 goto out;
410         }
411         printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
412 #ifdef CONFIG_DEBUG_ERRORS
413         __backtrace();
414 #endif
415 out:
416         spin_unlock_irqrestore(&irq_controller_lock, flags);
417 }
418
419 /* Start the interrupt probing.  Unlike other architectures,
420  * we don't return a mask of interrupts from probe_irq_on,
421  * but return the number of interrupts enabled for the probe.
422  * The interrupts which have been enabled for probing is
423  * instead recorded in the irq_desc structure.
424  */
425 unsigned long probe_irq_on(void)
426 {
427         unsigned int i, irqs = 0;
428         unsigned long delay;
429
430         /*
431          * first snaffle up any unassigned but
432          * probe-able interrupts
433          */
434         spin_lock_irq(&irq_controller_lock);
435         for (i = 0; i < NR_IRQS; i++) {
436                 if (!irq_desc[i].valid ||
437                     !irq_desc[i].probe_ok ||
438                     irq_desc[i].action)
439                         continue;
440
441                 irq_desc[i].probing = 1;
442                 irq_desc[i].triggered = 0;
443                 irq_desc[i].unmask(i);
444                 irqs += 1;
445         }
446         spin_unlock_irq(&irq_controller_lock);
447
448         /*
449          * wait for spurious interrupts to mask themselves out again
450          */
451         for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
452                 /* min 100ms delay */;
453
454         /*
455          * now filter out any obviously spurious interrupts
456          */
457         spin_lock_irq(&irq_controller_lock);
458         for (i = 0; i < NR_IRQS; i++) {
459                 if (irq_desc[i].probing &&
460                     irq_desc[i].triggered) {
461                         irq_desc[i].probing = 0;
462                         irqs -= 1;
463                 }
464         }
465         spin_unlock_irq(&irq_controller_lock);
466
467         /* now filter out any obviously spurious interrupts */
468         return irqs;
469 }
470
471 /*
472  * Possible return values:
473  *  >= 0 - interrupt number
474  *    -1 - no interrupt/many interrupts
475  */
476 int probe_irq_off(unsigned long irqs)
477 {
478         unsigned int i;
479         int irq_found = NO_IRQ;
480
481         /*
482          * look at the interrupts, and find exactly one
483          * that we were probing has been triggered
484          */
485         spin_lock_irq(&irq_controller_lock);
486         for (i = 0; i < NR_IRQS; i++) {
487                 if (irq_desc[i].probing &&
488                     irq_desc[i].triggered) {
489                         if (irq_found != NO_IRQ) {
490                                 irq_found = NO_IRQ;
491                                 goto out;
492                         }
493                         irq_found = i;
494                 }
495         }
496
497         if (irq_found == -1)
498                 irq_found = NO_IRQ;
499 out:
500         spin_unlock_irq(&irq_controller_lock);
501
502         return irq_found;
503 }
504
void __init init_irq_proc(void)
{
	/* Empty stub: this architecture provides no /proc irq setup. */
}
508
509 void __init init_IRQ(void)
510 {
511         extern void init_dma(void);
512         int irq;
513
514         for (irq = 0; irq < NR_IRQS; irq++) {
515                 irq_desc[irq].probe_ok = 0;
516                 irq_desc[irq].valid    = 0;
517                 irq_desc[irq].noautoenable = 0;
518                 irq_desc[irq].mask_ack = dummy_mask_unmask_irq;
519                 irq_desc[irq].mask     = dummy_mask_unmask_irq;
520                 irq_desc[irq].unmask   = dummy_mask_unmask_irq;
521         }
522
523         init_arch_irq();
524         init_dma();
525 }