2 * BK Id: SCCS/s.irq.c 1.56 10/16/02 11:02:50 paulus
5 * arch/ppc/kernel/irq.c
7 * Derived from arch/i386/kernel/irq.c
8 * Copyright (C) 1992 Linus Torvalds
9 * Adapted from arch/i386 by Gary Thomas
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
12 * Copyright (C) 1996-2001 Cort Dougan
13 * Adapted for Power Macintosh by Paul Mackerras
14 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
15 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
17 * This file contains the code used by various IRQ handling routines:
18 * asking for different IRQ's should be done through these routines
19 * instead of just grabbing them. Thus setups with different IRQ numbers
20 * shouldn't result in any weird surprises, and installing new handlers
23 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
24 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
25 * mask register (of which only 16 are defined), hence the weird shifting
26 * and complement of the cached_irq_mask. I want to be able to stuff
27 * this right into the SIU SMASK register.
28 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
29 * to reduce code space and undefined function references.
33 #include <linux/ptrace.h>
34 #include <linux/errno.h>
35 #include <linux/threads.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/signal.h>
38 #include <linux/sched.h>
39 #include <linux/ioport.h>
40 #include <linux/interrupt.h>
41 #include <linux/timex.h>
42 #include <linux/config.h>
43 #include <linux/init.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/delay.h>
47 #include <linux/irq.h>
48 #include <linux/proc_fs.h>
49 #include <linux/random.h>
51 #include <asm/uaccess.h>
52 #include <asm/bitops.h>
53 #include <asm/system.h>
55 #include <asm/pgtable.h>
57 #include <asm/cache.h>
59 #include <asm/ptrace.h>
61 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
63 extern atomic_t ipi_recv;
64 extern atomic_t ipi_sent;
65 void enable_irq(unsigned int irq_nr);
66 void disable_irq(unsigned int irq_nr);
68 static void register_irq_proc (unsigned int irq);
70 #define MAXCOUNT 10000000
72 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
73 { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
75 int ppc_spurious_interrupts = 0;
76 struct irqaction *ppc_irq_action[NR_IRQS];
77 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
78 unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
79 atomic_t ppc_n_lost_interrupts;
81 /* nasty hack for shared irq's since we need to do kmalloc calls but
82 * can't very early in the boot when we need to do a request irq.
83 * this needs to be removed.
86 #define IRQ_KMALLOC_ENTRIES 8
87 static int cache_bitmask = 0;
88 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
89 extern int mem_init_done;
91 #if defined(CONFIG_TAU_INT)
92 extern int tau_interrupts(unsigned long cpu);
93 extern int tau_initialized;
96 void *irq_kmalloc(size_t size, int pri)
100 return kmalloc(size,pri);
101 for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
102 if ( ! ( cache_bitmask & (1<<i) ) )
104 cache_bitmask |= (1<<i);
105 return (void *)(&malloc_cache[i]);
110 void irq_kfree(void *ptr)
113 for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
114 if ( ptr == &malloc_cache[i] )
116 cache_bitmask &= ~(1<<i);
123 setup_irq(unsigned int irq, struct irqaction * new)
127 struct irqaction *old, **p;
128 irq_desc_t *desc = irq_desc + irq;
131 * Some drivers like serial.c use request_irq() heavily,
132 * so we have to be careful not to interfere with a
135 if (new->flags & SA_SAMPLE_RANDOM) {
137 * This function might sleep, we want to call it first,
138 * outside of the atomic block.
139 * Yes, this might clear the entropy pool if the wrong
140 * driver is attempted to be loaded, without actually
141 * installing a new handler, but is this really a problem,
142 * only the sysadmin is able to do this.
144 rand_initialize_irq(irq);
148 * The following block of code has to be executed atomically
150 spin_lock_irqsave(&desc->lock,flags);
152 if ((old = *p) != NULL) {
153 /* Can't share interrupts unless both agree to */
154 if (!(old->flags & new->flags & SA_SHIRQ)) {
155 printk("!(old->flags & new->flags & SA_SHIRQ) \n");
156 spin_unlock_irqrestore(&desc->lock,flags);
160 /* add new interrupt at end of irq queue */
172 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
175 spin_unlock_irqrestore(&desc->lock,flags);
177 register_irq_proc(irq);
181 #if (defined(CONFIG_8xx) || defined(CONFIG_8260))
182 /* Name change so we can catch standard drivers that potentially mess up
183 * the internal interrupt controller on 8xx and 8260. Just bear with me,
184 * I don't like this either and I am searching a better solution. For
185 * now, this is what I need. -- Dan
187 #define request_irq request_8xxirq
190 void free_irq(unsigned int irq, void* dev_id)
193 struct irqaction **p;
196 desc = irq_desc + irq;
197 spin_lock_irqsave(&desc->lock,flags);
200 struct irqaction * action = *p;
202 struct irqaction **pp = p;
204 if (action->dev_id != dev_id)
207 /* Found it - now remove it from the list of entries */
210 desc->status |= IRQ_DISABLED;
213 spin_unlock_irqrestore(&desc->lock,flags);
216 /* Wait to make sure it's not being used on another CPU */
217 while (desc->status & IRQ_INPROGRESS)
223 printk("Trying to free free IRQ%d\n",irq);
224 spin_unlock_irqrestore(&desc->lock,flags);
230 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
231 unsigned long irqflags, const char * devname, void *dev_id)
233 struct irqaction *action;
238 printk("request_irq irq >= NR_IRQS fail \n");
244 * free_irq() used to be implemented as a call to
245 * request_irq() with handler being NULL. Now we have
246 * a real free_irq() but need to allow the old behavior
247 * for old code that hasn't caught up yet.
248 * -- Cort <cort@fsmlabs.com>
250 printk("request_irq !handler fail \n");
251 free_irq(irq, dev_id);
255 action = (struct irqaction *)
256 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
258 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
262 action->handler = handler;
263 action->flags = irqflags;
265 action->name = devname;
266 action->dev_id = dev_id;
269 retval = setup_irq(irq, action);
272 printk("request_irq retval %d \n", retval);
281 * Generic enable/disable code: this just calls
282 * down into the PIC-specific version for the actual
283 * hardware disable after having gotten the irq
288 * disable_irq_nosync - disable an irq without waiting
289 * @irq: Interrupt to disable
291 * Disable the selected interrupt line. Disables of an interrupt
292 * stack. Unlike disable_irq(), this function does not ensure existing
293 * instances of the IRQ handler have completed before returning.
295 * This function may be called from IRQ context.
298 void disable_irq_nosync(unsigned int irq)
300 irq_desc_t *desc = irq_desc + irq;
303 spin_lock_irqsave(&desc->lock, flags);
304 if (!desc->depth++) {
305 if (!(desc->status & IRQ_PER_CPU))
306 desc->status |= IRQ_DISABLED;
309 spin_unlock_irqrestore(&desc->lock, flags);
313 * disable_irq - disable an irq and wait for completion
314 * @irq: Interrupt to disable
316 * Disable the selected interrupt line. Disables of an interrupt
317 * stack. That is for two disables you need two enables. This
318 * function waits for any pending IRQ handlers for this interrupt
319 * to complete before returning. If you use this function while
320 * holding a resource the IRQ handler may need you will deadlock.
322 * This function may be called - with care - from IRQ context.
325 void disable_irq(unsigned int irq)
327 disable_irq_nosync(irq);
329 if (!local_irq_count(smp_processor_id())) {
332 } while (irq_desc[irq].status & IRQ_INPROGRESS);
337 * enable_irq - enable interrupt handling on an irq
338 * @irq: Interrupt to enable
340 * Re-enables the processing of interrupts on this IRQ line
341 * providing no disable_irq calls are now in effect.
343 * This function may be called from IRQ context.
346 void enable_irq(unsigned int irq)
348 irq_desc_t *desc = irq_desc + irq;
351 spin_lock_irqsave(&desc->lock, flags);
352 switch (desc->depth) {
354 unsigned int status = desc->status & ~IRQ_DISABLED;
355 desc->status = status;
356 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
357 desc->status = status | IRQ_REPLAY;
358 hw_resend_irq(desc->handler,irq);
367 printk("enable_irq(%u) unbalanced\n", irq);
369 spin_unlock_irqrestore(&desc->lock, flags);
372 int get_irq_list(char *buf)
375 struct irqaction * action;
377 len += sprintf(buf+len, " ");
378 for (j=0; j<smp_num_cpus; j++)
379 len += sprintf(buf+len, "CPU%d ",j);
380 *(char *)(buf+len++) = '\n';
382 for (i = 0 ; i < NR_IRQS ; i++) {
383 action = irq_desc[i].action;
384 if ( !action || !action->handler )
386 len += sprintf(buf+len, "%3d: ", i);
388 for (j = 0; j < smp_num_cpus; j++)
389 len += sprintf(buf+len, "%10u ",
390 kstat.irqs[cpu_logical_map(j)][i]);
392 len += sprintf(buf+len, "%10u ", kstat_irqs(i));
393 #endif /* CONFIG_SMP */
394 if ( irq_desc[i].handler )
395 len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename );
397 len += sprintf(buf+len, " None ");
398 len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
399 len += sprintf(buf+len, " %s",action->name);
400 for (action=action->next; action; action = action->next) {
401 len += sprintf(buf+len, ", %s", action->name);
403 len += sprintf(buf+len, "\n");
405 #ifdef CONFIG_TAU_INT
406 if (tau_initialized){
407 len += sprintf(buf+len, "TAU: ");
408 for (j = 0; j < smp_num_cpus; j++)
409 len += sprintf(buf+len, "%10u ",
411 len += sprintf(buf+len, " PowerPC Thermal Assist (cpu temp)\n");
415 /* should this be per processor send/receive? */
416 len += sprintf(buf+len, "IPI (recv/sent): %10u/%u\n",
417 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
419 len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
424 handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
428 if (!(action->flags & SA_INTERRUPT))
432 status |= action->flags;
433 action->handler(irq, action->dev_id, regs);
434 action = action->next;
436 if (status & SA_SAMPLE_RANDOM)
437 add_interrupt_randomness(irq);
442 * Eventually, this should take an array of interrupts and an array size
443 * so it can dispatch multiple interrupts.
445 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
448 struct irqaction *action;
449 int cpu = smp_processor_id();
450 irq_desc_t *desc = irq_desc + irq;
452 kstat.irqs[cpu][irq]++;
453 spin_lock(&desc->lock);
456 REPLAY is when Linux resends an IRQ that was dropped earlier
457 WAITING is used by probe to mark irqs that are being tested
459 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
460 if (!(status & IRQ_PER_CPU))
461 status |= IRQ_PENDING; /* we _want_ to handle it */
464 * If the IRQ is disabled for whatever reason, we cannot
465 * use the action we have.
468 if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
469 action = desc->action;
470 if (!action || !action->handler) {
471 ppc_spurious_interrupts++;
472 printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
473 /* We can't call disable_irq here, it would deadlock */
475 desc->status |= IRQ_DISABLED;
477 /* This is a real interrupt, we have to eoi it,
481 status &= ~IRQ_PENDING; /* we commit to handling */
482 if (!(status & IRQ_PER_CPU))
483 status |= IRQ_INPROGRESS; /* we are handling it */
485 desc->status = status;
488 * If there is no IRQ handler or it was disabled, exit early.
489 Since we set PENDING, if another processor is handling
490 a different instance of this same irq, the other processor
491 will take care of it.
498 * Edge triggered interrupts need to remember
500 * This applies to any hw interrupts that allow a second
501 * instance of the same irq to arrive while we are in do_IRQ
502 * or in the handler. But the code here only handles the _second_
503 * instance of the irq, not the third or fourth. So it is mostly
504 * useful for irq hardware that does not mask cleanly in an
508 spin_unlock(&desc->lock);
509 handle_irq_event(irq, regs, action);
510 spin_lock(&desc->lock);
512 if (!(desc->status & IRQ_PENDING))
514 desc->status &= ~IRQ_PENDING;
516 desc->status &= ~IRQ_INPROGRESS;
519 * The ->end() handler has to deal with interrupts which got
520 * disabled while the handler was running.
522 if (irq_desc[irq].handler) {
523 if (irq_desc[irq].handler->end)
524 irq_desc[irq].handler->end(irq);
525 else if (irq_desc[irq].handler->enable)
526 irq_desc[irq].handler->enable(irq);
528 spin_unlock(&desc->lock);
531 #ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_pic.c */
532 int do_IRQ(struct pt_regs *regs)
534 int cpu = smp_processor_id();
536 hardirq_enter( cpu );
539 * Every platform is required to implement ppc_md.get_irq.
540 * This function will either return an irq number or -1 to
541 * indicate there are no more pending. But the first time
542 * through the loop this means there wasn't an IRQ pending.
543 * The value -2 is for buggy hardware and means that this IRQ
544 * has already been handled. -- Tom
546 while ((irq = ppc_md.get_irq(regs)) >= 0) {
547 ppc_irq_dispatch_handler(regs, irq);
550 if (irq != -2 && first)
551 /* That's not SMP safe ... but who cares ? */
552 ppc_spurious_interrupts++;
555 if (softirq_pending(cpu))
557 return 1; /* lets ret_from_int know we can do checks */
559 #endif /* CONFIG_PPC_ISERIES */
561 unsigned long probe_irq_on (void)
566 int probe_irq_off (unsigned long irqs)
571 unsigned int probe_irq_mask(unsigned long irqs)
576 void __init init_IRQ(void)
589 unsigned char global_irq_holder = NO_PROC_ID;
590 unsigned volatile long global_irq_lock; /* pendantic :long for set_bit--RR*/
592 atomic_t global_bh_count;
594 static void show(char * str)
597 unsigned long *stack;
598 int cpu = smp_processor_id();
600 printk("\n%s, CPU %d:\n", str, cpu);
601 printk("irq: [%d %d]\n",
604 printk("bh: %d [%d %d]\n",
605 atomic_read(&global_bh_count),
608 stack = (unsigned long *) &str;
609 for (i = 40; i ; i--) {
610 unsigned long x = *++stack;
611 if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
612 printk("<[%08lx]> ", x);
617 static inline void wait_on_bh(void)
619 int count = MAXCOUNT;
625 /* nothing .. wait for the other bh's to go away */
626 } while (atomic_read(&global_bh_count) != 0);
630 static inline void wait_on_irq(int cpu)
632 int count = MAXCOUNT;
637 * Wait until all interrupts are gone. Wait
638 * for bottom half handlers unless we're
639 * already executing in one..
642 if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
645 /* Duh, we have to loop. Release the lock to avoid deadlocks */
646 clear_bit(0,&global_irq_lock);
655 * We have to allow irqs to arrive between __sti and __cli
656 * Some cpus apparently won't cause the interrupt
657 * for several instructions. We hope that isync will
660 __asm__ __volatile__ ("isync");
666 if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
668 if (!test_and_set_bit(0,&global_irq_lock))
675 * This is called when we want to synchronize with
676 * bottom half handlers. We need to wait until
677 * no other CPU is executing any bottom half handler.
679 * Don't wait if we're already running in an interrupt
680 * context or are inside a bh handler.
682 void synchronize_bh(void)
684 if (atomic_read(&global_bh_count) && !in_interrupt())
689 * This is called when we want to synchronize with
690 * interrupts. We may for example tell a device to
691 * stop sending interrupts: but to make sure there
692 * are no interrupts that are executing on another
693 * CPU we need to call this function.
695 void synchronize_irq(void)
697 if (irqs_running()) {
698 /* Stupid approach */
704 static inline void get_irqlock(int cpu)
706 unsigned int loops = MAXCOUNT;
708 if (test_and_set_bit(0,&global_irq_lock)) {
709 /* do we already hold the lock? */
710 if ((unsigned char) cpu == global_irq_holder)
712 /* Uhhuh.. Somebody else got it. Wait.. */
716 printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
721 } while (test_bit(0,&global_irq_lock));
722 } while (test_and_set_bit(0,&global_irq_lock));
725 * We also need to make sure that nobody else is running
726 * in an interrupt context.
733 global_irq_holder = cpu;
737 * A global "cli()" while in an interrupt context
738 * turns into just a local cli(). Interrupts
739 * should use spinlocks for the (very unlikely)
740 * case that they ever want to protect against
743 * If we already have local interrupts disabled,
744 * this will not turn a local disable into a
745 * global one (problems with spinlocks: this makes
746 * save_flags+cli+sti usable inside a spinlock).
748 void __global_cli(void)
753 if (flags & (1 << 15)) {
754 int cpu = smp_processor_id();
756 if (!local_irq_count(cpu))
761 void __global_sti(void)
763 int cpu = smp_processor_id();
765 if (!local_irq_count(cpu))
766 release_irqlock(cpu);
771 * SMP flags value to restore to:
777 unsigned long __global_save_flags(void)
784 local_enabled = (flags >> 15) & 1;
785 /* default to local */
786 retval = 2 + local_enabled;
788 /* check for global flags if we're not in an interrupt */
789 if (!local_irq_count(smp_processor_id())) {
792 if (global_irq_holder == (unsigned char) smp_processor_id())
802 register unsigned long *orig_sp __asm__ ("r1");
803 register unsigned long lr __asm__ ("r3");
807 asm volatile ("mflr 3");
809 sp = (unsigned long *) *orig_sp;
810 sp = (unsigned long *) *sp;
811 for (i=1; i<max_size; i++) {
817 sp = (unsigned long *) *sp;
823 void __global_restore_flags(unsigned long flags)
840 unsigned long trace[5];
844 printk("global_restore_flags: %08lx (%08lx)\n",
845 flags, (&flags)[-1]);
846 count = tb(trace, 5);
848 for(i=0; i<count; i++) {
849 printk(" %8.8lx", trace[i]);
855 #endif /* CONFIG_SMP */
857 static struct proc_dir_entry *root_irq_dir;
858 static struct proc_dir_entry *irq_dir[NR_IRQS];
859 static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
861 #ifdef CONFIG_IRQ_ALL_CPUS
862 #define DEFAULT_CPU_AFFINITY 0xffffffff
864 #define DEFAULT_CPU_AFFINITY 0x00000001
867 unsigned int irq_affinity [NR_IRQS] =
868 { [0 ... NR_IRQS-1] = DEFAULT_CPU_AFFINITY };
872 static int irq_affinity_read_proc (char *page, char **start, off_t off,
873 int count, int *eof, void *data)
875 if (count < HEX_DIGITS+1)
877 return sprintf (page, "%08x\n", irq_affinity[(int)data]);
880 static unsigned int parse_hex_value (const char *buffer,
881 unsigned long count, unsigned long *ret)
883 unsigned char hexnum [HEX_DIGITS];
889 if (count > HEX_DIGITS)
891 if (copy_from_user(hexnum, buffer, count))
895 * Parse the first 8 characters as a hex string, any non-hex char
896 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
900 for (i = 0; i < count; i++) {
901 unsigned int c = hexnum[i];
904 case '0' ... '9': c -= '0'; break;
905 case 'a' ... 'f': c -= 'a'-10; break;
906 case 'A' ... 'F': c -= 'A'-10; break;
910 value = (value << 4) | c;
917 static int irq_affinity_write_proc (struct file *file, const char *buffer,
918 unsigned long count, void *data)
920 int irq = (int) data, full_count = count, err;
921 unsigned long new_value;
923 if (!irq_desc[irq].handler->set_affinity)
926 err = parse_hex_value(buffer, count, &new_value);
929 * Do not allow disabling IRQs completely - it's a too easy
930 * way to make the system unusable accidentally :-) At least
931 * one online CPU still has to be targeted.
933 * We assume a 1-1 logical<->physical cpu mapping here. If
934 * we assume that the cpu indices in /proc/irq/../smp_affinity
935 * are actually logical cpu #'s then we have no problem.
936 * -- Cort <cort@fsmlabs.com>
938 if (!(new_value & cpu_online_map))
941 irq_affinity[irq] = new_value;
942 irq_desc[irq].handler->set_affinity(irq, new_value);
947 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
948 int count, int *eof, void *data)
950 unsigned long *mask = (unsigned long *) data;
951 if (count < HEX_DIGITS+1)
953 return sprintf (page, "%08lx\n", *mask);
956 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
957 unsigned long count, void *data)
959 unsigned long *mask = (unsigned long *) data, full_count = count, err;
960 unsigned long new_value;
962 err = parse_hex_value(buffer, count, &new_value);
970 #define MAX_NAMELEN 10
972 static void register_irq_proc (unsigned int irq)
974 struct proc_dir_entry *entry;
975 char name [MAX_NAMELEN];
977 if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
980 memset(name, 0, MAX_NAMELEN);
981 sprintf(name, "%d", irq);
983 /* create /proc/irq/1234 */
984 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
986 /* create /proc/irq/1234/smp_affinity */
987 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
990 entry->data = (void *)irq;
991 entry->read_proc = irq_affinity_read_proc;
992 entry->write_proc = irq_affinity_write_proc;
994 smp_affinity_entry[irq] = entry;
997 unsigned long prof_cpu_mask = -1;
999 void init_irq_proc (void)
1001 struct proc_dir_entry *entry;
1004 /* create /proc/irq */
1005 root_irq_dir = proc_mkdir("irq", 0);
1007 /* create /proc/irq/prof_cpu_mask */
1008 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1011 entry->data = (void *)&prof_cpu_mask;
1012 entry->read_proc = prof_cpu_mask_read_proc;
1013 entry->write_proc = prof_cpu_mask_write_proc;
1016 * Create entries for all existing IRQs.
1018 for (i = 0; i < NR_IRQS; i++) {
1019 if (irq_desc[i].handler == NULL)
1021 register_irq_proc(i);
1025 void no_action(int irq, void *dev, struct pt_regs *regs)