2 * arch/ppc/kernel/irq.c
4 * Derived from arch/i386/kernel/irq.c
5 * Copyright (C) 1992 Linus Torvalds
6 * Adapted from arch/i386 by Gary Thomas
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Cort Dougan
10 * Adapted for Power Macintosh by Paul Mackerras
11 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
19 * This file contains the code used by various IRQ handling routines:
20 * asking for different IRQ's should be done through these routines
21 * instead of just grabbing them. Thus setups with different IRQ numbers
22 * shouldn't result in any weird surprises, and installing new handlers
26 #include <linux/ptrace.h>
27 #include <linux/errno.h>
28 #include <linux/threads.h>
29 #include <linux/kernel_stat.h>
30 #include <linux/signal.h>
31 #include <linux/sched.h>
32 #include <linux/ioport.h>
33 #include <linux/interrupt.h>
34 #include <linux/timex.h>
35 #include <linux/config.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/pci.h>
39 #include <linux/delay.h>
40 #include <linux/irq.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/random.h>
45 #include <asm/uaccess.h>
46 #include <asm/bitops.h>
47 #include <asm/system.h>
49 #include <asm/pgtable.h>
51 #include <asm/cache.h>
53 #include <asm/ptrace.h>
54 #include <asm/iSeries/LparData.h>
55 #include <asm/machdep.h>
57 #include <asm/perfmon.h>
59 #include "local_irq.h"
/* Forward declarations for the generic enable/disable entry points defined below. */
63 void enable_irq(unsigned int irq_nr);
64 void disable_irq(unsigned int irq_nr);
/* iSeries IPI receive hook, defined elsewhere in the arch code. */
67 extern void iSeries_smp_message_recv( struct pt_regs * );
/* Special CHRP interrupt-acknowledge register; volatile because it is hardware-backed. */
70 volatile unsigned char *chrp_int_ack_special;
71 static void register_irq_proc (unsigned int irq);
/* One descriptor per IRQ line; cacheline aligned to avoid false sharing between CPUs. */
73 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
74 { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
/* Count of interrupts that arrived with no registered handler (reported in /proc). */
76 int ppc_spurious_interrupts = 0;
77 struct irqaction *ppc_irq_action[NR_IRQS];
/* Number of iSeries LP events processed by do_IRQ(). */
78 unsigned long lpEvent_count = 0;
/* xmon (the PPC kernel debugger) entry points, resolved at link time. */
80 extern void xmon(struct pt_regs *regs);
81 extern int xmon_bpt(struct pt_regs *regs);
82 extern int xmon_sstep(struct pt_regs *regs);
83 extern int xmon_iabr_match(struct pt_regs *regs);
84 extern int xmon_dabr_match(struct pt_regs *regs);
85 extern void (*xmon_fault_handler)(struct pt_regs *regs);
/* Generic debugger hook pointers (may be wired to xmon or kgdb elsewhere). */
88 extern void (*debugger)(struct pt_regs *regs);
89 extern int (*debugger_bpt)(struct pt_regs *regs);
90 extern int (*debugger_sstep)(struct pt_regs *regs);
91 extern int (*debugger_iabr_match)(struct pt_regs *regs);
92 extern int (*debugger_dabr_match)(struct pt_regs *regs);
93 extern void (*debugger_fault_handler)(struct pt_regs *regs);
96 /* nasty hack for shared irq's since we need to do kmalloc calls but
97 * can't very early in the boot when we need to do a request irq.
98 * this needs to be removed.
/* Bitmask of in-use slots in malloc_cache; bit i set => slot i taken. */
101 #define IRQ_KMALLOC_ENTRIES 16
102 static int cache_bitmask = 0;
103 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
/* Set once the real allocator is usable; defined in arch init code. */
104 extern int mem_init_done;
/*
 * Allocate an irqaction-sized object: from kmalloc() once the real
 * allocator is up, otherwise from the small static boot-time cache.
 * NOTE(review): the embedded numbering jumps (107-109, 115-118); the body
 * shown is an incomplete excerpt of the original function.
 */
106 void *irq_kmalloc(size_t size, int pri)
110 return kmalloc(size,pri);
111 for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
112 if ( ! ( cache_bitmask & (1<<i) ) ) {
113 cache_bitmask |= (1<<i);
114 return (void *)(&malloc_cache[i]);
/*
 * Free an object obtained from irq_kmalloc(): if it came from the static
 * cache, just clear its bitmask slot; otherwise (per the original code,
 * lines omitted here) it presumably falls through to kfree() — TODO confirm.
 */
119 void irq_kfree(void *ptr)
122 for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
123 if ( ptr == &malloc_cache[i] ) {
124 cache_bitmask &= ~(1<<i);
/*
 * Attach a new irqaction to an IRQ descriptor. Seeds the entropy pool
 * first (may sleep), then manipulates the action list under desc->lock.
 * Sharing is refused unless both old and new actions set SA_SHIRQ.
 * NOTE(review): numbering jumps show several statements are omitted from
 * this excerpt (e.g. the list-append and startup code around 168-181).
 */
131 setup_irq(unsigned int irq, struct irqaction * new)
135 struct irqaction *old, **p;
136 irq_desc_t *desc = irq_desc + irq;
139 * Some drivers like serial.c use request_irq() heavily,
140 * so we have to be careful not to interfere with a
143 if (new->flags & SA_SAMPLE_RANDOM) {
145 * This function might sleep, we want to call it first,
146 * outside of the atomic block.
147 * Yes, this might clear the entropy pool if the wrong
148 * driver is attempted to be loaded, without actually
149 * installing a new handler, but is this really a problem,
150 * only the sysadmin is able to do this.
152 rand_initialize_irq(irq);
156 * The following block of code has to be executed atomically
158 spin_lock_irqsave(&desc->lock,flags);
160 if ((old = *p) != NULL) {
161 /* Can't share interrupts unless both agree to */
162 if (!(old->flags & new->flags & SA_SHIRQ)) {
163 spin_unlock_irqrestore(&desc->lock,flags);
167 /* add new interrupt at end of irq queue */
/* Clear stale state so the line is live again after installing the action. */
179 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
182 spin_unlock_irqrestore(&desc->lock,flags);
/* Expose the new IRQ under /proc/irq/. */
184 register_irq_proc(irq);
188 /* This could be promoted to a real free_irq() ... */
/*
 * Remove the action matching dev_id from the IRQ's action list, disabling
 * the line if the list becomes empty, then spin until any in-flight
 * handler on another CPU finishes (IRQ_INPROGRESS).
 * NOTE(review): numbering jumps — loop/list-walk statements are omitted
 * from this excerpt.
 */
190 do_free_irq(int irq, void* dev_id)
193 struct irqaction **p;
196 desc = irq_desc + irq;
197 spin_lock_irqsave(&desc->lock,flags);
200 struct irqaction * action = *p;
202 struct irqaction **pp = p;
204 if (action->dev_id != dev_id)
207 /* Found it - now remove it from the list of entries */
210 desc->status |= IRQ_DISABLED;
213 spin_unlock_irqrestore(&desc->lock,flags);
216 /* Wait to make sure it's not being used on another CPU */
217 while (desc->status & IRQ_INPROGRESS)
/* No matching action found: */
223 printk("Trying to free free IRQ%d\n",irq);
224 spin_unlock_irqrestore(&desc->lock,flags);
/*
 * Public entry point to install an interrupt handler.
 * A NULL handler is (ab)used to mean "free": it delegates to do_free_irq()
 * — this is how free_irq() below is implemented.
 * The irqaction comes from irq_kmalloc() so very-early callers work too.
 */
230 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
231 unsigned long irqflags, const char * devname, void *dev_id)
233 struct irqaction *action;
239 /* We could implement really free_irq() instead of that... */
240 return do_free_irq(irq, dev_id);
242 action = (struct irqaction *)
243 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
245 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
249 action->handler = handler;
250 action->flags = irqflags;
252 action->name = devname;
253 action->dev_id = dev_id;
256 retval = setup_irq(irq, action);
/* Free an IRQ by calling request_irq() with a NULL handler (see above). */
263 void free_irq(unsigned int irq, void *dev_id)
265 request_irq(irq, NULL, 0, NULL, dev_id);
269 * Generic enable/disable code: this just calls
270 * down into the PIC-specific version for the actual
271 * hardware disable after having gotten the irq
276 * disable_irq_nosync - disable an irq without waiting
277 * @irq: Interrupt to disable
279 * Disable the selected interrupt line. Disables and enables are
280 * nested. Unlike disable_irq(), this function does not ensure existing
281 * instances of the IRQ handler have completed before returning.
283 * This function may be called from IRQ context.
/*
 * Bump the descriptor's disable depth; on the 0->1 transition mark the
 * line IRQ_DISABLED (per-CPU IRQs are exempt). Does not wait for running
 * handlers — the hardware mask call (lines omitted in this excerpt)
 * presumably sits inside the depth==0 branch; TODO confirm.
 */
286 void disable_irq_nosync(unsigned int irq)
288 irq_desc_t *desc = irq_desc + irq;
291 spin_lock_irqsave(&desc->lock, flags);
292 if (!desc->depth++) {
293 if (!(desc->status & IRQ_PER_CPU))
294 desc->status |= IRQ_DISABLED;
297 spin_unlock_irqrestore(&desc->lock, flags);
301 * disable_irq - disable an irq and wait for completion
302 * @irq: Interrupt to disable
304 * Disable the selected interrupt line. Disables and enables are
305 * nested. That is, for two disables you need two enables. This
306 * function waits for any pending IRQ handlers for this interrupt
307 * to complete before returning. If you use this function while
308 * holding a resource the IRQ handler may need you will deadlock.
310 * This function may be called - with care - from IRQ context.
/*
 * Synchronous disable: disable the line, then — unless we are already in
 * interrupt context on this CPU — busy-wait until no handler instance is
 * running (IRQ_INPROGRESS clear).
 */
313 void disable_irq(unsigned int irq)
315 disable_irq_nosync(irq);
317 if (!local_irq_count(smp_processor_id())) {
320 } while (irq_desc[irq].status & IRQ_INPROGRESS);
325 * enable_irq - enable interrupt handling on an irq
326 * @irq: Interrupt to enable
328 * Re-enables the processing of interrupts on this IRQ line
329 * providing no disable_irq calls are now in effect.
331 * This function may be called from IRQ context.
/*
 * Undo one level of disable. On the final enable (depth reaching 0 in the
 * omitted case label), clear IRQ_DISABLED and replay a lost edge: if the
 * IRQ is PENDING but was never REPLAYed, ask the PIC to resend it.
 * An enable with depth already 0 is unbalanced and only logged.
 */
334 void enable_irq(unsigned int irq)
336 irq_desc_t *desc = irq_desc + irq;
339 spin_lock_irqsave(&desc->lock, flags);
340 switch (desc->depth) {
342 unsigned int status = desc->status & ~IRQ_DISABLED;
343 desc->status = status;
344 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
345 desc->status = status | IRQ_REPLAY;
346 hw_resend_irq(desc->handler,irq);
355 printk("enable_irq(%u) unbalanced\n", irq);
357 spin_unlock_irqrestore(&desc->lock, flags);
360 /* one would think this function has one foot in the grave */
/*
 * Legacy /proc/interrupts formatter into a flat buffer: per-IRQ per-CPU
 * counts, PIC type name, Level/Edge, action names, plus IPI and spurious
 * totals. Superseded by the seq_file version (show_interrupts) below.
 */
361 int get_irq_list(char *buf)
364 struct irqaction * action;
366 len += sprintf(buf+len, " ");
367 for (j=0; j<smp_num_cpus; j++)
368 len += sprintf(buf+len, "CPU%d ",j);
369 *(char *)(buf+len++) = '\n';
371 for (i = 0 ; i < NR_IRQS ; i++) {
372 action = irq_desc[i].action;
/* Skip lines with no installed handler. */
373 if ( !action || !action->handler )
375 len += sprintf(buf+len, "%3d: ", i);
377 for (j = 0; j < smp_num_cpus; j++)
378 len += sprintf(buf+len, "%10u ",
379 kstat.irqs[cpu_logical_map(j)][i]);
381 len += sprintf(buf+len, "%10u ", kstat_irqs(i));
382 #endif /* CONFIG_SMP */
383 if ( irq_desc[i].handler )
384 len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename );
386 len += sprintf(buf+len, " None ");
387 len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
388 len += sprintf(buf+len, " %s",action->name);
389 for (action=action->next; action; action = action->next) {
390 len += sprintf(buf+len, ", %s", action->name);
392 len += sprintf(buf+len, "\n");
395 /* should this be per processor send/receive? */
396 len += sprintf(buf+len, "IPI (recv/sent): %10u/%u\n",
397 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
399 len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
/*
 * seq_file variant of /proc/interrupts: identical layout to
 * get_irq_list() above but emitted via seq_printf().
 */
405 int show_interrupts(struct seq_file *p, void *v)
408 struct irqaction * action;
411 for (j=0; j<smp_num_cpus; j++)
412 seq_printf(p, "CPU%d ",j);
415 for (i = 0 ; i < NR_IRQS ; i++) {
416 action = irq_desc[i].action;
417 if (!action || !action->handler)
419 seq_printf(p, "%3d: ", i);
421 for (j = 0; j < smp_num_cpus; j++)
422 seq_printf(p, "%10u ",
423 kstat.irqs[cpu_logical_map(j)][i]);
425 seq_printf(p, "%10u ", kstat_irqs(i));
426 #endif /* CONFIG_SMP */
427 if (irq_desc[i].handler)
428 seq_printf(p, " %s ", irq_desc[i].handler->typename );
430 seq_printf(p, " None ");
431 seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
432 seq_printf(p, " %s",action->name);
433 for (action=action->next; action; action = action->next)
434 seq_printf(p, ", %s", action->name);
438 /* should this be per processor send/receive? */
439 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
440 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
442 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
/*
 * Walk the action chain for one IRQ, calling each handler. Handlers
 * without SA_INTERRUPT run with interrupts enabled (the enabling call is
 * in a line omitted from this excerpt). If any action requested
 * SA_SAMPLE_RANDOM, feed the event into the entropy pool afterwards.
 */
447 handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
451 if (!(action->flags & SA_INTERRUPT))
455 status |= action->flags;
456 action->handler(irq, action->dev_id, regs);
457 action = action->next;
459 if (status & SA_SAMPLE_RANDOM)
460 add_interrupt_randomness(irq);
465 * Eventually, this should take an array of interrupts and an array size
466 * so it can dispatch multiple interrupts.
/*
 * Core per-IRQ dispatch: account the interrupt, mark it PENDING then
 * INPROGRESS under desc->lock, run the handlers with the lock dropped,
 * loop while new PENDING instances arrived (second-edge handling), and
 * finally EOI via the PIC's ->end()/->enable() hook.
 * NOTE(review): the embedded numbering jumps throughout — several
 * statements (loop braces, gotos) are omitted from this excerpt.
 */
468 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
471 struct irqaction *action;
472 int cpu = smp_processor_id();
473 irq_desc_t *desc = irq_desc + irq;
475 kstat.irqs[cpu][irq]++;
476 spin_lock(&desc->lock);
479 REPLAY is when Linux resends an IRQ that was dropped earlier
480 WAITING is used by probe to mark irqs that are being tested
482 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
483 if (!(status & IRQ_PER_CPU))
484 status |= IRQ_PENDING; /* we _want_ to handle it */
487 * If the IRQ is disabled for whatever reason, we cannot
488 * use the action we have.
491 if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
492 action = desc->action;
493 if (!action || !action->handler) {
494 ppc_spurious_interrupts++;
495 printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
496 /* We can't call disable_irq here, it would deadlock */
499 desc->status |= IRQ_DISABLED;
500 /* This is not a real spurious interrupt, we
501 * have to eoi it, so we jump to out
506 status &= ~IRQ_PENDING; /* we commit to handling */
507 if (!(status & IRQ_PER_CPU))
508 status |= IRQ_INPROGRESS; /* we are handling it */
510 desc->status = status;
513 * If there is no IRQ handler or it was disabled, exit early.
514 Since we set PENDING, if another processor is handling
515 a different instance of this same irq, the other processor
516 will take care of it.
523 * Edge triggered interrupts need to remember
525 * This applies to any hw interrupts that allow a second
526 * instance of the same irq to arrive while we are in do_IRQ
527 * or in the handler. But the code here only handles the _second_
528 * instance of the irq, not the third or fourth. So it is mostly
529 * useful for irq hardware that does not mask cleanly in an
/* Run the handlers without holding the descriptor lock. */
533 spin_unlock(&desc->lock);
534 handle_irq_event(irq, regs, action);
535 spin_lock(&desc->lock);
/* If another instance arrived while we ran, PENDING is set again: loop. */
537 if (!(desc->status & IRQ_PENDING))
539 desc->status &= ~IRQ_PENDING;
541 desc->status &= ~IRQ_INPROGRESS;
544 * The ->end() handler has to deal with interrupts which got
545 * disabled while the handler was running.
547 if (irq_desc[irq].handler) {
548 if (irq_desc[irq].handler->end)
549 irq_desc[irq].handler->end(irq);
550 else if (irq_desc[irq].handler->enable)
551 irq_desc[irq].handler->enable(irq);
553 spin_unlock(&desc->lock);
/*
 * Top-level interrupt entry from assembly. On iSeries it first drains
 * IPIs and the LP event queue; then it polls ppc_md.get_irq() and
 * dispatches each returned IRQ (-1 = none left, -2 = already handled by
 * buggy hardware). On iSeries it also synthesizes decrementer and PMC
 * interrupts from paca flag bits before running pending softirqs.
 * Returns 1 so ret_from_int knows it may run its checks.
 */
556 int do_IRQ(struct pt_regs *regs)
558 int cpu = smp_processor_id();
560 #ifdef CONFIG_PPC_ISERIES
561 struct paca_struct *lpaca;
562 struct ItLpQueue *lpq;
567 #ifdef CONFIG_PPC_ISERIES
/* Consume a pending IPI flagged in the lpar paca. */
570 if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
571 lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
572 iSeries_smp_message_recv(regs);
574 #endif /* CONFIG_SMP */
575 lpq = lpaca->lpQueuePtr;
576 if (lpq && ItLpQueue_isLpIntPending(lpq))
577 lpEvent_count += ItLpQueue_process(lpq, regs);
580 * Every arch is required to implement ppc_md.get_irq.
581 * This function will either return an irq number or -1 to
582 * indicate there are no more pending. But the first time
583 * through the loop this means there wasn't an IRQ pending.
584 * The value -2 is for buggy hardware and means that this IRQ
585 * has already been handled. -- Tom
587 while ((irq = ppc_md.get_irq(regs)) >= 0) {
588 ppc_irq_dispatch_handler(regs, irq);
591 if (irq != -2 && first)
592 /* That's not SMP safe ... but who cares ? */
593 ppc_spurious_interrupts++;
598 #ifdef CONFIG_PPC_ISERIES
599 if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
600 lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
601 /* Signal a fake decrementer interrupt */
602 timer_interrupt(regs);
605 if (lpaca->xLpPaca.xIntDword.xFields.xPdcInt) {
606 lpaca->xLpPaca.xIntDword.xFields.xPdcInt = 0;
607 /* Signal a fake PMC interrupt */
608 PerformanceMonitorException();
612 if (softirq_pending(cpu))
615 return 1; /* lets ret_from_int know we can do checks */
/* IRQ autoprobe stubs — bodies omitted in this excerpt (numbering jumps). */
618 unsigned long probe_irq_on (void)
623 int probe_irq_off (unsigned long irqs)
628 unsigned int probe_irq_mask(unsigned long irqs)
/*
 * Boot-time IRQ setup; calls the platform's optional RAS IRQ hook.
 * (Earlier body lines 634-642 are omitted from this excerpt.)
 */
633 void __init init_IRQ(void)
643 if(ppc_md.init_ras_IRQ) ppc_md.init_ras_IRQ();
/* CPU currently owning the global irq lock; NO_PROC_ID when unowned. */
647 unsigned char global_irq_holder = NO_PROC_ID;
/*
 * Debug dump used when the global IRQ lock appears wedged: prints the
 * per-CPU brlock readers and bottom-half lock/counts.
 */
649 static void show(char * str)
651 int cpu = smp_processor_id();
654 printk("\n%s, CPU %d:\n", str, cpu);
655 printk("irq: %d [ ", irqs_running());
656 for (i = 0; i < smp_num_cpus; i++)
657 printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
658 printk("]\nbh: %d [ ",
659 (spin_is_locked(&global_bh_lock) ? 1 : 0));
660 for (i = 0; i < smp_num_cpus; i++)
661 printk("%u ", local_bh_count(i));
/* Spin budget before the lock-wedge diagnostics fire. */
665 #define MAXCOUNT 10000000
/*
 * Wait for all in-flight interrupt handlers system-wide to finish
 * (wait body omitted from this excerpt).
 */
667 void synchronize_irq(void)
669 if (irqs_running()) {
/*
 * Acquire the global IRQ brlock for cpu. Recursive acquisition by the
 * current holder returns immediately. After taking the write lock we must
 * still wait out any running handlers and the global bottom-half lock
 * (unless this CPU itself holds bh), retrying with the lock dropped to
 * avoid deadlock. Records ownership in global_irq_holder on success.
 */
675 static inline void get_irqlock(int cpu)
679 if ((unsigned char)cpu == global_irq_holder)
684 br_write_lock(BR_GLOBALIRQ_LOCK);
688 if (!irqs_running() &&
689 (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
/* Contended: back off, then spin outside the lock until quiescent. */
692 br_write_unlock(BR_GLOBALIRQ_LOCK);
693 lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
694 while (irqs_running() ||
695 spin_is_locked(lock) ||
696 (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
708 global_irq_holder = cpu;
712 * A global "cli()" while in an interrupt context
713 * turns into just a local cli(). Interrupts
714 * should use spinlocks for the (very unlikely)
715 * case that they ever want to protect against
718 * If we already have local interrupts disabled,
719 * this will not turn a local disable into a
720 * global one (problems with spinlocks: this makes
721 * save_flags+cli+sti usable inside a spinlock).
/*
 * Global cli(): only escalates to the global irq lock when interrupts
 * were enabled (MSR bit 15, EE) and we are not already in irq context.
 */
723 void __global_cli(void)
728 if (flags & (1UL << 15)) {
729 int cpu = smp_processor_id();
731 if (!local_irq_count(cpu))
/* Global sti(): release the global irq lock (if not in irq context). */
736 void __global_sti(void)
738 int cpu = smp_processor_id();
740 if (!local_irq_count(cpu))
741 release_irqlock(cpu);
746 * SMP flags value to restore to:
/*
 * Encode the current irq state for __global_restore_flags(): default is
 * 2 + local-EE-bit (local enabled/disabled); outside irq context it also
 * checks whether this CPU holds the global lock (encoding in omitted lines).
 */
752 unsigned long __global_save_flags(void)
/* Bit 15 of the MSR is EE (external interrupt enable). */
759 local_enabled = (flags >> 15) & 1;
760 /* default to local */
761 retval = 2 + local_enabled;
763 /* check for global flags if we're not in an interrupt */
764 if (!local_irq_count(smp_processor_id())) {
767 if (global_irq_holder == (unsigned char) smp_processor_id())
/*
 * Inverse of __global_save_flags(); unexpected encodings fall through to
 * the diagnostic printk below (dispatch lines omitted from this excerpt).
 */
773 void __global_restore_flags(unsigned long flags)
789 printk("global_restore_flags: %016lx caller %p\n",
790 flags, __builtin_return_address(0));
794 #endif /* CONFIG_SMP */
/* /proc/irq tree: root dir, one dir per IRQ, and its smp_affinity entry. */
796 static struct proc_dir_entry * root_irq_dir;
797 static struct proc_dir_entry * irq_dir [NR_IRQS];
798 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
/* Default affinity: all CPUs, or none, per the build-time config. */
800 #ifdef CONFIG_IRQ_ALL_CPUS
801 unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
802 #else /* CONFIG_IRQ_ALL_CPUS */
803 unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x00000000};
804 #endif /* CONFIG_IRQ_ALL_CPUS */
/* /proc/irq/N/smp_affinity read handler: prints the mask as 8 hex digits. */
808 static int irq_affinity_read_proc (char *page, char **start, off_t off,
809 int count, int *eof, void *data)
/* Refuse buffers too small for "XXXXXXXX\n". */
811 if (count < HEX_DIGITS+1)
813 return sprintf (page, "%08x\n", irq_affinity[(int)(long)data]);
/*
 * Parse up to HEX_DIGITS hex characters from a userspace buffer into
 * *ret. Any non-hex character ends the parse (see the comment below).
 * NOTE(review): the error-return and final-store lines are omitted from
 * this excerpt.
 */
816 static unsigned int parse_hex_value (const char *buffer,
817 unsigned long count, unsigned long *ret)
819 unsigned char hexnum [HEX_DIGITS];
825 if (count > HEX_DIGITS)
827 if (copy_from_user(hexnum, buffer, count))
831 * Parse the first 8 characters as a hex string, any non-hex char
832 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
836 for (i = 0; i < count; i++) {
837 unsigned int c = hexnum[i];
840 case '0' ... '9': c -= '0'; break;
841 case 'a' ... 'f': c -= 'a'-10; break;
842 case 'A' ... 'F': c -= 'A'-10; break;
846 value = (value << 4) | c;
/*
 * /proc/irq/N/smp_affinity write handler: parse a hex CPU mask and push
 * it to the PIC via ->set_affinity. Fails early if the controller has no
 * set_affinity hook. The all-offline-CPUs guard is present but was
 * disabled in the original (see BenH's comment).
 */
853 static int irq_affinity_write_proc (struct file *file, const char *buffer,
854 unsigned long count, void *data)
856 int irq = (int)(long) data, full_count = count, err;
857 unsigned long new_value;
859 if (!irq_desc[irq].handler->set_affinity)
862 err = parse_hex_value(buffer, count, &new_value);
864 /* Why is this disabled ? --BenH */
867 * Do not allow disabling IRQs completely - it's a too easy
868 * way to make the system unusable accidentally :-) At least
869 * one online CPU still has to be targeted.
871 if (!(new_value & cpu_online_map))
875 irq_affinity[irq] = new_value;
876 irq_desc[irq].handler->set_affinity(irq, new_value);
/* /proc/irq/prof_cpu_mask read handler: prints the mask as 8 hex digits. */
881 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
882 int count, int *eof, void *data)
884 unsigned long *mask = (unsigned long *) data;
885 if (count < HEX_DIGITS+1)
887 return sprintf (page, "%08lx\n", *mask);
/*
 * /proc/irq/prof_cpu_mask write handler: parse a hex mask and, on
 * iSeries, switch each paca's profiling mode on/off accordingly.
 * NOTE(review): the mask store and the per-iteration shift of new_value
 * are in lines omitted from this excerpt.
 */
890 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
891 unsigned long count, void *data)
893 unsigned long *mask = (unsigned long *) data, full_count = count, err;
894 unsigned long new_value;
896 err = parse_hex_value(buffer, count, &new_value);
902 #ifdef CONFIG_PPC_ISERIES
905 for (i=0; i<MAX_PACAS; ++i) {
906 if ( paca[i].prof_buffer && (new_value & 1) )
907 paca[i].prof_mode = PMC_STATE_DECR_PROFILE;
909 if(paca[i].prof_mode != PMC_STATE_INITIAL)
910 paca[i].prof_mode = PMC_STATE_READY;
/* Buffer size for the /proc/irq/<N> directory name. */
920 #define MAX_NAMELEN 10
/*
 * Create /proc/irq/<irq>/ and its smp_affinity entry for an IRQ that has
 * a controller attached. No-op before init_irq_proc() has made the root
 * directory or when the IRQ has no handler.
 */
922 static void register_irq_proc (unsigned int irq)
924 struct proc_dir_entry *entry;
925 char name [MAX_NAMELEN];
927 if (!root_irq_dir || (irq_desc[irq].handler == NULL))
930 memset(name, 0, MAX_NAMELEN);
931 sprintf(name, "%d", irq);
933 /* create /proc/irq/1234 */
934 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
936 /* create /proc/irq/1234/smp_affinity */
937 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
/* IRQ number travels as the proc entry's private data. */
940 entry->data = (void *)(long)irq;
941 entry->read_proc = irq_affinity_read_proc;
942 entry->write_proc = irq_affinity_write_proc;
944 smp_affinity_entry[irq] = entry;
/* Profiling CPU mask; -1 = profile on all CPUs by default. */
947 unsigned long prof_cpu_mask = -1;
/*
 * Build the /proc/irq tree: the root dir, the prof_cpu_mask entry, and a
 * per-IRQ directory for every IRQ that already has a controller.
 */
949 void init_irq_proc (void)
951 struct proc_dir_entry *entry;
954 /* create /proc/irq */
955 root_irq_dir = proc_mkdir("irq", 0);
957 /* create /proc/irq/prof_cpu_mask */
958 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
961 entry->data = (void *)&prof_cpu_mask;
962 entry->read_proc = prof_cpu_mask_read_proc;
963 entry->write_proc = prof_cpu_mask_write_proc;
966 * Create entries for all existing IRQs.
968 for (i = 0; i < NR_IRQS; i++) {
969 if (irq_desc[i].handler == NULL)
971 register_irq_proc(i);
/* Deliberately empty handler usable as a placeholder irqaction. */
975 void no_action(int irq, void *dev, struct pt_regs *regs)