2 * linux/arch/i386/kernel/irq.c
4 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
6 * This file contains the code used by various IRQ handling routines:
7 * asking for different IRQ's should be done through these routines
8 * instead of just grabbing them. Thus setups with different IRQ numbers
9 * shouldn't result in any weird surprises, and installing new handlers
14 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
16 * IRQs are in fact implemented a bit like signal handlers for the kernel.
17 * Naturally it's not a 1:1 relation, but there are similarities.
20 #include <linux/config.h>
21 #include <linux/ptrace.h>
22 #include <linux/errno.h>
23 #include <linux/signal.h>
24 #include <linux/sched.h>
25 #include <linux/ioport.h>
26 #include <linux/interrupt.h>
27 #include <linux/timex.h>
28 #include <linux/slab.h>
29 #include <linux/random.h>
30 #include <linux/smp_lock.h>
31 #include <linux/init.h>
32 #include <linux/kernel_stat.h>
33 #include <linux/irq.h>
34 #include <linux/proc_fs.h>
36 #include <asm/atomic.h>
39 #include <asm/system.h>
40 #include <asm/bitops.h>
41 #include <asm/uaccess.h>
42 #include <asm/pgalloc.h>
43 #include <asm/delay.h>
50 * Linux has a controller-independent x86 interrupt architecture.
51 * every controller has a 'controller-template', that is used
52 * by the main code to do the right thing. Each driver-visible
53 * interrupt source is transparently wired to the appropriate
54 * controller. Thus drivers need not be aware of the
55 * interrupt-controller.
57 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
58 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
59 * (IO-APICs assumed to be messaging to Pentium local-APICs)
61 * the code is designed to be easily extended with new/different
62 * interrupt controllers, without having to do assembly magic.
66 * Controller mappings for all interrupt sources:
/*
 * Per-IRQ descriptor table.  Every slot starts out attached to the dummy
 * no_irq_type controller, with no action installed and a disable depth of 0.
 */
68 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
69 { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
71 static void register_irq_proc (unsigned int irq);
74 * Special irq handlers.
/*
 * Dummy interrupt handler: deliberately does nothing.  Installed where an
 * irqaction slot must be filled but the interrupt needs no servicing.
 */
void no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
}
80 * Generic no controller code
/* No-op "enable" hook for the dummy no_irq_type controller. */
static void enable_none(unsigned int irq)
{
}
/* No-op "startup" hook for the dummy controller; reports no pending IRQ. */
static unsigned int startup_none(unsigned int irq)
{
	return 0;
}
/* No-op "disable" hook for the dummy no_irq_type controller. */
static void disable_none(unsigned int irq)
{
}
/*
 * "ack" hook for the dummy controller: reached when a hardware irq event
 * arrives on a vector that nobody claimed.  Logs the stray vector and, on
 * local-APIC configurations, acks it so the APIC irq slot is not held up.
 */
86 static void ack_none(unsigned int irq)
89 * 'what should we do if we get a hw irq event on an illegal vector'.
90 * each architecture has to answer this themselves, it doesn't deserve
91 * a generic callback i think.
94 printk("unexpected IRQ trap at vector %02x\n", irq);
95 #ifdef CONFIG_X86_LOCAL_APIC
97 * Currently unexpected vectors happen only on SMP and APIC.
98 * We _must_ ack these because every local APIC has only N
99 * irq slots per priority level, and a 'hanging, unacked' IRQ
100 * holds up an irq slot - in excessive cases (when multiple
101 * unexpected vectors occur) that might lock up the APIC
109 /* startup is the same as "enable", shutdown is same as "disable" */
110 #define shutdown_none disable_none
111 #define end_none enable_none
/* Placeholder controller used for vectors with no real PIC behind them. */
113 struct hw_interrupt_type no_irq_type = {
/* Count of erroneous/spurious interrupt events (reported as ERR: in /proc). */
123 atomic_t irq_err_count;
124 #ifdef CONFIG_X86_IO_APIC
125 #ifdef APIC_MISMATCH_DEBUG
/* Count of APIC vector mismatches (reported as MIS: in /proc). */
126 atomic_t irq_mis_count;
131 * Generic, controller-independent functions:
/*
 * Format the /proc/interrupts listing into buf: one row per IRQ with
 * per-CPU counts, the controller's typename and the attached action
 * names, followed by NMI, LOC (local-APIC timer), ERR and optionally
 * MIS totals.  Returns the number of bytes written.
 * NOTE(review): several structural lines of this function are missing
 * from this copy of the file.
 */
134 int get_irq_list(char *buf)
137 struct irqaction * action;
140 p += sprintf(p, " ");
141 for (j=0; j<smp_num_cpus; j++)
142 p += sprintf(p, "CPU%d ",j);
145 for (i = 0 ; i < NR_IRQS ; i++) {
146 action = irq_desc[i].action;
149 p += sprintf(p, "%3d: ",i);
151 p += sprintf(p, "%10u ", kstat_irqs(i));
153 for (j = 0; j < smp_num_cpus; j++)
154 p += sprintf(p, "%10u ",
155 kstat.irqs[cpu_logical_map(j)][i]);
157 p += sprintf(p, " %14s", irq_desc[i].handler->typename);
158 p += sprintf(p, " %s", action->name);
160 for (action=action->next; action; action = action->next)
161 p += sprintf(p, ", %s", action->name);
164 p += sprintf(p, "NMI: ");
165 for (j = 0; j < smp_num_cpus; j++)
166 p += sprintf(p, "%10u ",
167 nmi_count(cpu_logical_map(j)));
168 p += sprintf(p, "\n");
169 #if CONFIG_X86_LOCAL_APIC
170 p += sprintf(p, "LOC: ");
171 for (j = 0; j < smp_num_cpus; j++)
172 p += sprintf(p, "%10u ",
173 apic_timer_irqs[cpu_logical_map(j)]);
174 p += sprintf(p, "\n");
176 p += sprintf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
177 #ifdef CONFIG_X86_IO_APIC
178 #ifdef APIC_MISMATCH_DEBUG
179 p += sprintf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
187 * Global interrupt locks for SMP. Allow interrupts to come in on any
188 * CPU, yet make cli/sti act globally to protect critical regions..
/* Owner CPU of the global irq lock (NO_PROC_ID when free) and the lock
 * word itself, manipulated with set_bit/clear_bit/test_and_set_bit. */
192 unsigned char global_irq_holder = NO_PROC_ID;
193 unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
195 extern void show_stack(unsigned long* esp);
/*
 * Debugging dump used when the global irq lock cannot be obtained in a
 * reasonable time: prints per-CPU hardirq/softirq counts, the global bh
 * lock state, and a kernel stack dump for each CPU (via init_tss esp0).
 * NOTE(review): some structural lines are missing from this copy.
 */
197 static void show(char * str)
200 int cpu = smp_processor_id();
202 printk("\n%s, CPU %d:\n", str, cpu);
203 printk("irq: %d [",irqs_running());
204 for(i=0;i < smp_num_cpus;i++)
205 printk(" %d",local_irq_count(i));
206 printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
207 for(i=0;i < smp_num_cpus;i++)
208 printk(" %d",local_bh_count(i));
210 printk(" ]\nStack dumps:");
211 for(i = 0; i < smp_num_cpus; i++) {
215 printk("\nCPU %d:",i);
216 esp = init_tss[i].esp0;
218 /* tss->esp0 is set to NULL in cpu_init(),
219 * it's initialized when the cpu returns to user
222 printk(" <unknown> ");
225 esp &= ~(THREAD_SIZE-1);
226 esp += sizeof(struct task_struct);
227 show_stack((void*)esp);
229 printk("\nCPU %d:",cpu);
234 #define MAXCOUNT 100000000
237 * I had a lockup scenario where a tight loop doing
238 * spin_unlock()/spin_lock() on CPU#1 was racing with
239 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
240 * apparently the spin_unlock() information did not make it
241 * through to CPU#0 ... nasty, is this by design, do we have to limit
242 * 'memory update oscillation frequency' artificially like here?
244 * Such 'high frequency update' races can be avoided by careful design, but
245 * some of our major constructs like spinlocks use similar techniques,
246 * it would be nice to clarify this issue. Set this define to 0 if you
247 * want to check whether your system freezes. I suspect the delay done
248 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
249 * i thought that such things are guaranteed by design, since we use
252 #define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
254 #if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
255 # define SYNC_OTHER_CORES(x) udelay(x+1)
258 * We have to allow irqs to arrive between __sti and __cli
260 # define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
/*
 * Spin until no CPU is executing an interrupt handler (and, unless we
 * are inside a bottom half ourselves, until the global bh lock is free),
 * temporarily dropping the global irq lock while looping to avoid
 * deadlock, and re-acquiring it before returning.
 */
263 static inline void wait_on_irq(int cpu)
265 int count = MAXCOUNT;
270 * Wait until all interrupts are gone. Wait
271 * for bottom half handlers unless we're
272 * already executing in one..
275 if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
278 /* Duh, we have to loop. Release the lock to avoid deadlocks */
279 clear_bit(0,&global_irq_lock);
287 SYNC_OTHER_CORES(cpu);
293 if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
295 if (!test_and_set_bit(0,&global_irq_lock))
302 * This is called when we want to synchronize with
303 * interrupts. We may for example tell a device to
304 * stop sending interrupts: but to make sure there
305 * are no interrupts that are executing on another
306 * CPU we need to call this function.
/*
 * Synchronize with interrupt handlers on other CPUs: returns only once
 * no interrupts are executing anywhere else, so a caller that has told
 * a device to stop interrupting can be sure its handler is not running.
 */
308 void synchronize_irq(void)
310 if (irqs_running()) {
311 /* Stupid approach */
/*
 * Acquire the global irq lock for this cpu.  If this cpu already holds
 * it the call returns immediately (the lock is recursive by holder);
 * otherwise spin until the current holder releases it, then wait for all
 * other CPUs to leave interrupt context before recording ourselves as
 * the holder.
 */
317 static inline void get_irqlock(int cpu)
319 if (test_and_set_bit(0,&global_irq_lock)) {
320 /* do we already hold the lock? */
321 if ((unsigned char) cpu == global_irq_holder)
323 /* Uhhuh.. Somebody else got it. Wait.. */
327 } while (test_bit(0,&global_irq_lock));
328 } while (test_and_set_bit(0,&global_irq_lock));
331 * We also have to make sure that nobody else is running
332 * in an interrupt context.
339 global_irq_holder = cpu;
342 #define EFLAGS_IF_SHIFT 9
345 * A global "cli()" while in an interrupt context
346 * turns into just a local cli(). Interrupts
347 * should use spinlocks for the (very unlikely)
348 * case that they ever want to protect against
351 * If we already have local interrupts disabled,
352 * this will not turn a local disable into a
353 * global one (problems with spinlocks: this makes
354 * save_flags+cli+sti usable inside a spinlock).
/*
 * Global cli(): if local interrupts were enabled (EFLAGS.IF set) and we
 * are not inside an interrupt handler, take the global irq lock so the
 * cli() protects critical regions across all CPUs; inside irq context it
 * degenerates to a purely local disable.
 */
356 void __global_cli(void)
361 if (flags & (1 << EFLAGS_IF_SHIFT)) {
362 int cpu = smp_processor_id();
364 if (!local_irq_count(cpu))
/* Global sti(): release the global irq lock if held by us (only when not
 * in interrupt context) and re-enable local interrupts. */
369 void __global_sti(void)
371 int cpu = smp_processor_id();
373 if (!local_irq_count(cpu))
374 release_irqlock(cpu);
379 * SMP flags value to restore to:
/*
 * Capture the current interrupt state for __global_restore_flags().
 * Defaults to the local encoding 2 + local_enabled (i.e. 2 = local cli,
 * 3 = local sti); outside interrupt context the global-lock state is
 * checked and a global value is returned instead (see the table above).
 */
385 unsigned long __global_save_flags(void)
390 int cpu = smp_processor_id();
393 local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
394 /* default to local */
395 retval = 2 + local_enabled;
397 /* check for global flags if we're not in an interrupt */
398 if (!local_irq_count(cpu)) {
401 if (global_irq_holder == cpu)
/*
 * Inverse of __global_save_flags(): restore the encoded interrupt state.
 * An unrecognized value is reported together with the caller's return
 * address (read via the (&flags)[-1] stack hack) for debugging.
 */
407 void __global_restore_flags(unsigned long flags)
423 printk("global_restore_flags: %08lx (%08lx)\n",
424 flags, (&flags)[-1]);
431 * This should really return information about whether
432 * we should do bottom half handling etc. Right now we
433 * end up _always_ checking the bottom half, which is a
434 * waste of time and is not what some drivers would
/*
 * Run every irqaction chained on this irq.  Local interrupts are enabled
 * during the handlers unless the first action carries SA_INTERRUPT; if
 * any action requested SA_SAMPLE_RANDOM the event is fed to the entropy
 * pool afterwards.  Returns a status word with the "do bottom halves"
 * bit forced on.
 */
437 int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
440 int cpu = smp_processor_id();
444 status = 1; /* Force the "do bottom halves" bit */
446 if (!(action->flags & SA_INTERRUPT))
450 status |= action->flags;
451 action->handler(irq, action->dev_id, regs);
452 action = action->next;
454 if (status & SA_SAMPLE_RANDOM)
455 add_interrupt_randomness(irq);
464 * Generic enable/disable code: this just calls
465 * down into the PIC-specific version for the actual
466 * hardware disable after having gotten the irq
471 * disable_irq_nosync - disable an irq without waiting
472 * @irq: Interrupt to disable
474 * Disable the selected interrupt line. Disables and Enables are
476 * Unlike disable_irq(), this function does not ensure existing
477 * instances of the IRQ handler have completed before returning.
479 * This function may be called from IRQ context.
/*
 * Disable an irq line without waiting for running handlers to finish.
 * Disables nest via desc->depth: only the transition from 0 actually
 * marks the line IRQ_DISABLED and masks it at the controller.
 */
482 inline void disable_irq_nosync(unsigned int irq)
484 irq_desc_t *desc = irq_desc + irq;
487 spin_lock_irqsave(&desc->lock, flags);
488 if (!desc->depth++) {
489 desc->status |= IRQ_DISABLED;
490 desc->handler->disable(irq);
492 spin_unlock_irqrestore(&desc->lock, flags);
496 * disable_irq - disable an irq and wait for completion
497 * @irq: Interrupt to disable
499 * Disable the selected interrupt line. Enables and Disables are
501 * This function waits for any pending IRQ handlers for this interrupt
502 * to complete before returning. If you use this function while
503 * holding a resource the IRQ handler may need you will deadlock.
505 * This function may be called - with care - from IRQ context.
/*
 * Disable an irq and, unless called from interrupt context, spin until
 * any handler instance currently running (IRQ_INPROGRESS) on another CPU
 * has completed.  Deadlocks if the running handler needs a resource the
 * caller holds — see the kerneldoc block above.
 */
508 void disable_irq(unsigned int irq)
510 disable_irq_nosync(irq);
512 if (!local_irq_count(smp_processor_id())) {
516 } while (irq_desc[irq].status & IRQ_INPROGRESS);
521 * enable_irq - enable handling of an irq
522 * @irq: Interrupt to enable
524 * Undoes the effect of one call to disable_irq(). If this
525 * matches the last disable, processing of interrupts on this
526 * IRQ line is re-enabled.
528 * This function may be called from IRQ context.
/*
 * Undo one disable_irq().  On the final re-enable (depth reaching 0) an
 * interrupt that arrived while the line was off (IRQ_PENDING set without
 * IRQ_REPLAY) is replayed via hw_resend_irq(), then the line is unmasked
 * at the controller.  Unbalanced enables are logged with the caller's
 * return address.
 */
531 void enable_irq(unsigned int irq)
533 irq_desc_t *desc = irq_desc + irq;
536 spin_lock_irqsave(&desc->lock, flags);
537 switch (desc->depth) {
539 unsigned int status = desc->status & ~IRQ_DISABLED;
540 desc->status = status;
541 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
542 desc->status = status | IRQ_REPLAY;
543 hw_resend_irq(desc->handler,irq);
545 desc->handler->enable(irq);
552 printk("enable_irq(%u) unbalanced from %p\n", irq,
553 __builtin_return_address(0));
555 spin_unlock_irqrestore(&desc->lock, flags);
559 * do_IRQ handles all normal device IRQ's (the special
560 * SMP cross-CPU interrupts have their own specific
563 asmlinkage unsigned int do_IRQ(struct pt_regs regs)
566 * We ack quickly, we don't want the irq controller
567 * thinking we're snobs just because some other CPU has
568 * disabled global interrupts (we have already done the
569 * INT_ACK cycles, it's too late to try to pretend to the
570 * controller that we aren't taking the interrupt).
572 * 0 return value means that this irq is already being
573 * handled by some other CPU. (or is disabled)
575 int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code */
576 int cpu = smp_processor_id();
577 irq_desc_t *desc = irq_desc + irq;
578 struct irqaction * action;
580 #ifdef CONFIG_DEBUG_STACKOVERFLOW
583 /* Debugging check for stack overflow: is there less than 1KB free? */
584 __asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
585 if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
586 extern void show_stack(unsigned long *);
588 printk("do_IRQ: stack overflow: %ld\n",
589 esp - sizeof(struct task_struct));
590 __asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
591 show_stack((void *)esp);
595 kstat.irqs[cpu][irq]++;
596 spin_lock(&desc->lock);
597 desc->handler->ack(irq);
599 REPLAY is when Linux resends an IRQ that was dropped earlier
600 WAITING is used by probe to mark irqs that are being tested
602 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
603 status |= IRQ_PENDING; /* we _want_ to handle it */
606 * If the IRQ is disabled for whatever reason, we cannot
607 * use the action we have.
610 if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
611 action = desc->action;
612 status &= ~IRQ_PENDING; /* we commit to handling */
613 status |= IRQ_INPROGRESS; /* we are handling it */
615 desc->status = status;
618 * If there is no IRQ handler or it was disabled, exit early.
619 Since we set PENDING, if another processor is handling
620 a different instance of this same irq, the other processor
621 will take care of it.
627 * Edge triggered interrupts need to remember
629 * This applies to any hw interrupts that allow a second
630 * instance of the same irq to arrive while we are in do_IRQ
631 * or in the handler. But the code here only handles the _second_
632 * instance of the irq, not the third or fourth. So it is mostly
633 * useful for irq hardware that does not mask cleanly in an
637 spin_unlock(&desc->lock);
638 handle_IRQ_event(irq, ®s, action);
639 spin_lock(&desc->lock);
641 if (!(desc->status & IRQ_PENDING))
643 desc->status &= ~IRQ_PENDING;
645 desc->status &= ~IRQ_INPROGRESS;
648 * The ->end() handler has to deal with interrupts which got
649 * disabled while the handler was running.
651 desc->handler->end(irq);
652 spin_unlock(&desc->lock);
654 if (softirq_pending(cpu))
660 * request_irq - allocate an interrupt line
661 * @irq: Interrupt line to allocate
662 * @handler: Function to be called when the IRQ occurs
663 * @irqflags: Interrupt type flags
664 * @devname: An ascii name for the claiming device
665 * @dev_id: A cookie passed back to the handler function
667 * This call allocates interrupt resources and enables the
668 * interrupt line and IRQ handling. From the point this
669 * call is made your handler function may be invoked. Since
670 * your handler function must clear any interrupt the board
671 * raises, you must take care both to initialise your hardware
672 * and to set up the interrupt handler in the right order.
674 * Dev_id must be globally unique. Normally the address of the
675 * device data structure is used as the cookie. Since the handler
676 * receives this value it makes sense to use it.
678 * If your interrupt is shared you must pass a non NULL dev_id
679 * as this is required when freeing the interrupt.
683 * SA_SHIRQ Interrupt is shared
685 * SA_INTERRUPT Disable local interrupts while processing
687 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
/*
 * Allocate and install an interrupt handler (contract described in the
 * kerneldoc block above): allocates an irqaction with
 * kmalloc(GFP_KERNEL), fills it in, and links it via setup_irq().
 * Shared-irq callers that pass a NULL dev_id are warned about, since
 * dev_id is what free_irq() later uses to identify the handler.
 */
691 int request_irq(unsigned int irq,
692 void (*handler)(int, void *, struct pt_regs *),
693 unsigned long irqflags,
694 const char * devname,
698 struct irqaction * action;
702 * Sanity-check: shared interrupts should REALLY pass in
703 * a real dev-ID, otherwise we'll have trouble later trying
704 * to figure out which interrupt is which (messes up the
705 * interrupt freeing logic etc).
707 if (irqflags & SA_SHIRQ) {
709 printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
718 action = (struct irqaction *)
719 kmalloc(sizeof(struct irqaction), GFP_KERNEL);
723 action->handler = handler;
724 action->flags = irqflags;
726 action->name = devname;
728 action->dev_id = dev_id;
730 retval = setup_irq(irq, action);
737 * free_irq - free an interrupt
738 * @irq: Interrupt line to free
739 * @dev_id: Device identity to free
741 * Remove an interrupt handler. The handler is removed and if the
742 * interrupt line is no longer in use by any driver it is disabled.
743 * On a shared IRQ the caller must ensure the interrupt is disabled
744 * on the card it drives before calling this function. The function
745 * does not return until any executing interrupts for this IRQ
748 * This function may be called from interrupt context.
750 * Bugs: Attempting to free an irq in a handler for the same irq hangs
/*
 * Remove the irqaction matching dev_id from the irq's handler list; when
 * it was the last handler the line is marked IRQ_DISABLED and shut down
 * at the controller.  After unlocking, waits for a handler instance
 * still running on another CPU (IRQ_INPROGRESS) to finish.  Freeing an
 * irq that has no matching handler is logged.
 */
754 void free_irq(unsigned int irq, void *dev_id)
757 struct irqaction **p;
763 desc = irq_desc + irq;
764 spin_lock_irqsave(&desc->lock,flags);
767 struct irqaction * action = *p;
769 struct irqaction **pp = p;
771 if (action->dev_id != dev_id)
774 /* Found it - now remove it from the list of entries */
777 desc->status |= IRQ_DISABLED;
778 desc->handler->shutdown(irq);
780 spin_unlock_irqrestore(&desc->lock,flags);
783 /* Wait to make sure it's not being used on another CPU */
784 while (desc->status & IRQ_INPROGRESS) {
792 printk("Trying to free free IRQ%d\n",irq);
793 spin_unlock_irqrestore(&desc->lock,flags);
799 * IRQ autodetection code..
801 * This depends on the fact that any interrupt that
802 * comes in on to an unassigned handler will get stuck
803 * with "IRQ_WAITING" cleared and the interrupt
807 static DECLARE_MUTEX(probe_sem);
810 * probe_irq_on - begin an interrupt autodetect
812 * Commence probing for an interrupt. The interrupts are scanned
813 * and a mask of potential interrupt lines is returned.
/*
 * Begin irq autodetection: start all unassigned irqs once to flush
 * longstanding events, wait ~20ms, start them again marked
 * IRQ_AUTODETECT|IRQ_WAITING, wait ~100ms for spurious interrupts, then
 * shut down any line that already triggered (spurious).  Returns a mask
 * of potential interrupt lines (per the kerneldoc above).
 */
817 unsigned long probe_irq_on(void)
826 * something may have generated an irq long ago and we want to
827 * flush such a longstanding irq before considering it as spurious.
829 for (i = NR_IRQS-1; i > 0; i--) {
832 spin_lock_irq(&desc->lock);
833 if (!irq_desc[i].action)
834 irq_desc[i].handler->startup(i);
835 spin_unlock_irq(&desc->lock);
838 /* Wait for longstanding interrupts to trigger. */
839 for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
840 /* about 20ms delay */ synchronize_irq();
843 * enable any unassigned irqs
844 * (we must startup again here because if a longstanding irq
845 * happened in the previous stage, it may have masked itself)
847 for (i = NR_IRQS-1; i > 0; i--) {
850 spin_lock_irq(&desc->lock);
852 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
853 if (desc->handler->startup(i))
854 desc->status |= IRQ_PENDING;
856 spin_unlock_irq(&desc->lock);
860 * Wait for spurious interrupts to trigger
862 for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
863 /* about 100ms delay */ synchronize_irq();
866 * Now filter out any obviously spurious interrupts
869 for (i = 0; i < NR_IRQS; i++) {
870 irq_desc_t *desc = irq_desc + i;
873 spin_lock_irq(&desc->lock);
874 status = desc->status;
876 if (status & IRQ_AUTODETECT) {
877 /* It triggered already - consider it spurious. */
878 if (!(status & IRQ_WAITING)) {
879 desc->status = status & ~IRQ_AUTODETECT;
880 desc->handler->shutdown(i);
885 spin_unlock_irq(&desc->lock);
892 * Return a mask of triggered interrupts (this
893 * can handle only legacy ISA interrupts).
897 * probe_irq_mask - scan a bitmap of interrupt lines
898 * @val: mask of interrupts to consider
900 * Scan the ISA bus interrupt lines and return a bitmap of
901 * active interrupts. The interrupt probe logic state is then
902 * returned to its previous value.
904 * Note: we need to scan all the irq's even though we will
905 * only return ISA irq numbers - just so that we reset them
906 * all to a known state.
/*
 * End a probe and report which ISA irqs (i < 16) triggered while under
 * autodetect (IRQ_WAITING cleared), shutting every probed line down and
 * clearing its IRQ_AUTODETECT state.  All NR_IRQS lines are scanned so
 * they are all returned to a known state (see kerneldoc above).
 */
908 unsigned int probe_irq_mask(unsigned long val)
914 for (i = 0; i < NR_IRQS; i++) {
915 irq_desc_t *desc = irq_desc + i;
918 spin_lock_irq(&desc->lock);
919 status = desc->status;
921 if (status & IRQ_AUTODETECT) {
922 if (i < 16 && !(status & IRQ_WAITING))
925 desc->status = status & ~IRQ_AUTODETECT;
926 desc->handler->shutdown(i);
928 spin_unlock_irq(&desc->lock);
936 * Return the one interrupt that triggered (this can
937 * handle any interrupt source).
941 * probe_irq_off - end an interrupt autodetect
942 * @val: mask of potential interrupts (unused)
944 * Scans the unused interrupt lines and returns the line which
945 * appears to have triggered the interrupt. If no interrupt was
946 * found then zero is returned. If more than one interrupt is
947 * found then minus the first candidate is returned to indicate
950 * The interrupt probe logic state is returned to its previous
953 * BUGS: When used in a module (which arguably shouldn't happen)
954 * nothing prevents two IRQ probe callers from overlapping. The
955 * results of this are non-optimal.
/*
 * End a probe and return the single irq that triggered; 0 when none did,
 * and minus the first candidate when more than one triggered (per the
 * kerneldoc above).  Every probed line is shut down and cleared of its
 * IRQ_AUTODETECT state along the way.
 */
958 int probe_irq_off(unsigned long val)
960 int i, irq_found, nr_irqs;
964 for (i = 0; i < NR_IRQS; i++) {
965 irq_desc_t *desc = irq_desc + i;
968 spin_lock_irq(&desc->lock);
969 status = desc->status;
971 if (status & IRQ_AUTODETECT) {
972 if (!(status & IRQ_WAITING)) {
977 desc->status = status & ~IRQ_AUTODETECT;
978 desc->handler->shutdown(i);
980 spin_unlock_irq(&desc->lock);
985 irq_found = -irq_found;
989 /* this was setup_x86_irq but it seems pretty generic */
/*
 * Attach an irqaction to an irq descriptor.  The (possibly sleeping)
 * entropy-pool initialization runs first, outside the spinlocked
 * section; the action is then linked in atomically, with mismatched
 * SA_SHIRQ sharing rejected.  A line gaining its first handler is
 * started up, and its /proc/irq entry is registered afterwards.
 */
990 int setup_irq(unsigned int irq, struct irqaction * new)
994 struct irqaction *old, **p;
995 irq_desc_t *desc = irq_desc + irq;
998 * Some drivers like serial.c use request_irq() heavily,
999 * so we have to be careful not to interfere with a
1002 if (new->flags & SA_SAMPLE_RANDOM) {
1004 * This function might sleep, we want to call it first,
1005 * outside of the atomic block.
1006 * Yes, this might clear the entropy pool if the wrong
1007 * driver is attempted to be loaded, without actually
1008 * installing a new handler, but is this really a problem,
1009 * only the sysadmin is able to do this.
1011 rand_initialize_irq(irq);
1015 * The following block of code has to be executed atomically
1017 spin_lock_irqsave(&desc->lock,flags);
1019 if ((old = *p) != NULL) {
1020 /* Can't share interrupts unless both agree to */
1021 if (!(old->flags & new->flags & SA_SHIRQ)) {
1022 spin_unlock_irqrestore(&desc->lock,flags);
1026 /* add new interrupt at end of irq queue */
1038 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
1039 desc->handler->startup(irq);
1041 spin_unlock_irqrestore(&desc->lock,flags);
1043 register_irq_proc(irq);
1047 static struct proc_dir_entry * root_irq_dir;
1048 static struct proc_dir_entry * irq_dir [NR_IRQS];
1050 #define HEX_DIGITS 8
/*
 * Parse up to HEX_DIGITS hex characters from the userspace buffer,
 * accumulating them into a value that is stored through *ret.  Parsing
 * stops at the first non-hex character; case is ignored.
 * NOTE(review): the early-return and final-store lines are missing from
 * this copy of the file.
 */
1052 static unsigned int parse_hex_value (const char *buffer,
1053 unsigned long count, unsigned long *ret)
1055 unsigned char hexnum [HEX_DIGITS];
1056 unsigned long value;
1061 if (count > HEX_DIGITS)
1063 if (copy_from_user(hexnum, buffer, count))
1067 * Parse the first 8 characters as a hex string, any non-hex char
1068 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
1072 for (i = 0; i < count; i++) {
1073 unsigned int c = hexnum[i];
1076 case '0' ... '9': c -= '0'; break;
1077 case 'a' ... 'f': c -= 'a'-10; break;
1078 case 'A' ... 'F': c -= 'A'-10; break;
1082 value = (value << 4) | c;
1091 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
1093 static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
/* /proc read handler: print the irq's affinity mask as 8 hex digits;
 * the irq number is carried in the opaque data pointer. */
1094 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1095 int count, int *eof, void *data)
1097 if (count < HEX_DIGITS+1)
1099 return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
/*
 * /proc write handler: parse a hex cpu mask and program it through the
 * controller's set_affinity hook.  Masks targeting no online CPU are
 * refused so an irq cannot be disabled by accident; controllers without
 * a set_affinity hook reject the write entirely.
 */
1102 static int irq_affinity_write_proc (struct file *file, const char *buffer,
1103 unsigned long count, void *data)
1105 int irq = (long) data, full_count = count, err;
1106 unsigned long new_value;
1108 if (!irq_desc[irq].handler->set_affinity)
1111 err = parse_hex_value(buffer, count, &new_value);
1114 * Do not allow disabling IRQs completely - it's a too easy
1115 * way to make the system unusable accidentally :-) At least
1116 * one online CPU still has to be targeted.
1118 if (!(new_value & cpu_online_map))
1121 irq_affinity[irq] = new_value;
1122 irq_desc[irq].handler->set_affinity(irq, new_value);
/* /proc read handler: print the profiling cpu mask (passed via data) as
 * 8 hex digits. */
1129 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
1130 int count, int *eof, void *data)
1132 unsigned long *mask = (unsigned long *) data;
1133 if (count < HEX_DIGITS+1)
1135 return sprintf (page, "%08lx\n", *mask);
/* /proc write handler: parse a hex value and store it into the profiling
 * cpu mask passed via data. */
1138 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
1139 unsigned long count, void *data)
1141 unsigned long *mask = (unsigned long *) data, full_count = count, err;
1142 unsigned long new_value;
1144 err = parse_hex_value(buffer, count, &new_value);
1152 #define MAX_NAMELEN 10
/*
 * Create /proc/irq/<n> and its smp_affinity entry (mode 0600, wired to
 * the affinity read/write handlers above) for an irq that has a real
 * controller behind it.  Does nothing before /proc/irq exists or for
 * lines still owned by the dummy no_irq_type controller.
 */
1154 static void register_irq_proc (unsigned int irq)
1156 char name [MAX_NAMELEN];
1158 if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
1162 memset(name, 0, MAX_NAMELEN);
1163 sprintf(name, "%d", irq);
1165 /* create /proc/irq/1234 */
1166 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1170 struct proc_dir_entry *entry;
1172 /* create /proc/irq/1234/smp_affinity */
1173 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1177 entry->data = (void *)(long)irq;
1178 entry->read_proc = irq_affinity_read_proc;
1179 entry->write_proc = irq_affinity_write_proc;
1182 smp_affinity_entry[irq] = entry;
1187 unsigned long prof_cpu_mask = -1;
/*
 * Build the /proc/irq tree at boot: the root directory, the
 * prof_cpu_mask entry (wired to the handlers above), and one entry per
 * existing irq via register_irq_proc().
 */
1189 void init_irq_proc (void)
1191 struct proc_dir_entry *entry;
1194 /* create /proc/irq */
1195 root_irq_dir = proc_mkdir("irq", 0);
1197 /* create /proc/irq/prof_cpu_mask */
1198 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1204 entry->data = (void *)&prof_cpu_mask;
1205 entry->read_proc = prof_cpu_mask_read_proc;
1206 entry->write_proc = prof_cpu_mask_write_proc;
1209 * Create entries for all existing IRQs.
1211 for (i = 0; i < NR_IRQS; i++)
1212 register_irq_proc(i);