2 * arch/ppc/kernel/irq.c
4 * Derived from arch/i386/kernel/irq.c
5 * Copyright (C) 1992 Linus Torvalds
6 * Adapted from arch/i386 by Gary Thomas
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Cort Dougan
10 * Adapted for Power Macintosh by Paul Mackerras
11 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
19 * This file contains the code used by various IRQ handling routines:
20 * asking for different IRQ's should be done through these routines
21 * instead of just grabbing them. Thus setups with different IRQ numbers
22 * shouldn't result in any weird surprises, and installing new handlers
26 #include <linux/ptrace.h>
27 #include <linux/errno.h>
28 #include <linux/threads.h>
29 #include <linux/kernel_stat.h>
30 #include <linux/signal.h>
31 #include <linux/sched.h>
32 #include <linux/ioport.h>
33 #include <linux/interrupt.h>
34 #include <linux/timex.h>
35 #include <linux/config.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/pci.h>
39 #include <linux/delay.h>
40 #include <linux/irq.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/random.h>
44 #include <linux/bootmem.h>
46 #include <asm/uaccess.h>
47 #include <asm/bitops.h>
48 #include <asm/system.h>
50 #include <asm/pgtable.h>
52 #include <asm/cache.h>
54 #include <asm/ptrace.h>
55 #include <asm/iSeries/LparData.h>
56 #include <asm/machdep.h>
58 #include <asm/perfmon.h>
61 * Because the name space for interrupts is so large on ppc64 systems we
62 * avoid declaring a single array of "NR_IRQ" interrupts and instead build
63 * a three level tree leading to the irq_desc_t (similar to page tables).
65 * Currently we cover 24-bit irq values:
66 * 10-bits: the "base" dir (2-pages)
67 * 9-bits: the "middle" dir (1-page)
68 * 5-bits: the "bottom" page (1-page) holding 128byte irq_desc's.
70 * We pack a hw_irq_stat struct directly after the irq_desc in the otherwise
71 * wasted space of the cacheline.
73 * MAX_IRQS is the max this implementation will support.
74 * It is much larger than NR_IRQS which is bogus on this arch and often used
77 * Note that all "undefined" mid table and bottom table pointers will point
78 * to dummy tables. Therefore, we don't need to check for NULL on spurious
/* Geometry of the 3-level irq_desc lookup tree described above:
 * a 24-bit irq is split into 10-bit base, 9-bit mid, 5-bit bottom indices. */
82 #define IRQ_BASE_INDEX_SIZE 10
83 #define IRQ_MID_INDEX_SIZE 9
84 #define IRQ_BOT_DESC_SIZE 5
/* Entry counts for one table at each level. */
86 #define IRQ_BASE_PTRS (1 << IRQ_BASE_INDEX_SIZE)
87 #define IRQ_MID_PTRS (1 << IRQ_MID_INDEX_SIZE)
88 #define IRQ_BOT_DESCS (1 << IRQ_BOT_DESC_SIZE)
/* Shifts and masks to extract each level's index from an irq number. */
90 #define IRQ_BASE_IDX_SHIFT (IRQ_MID_INDEX_SIZE + IRQ_BOT_DESC_SIZE)
91 #define IRQ_MID_IDX_SHIFT (IRQ_BOT_DESC_SIZE)
93 #define IRQ_MID_IDX_MASK ((1 << IRQ_MID_INDEX_SIZE) - 1)
94 #define IRQ_BOT_IDX_MASK ((1 << IRQ_BOT_DESC_SIZE) - 1)
/* Top level of the irq tree: base dir of pointers to mid tables.
 * Unused slots point at the shared "null" mid/bot tables rather than NULL,
 * so lookups never need a NULL check (see comment above). */
96 irq_desc_t **irq_desc_base_dir[IRQ_BASE_PTRS] __page_aligned = {0};
97 irq_desc_t **irq_desc_mid_null;
98 irq_desc_t *irq_desc_bot_null;
/* Forward declarations used by the for_each_irq iteration and callers below. */
100 unsigned int _next_irq(unsigned int irq);
103 void enable_irq(unsigned int irq_nr);
104 void disable_irq(unsigned int irq_nr);
107 extern void iSeries_smp_message_recv( struct pt_regs * );
110 volatile unsigned char *chrp_int_ack_special;
111 static void register_irq_proc (unsigned int irq);
/* Legacy flat descriptor array; the tree above is the real lookup path. */
113 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
114 { [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
116 static irq_desc_t *add_irq_desc(unsigned int irq);
118 int ppc_spurious_interrupts = 0;
119 unsigned long lpEvent_count = 0;
/* xmon debugger hooks (raw entry points). */
121 extern void xmon(struct pt_regs *regs);
122 extern int xmon_bpt(struct pt_regs *regs);
123 extern int xmon_sstep(struct pt_regs *regs);
124 extern int xmon_iabr_match(struct pt_regs *regs);
125 extern int xmon_dabr_match(struct pt_regs *regs);
126 extern void (*xmon_fault_handler)(struct pt_regs *regs);
/* Generic debugger indirection pointers. */
129 extern void (*debugger)(struct pt_regs *regs);
130 extern int (*debugger_bpt)(struct pt_regs *regs);
131 extern int (*debugger_sstep)(struct pt_regs *regs);
132 extern int (*debugger_iabr_match)(struct pt_regs *regs);
133 extern int (*debugger_dabr_match)(struct pt_regs *regs);
134 extern void (*debugger_fault_handler)(struct pt_regs *regs);
/* Small static cache of irqaction structs used by irq_kmalloc() before
 * the real allocator is available (mem_init_done == 0). */
137 #define IRQ_KMALLOC_ENTRIES 16
138 static int cache_bitmask = 0;
139 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
140 extern int mem_init_done;
100 142 /* The hw_irq_stat struct is stored directly after the irq_desc_t
143 * in the same cacheline. We need to use care to make sure we don't
144 * overrun the size of the cacheline.
146 * Currently sizeof(irq_desc_t) is 40 bytes or less and this hw_irq_stat
147 * fills the rest of the cache line.
/* NOTE(review): the struct opening line is not visible in this chunk. */
150 unsigned long irqs; /* statistic per irq */
151 unsigned long *per_cpu_stats; /* NR_CPUS counters; allocated lazily in allocate_per_cpu_stats() */
152 struct proc_dir_entry *irq_dir, *smp_affinity; /* /proc/irq/NNN entries */
153 unsigned long irq_affinity; /* ToDo: cpu bitmask */
/* Return the hw_irq_stat packed immediately after desc in the same
 * cacheline (layout described in the comment above the struct). */
156 static inline struct hw_irq_stat *get_irq_stat(irq_desc_t *desc)
158 /* WARNING: this assumes lock is the last field! */
159 return (struct hw_irq_stat *)(&desc->lock+1);
/* Per-cpu counter array for this irq; may be NULL until setup_irq() runs. */
162 static inline unsigned long *get_irq_per_cpu(struct hw_irq_stat *hw)
164 return hw->per_cpu_stats;
/* Look up the mid table for irq from the base dir (top 10 bits). */
167 static inline irq_desc_t **get_irq_mid_table(unsigned int irq)
169 /* Assume irq < MAX_IRQS so we won't index off the end. */
170 return irq_desc_base_dir[irq >> IRQ_BASE_IDX_SHIFT];
/* Look up the bottom (irq_desc) page from a mid table (middle 9 bits). */
173 static inline irq_desc_t *get_irq_bot_table(unsigned int irq,
174 irq_desc_t **mid_ptr)
176 return mid_ptr[(irq >> IRQ_MID_IDX_SHIFT) & IRQ_MID_IDX_MASK];
179 /* This should be inline. */
/* Walk all three levels of the tree to the irq_desc for irq.  Because
 * empty slots point at the null tables, this never returns NULL. */
180 void *_irqdesc(unsigned int irq)
182 irq_desc_t **mid_table, *bot_table, *desc;
184 mid_table = get_irq_mid_table(irq);
185 bot_table = get_irq_bot_table(irq, mid_table);
/* Bottom 5 bits index the desc within the bottom page. */
187 desc = bot_table + (irq & IRQ_BOT_IDX_MASK);
192 * This is used by the for_each_irq(i) macro to iterate quickly over
193 * all interrupts. It optimizes by skipping over ptrs to the null tables
194 * when possible, but it may produce false positives.
196 unsigned int _next_irq(unsigned int irq)
198 irq_desc_t **mid_table, *bot_table;
201 /* Easy case first...staying on the current bot_table. */
202 if (irq & IRQ_BOT_IDX_MASK)
205 /* Now skip empty mid tables */
206 while (irq < MAX_IRQS &&
207 (mid_table = get_irq_mid_table(irq)) == irq_desc_mid_null) {
208 /* index to the next base index (i.e. the next mid table) */
/* NOTE(review): this masks with ~(IRQ_BASE_IDX_SHIFT-1) and advances by
 * IRQ_BASE_IDX_SHIFT (a shift count, == 14), where the intent appears to
 * be (1 << IRQ_BASE_IDX_SHIFT).  Looks like a bug -- confirm against the
 * upstream kernel source before relying on for_each_irq coverage. */
209 irq = (irq & ~(IRQ_BASE_IDX_SHIFT-1)) + IRQ_BASE_IDX_SHIFT;
211 /* And skip empty bot tables */
212 while (irq < MAX_IRQS &&
213 (bot_table = get_irq_bot_table(irq, mid_table)) == irq_desc_bot_null) {
214 /* index to the next mid index (i.e. the next bot table) */
/* NOTE(review): same suspected shift-count-vs-size confusion as above. */
215 irq = (irq & ~(IRQ_MID_IDX_SHIFT-1)) + IRQ_MID_IDX_SHIFT;
221 /* Same as irqdesc(irq) except it will "fault in" a real desc as needed
222 * rather than return the null entry.
223 * This is used by code that is actually defining the irq.
225 * NULL may be returned on memory allocation failure. In general, init code
226 * doesn't look for this, but setup_irq does. In this failure case the desc
227 * is left pointing at the null pages so callers of irqdesc() should
228 * always return something.
230 void *_real_irqdesc(unsigned int irq)
232 irq_desc_t *desc = irqdesc(irq);
/* A desc on the shared null bottom page means the irq has never been
 * defined: materialize real mid/bot pages for it now. */
233 if (((unsigned long)desc & PAGE_MASK) ==
234 (unsigned long)irq_desc_bot_null) {
235 desc = add_irq_desc(irq);
240 /* Allocate an irq middle page and init entries to null page. */
/* Uses the page allocator after mem_init, bootmem before it. */
241 static irq_desc_t **alloc_irq_mid_page(void)
243 irq_desc_t **m, **ent;
246 m = (irq_desc_t **)__get_free_page(GFP_KERNEL);
248 m = (irq_desc_t **)alloc_bootmem_pages(PAGE_SIZE);
/* Point every entry at the shared null bottom page so lookups through
 * this table are always safe without NULL checks. */
250 for (ent = m; ent < m + IRQ_MID_PTRS; ent++) {
251 *ent = irq_desc_bot_null;
257 /* Allocate an irq bottom page and init the entries. */
/* Same bootmem-vs-GFP_KERNEL split as alloc_irq_mid_page(); each desc
 * gets an initialized spinlock. */
258 static irq_desc_t *alloc_irq_bot_page(void)
262 b = (irq_desc_t *)get_zeroed_page(GFP_KERNEL);
264 b = (irq_desc_t *)alloc_bootmem_pages(PAGE_SIZE);
266 for (ent = b; ent < b + IRQ_BOT_DESCS; ent++) {
267 ent->lock = SPIN_LOCK_UNLOCKED;
274 * The universe of interrupt numbers ranges from 0 to 2^24.
275 * Use a sparsely populated tree to map from the irq to the handler.
276 * Top level is 2 contiguous pages, covering the 10 most significant
277 * bits. Mid level is 1 page, covering 9 bits. Last page covering
278 * 5 bits is the irq_desc, each of which is 128B.
280 static void irq_desc_init(void) {
281 irq_desc_t ***entry_p;
284 * Now initialize the tables to point though the NULL tables for
285 * the default case of no interrupt handler (spurious).
287 irq_desc_bot_null = alloc_irq_bot_page();
288 irq_desc_mid_null = alloc_irq_mid_page();
/* Boot-time allocation failure is fatal: the whole lookup scheme
 * depends on the null tables existing. */
289 if (!irq_desc_bot_null || !irq_desc_mid_null)
290 panic("irq_desc_init: could not allocate pages\n");
291 for(entry_p = irq_desc_base_dir;
292 entry_p < irq_desc_base_dir + IRQ_BASE_PTRS;
294 *entry_p = irq_desc_mid_null;
299 * Add a new irq desc for the given irq if needed.
300 * This breaks any ptr to the "null" middle or "bottom" irq desc page.
301 * Note that we don't ever coalesce pages as the interrupts are released.
302 * This isn't worth the effort. We add the cpu stats info when the
303 * interrupt is actually requested.
305 * May return NULL if memory could not be allocated.
307 static irq_desc_t *add_irq_desc(unsigned int irq)
309 irq_desc_t **mid_table_p, *bot_table_p;
311 mid_table_p = get_irq_mid_table(irq);
312 if(mid_table_p == irq_desc_mid_null) {
313 /* No mid table for this IRQ - create it */
314 mid_table_p = alloc_irq_mid_page();
315 if (!mid_table_p) return NULL;
316 irq_desc_base_dir[irq >> IRQ_BASE_IDX_SHIFT] = mid_table_p;
/* NOTE(review): the literals 5 and 0x1ff below duplicate
 * IRQ_MID_IDX_SHIFT and IRQ_MID_IDX_MASK; should use the macros for
 * consistency with the write on the mid_table_p line further down. */
319 bot_table_p = (irq_desc_t *)(*(mid_table_p + ((irq >> 5) & 0x1ff)));
321 if(bot_table_p == irq_desc_bot_null) {
322 /* No bot table for this IRQ - create it */
323 bot_table_p = alloc_irq_bot_page();
324 if (!bot_table_p) return NULL;
325 mid_table_p[(irq >> IRQ_MID_IDX_SHIFT) & IRQ_MID_IDX_MASK] = bot_table_p;
328 return bot_table_p + (irq & IRQ_BOT_IDX_MASK);
/* kmalloc wrapper usable before mem_init: falls back to a small static
 * cache of irqaction-sized slots tracked by cache_bitmask. */
331 void *irq_kmalloc(size_t size, int pri)
335 return kmalloc(size,pri);
/* Find a free slot in the static cache and claim it. */
336 for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
337 if ( ! ( cache_bitmask & (1<<i) ) ) {
338 cache_bitmask |= (1<<i);
339 return (void *)(&malloc_cache[i]);
/* Free counterpart of irq_kmalloc(): release a static-cache slot if ptr
 * belongs to the cache (otherwise it came from kmalloc). */
344 void irq_kfree(void *ptr)
347 for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
348 if ( ptr == &malloc_cache[i] ) {
349 cache_bitmask &= ~(1<<i);
/* Lazily allocate and zero the NR_CPUS counter array for an irq's stats.
 * Uses kmalloc after mem_init, bootmem before; a NULL result is simply
 * stored, so readers must tolerate per_cpu_stats == NULL. */
355 void allocate_per_cpu_stats(struct hw_irq_stat *hwstat)
360 p = (unsigned long *)kmalloc(sizeof(long)*NR_CPUS, GFP_KERNEL);
361 if (p) memset(p, 0, sizeof(long)*NR_CPUS);
363 p = (unsigned long *)alloc_bootmem(sizeof(long)*NR_CPUS);
364 hwstat->per_cpu_stats = p;
/* Install an irqaction on irq: faults in a real desc, initializes the
 * platform desc state, sets default affinity, allocates per-cpu stats,
 * then links the action under desc->lock and registers the /proc entry. */
368 setup_irq(unsigned int irq, struct irqaction * new)
372 struct irqaction *old, **p;
373 irq_desc_t *desc = real_irqdesc(irq);
374 struct hw_irq_stat *hwstat;
/* Give the platform a chance to init its part of the descriptor. */
379 ppc_md.init_irq_desc(desc);
381 hwstat = get_irq_stat(desc);
/* Default affinity: all cpus or none, per config. */
383 #ifdef CONFIG_IRQ_ALL_CPUS
384 hwstat->irq_affinity = ~0;
386 hwstat->irq_affinity = 0;
389 /* Now is the time to add per-cpu kstat data to the desc
390 * since it appears we are actually going to use the irq.
392 allocate_per_cpu_stats(hwstat);
395 * Some drivers like serial.c use request_irq() heavily,
396 * so we have to be careful not to interfere with a
399 if (new->flags & SA_SAMPLE_RANDOM) {
401 * This function might sleep, we want to call it first,
402 * outside of the atomic block.
403 * Yes, this might clear the entropy pool if the wrong
404 * driver is attempted to be loaded, without actually
405 * installing a new handler, but is this really a problem,
406 * only the sysadmin is able to do this.
408 rand_initialize_irq(irq);
412 * The following block of code has to be executed atomically
414 spin_lock_irqsave(&desc->lock,flags);
416 if ((old = *p) != NULL) {
417 /* Can't share interrupts unless both agree to */
418 if (!(old->flags & new->flags & SA_SHIRQ)) {
419 spin_unlock_irqrestore(&desc->lock,flags);
423 /* add new interrupt at end of irq queue */
435 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
438 spin_unlock_irqrestore(&desc->lock,flags);
440 register_irq_proc(irq);
444 /* This could be promoted to a real free_irq() ... */
/* Unlink the action matching dev_id from desc's list; on the last one
 * the irq is disabled and, on SMP, we spin until no handler instance is
 * still running on another cpu.  Warns if no matching action exists. */
446 do_free_irq(int irq, void* dev_id)
448 irq_desc_t *desc = irqdesc(irq);
449 struct irqaction **p;
452 spin_lock_irqsave(&desc->lock,flags);
455 struct irqaction * action = *p;
457 struct irqaction **pp = p;
459 if (action->dev_id != dev_id)
462 /* Found it - now remove it from the list of entries */
465 desc->status |= IRQ_DISABLED;
468 spin_unlock_irqrestore(&desc->lock,flags);
471 /* Wait to make sure it's not being used on another CPU */
472 while (desc->status & IRQ_INPROGRESS)
478 printk("Trying to free free IRQ%d\n",irq);
479 spin_unlock_irqrestore(&desc->lock,flags);
/* Public entry point for claiming an irq.  A NULL handler means "free":
 * it is routed to do_free_irq().  Otherwise allocate an irqaction (via
 * irq_kmalloc so this works pre-mem_init), fill it, and setup_irq() it. */
485 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
486 unsigned long irqflags, const char * devname, void *dev_id)
488 struct irqaction *action;
495 /* We could implement really free_irq() instead of that... */
496 return do_free_irq(irq, dev_id);
498 action = (struct irqaction *)
499 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
501 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
505 action->handler = handler;
506 action->flags = irqflags;
508 action->name = devname;
509 action->dev_id = dev_id;
512 retval = setup_irq(irq, action);
/* free_irq is implemented as request_irq with a NULL handler, which
 * request_irq() translates into do_free_irq(). */
519 void free_irq(unsigned int irq, void *dev_id)
521 request_irq(irq, NULL, 0, NULL, dev_id);
525 * Generic enable/disable code: this just calls
526 * down into the PIC-specific version for the actual
527 * hardware disable after having gotten the irq
532 * disable_irq_nosync - disable an irq without waiting
533 * @irq: Interrupt to disable
535 * Disable the selected interrupt line. Disables of an interrupt
536 * stack. Unlike disable_irq(), this function does not ensure existing
537 * instances of the IRQ handler have completed before returning.
539 * This function may be called from IRQ context.
/* Disable irq without waiting for in-flight handlers; nestable via
 * desc->depth.  Per-cpu irqs are never marked IRQ_DISABLED. */
542 void disable_irq_nosync(unsigned int irq)
544 irq_desc_t *desc = irqdesc(irq);
547 spin_lock_irqsave(&desc->lock, flags);
/* Only the first disable in the nesting stack flips the status bit. */
548 if (!desc->depth++) {
549 if (!(desc->status & IRQ_PER_CPU))
550 desc->status |= IRQ_DISABLED;
553 spin_unlock_irqrestore(&desc->lock, flags);
557 * disable_irq - disable an irq and wait for completion
558 * @irq: Interrupt to disable
560 * Disable the selected interrupt line. Disables of an interrupt
561 * stack. That is for two disables you need two enables. This
562 * function waits for any pending IRQ handlers for this interrupt
563 * to complete before returning. If you use this function while
564 * holding a resource the IRQ handler may need you will deadlock.
566 * This function may be called - with care - from IRQ context.
/* Synchronous disable: disable_irq_nosync() plus, when not already in
 * irq context, spin until no instance of the handler is IN PROGRESS. */
569 void disable_irq(unsigned int irq)
571 disable_irq_nosync(irq);
573 if (!local_irq_count(smp_processor_id())) {
576 } while (irqdesc(irq)->status & IRQ_INPROGRESS);
581 * enable_irq - enable interrupt handling on an irq
582 * @irq: Interrupt to enable
584 * Re-enables the processing of interrupts on this IRQ line
585 * providing no disable_irq calls are now in effect.
587 * This function may be called from IRQ context.
/* Undo one level of disable_irq() nesting; on the final enable, clear
 * IRQ_DISABLED and replay any irq that arrived while disabled. */
590 void enable_irq(unsigned int irq)
592 irq_desc_t *desc = irqdesc(irq);
595 spin_lock_irqsave(&desc->lock, flags);
596 switch (desc->depth) {
598 unsigned int status = desc->status & ~IRQ_DISABLED;
599 desc->status = status;
/* PENDING without REPLAY: an irq was dropped while disabled; resend it. */
600 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
601 desc->status = status | IRQ_REPLAY;
602 hw_resend_irq(desc->handler,irq);
/* depth == 0: enable without matching disable. */
611 printk("enable_irq(%u) unbalanced\n", irq);
613 spin_unlock_irqrestore(&desc->lock, flags);
616 /* This function as implemented was a potential source of data
617 * corruption. I pulled it for now, until it can be properly
/* Deliberately stubbed out -- see the comment above. */
620 int get_irq_list(char *buf)
/* seq_file backend for /proc/interrupts: header row of CPUs, one row per
 * active irq (per-cpu counts when available, else the summed count),
 * then the IPI and spurious-interrupt totals. */
625 int show_interrupts(struct seq_file *p, void *v)
628 struct irqaction * action;
630 struct hw_irq_stat *hwstat;
631 unsigned long *per_cpus;
635 for (j=0; j<smp_num_cpus; j++)
636 seq_printf(p, "CPU%d ",j);
641 spin_lock_irqsave(&desc->lock, flags);
642 action = desc->action;
/* Skip irqs with no installed handler. */
644 if (!action || !action->handler)
646 seq_printf(p, "%3d: ", i);
647 hwstat = get_irq_stat(desc);
648 per_cpus = get_irq_per_cpu(hwstat);
/* per_cpus may be NULL (allocation is lazy); fall back to the total. */
650 for (j = 0; j < smp_num_cpus; j++)
651 seq_printf(p, "%10lu ", per_cpus[j]);
653 seq_printf(p, "%10lu ", hwstat->irqs);
656 if (irqdesc(i)->handler)
657 seq_printf(p, " %s ", irqdesc(i)->handler->typename );
659 seq_printf(p, " None ");
660 seq_printf(p, "%s", (irqdesc(i)->status & IRQ_LEVEL) ? "Level " : "Edge ");
661 seq_printf(p, " %s",action->name);
662 for (action=action->next; action; action = action->next)
663 seq_printf(p, ", %s", action->name);
666 spin_unlock_irqrestore(&desc->lock, flags);
669 /* should this be per processor send/receive? */
670 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
671 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
673 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
/* Run every handler chained on this irq.  Handlers without SA_INTERRUPT
 * run with interrupts enabled; SA_SAMPLE_RANDOM handlers feed entropy. */
678 handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
682 if (!(action->flags & SA_INTERRUPT))
/* Accumulate flags across the chain so SA_SAMPLE_RANDOM from any
 * handler triggers the entropy update below. */
686 status |= action->flags;
687 action->handler(irq, action->dev_id, regs);
688 action = action->next;
690 if (status & SA_SAMPLE_RANDOM)
691 add_interrupt_randomness(irq);
696 * Eventually, this should take an array of interrupts and an array size
697 * so it can dispatch multiple interrupts.
/* Core dispatch: account the irq, manage the PENDING/INPROGRESS protocol
 * under desc->lock so one cpu handles all instances of an irq, invoke
 * the handler chain, and finally ack via handler->end()/enable(). */
699 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
702 struct irqaction *action;
703 int cpu = smp_processor_id();
704 irq_desc_t *desc = irqdesc(irq);
705 struct hw_irq_stat *hwstat;
706 unsigned long *per_cpus;
709 hwstat = get_irq_stat(desc); /* same cache line as desc */
711 per_cpus = get_irq_per_cpu(hwstat); /* same cache line for < 8 cpus */
/* kstat accounting; irqs >= NR_IRQS are lumped into the last slot. */
716 kstat.irqs[cpu][irq]++;
718 kstat.irqs[cpu][NR_IRQS-1]++;
722 spin_lock(&desc->lock);
725 REPLAY is when Linux resends an IRQ that was dropped earlier
726 WAITING is used by probe to mark irqs that are being tested
728 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
729 if (!(status & IRQ_PER_CPU))
730 status |= IRQ_PENDING; /* we _want_ to handle it */
733 * If the IRQ is disabled for whatever reason, we cannot
734 * use the action we have.
737 if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
738 action = desc->action;
/* No handler installed: count it spurious and disable the source so a
 * screaming interrupt can't wedge the cpu. */
739 if (!action || !action->handler) {
740 ppc_spurious_interrupts++;
741 printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
742 /* We can't call disable_irq here, it would deadlock */
745 desc->status |= IRQ_DISABLED;
746 /* This is not a real spurrious interrupt, we
747 * have to eoi it, so we jump to out
752 status &= ~IRQ_PENDING; /* we commit to handling */
753 if (!(status & IRQ_PER_CPU))
754 status |= IRQ_INPROGRESS; /* we are handling it */
756 desc->status = status;
759 * If there is no IRQ handler or it was disabled, exit early.
760 Since we set PENDING, if another processor is handling
761 a different instance of this same irq, the other processor
762 will take care of it.
769 * Edge triggered interrupts need to remember
771 * This applies to any hw interrupts that allow a second
772 * instance of the same irq to arrive while we are in do_IRQ
773 * or in the handler. But the code here only handles the _second_
774 * instance of the irq, not the third or fourth. So it is mostly
775 * useful for irq hardware that does not mask cleanly in an
/* Drop the lock while running handlers, retake it to re-check PENDING
 * (another instance may have arrived meanwhile). */
779 spin_unlock(&desc->lock);
780 handle_irq_event(irq, regs, action);
781 spin_lock(&desc->lock);
783 if (!(desc->status & IRQ_PENDING))
785 desc->status &= ~IRQ_PENDING;
787 desc->status &= ~IRQ_INPROGRESS;
790 * The ->end() handler has to deal with interrupts which got
791 * disabled while the handler was running.
794 if (desc->handler->end)
795 desc->handler->end(irq);
796 else if (desc->handler->enable)
797 desc->handler->enable(irq);
799 spin_unlock(&desc->lock);
/* Top-level external-interrupt entry.  On iSeries, drain the LPAR event
 * queue and handle IPI/decrementer/PMC flags from the paca; otherwise
 * loop dispatching each irq returned by ppc_md.get_irq(). */
802 int do_IRQ(struct pt_regs *regs)
804 int cpu = smp_processor_id();
806 #ifdef CONFIG_PPC_ISERIES
807 struct paca_struct *lpaca;
808 struct ItLpQueue *lpq;
813 #ifdef CONFIG_PPC_ISERIES
/* Hypervisor-posted IPI: clear the flag and process SMP messages. */
816 if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
817 lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
818 iSeries_smp_message_recv(regs);
820 #endif /* CONFIG_SMP */
821 lpq = lpaca->lpQueuePtr;
822 if (lpq && ItLpQueue_isLpIntPending(lpq))
823 lpEvent_count += ItLpQueue_process(lpq, regs);
826 * Every arch is required to implement ppc_md.get_irq.
827 * This function will either return an irq number or -1 to
828 * indicate there are no more pending. But the first time
829 * through the loop this means there wasn't an IRQ pending.
830 * The value -2 is for buggy hardware and means that this IRQ
831 * has already been handled. -- Tom
833 while ((irq = ppc_md.get_irq(regs)) >= 0) {
834 ppc_irq_dispatch_handler(regs, irq);
837 if (irq != -2 && first)
838 /* That's not SMP safe ... but who cares ? */
839 ppc_spurious_interrupts++;
844 #ifdef CONFIG_PPC_ISERIES
845 if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
846 lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
847 /* Signal a fake decrementer interrupt */
848 timer_interrupt(regs);
851 if (lpaca->xLpPaca.xIntDword.xFields.xPdcInt) {
852 lpaca->xLpPaca.xIntDword.xFields.xPdcInt = 0;
853 /* Signal a fake PMC interrupt */
854 PerformanceMonitorException();
/* Run softirqs raised by the handlers before returning. */
858 if (softirq_pending(cpu))
861 return 1; /* lets ret_from_int know we can do checks */
/* IRQ autoprobe is not supported on this arch; these are stubs. */
864 unsigned long probe_irq_on (void)
869 int probe_irq_off (unsigned long irqs)
874 unsigned int probe_irq_mask(unsigned long irqs)
/* Boot-time irq setup: build the desc tree, then give the platform its
 * optional RAS-interrupt init hook. */
879 void __init init_IRQ(void)
888 /* Initialize the irq tree */
892 if(ppc_md.init_ras_IRQ) ppc_md.init_ras_IRQ();
/* Holder of the big global irq lock (NO_PROC_ID when unheld). */
896 unsigned char global_irq_holder = NO_PROC_ID;
/* Debug dump used when global-irq locking appears wedged: prints the
 * per-cpu brlock readers and bottom-half counts. */
898 static void show(char * str)
900 int cpu = smp_processor_id();
903 printk("\n%s, CPU %d:\n", str, cpu);
904 printk("irq: %d [ ", irqs_running());
905 for (i = 0; i < smp_num_cpus; i++)
906 printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
907 printk("]\nbh: %d [ ",
908 (spin_is_locked(&global_bh_lock) ? 1 : 0));
909 for (i = 0; i < smp_num_cpus; i++)
910 printk("%u ", local_bh_count(i));
/* Spin bound for the global-irq-lock wait loops below. */
914 #define MAXCOUNT 10000000
/* Wait until no cpu is executing in an interrupt handler. */
916 void synchronize_irq(void)
918 if (irqs_running()) {
/* Acquire the global irq lock for cpu (2.4-style global cli).  Recursive
 * acquisition by the current holder is a no-op; otherwise take the
 * brlock write side and wait out running irqs and the global bh lock. */
924 static inline void get_irqlock(int cpu)
928 if ((unsigned char)cpu == global_irq_holder)
933 br_write_lock(BR_GLOBALIRQ_LOCK);
937 if (!irqs_running() &&
938 (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
/* Contended: drop the write lock and spin until clear, then retry. */
941 br_write_unlock(BR_GLOBALIRQ_LOCK);
942 lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
943 while (irqs_running() ||
944 spin_is_locked(lock) ||
945 (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
957 global_irq_holder = cpu;
961 * A global "cli()" while in an interrupt context
962 * turns into just a local cli(). Interrupts
963 * should use spinlocks for the (very unlikely)
964 * case that they ever want to protect against
967 * If we already have local interrupts disabled,
968 * this will not turn a local disable into a
969 * global one (problems with spinlocks: this makes
970 * save_flags+cli+sti usable inside a spinlock).
/* Global cli: if interrupts are currently enabled (MSR[EE], bit 15 of
 * the saved flags) and we are not in irq context, take the global lock. */
972 void __global_cli(void)
977 if (flags & (1UL << 15)) {
978 int cpu = smp_processor_id();
980 if (!local_irq_count(cpu))
/* Global sti: release the global irq lock (when not in irq context)
 * before re-enabling local interrupts. */
985 void __global_sti(void)
987 int cpu = smp_processor_id();
989 if (!local_irq_count(cpu))
990 release_irqlock(cpu);
995 * SMP flags value to restore to:
/* Encode the current irq state for __global_restore_flags: values 2/3
 * mean "local disabled/enabled"; global-lock state is checked only
 * outside irq context. */
1001 unsigned long __global_save_flags(void)
1005 unsigned long flags;
1007 __save_flags(flags);
/* MSR[EE] (bit 15) gives the local enable state. */
1008 local_enabled = (flags >> 15) & 1;
1009 /* default to local */
1010 retval = 2 + local_enabled;
1012 /* check for global flags if we're not in an interrupt */
1013 if (!local_irq_count(smp_processor_id())) {
1016 if (global_irq_holder == (unsigned char) smp_processor_id())
/* Inverse of __global_save_flags; unknown values are reported with the
 * caller's address for debugging. */
1022 void __global_restore_flags(unsigned long flags)
1038 printk("global_restore_flags: %016lx caller %p\n",
1039 flags, __builtin_return_address(0));
1043 #endif /* CONFIG_SMP */
/* /proc/irq plumbing: root dir, per-irq dirs and smp_affinity entries. */
1045 static struct proc_dir_entry * root_irq_dir;
1047 static struct proc_dir_entry * irq_dir [NR_IRQS];
1048 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
/* Default affinity mask per irq, chosen at compile time. */
1050 #ifdef CONFIG_IRQ_ALL_CPUS
1051 unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
1052 #else /* CONFIG_IRQ_ALL_CPUS */
1053 unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x00000000};
1054 #endif /* CONFIG_IRQ_ALL_CPUS */
/* Number of hex digits accepted by parse_hex_value() below. */
1057 #define HEX_DIGITS 8
/* /proc/irq/NNN/smp_affinity reader: prints the irq's affinity mask. */
1059 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1060 int count, int *eof, void *data)
1062 irq_desc_t *desc = irqdesc((long)data);
1063 struct hw_irq_stat *hwstat = get_irq_stat(desc);
/* NOTE(review): the buffer check is HEX_DIGITS+1 (= 9) but "%16lx\n"
 * emits up to 17 bytes -- the guard does not match the format. */
1065 if (count < HEX_DIGITS+1)
1067 return sprintf(page, "%16lx\n", hwstat->irq_affinity);
/* Parse up to HEX_DIGITS hex characters from a user-space buffer into
 * *ret.  Any non-hex character ends the string. */
1070 static unsigned int parse_hex_value (const char *buffer,
1071 unsigned long count, unsigned long *ret)
1073 unsigned char hexnum [HEX_DIGITS];
1074 unsigned long value;
1079 if (count > HEX_DIGITS)
1081 if (copy_from_user(hexnum, buffer, count))
1085 * Parse the first 8 characters as a hex string, any non-hex char
1086 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
1090 for (i = 0; i < count; i++) {
1091 unsigned int c = hexnum[i];
1094 case '0' ... '9': c -= '0'; break;
1095 case 'a' ... 'f': c -= 'a'-10; break;
1096 case 'A' ... 'F': c -= 'A'-10; break;
1100 value = (value << 4) | c;
/* /proc/irq/NNN/smp_affinity writer: parse a hex cpu mask and push it to
 * the irq controller via handler->set_affinity (error if unsupported). */
1107 static int irq_affinity_write_proc (struct file *file, const char *buffer,
1108 unsigned long count, void *data)
1110 unsigned int irq = (long)data;
1111 irq_desc_t *desc = irqdesc(irq);
1112 struct hw_irq_stat *hwstat = get_irq_stat(desc);
1113 int full_count = count, err;
1114 unsigned long new_value;
1116 if (!desc->handler->set_affinity)
1119 err = parse_hex_value(buffer, count, &new_value);
1121 /* Why is this disabled ? --BenH */
1124 * Do not allow disabling IRQs completely - it's a too easy
1125 * way to make the system unusable accidentally :-) At least
1126 * one online CPU still has to be targeted.
1128 if (!(new_value & cpu_online_map))
1131 hwstat->irq_affinity = new_value;
1132 desc->handler->set_affinity(irq, new_value);
/* /proc/irq/prof_cpu_mask reader: print the profiling cpu mask. */
1136 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
1137 int count, int *eof, void *data)
1139 unsigned long *mask = (unsigned long *) data;
1140 if (count < HEX_DIGITS+1)
1142 return sprintf (page, "%08lx\n", *mask);
/* /proc/irq/prof_cpu_mask writer: parse the new mask and, on iSeries,
 * switch each paca's profiling mode accordingly. */
1145 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
1146 unsigned long count, void *data)
1148 unsigned long *mask = (unsigned long *) data, full_count = count, err;
1149 unsigned long new_value;
1151 err = parse_hex_value(buffer, count, &new_value);
1157 #ifdef CONFIG_PPC_ISERIES
/* Walk all pacas; enable decrementer profiling for cpus whose bit is
 * set (and which have a profile buffer), otherwise back to ready. */
1160 for (i=0; i<MAX_PACAS; ++i) {
1161 if ( paca[i].prof_buffer && (new_value & 1) )
1162 paca[i].prof_mode = PMC_STATE_DECR_PROFILE;
1164 if(paca[i].prof_mode != PMC_STATE_INITIAL)
1165 paca[i].prof_mode = PMC_STATE_READY;
/* Width of the "%d" irq-number directory name buffer. */
1175 #define MAX_NAMELEN 10
/* Create /proc/irq/NNN and its smp_affinity entry for an active irq.
 * Idempotent: bails out if the dir already exists or proc isn't ready. */
1177 static void register_irq_proc (unsigned int irq)
1179 struct proc_dir_entry *entry;
1180 char name [MAX_NAMELEN];
1182 struct hw_irq_stat *hwstat;
1184 desc = real_irqdesc(irq);
1185 if (!root_irq_dir || !desc || !desc->handler)
1187 hwstat = get_irq_stat(desc);
/* Already registered. */
1188 if (hwstat->irq_dir)
1191 memset(name, 0, MAX_NAMELEN);
1192 sprintf(name, "%d", irq);
1194 /* create /proc/irq/1234 */
1195 hwstat->irq_dir = proc_mkdir(name, root_irq_dir);
1196 if(hwstat->irq_dir == NULL) {
1197 printk(KERN_ERR "register_irq_proc: proc_mkdir failed.\n");
1201 /* create /proc/irq/1234/smp_affinity */
1202 entry = create_proc_entry("smp_affinity", 0600, hwstat->irq_dir);
1206 entry->data = (void *)(long)irq;
1207 entry->read_proc = irq_affinity_read_proc;
1208 entry->write_proc = irq_affinity_write_proc;
1210 printk(KERN_ERR "register_irq_proc: create_proc_entry failed.\n");
1213 hwstat->smp_affinity = entry;
/* Profiling cpu mask exposed via /proc/irq/prof_cpu_mask (all cpus on). */
1216 unsigned long prof_cpu_mask = -1;
/* Build the /proc/irq hierarchy: root dir, prof_cpu_mask entry, then a
 * per-irq directory for every irq that already has a handler. */
1218 void init_irq_proc (void)
1220 struct proc_dir_entry *entry;
1223 /* create /proc/irq */
1224 root_irq_dir = proc_mkdir("irq", 0);
1225 if(root_irq_dir == NULL) {
1226 printk(KERN_ERR "init_irq_proc: proc_mkdir failed.\n");
1229 /* create /proc/irq/prof_cpu_mask */
1230 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1234 entry->data = (void *)&prof_cpu_mask;
1235 entry->read_proc = prof_cpu_mask_read_proc;
1236 entry->write_proc = prof_cpu_mask_write_proc;
1238 printk(KERN_ERR "init_irq_proc: create_proc_entry failed.\n");
1242 * Create entries for all existing IRQs.
1245 if (irqdesc(i)->handler == NULL)
1247 register_irq_proc(i);
1251 void no_action(int irq, void *dev, struct pt_regs *regs)