/*
 *      linux/arch/alpha/kernel/irq_smp.c
 *
 *      SMP interrupt support: the 2.4-style global IRQ lock behind
 *      the SMP cli()/sti()/save_flags()/restore_flags() interface.
 */

#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>

#include <asm/system.h>
#include <asm/io.h>


/* Who has global_irq_lock. */
int global_irq_holder = NO_PROC_ID;

/* This protects IRQs. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/* Call site from which global_irq_lock was last taken (debug aid). */
static void *previous_irqholder = NULL;

/* Spins in wait_on_irq() before a diagnostic dump is printed. */
#define MAXCOUNT 100000000

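/*
 * Debug dump of the irq/bh bookkeeping.  The two-column [%d %d]
 * output hardwires a two-CPU machine.
 */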
static void
show(char *str, void *where)
{
#if 0
        int i;
        unsigned long *stack;
#endif
        int cpu = smp_processor_id();

        printk("\n%s, CPU %d: %p\n", str, cpu, where);
        printk("irq:  %d [%d %d]\n",
               irqs_running(),
               local_irq_count(0),
               local_irq_count(1));

        printk("bh:   %d [%d %d]\n",
               spin_is_locked(&global_bh_lock) ? 1 : 0,
               local_bh_count(0),
               local_bh_count(1));
#if 0
        stack = (unsigned long *) &str;
        for (i = 40; i ; i--) {
                unsigned long x = *++stack;
                if (x > (unsigned long) &init_task_union &&
                    x < (unsigned long) &vsprintf) {
                        printk("<[%08lx]> ", x);
                }
        }
#endif
}
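/*
 * Called with global_irq_lock held.  Spin until no other CPU is in an
 * interrupt handler and, unless this CPU is itself in a bottom half,
 * until bottom-half processing is quiescent.  The lock is dropped
 * while spinning (with interrupts briefly re-enabled) so that CPUs
 * waiting on it cannot deadlock against us, then re-taken with
 * spin_trylock() once the conditions look clear.
 */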
static inline void
wait_on_irq(int cpu, void *where)
{
        int count = MAXCOUNT;

        for (;;) {

                /*
                 * Wait until all interrupts are gone. Wait
                 * for bottom half handlers unless we're
                 * already executing in one..
                 */
                if (!irqs_running()) {
                        if (local_bh_count(cpu)
                            || !spin_is_locked(&global_bh_lock))
                                break;
                }

                /* Duh, we have to loop. Release the lock to avoid deadlocks */
                spin_unlock(&global_irq_lock);

                for (;;) {
                        if (!--count) {
                                show("wait_on_irq", where);
                                count = MAXCOUNT;
                        }
                        __sti();
                        udelay(1); /* make sure to run pending irqs */
                        __cli();

                        if (irqs_running())
                                continue;
                        if (spin_is_locked(&global_irq_lock))
                                continue;
                        if (!local_bh_count(cpu)
                            && spin_is_locked(&global_bh_lock))
                                continue;
                        if (spin_trylock(&global_irq_lock))
                                break;
                }
        }
}
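/*
 * Take the global IRQ lock for this CPU.  A recursive call by the
 * current holder is a no-op; otherwise block on the spinlock and then
 * wait until no other CPU is executing in interrupt context.
 */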
static inline void
get_irqlock(int cpu, void *where)
{
        if (!spin_trylock(&global_irq_lock)) {
                /* Do we already hold the lock?  */
                if (cpu == global_irq_holder)
                        return;
                /* Uhhuh.. Somebody else got it.  Wait.  */
                spin_lock(&global_irq_lock);
        }

        /*
         * Ok, we got the lock bit.
         * But that's actually just the easy part.. Now
         * we need to make sure that nobody else is running
         * in an interrupt context.
         */
        wait_on_irq(cpu, where);

        /*
         * Finally.
         */
#ifdef CONFIG_DEBUG_SPINLOCK
        global_irq_lock.task = current;
        global_irq_lock.previous = where;
#endif
        global_irq_holder = cpu;
        previous_irqholder = where;
}

void
__global_cli(void)
{
        int cpu = smp_processor_id();
        void *where = __builtin_return_address(0);

        /*
         * Maximize ipl.  If ipl was previously 0 and if this thread
         * is not in an irq, then take global_irq_lock.
         */
        if (swpipl(IPL_MAX) == IPL_MIN && !local_irq_count(cpu))
                get_irqlock(cpu, where);
}
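
/*
 * Note: on SMP builds the generic cli()/sti() macros are expected to
 * expand to __global_cli()/__global_sti() (see <asm/system.h>), so
 * driver code written for UP picks up the global semantics unchanged.
 */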

void
__global_sti(void)
{
        int cpu = smp_processor_id();

        if (!local_irq_count(cpu))
                release_irqlock(cpu);
        __sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long
__global_save_flags(void)
{
        int retval;
        int local_enabled;
        unsigned long flags;
        int cpu = smp_processor_id();

        __save_flags(flags);
        /* On Alpha the saved flags are the IPL; 0 means fully enabled.  */
        local_enabled = (!(flags & 7));
        /* default to local */
        retval = 2 + local_enabled;

        /* Check for global flags if we're not in an interrupt.  */
        if (!local_irq_count(cpu)) {
                if (local_enabled)
                        retval = 1;
                if (global_irq_holder == cpu)
                        retval = 0;
        }
        return retval;
}

void
__global_restore_flags(unsigned long flags)
{
        switch (flags) {
        case 0:
                __global_cli();
                break;
        case 1:
                __global_sti();
                break;
        case 2:
                __cli();
                break;
        case 3:
                __sti();
                break;
        default:
                printk(KERN_ERR "global_restore_flags: %08lx (%p)\n",
                        flags, __builtin_return_address(0));
        }
}
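
/*
 * Typical use of the above goes through the generic macros rather
 * than direct calls -- a hypothetical driver-side sketch:
 *
 *      unsigned long flags;
 *
 *      save_flags(flags);      <- __global_save_flags(): 0..3 as above
 *      cli();                  <- __global_cli(): lock + wait_on_irq()
 *      ... touch state shared with interrupt handlers ...
 *      restore_flags(flags);   <- __global_restore_flags(flags)
 */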

/*
 * From its use, I infer that synchronize_irq() stalls a thread until
 * the effects of a command to an external device are known to have
 * taken hold.  Typically, the command is to stop sending interrupts.
 * The strategy here is to wait until there is at most one processor
 * (this one) in an irq.  The memory barrier serializes the write to
 * the device and the subsequent accesses of global_irq_count.
 * --jmartin
 */
#define DEBUG_SYNCHRONIZE_IRQ 0

void
synchronize_irq(void)
{
#if 0
        /* Joe's version.  */
        int cpu = smp_processor_id();
        int local_count;
        int global_count;
        int countdown = 1<<24;
        void *where = __builtin_return_address(0);

        mb();
        do {
                local_count = local_irq_count(cpu);
                global_count = atomic_read(&global_irq_count);
                if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
                        printk("%d:%d/%d\n", cpu, local_count, global_count);
                        show("synchronize_irq", where);
                        break;
                }
        } while (global_count != local_count);
#else
        /* Jay's version.  */
        if (irqs_running()) {
                cli();
                sti();
        }
#endif
}
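
/*
 * Sketch of the intended caller pattern (hypothetical device and
 * register names, per the comment above): quiesce the hardware, then
 * synchronize before tearing state down.
 *
 *      writel(0, dev->base + INT_ENABLE);      <- stop the device interrupting
 *      mb();                                   <- order the MMIO write
 *      synchronize_irq();                      <- wait out in-flight handlers
 *      free_irq(dev->irq, dev);                <- now safe to tear down
 */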