/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002, 2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>

#include <asm/pgalloc.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *	B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me - these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there is
 *	nothing of note with C stepping upwards.
 */
/* The 'big kernel lock' */
spinlock_cacheline_t kernel_flag_cacheline = {SPIN_LOCK_UNLOCKED};

struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
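/*
 * For orientation only: the lazy-TLB state tracked in cpu_tlbstate[] is
 * driven from the context switch path. A minimal sketch of what entering
 * lazy TLB mode amounts to (hypothetical helper name; the real
 * enter_lazy_tlb() lives in the mmu_context header and also takes the task):
 */
#if 0
static inline void sketch_enter_lazy_tlb(struct mm_struct *mm, unsigned long cpu)
{
	/* keep the old page tables loaded; a flush IPI will call leave_mm() */
	cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
}
#endif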
/*
 *	the following functions deal with sending IPIs between CPUs.
 *
 *	We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector)
{
	unsigned int icr = APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
	return icr;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/* Wait for the ICR to go idle. */
	apic_wait_icr_idle();

	/* No need to touch the target chip field. */
	cfg = __prepare_ICR(shortcut, vector);

	/* Send the IPI. The write to APIC_ICR fires this off. */
	apic_write_around(APIC_ICR, cfg);
}
static inline void send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then
	 * we get an APIC send error if we try to broadcast.
	 * thus we have to avoid sending IPIs in this case.
	 */
	if (smp_num_cpus > 1)
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}
static inline void send_IPI_all(int vector)
{
	__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
static inline void send_IPI_mask(int mask, int vector)
{
	unsigned long cfg, flags;

	__save_flags(flags);
	__cli();

	/* Wait for the ICR to go idle. */
	apic_wait_icr_idle();

	/* Prepare the target chip field. */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/* Program the ICR with the vector. */
	cfg = __prepare_ICR(0, vector);

	/* Send the IPI. The write to APIC_ICR fires this off. */
	apic_write_around(APIC_ICR, cfg);

	__restore_flags(flags);
}
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */
static volatile unsigned long flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL	0xffffffff
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static void inline leave_mm (unsigned long cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
	/* flush TLB before it goes away. this stops speculative prefetches */
	*read_pda(level4_pgt) = __pa(init_mm.pgd) | _PAGE_TABLE;
	__flush_tlb();
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
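/*
 * A minimal sketch of the 1a) path above, assuming a hypothetical helper
 * name (the real switch_mm() lives in the mmu_context header); it only
 * spells out the ordering the flush IPI relies on:
 */
#if 0
static inline void sketch_switch_mm_1a(struct mm_struct *old_mm,
				       struct mm_struct *new_mm,
				       unsigned long cpu)
{
	clear_bit(cpu, &old_mm->cpu_vm_mask);	/* 1a1: stop flush ipis for old_mm */
	cpu_tlbstate[cpu].state = TLBSTATE_OK;	/* 1a2: no longer lazy */
	cpu_tlbstate[cpu].active_mm = new_mm;	/* 1a3: accept flushes for new_mm */
	set_bit(cpu, &new_mm->cpu_vm_mask);	/* 1a4: other cpus start sending ipis */
	/* 1a5: finally load the new page table (cr3), then switch stacks (step 2) */
}
#endif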
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
asmlinkage void smp_invalidate_interrupt (void)
{
	unsigned long cpu = smp_processor_id();

	if (!test_bit(cpu, &flush_cpumask))
		return;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 */

	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	clear_bit(cpu, &flush_cpumask);
}
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
			      unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	if (!cpumask)
		BUG();
	if ((cpumask & cpu_online_map) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &flush_cpumask);

	/* We have to send the IPI only to CPUs affected. */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (flush_cpumask)
		/* nothing. lockup detection does not belong here */;

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	local_flush_tlb();
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
void flush_tlb_mm (struct mm_struct * mm)
{
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, va);
}
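/*
 * Example caller, for illustration only (hypothetical helper name; set_pte()
 * and pte_t are the usual page table primitives): after changing a single
 * PTE, the one virtual address is flushed locally and on every other CPU
 * that may still cache the old translation.
 */
#if 0
static void example_change_pte(struct vm_area_struct *vma, unsigned long address,
			       pte_t *ptep, pte_t newval)
{
	set_pte(ptep, newval);
	flush_tlb_page(vma, address);
}
#endif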
static inline void do_flush_tlb_all_local(void)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

static void flush_tlb_all_ipi(void* info)
{
	do_flush_tlb_all_local();
}

void flush_tlb_all(void)
{
	smp_call_function (flush_tlb_all_ipi,0,1,1);
	do_flush_tlb_all_local();
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = smp_num_cpus-1;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&call_lock);

	return 0;
}
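/*
 * Example caller, for illustration only (function names here are
 * hypothetical): run a short, non-blocking function on every other CPU from
 * process context and wait until all of them have finished, then run it
 * locally, since the calling CPU is not included.
 */
#if 0
static void example_ipi_func(void *info)
{
	/* runs on each remote CPU in interrupt context - keep it short */
	printk("cpu %d saw cookie %p\n", smp_processor_id(), info);
}

static void example_on_each_cpu(void *cookie)
{
	if (smp_call_function(example_ipi_func, cookie, 1, 1))
		printk("smp_call_function failed\n");
	example_ipi_func(cookie);
}
#endif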
void smp_stop_cpu(void)
{
	clear_bit(smp_processor_id(), &cpu_online_map);
	disable_local_APIC();
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
	smp_call_function(smp_really_stop_cpu, NULL, 1, 0);
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
}
asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}