2 * linux/arch/alpha/kernel/smp.c
4 * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
5 * Renamed modified smp_call_function to smp_call_function_on_cpu()
6 * Created a function that conforms to the old calling convention
7 * of smp_call_function().
9 * This is helpful for DCPI.
13 #include <linux/errno.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/sched.h>
18 #include <linux/threads.h>
19 #include <linux/smp.h>
20 #include <linux/smp_lock.h>
21 #include <linux/interrupt.h>
22 #include <linux/init.h>
23 #include <linux/delay.h>
24 #include <linux/spinlock.h>
25 #include <linux/irq.h>
26 #include <linux/cache.h>
28 #include <asm/hwrpb.h>
29 #include <asm/ptrace.h>
30 #include <asm/atomic.h>
34 #include <asm/bitops.h>
35 #include <asm/pgtable.h>
36 #include <asm/pgalloc.h>
37 #include <asm/hardirq.h>
38 #include <asm/softirq.h>
39 #include <asm/mmu_context.h>
41 #define __KERNEL_SYSCALLS__
42 #include <asm/unistd.h>
/* Debug logging: DBGS(()) expands to printk in this configuration. */
50 #define DBGS(args) printk args
55 /* A collection of per-processor data. */
56 struct cpuinfo_alpha cpu_data[NR_CPUS];
58 /* A collection of single bit ipi messages. */
/* NOTE(review): the opening of this per-CPU IPI struct is elided in this
   extract; each slot holds a cacheline-aligned bitmask of pending IPIs. */
60 unsigned long bits ____cacheline_aligned;
61 } ipi_data[NR_CPUS] __cacheline_aligned;
/* IPI message types (members elided here; used by send_ipi_message /
   handle_ipi below: reschedule, call-function, cpu-stop). */
63 enum ipi_message_type {
/* The big kernel lock for this architecture. */
69 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
71 /* Set to a secondary's cpuid when it comes online. */
72 static unsigned long smp_secondary_alive;
74 /* Which cpus ids came online. */
75 unsigned long cpu_present_mask;
77 /* cpus reported in the hwrpb */
78 static unsigned long hwrpb_cpu_present_mask __initdata = 0;
80 static int max_cpus = -1; /* Command-line limitation. */
81 int smp_num_probed; /* Internal processor count */
82 int smp_num_cpus = 1; /* Number that came online. */
83 int smp_threads_ready; /* True once the per process idle is forked. */
/* Maps between physical cpu ids and dense logical cpu numbers. */
85 int __cpu_number_map[NR_CPUS];
86 int __cpu_logical_map[NR_CPUS];
88 extern void calibrate_delay(void);
89 extern asmlinkage void entInt(void);
/* Handler for the "nosmp" boot option: disables SMP bring-up.
   NOTE(review): function body elided in this extract — presumably it
   zeroes max_cpus; confirm against the full source. */
92 static int __init nosmp(char *str)
98 __setup("nosmp", nosmp);
/* Handler for the "maxcpus=N" boot option: caps how many CPUs are
   brought online by parsing N into max_cpus. */
100 static int __init maxcpus(char *str)
102 get_option(&str, &max_cpus);
106 __setup("maxcpus", maxcpus);
110 * Called by both boot and secondaries to move global data into
111 * per-processor storage.
113 static inline void __init
114 smp_store_cpu_info(int cpuid)
/* Snapshot the calibrated delay-loop value for this CPU. */
116 cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
/* Reset address-space-number bookkeeping for the TLB context code. */
117 cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
118 cpu_data[cpuid].need_new_asn = 0;
119 cpu_data[cpuid].asn_lock = 0;
/* Start with no nested hard/soft interrupt counts on this CPU. */
120 local_irq_count(cpuid) = 0;
121 local_bh_count(cpuid) = 0;
125 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
127 static inline void __init
128 smp_setup_percpu_timer(int cpuid)
/* Fire update_process_times() on every percpu timer tick (divider 1). */
130 cpu_data[cpuid].prof_counter = 1;
131 cpu_data[cpuid].prof_multiplier = 1;
/* Secondary-side helper: spin up to 10 seconds waiting for the boot CPU
   to clear smp_secondary_alive (its signal that we may calibrate). */
135 wait_boot_cpu_to_stop(int cpuid)
137 long stop = jiffies + 10*HZ;
139 while (time_before(jiffies, stop)) {
140 if (!smp_secondary_alive)
/* Timed out -- the boot CPU never released us; report and hang. */
145 printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
151 * Where secondaries begin a life of C.
/* NOTE(review): the function header and several body lines are elided in
   this extract; this is the secondary-CPU entry path after SRM restart. */
156 int cpuid = hard_smp_processor_id();
/* Sanity check: we must be running on the idle task forked for us. */
158 if (current != init_tasks[cpu_number_map(cpuid)]) {
159 printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n",
160 cpuid, current, init_tasks[cpu_number_map(cpuid)]);
163 DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));
165 /* Turn on machine checks. */
168 /* Set trap vectors. */
171 /* Set interrupt vector. */
174 /* Get our local ticker going. */
175 smp_setup_percpu_timer(cpuid);
177 /* Call platform-specific callin, if specified */
178 if (alpha_mv.smp_callin) alpha_mv.smp_callin();
180 /* Must have completely accurate bogos. */
184 * Wait boot CPU to stop with irq enabled before
185 * running calibrate_delay().
187 wait_boot_cpu_to_stop(cpuid);
192 smp_store_cpu_info(cpuid);
/* Cross-check our BogoMIPS against the boot CPU; if we differ by more
   than 10% the calibration is bogus, so trust the boot CPU's value. */
195 #define LPJ(c) ((long)cpu_data[c].loops_per_jiffy)
196 long diff = LPJ(boot_cpuid) - LPJ(cpuid);
197 if (diff < 0) diff = -diff;
199 if (diff > LPJ(boot_cpuid)/10) {
200 printk("Bogus BogoMIPS for cpu %d - trusting boot CPU\n",
202 loops_per_jiffy = LPJ(cpuid) = LPJ(boot_cpuid);
207 * Allow master to continue only after we have written
208 * the loops_per_jiffy.
211 smp_secondary_alive = 1;
213 /* Wait for the go code. */
214 while (!smp_threads_ready)
217 DBGS(("smp_callin: commencing CPU %d current %p\n",
220 /* Setup the scheduler for this processor. */
223 /* ??? This should be in init_idle. */
224 atomic_inc(&init_mm.mm_count);
225 current->active_mm = &init_mm;
231 * Send a message to a secondary's console. "START" is one such
232 * interesting message. ;-)
/* NOTE(review): return type and some body lines elided in this extract.
   Copies str into the target CPU's HWRPB ipc_buffer, raises its rxrdy
   bit, then polls txrdy for the console's acknowledgement. */
235 send_secondary_console_msg(char *str, int cpuid)
237 struct percpu_struct *cpu;
238 register char *cp1, *cp2;
239 unsigned long cpumask;
/* Locate this cpuid's per-CPU slot inside the HWRPB. */
243 cpu = (struct percpu_struct *)
245 + hwrpb->processor_offset
246 + cpuid * hwrpb->processor_size);
248 cpumask = (1UL << cpuid);
249 if (hwrpb->txrdy & cpumask)
/* First quadword of the ipc buffer is the message length; the text
   follows starting at the second quadword. */
255 *(unsigned int *)&cpu->ipc_buffer[0] = len;
256 cp1 = (char *) &cpu->ipc_buffer[1];
257 memcpy(cp1, cp2, len);
259 /* atomic test and set */
261 set_bit(cpuid, &hwrpb->rxrdy);
263 if (hwrpb->txrdy & cpumask)
269 /* Wait 10 seconds. Note that jiffies aren't ticking yet. */
270 for (timeout = 1000000; timeout > 0; --timeout) {
271 if (!(hwrpb->txrdy & cpumask))
279 /* Wait 10 seconds. */
280 for (timeout = 1000000; timeout > 0; --timeout) {
281 if (!(hwrpb->txrdy & cpumask))
289 printk("Processor %x not ready\n", cpuid);
294 * A secondary console wants to send a message. Receive it.
297 recv_secondary_console_msg(void)
300 unsigned long txrdy = hwrpb->txrdy;
301 char *cp1, *cp2, buf[80];
302 struct percpu_struct *cpu;
304 DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
306 mycpu = hard_smp_processor_id();
/* Scan txrdy for every CPU with a pending console message. */
308 for (i = 0; i < NR_CPUS; i++) {
309 if (!(txrdy & (1UL << i)))
312 DBGS(("recv_secondary_console_msg: "
313 "TXRDY contains CPU %d.\n", i));
315 cpu = (struct percpu_struct *)
317 + hwrpb->processor_offset
318 + i * hwrpb->processor_size);
320 DBGS(("recv_secondary_console_msg: on %d from %d"
321 " HALT_REASON 0x%lx FLAGS 0x%lx\n",
322 mycpu, i, cpu->halt_reason, cpu->flags));
/* Message length lives in the high 32 bits of the first quadword;
   reject anything that would not fit into buf[80]. */
324 cnt = cpu->ipc_buffer[0] >> 32;
325 if (cnt <= 0 || cnt >= 80)
326 strcpy(buf, "<<< BOGUS MSG >>>");
328 cp1 = (char *) &cpu->ipc_buffer[11];
/* Normalize console line endings ('\r') before printing.
   NOTE(review): the replacement lines are elided in this extract. */
332 while ((cp2 = strchr(cp2, '\r')) != 0) {
339 DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
340 "message is '%s'\n", mycpu, buf));
347 * Convince the console to have a secondary cpu begin execution.
350 secondary_cpu_start(int cpuid, struct task_struct *idle)
352 struct percpu_struct *cpu;
353 struct pcb_struct *hwpcb;
/* Find the target CPU's HWRPB slot and its hardware PCB. */
356 cpu = (struct percpu_struct *)
358 + hwrpb->processor_offset
359 + cpuid * hwrpb->processor_size);
360 hwpcb = (struct pcb_struct *) cpu->hwpcb;
362 /* Initialize the CPU's HWPCB to something just good enough for
363 us to get started. Immediately after starting, we'll swpctx
364 to the target idle task's ptb. Reuse the stack in the mean
365 time. Precalculate the target PCBB. */
366 hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
368 hwpcb->ptbr = idle->thread.ptbr;
371 hwpcb->unique = virt_to_phys(&idle->thread);
372 hwpcb->flags = idle->thread.pal_flags;
373 hwpcb->res1 = hwpcb->res2 = 0;
376 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
377 hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
379 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
380 cpuid, idle->state, idle->thread.pal_flags));
382 /* Setup HWRPB fields that SRM uses to activate secondary CPU */
383 hwrpb->CPU_restart = __smp_callin;
384 hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
386 /* Recalculate and update the HWRPB checksum */
387 hwrpb_update_checksum(hwrpb);
390 * Send a "start" command to the specified processor.
393 /* SRM III 3.4.1.3 */
394 cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */
395 cpu->flags &= ~1; /* turn off Bootstrap In Progress */
398 send_secondary_console_msg("START\r\n", cpuid);
400 /* Wait 10 seconds for an ACK from the console. Note that jiffies
401 aren't ticking yet. */
402 for (timeout = 1000000; timeout > 0; timeout--) {
/* Timed out: the console never acknowledged the START. */
408 printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
412 DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
/* Fork an idle task directly via do_fork: CLONE_PID keeps pid 0 and
   CLONE_VM shares the mm; the child is never scheduled normally. */
416 static int __init fork_by_hand(void)
420 * don't care about the regs settings since
421 * we'll never reschedule the forked task.
423 return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
427 * Bring one cpu online.
/* Returns nonzero on failure (visible error paths below); cpuid is the
   physical id, cpunum the dense logical number it will occupy. */
430 smp_boot_one_cpu(int cpuid, int cpunum)
432 struct task_struct *idle;
435 /* Cook up an idler for this guy. Note that the address we give
436 to kernel_thread is irrelevant -- it's going to start where
437 HWRPB.CPU_restart says to start. But this gets all the other
438 task-y sort of data structures set up like we wish. */
440 * We can't use kernel_thread since we must avoid to
441 * reschedule the child.
443 if (fork_by_hand() < 0)
444 panic("failed fork for CPU %d", cpuid);
/* The freshly forked task is the newest entry on the task list. */
446 idle = init_task.prev_task;
448 panic("No idle process for CPU %d", cpuid);
449 if (idle == &init_task)
450 panic("idle process is init_task for CPU %d", cpuid);
452 idle->processor = cpuid;
453 idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */
454 __cpu_logical_map[cpunum] = cpuid;
455 __cpu_number_map[cpuid] = cpunum;
/* Hide the idle task from the scheduler and pid hash; it is handed
   to the secondary CPU directly. */
457 del_from_runqueue(idle);
458 unhash_process(idle);
459 init_tasks[cpunum] = idle;
461 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
462 cpuid, idle->state, idle->flags));
464 /* The secondary will change this once it is happy. Note that
465 secondary_cpu_start contains the necessary memory barrier. */
466 smp_secondary_alive = -1;
468 /* Whirrr, whirrr, whirrrrrrrrr... */
469 if (secondary_cpu_start(cpuid, idle))
473 /* Notify the secondary CPU it can run calibrate_delay() */
474 smp_secondary_alive = 0;
476 /* We've been acked by the console; wait one second for the task
477 to start up for real. Note that jiffies aren't ticking yet. */
478 for (timeout = 0; timeout < 1000000; timeout++) {
479 if (smp_secondary_alive == 1)
485 /* we must invalidate our stuff as we failed to boot the CPU */
486 __cpu_logical_map[cpunum] = -1;
487 __cpu_number_map[cpuid] = -1;
489 /* the idle task is local to us so free it as we don't use it */
490 free_task_struct(idle);
492 printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
496 /* Another "Red Snapper". */
501 * Called from setup_arch. Detect an SMP system and which processors
/* Probes the HWRPB for available secondary CPUs, records them in
   hwrpb_cpu_present_mask, and propagates the boot CPU's PAL revision. */
507 struct percpu_struct *cpubase, *cpu;
510 if (boot_cpuid != 0) {
511 printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
515 if (hwrpb->nr_processors > 1) {
518 DBGS(("setup_smp: nr_processors %ld\n",
519 hwrpb->nr_processors));
521 cpubase = (struct percpu_struct *)
522 ((char*)hwrpb + hwrpb->processor_offset);
523 boot_cpu_palrev = cpubase->pal_revision;
525 for (i = 0; i < hwrpb->nr_processors; i++ ) {
526 cpu = (struct percpu_struct *)
527 ((char *)cpubase + i*hwrpb->processor_size);
/* 0x1cc = processor available, present, and started by console. */
528 if ((cpu->flags & 0x1cc) == 0x1cc) {
530 /* Assume here that "whami" == index */
531 hwrpb_cpu_present_mask |= (1UL << i);
532 cpu->pal_revision = boot_cpu_palrev;
535 DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
536 i, cpu->flags, cpu->type));
537 DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
538 i, cpu->pal_revision));
/* Uniprocessor HWRPB: only the boot CPU is present. */
542 hwrpb_cpu_present_mask = (1UL << boot_cpuid);
544 cpu_present_mask = 1UL << boot_cpuid;
546 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
547 smp_num_probed, hwrpb_cpu_present_mask);
551 * Called by smp_init bring all the secondaries online and hold them.
557 unsigned long bogosum;
559 /* Take care of some initial bookkeeping. */
560 memset(__cpu_number_map, -1, sizeof(__cpu_number_map));
561 memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
562 memset(ipi_data, 0, sizeof(ipi_data));
/* The boot CPU is always logical cpu 0. */
564 __cpu_number_map[boot_cpuid] = 0;
565 __cpu_logical_map[0] = boot_cpuid;
566 current->processor = boot_cpuid;
568 smp_store_cpu_info(boot_cpuid);
569 smp_setup_percpu_timer(boot_cpuid);
573 /* ??? This should be in init_idle. */
574 atomic_inc(&init_mm.mm_count);
575 current->active_mm = &init_mm;
577 /* Nothing to do on a UP box, or when told not to. */
578 if (smp_num_probed == 1 || max_cpus == 0) {
579 printk(KERN_INFO "SMP mode deactivated.\n");
583 printk(KERN_INFO "SMP starting up secondaries.\n");
/* Boot each probed secondary in turn; skip the boot CPU and any id
   absent from the HWRPB mask. */
586 for (i = 0; i < NR_CPUS; i++) {
590 if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
593 if (smp_boot_one_cpu(i, cpu_count))
596 cpu_present_mask |= 1UL << i;
600 if (cpu_count == 1) {
601 printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
/* Sum the per-CPU loops_per_jiffy to report aggregate BogoMIPS. */
606 for (i = 0; i < NR_CPUS; i++) {
607 if (cpu_present_mask & (1UL << i))
608 bogosum += cpu_data[i].loops_per_jiffy;
610 printk(KERN_INFO "SMP: Total of %d processors activated "
611 "(%lu.%02lu BogoMIPS).\n",
612 cpu_count, bogosum / (500000/HZ),
613 (bogosum / (5000/HZ)) % 100);
615 smp_num_cpus = cpu_count;
619 * Called by smp_init to release the blocking online cpus once they
625 /* smp_init sets smp_threads_ready -- that's enough. */
/* Per-CPU timer tick handler: profiles the kernel PC and, every
   prof_counter ticks, drives the normal timer bookkeeping. */
631 smp_percpu_timer_interrupt(struct pt_regs *regs)
633 int cpu = smp_processor_id();
634 unsigned long user = user_mode(regs);
635 struct cpuinfo_alpha *data = &cpu_data[cpu];
637 /* Record kernel PC. */
639 alpha_do_profile(regs->pc);
641 if (!--data->prof_counter) {
642 /* We need to make like a normal interrupt -- otherwise
643 timer interrupts ignore the global interrupt lock,
644 which would be a Bad Thing. */
645 irq_enter(cpu, RTC_IRQ);
647 update_process_times(user);
/* Reload the divider for the next profiling interval. */
649 data->prof_counter = data->prof_multiplier;
650 irq_exit(cpu, RTC_IRQ);
652 if (softirq_pending(cpu))
/* Adjust the profiling tick multiplier.
   NOTE(review): body elided in this extract — likely returns -EINVAL
   (unsupported); confirm against the full source. */
658 setup_profiling_timer(unsigned int multiplier)
/* Post an IPI 'operation' bit to every CPU in the to_whom mask, then
   interrupt each one. Two passes keep the memory barriers to two. */
665 send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
669 /* Reduce the number of memory barriers by doing two loops,
670 one to set the bits, one to invoke the interrupts. */
672 mb(); /* Order out-of-band data and bit setting. */
674 for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
676 set_bit(operation, &ipi_data[i].bits);
679 mb(); /* Order bit setting and interrupt. */
/* Second pass: actually raise the interprocessor interrupt. */
681 for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
687 /* Structure and data for smp_call_function. This is designed to
688 minimize static memory requirements. Plus it looks cleaner. */
690 struct smp_call_struct {
691 void (*func) (void *info);
/* Counts decremented by receivers: before calling func, and (when the
   caller asked to wait) after func completes. */
694 atomic_t unstarted_count;
695 atomic_t unfinished_count;
/* Single shared slot handed between callers via pointer_lock(). */
698 static struct smp_call_struct *smp_call_function_data;
700 /* Atomicly drop data into a shared pointer. The pointer is free if
701 it is initially locked. If retry, spin until free. */
704 pointer_lock (void *lock, void *data, int retry)
710 /* Compare and swap with zero. */
/* LL/SC asm body elided in this extract; these are its constraints. */
718 : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
/* retry path: spin until the slot is released, then try again. */
727 while (*(void **)lock)
/* IPI entry point: drain this CPU's pending-IPI bitmask and dispatch
   each message type. */
733 handle_ipi(struct pt_regs *regs)
735 int this_cpu = smp_processor_id();
736 unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
740 DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
741 this_cpu, *pending_ipis, regs->pc));
744 mb(); /* Order interrupt and bit testing. */
/* xchg atomically takes all pending bits; loop in case more arrive. */
745 while ((ops = xchg(pending_ipis, 0)) != 0) {
746 mb(); /* Order bit clearing and data access. */
754 if (which == IPI_RESCHEDULE) {
755 /* Reschedule callback. Everything to be done
756 is done by the interrupt return path. */
758 else if (which == IPI_CALL_FUNC) {
759 struct smp_call_struct *data;
760 void (*func)(void *info);
764 data = smp_call_function_data;
769 /* Notify the sending CPU that the data has been
770 received, and execution is about to begin. */
772 atomic_dec (&data->unstarted_count);
774 /* At this point the structure may be gone unless
778 /* Notify the sending CPU that the task is done. */
780 if (wait) atomic_dec (&data->unfinished_count);
782 else if (which == IPI_CPU_STOP) {
786 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
791 mb(); /* Order data access and bit testing. */
794 cpu_data[this_cpu].ipi_count++;
/* Also service any pending secondary-console messages. */
797 recv_secondary_console_msg();
/* Kick 'cpu' with a reschedule IPI; self-IPIs are flagged as bugs. */
801 smp_send_reschedule(int cpu)
804 if (cpu == hard_smp_processor_id())
806 "smp_send_reschedule: Sending IPI to self.\n");
808 send_ipi_message(1UL << cpu, IPI_RESCHEDULE);
/* Stop every other online CPU (e.g. for panic/halt). NOTE(review):
   the function header line is elided in this extract. */
814 unsigned long to_whom = cpu_present_mask ^ (1UL << smp_processor_id());
816 if (hard_smp_processor_id() != boot_cpu_id)
817 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
819 send_ipi_message(to_whom, IPI_CPU_STOP);
823 * Run a function on all other CPUs.
824 * <func> The function to run. This must be fast and non-blocking.
825 * <info> An arbitrary pointer to pass to the function.
826 * <retry> If true, keep retrying until ready.
827 * <wait> If true, wait until function has completed on other CPUs.
828 * [RETURNS] 0 on success, else a negative status code.
830 * Does not return until remote CPUs are nearly ready to execute <func>
831 * or are or have executed.
835 smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
836 int wait, unsigned long to_whom)
838 struct smp_call_struct data;
840 int num_cpus_to_call;
/* Never send to ourselves; count the target CPUs in the mask. */
847 to_whom &= ~(1L << smp_processor_id());
848 for (i = 0, j = 1, num_cpus_to_call = 0; i < NR_CPUS; ++i, j <<= 1)
852 atomic_set(&data.unstarted_count, num_cpus_to_call);
853 atomic_set(&data.unfinished_count, num_cpus_to_call);
855 /* Acquire the smp_call_function_data mutex. */
856 if (pointer_lock(&smp_call_function_data, &data, retry))
859 /* Send a message to the requested CPUs. */
860 send_ipi_message(to_whom, IPI_CALL_FUNC);
862 /* Wait for a minimal response. */
863 timeout = jiffies + HZ;
864 while (atomic_read (&data.unstarted_count) > 0
865 && time_before (jiffies, timeout))
868 /* If there's no response yet, log a message but allow a longer
869 * timeout period -- if we get a response this time, log
870 * a message saying when we got it..
872 if (atomic_read(&data.unstarted_count) > 0) {
873 long start_time = jiffies;
874 printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
876 timeout = jiffies + 30 * HZ;
877 while (atomic_read(&data.unstarted_count) > 0
878 && time_before(jiffies, timeout))
880 if (atomic_read(&data.unstarted_count) <= 0) {
881 long delta = jiffies - start_time;
883 "%s: response %ld.%ld seconds into long wait\n",
884 __FUNCTION__, delta / HZ,
885 (100 * (delta - ((delta / HZ) * HZ))) / HZ);
889 /* We either got one or timed out -- clear the lock. */
891 smp_call_function_data = 0;
894 * If after both the initial and long timeout periods we still don't
895 * have a response, something is very wrong...
897 BUG_ON(atomic_read (&data.unstarted_count) > 0);
899 /* Wait for a complete response, if needed. */
901 while (atomic_read (&data.unfinished_count) > 0)
/* Old-style smp_call_function(): broadcast to all other CPUs by
   delegating to smp_call_function_on_cpu with the full mask. */
909 smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
911 return smp_call_function_on_cpu (func, info, retry, wait,
/* IPI target: flush this CPU's instruction cache (body elided). */
916 ipi_imb(void *ignored)
924 /* Must wait other processors to flush their icache before continue. */
925 if (smp_call_function(ipi_imb, NULL, 1, 1))
926 printk(KERN_CRIT "smp_imb: timed out\n")
/* IPI target: flush this CPU's entire TLB (body elided here). */
932 ipi_flush_tlb_all(void *ignored)
940 /* Although we don't have any data to pass, we do want to
941 synchronize with the other processors. */
942 if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
943 printk(KERN_CRIT "flush_tlb_all: timed out\n");
/* True while this CPU's ASN bookkeeping is mid-update. */
949 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
/* IPI target: flush the TLB for mm if it is our active mm. */
952 ipi_flush_tlb_mm(void *x)
954 struct mm_struct *mm = (struct mm_struct *) x;
955 if (mm == current->active_mm && !asn_locked())
956 flush_tlb_current(mm);
/* Flush mm's TLB entries on all CPUs. If mm is private to us, just
   invalidate the remote context numbers instead of sending IPIs. */
962 flush_tlb_mm(struct mm_struct *mm)
964 if (mm == current->active_mm) {
965 flush_tlb_current(mm);
966 if (atomic_read(&mm->mm_users) <= 1) {
967 int i, cpu, this_cpu = smp_processor_id();
968 for (i = 0; i < smp_num_cpus; i++) {
969 cpu = cpu_logical_map(i);
/* Zeroing the context forces a new ASN next time mm runs there. */
972 if (mm->context[cpu])
973 mm->context[cpu] = 0;
/* Shared mm: have every other CPU run ipi_flush_tlb_mm. */
979 if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
980 printk(KERN_CRIT "flush_tlb_mm: timed out\n");
/* Bundle of arguments shipped through the single void* IPI payload. */
984 struct flush_tlb_page_struct {
985 struct vm_area_struct *vma;
986 struct mm_struct *mm;
/* IPI target: flush one page's TLB entry if mm is our active mm. */
991 ipi_flush_tlb_page(void *x)
993 struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
994 struct mm_struct * mm = data->mm;
996 if (mm == current->active_mm && !asn_locked())
997 flush_tlb_current_page(mm, data->vma, data->addr);
/* Flush one page's TLB entry on all CPUs; mirrors flush_tlb_mm's
   private-mm shortcut of just invalidating remote contexts. */
1003 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
1005 struct flush_tlb_page_struct data;
1006 struct mm_struct *mm = vma->vm_mm;
1008 if (mm == current->active_mm) {
1009 flush_tlb_current_page(mm, vma, addr);
1010 if (atomic_read(&mm->mm_users) <= 1) {
1011 int i, cpu, this_cpu = smp_processor_id();
1012 for (i = 0; i < smp_num_cpus; i++) {
1013 cpu = cpu_logical_map(i);
1014 if (cpu == this_cpu)
1016 if (mm->context[cpu])
1017 mm->context[cpu] = 0;
/* Shared mm: ship vma/mm/addr to the other CPUs via IPI. */
1027 if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
1028 printk(KERN_CRIT "flush_tlb_page: timed out\n");
/* Range flushes degrade to a whole-mm flush on Alpha. */
1033 flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1035 /* On the Alpha we always flush the whole user tlb. */
/* IPI target: reload the mm context (which flushes the icache view)
   if mm is active here; otherwise mark it for a deferred flush. */
1040 ipi_flush_icache_page(void *x)
1042 struct mm_struct *mm = (struct mm_struct *) x;
1043 if (mm == current->active_mm && !asn_locked())
1044 __load_new_mm_context(mm);
1046 flush_tlb_other(mm);
/* Make instruction writes to a user page visible to execution on all
   CPUs; no-op for non-executable mappings. */
1050 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
1051 unsigned long addr, int len)
1053 struct mm_struct *mm = vma->vm_mm;
1055 if ((vma->vm_flags & VM_EXEC) == 0)
1058 if (mm == current->active_mm) {
1059 __load_new_mm_context(mm);
/* Private mm: invalidate remote context numbers instead of IPIs. */
1060 if (atomic_read(&mm->mm_users) <= 1) {
1061 int i, cpu, this_cpu = smp_processor_id();
1062 for (i = 0; i < smp_num_cpus; i++) {
1063 cpu = cpu_logical_map(i);
1064 if (cpu == this_cpu)
1066 if (mm->context[cpu])
1067 mm->context[cpu] = 0;
1073 if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
1074 printk(KERN_CRIT "flush_icache_page: timed out\n");
1078 #ifdef CONFIG_DEBUG_SPINLOCK
/* Debug unlock: clear ownership bookkeeping along with the lock. */
1080 spin_unlock(spinlock_t * lock)
1086 lock->previous = NULL;
1088 lock->base_file = "none";
/* Debug lock: spins with a deadline and, on a stuck lock, prints who
   holds it and from where. base_file/line_no identify the call site. */
1093 debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
1097 void *inline_pc = __builtin_return_address(0);
1098 unsigned long started = jiffies;
1100 int cpu = smp_processor_id();
1105 /* Use sub-sections to put the actual loop at the end
1106 of this object file's text section so as to perfect
1107 branch prediction. */
1108 __asm__ __volatile__(
/* LL/SC spin body elided in this extract; these are its constraints. */
1123 : "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
1124 : "1" (lock->lock), "2" (stuck) : "memory");
/* Deadline expired: dump ours and the owner's acquisition sites. */
1128 "%s:%d spinlock stuck in %s at %p(%d)"
1129 " owner %s at %p(%d) %s:%d\n",
1131 current->comm, inline_pc, cpu,
1132 lock->task->comm, lock->previous,
1133 lock->on_cpu, lock->base_file, lock->line_no);
1139 /* Exiting. Got the lock. */
1141 lock->previous = inline_pc;
1142 lock->task = current;
1143 lock->base_file = base_file;
1144 lock->line_no = line_no;
/* Report how long a previously-stuck acquisition finally took. */
1148 "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
1149 base_file, line_no, current->comm, inline_pc,
1150 cpu, jiffies - started);
/* Debug trylock: record ownership only if the bit test-and-set wins. */
1155 debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
1158 if ((ret = !test_and_set_bit(0, lock))) {
1159 lock->on_cpu = smp_processor_id();
1160 lock->previous = __builtin_return_address(0);
1161 lock->task = current;
1163 lock->base_file = base_file;
1164 lock->line_no = line_no;
1168 #endif /* CONFIG_DEBUG_SPINLOCK */
1170 #ifdef CONFIG_DEBUG_RWLOCK
/* Debug write_lock: bounded spin distinguishing "stuck on the write
   bit" from "stuck waiting for readers to drain". */
1171 void write_lock(rwlock_t * lock)
1174 int stuck_lock, stuck_reader;
1175 void *inline_pc = __builtin_return_address(0);
1180 stuck_reader = 1<<30;
1182 __asm__ __volatile__(
/* Decrement the debug counters on each failed retry (asm elided). */
1191 "6: blt %3,4b # debug\n"
1192 " subl %3,1,%3 # debug\n"
1195 "8: blt %4,4b # debug\n"
1196 " subl %4,1,%4 # debug\n"
1201 : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
1202 "=&r" (stuck_lock), "=&r" (stuck_reader)
1203 : "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
1205 if (stuck_lock < 0) {
1206 printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
1209 if (stuck_reader < 0) {
1210 printk(KERN_WARNING "write_lock stuck on readers at %p\n",
/* Debug read_lock: same bounded-spin scheme, single stuck counter. */
1216 void read_lock(rwlock_t * lock)
1220 void *inline_pc = __builtin_return_address(0);
1226 __asm__ __volatile__(
1235 " blt %2,4b # debug\n"
1236 " subl %2,1,%2 # debug\n"
1240 : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
1241 : "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
1243 if (stuck_lock < 0) {
1244 printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
1248 #endif /* CONFIG_DEBUG_RWLOCK */