/*
 *  arch/s390/kernel/smp.c
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *             Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 *  We work with logical cpu numbering everywhere we can. The only
 *  functions using the real cpu address (got from STAP) are the sigp
 *  functions. For all other functions we use the identity mapping.
 *  That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 *  used e.g. to find the idle task belonging to a logical cpu. Every array
 *  in the kernel is sorted by the logical cpu number and not by the physical
 *  one which is causing all the confusion with __cpu_logical_map and
 *  cpu_number_map in other architectures.
 */
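/*
 * Example (illustration only, not code from this file): a logical cpu
 * number is translated to the physical cpu address only when a sigp
 * order has to be issued. The variables below are made up:
 *
 *	int cpu = smp_processor_id();
 *	__u16 phys_addr = __cpu_logical_map[cpu];
 *
 * "cpu" is the logical number used to index every array in this file,
 * e.g. lowcore_ptr[] and current_set[]; "phys_addr" is what the sigp
 * wrappers ultimately operate on.
 */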
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/pgalloc.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
extern int cpu_idle(void * unused);

extern volatile int __cpu_logical_map[];
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
cycles_t cacheflush_time = 0;
int smp_threads_ready = 0;	/* Set when the idlers are all forked. */
cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
unsigned long cache_decay_ticks = 0;

static struct task_struct *current_set[NR_CPUS];

EXPORT_SYMBOL(cpu_online_map);
/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other
 *        CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed
 * it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
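/*
 * Example (illustration only; my_counter and my_count_cpu() are made-up
 * names, not part of this file): count how many other cpus answered a
 * cross call and wait until all of them are done.
 *
 *	static atomic_t my_counter = ATOMIC_INIT(0);
 *
 *	static void my_count_cpu(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	static int my_count_other_cpus(void)
 *	{
 *		smp_call_function(my_count_cpu, &my_counter, 0, 1);
 *		return atomic_read(&my_counter);
 *	}
 *
 * The function runs on every cpu except the calling one; call it locally
 * as well if the current cpu has to be included.
 */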
/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}

EXPORT_SYMBOL(smp_call_function_on);
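/*
 * Example (illustration only; my_whoami() and the target cpu number are
 * made up): run a function on one particular cpu and wait for it.
 *
 *	static void my_whoami(void *info)
 *	{
 *		*(int *) info = smp_processor_id();
 *	}
 *
 *	int ran_on = -1;
 *
 *	if (smp_call_function_on(my_whoami, &ran_on, 0, 1, 2) == 0)
 *		printk("function ran on cpu %d\n", ran_on);
 *	else
 *		printk("cpu 2 is not online\n");
 */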
static inline void do_send_stop(void)
{
	int i, rc;

	/* stop all processors */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i) || smp_processor_id() == i)
			continue;
		do {
			rc = signal_processor(i, sigp_stop);
		} while (rc == sigp_busy);
	}
}
static inline void do_store_status(void)
{
	int i, rc;

	/* store status of all processors in their lowcores (real 0) */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i) || smp_processor_id() == i)
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[i], i,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
static cpumask_t cpu_restart_map;
static void do_machine_restart(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	cpu_clear(smp_processor_id(), cpu_restart_map);
	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		/* Wait for all other cpus to enter do_machine_restart. */
		while (!cpus_empty(cpu_restart_map))
			cpu_relax();
		/* Store status of other cpus. */
		do_store_status();
		/*
		 * Finally call reipl. Because we waited for all other
		 * cpus to enter this function we know that they do
		 * not hold any s390irq-locks (the cpus have been
		 * interrupted by an external interrupt and s390irq
		 * locks are always held disabled).
		 */
		if (MACHINE_IS_VM)
			cpcmd("IPL", NULL, 0);
		else
			reipl(0x10000 | S390_lowcore.ipl_device);
	}
	signal_processor(smp_processor_id(), sigp_stop);
}

void machine_restart_smp(char * __unused)
{
	cpu_restart_map = cpu_online_map;
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	__ctl_load(cr, 0, 15);
}
static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
 * This is the main routine where commands issued by other
 * cpus should be handled.
 */
void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_external_call) == sigp_busy)
		udelay(10);
}
/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i) || smp_processor_id() == i)
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[i]->ext_call_fast);
		while (signal_processor(i, sigp_external_call) == sigp_busy)
			udelay(10);
	}
}
#ifndef CONFIG_ARCH_S390X
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_ARCH_S390X */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct {
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;
/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
}
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
}
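/*
 * Example (illustration only; the control register and bit number are
 * arbitrary placeholders): temporarily set a bit in a control register on
 * every cpu and clear it again later. The bit number counts from the
 * least significant bit, matching the "1 << bit" arithmetic above.
 *
 *	smp_ctl_set_bit(0, 17);
 *	...
 *	smp_ctl_clear_bit(0, 17);
 */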
/*
 * Let's check how many CPUs we have.
 */

#ifdef CONFIG_HOTPLUG_CPU

void
__init smp_check_cpus(unsigned int max_cpus)
{
	int cpu;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	for (cpu = 1; cpu < max_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);
}

#else /* CONFIG_HOTPLUG_CPU */
void
__init smp_check_cpus(unsigned int max_cpus)
{
	int curr_cpu, num_cpus;
	__u16 boot_cpu_addr;

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (curr_cpu = 0;
	     curr_cpu <= 65535 && num_cpus < max_cpus; curr_cpu++) {
		if ((__u16) curr_cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[num_cpus] = (__u16) curr_cpu;
		if (signal_processor(num_cpus, sigp_sense) ==
		    sigp_not_operational)
			continue;
		cpu_set(num_cpus, cpu_possible_map);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
}

#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern int pfault_token(void);
int __devinit start_secondary(void *cpuvoid)
{
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	return cpu_idle(NULL);
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct pt_regs regs;
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	memset(&regs, 0, sizeof(struct pt_regs));
	p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));

	wake_up_forked_process(p);

	current_set[cpu] = p;
}
/* Reserving and releasing of CPUs */

static spinlock_t smp_reserve_lock = SPIN_LOCK_UNLOCKED;
static int smp_cpu_reserved[NR_CPUS];
int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}
void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
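/*
 * Example (illustration only): reserve any currently online cpu, drive it
 * for a while, and release the reservation again.
 *
 *	int cpu;
 *
 *	cpu = smp_get_cpu(cpu_online_map);
 *	if (cpu < 0)
 *		return cpu;
 *	... e.g. run work there with smp_call_function_on(..., cpu) ...
 *	smp_put_cpu(cpu);
 */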
static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
/* Upping and downing of CPUs */
int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->save_area[15] = idle->thread.ksp;
	cpu_lowcore->kernel_stack = (unsigned long)
		idle->thread_info + (THREAD_SIZE);
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam 0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	signal_processor(cpu, sigp_restart);

	while (!cpu_online(cpu));
	return 0;
}
int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[smp_processor_id()] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}

	/* disable all external interrupts */

	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */

	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */

	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}
void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu));
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	signal_processor(smp_processor_id(), sigp_stop);
	for (;;);
}
/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long async_stack;
	unsigned int cpu;
	int i;

	/* request the 0x1202 external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_check_cpus(max_cpus);
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL|GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
	}
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_possible_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
{
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);