/*
 *  arch/s390/kernel/smp.c
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on other SMP code by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions that use the real cpu address (obtained via STAP) are the sigp
 * functions. All other functions use the identity mapping,
 * i.e. cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one, which is what causes all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
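/*
 * Illustrative sketch (not part of the original file): the identity
 * mapping described above means that only the sigp path ever needs the
 * physical cpu address. A hypothetical helper making that translation
 * explicit would look like this.
 */
#if 0	/* example only */
static inline __u16 cpu_physical_address(int logical_cpu)
{
        /* __cpu_logical_map[] holds the physical address found by STAP */
        return (__u16) __cpu_logical_map[logical_cpu];
}
#endif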
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
extern int cpu_idle(void * unused);

extern __u16 boot_cpu_addr;
extern volatile int __cpu_logical_map[];

static int       max_cpus = NR_CPUS;	/* Configured maximum number of CPUs to activate */
int              smp_num_cpus;

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
cycles_t         cacheflush_time = 0;
int              smp_threads_ready = 0;	/* Set when the idlers are all forked. */
static atomic_t  smp_commenced = ATOMIC_INIT(0);

spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

unsigned long cpu_online_map;
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

static int __init nosmp(char *str)
{
        max_cpus = 0;
        return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
        get_option(&str, &max_cpus);
        return 1;
}

__setup("maxcpus=", maxcpus);
/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static sigp_ccode smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        atomic_inc(&call_data->started);
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                        int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> Currently unused.
 * <wait> If true, wait (atomically) until the function has completed on the
 * other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs have started to execute <<func>> or have already finished it.
 *
 * You must not call this function with interrupts disabled or from a
 * hardware interrupt handler; you may call it from a bottom half handler.
 */
{
        struct call_data_struct data;
        int cpus = smp_num_cpus - 1;

        if (!cpus || !atomic_read(&smp_commenced))
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&call_lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ext_bitcall_others(ec_call_function);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock_bh(&call_lock);

        return 0;
}
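/*
 * Usage sketch (hypothetical, example only): run a fast, non-blocking
 * helper on every cpu and wait for completion. smp_call_function()
 * reaches only the *other* cpus, so the caller must invoke the helper
 * locally as well; smp_ptlb_all() below follows the same pattern.
 */
#if 0	/* example only */
static void drain_local_counters(void *info)
{
        /* runs on each remote cpu in external interrupt context;
           must be fast and must not block */
}

static void drain_all_counters(void)
{
        smp_call_function(drain_local_counters, NULL, 0, 1);
        drain_local_counters(NULL);
}
#endif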
static inline void do_send_stop(void)
{
        unsigned long dummy;
        int i;

        /* stop all processors */
        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() != i) {
                        int ccode;
                        do {
                                ccode = signal_processor_ps(
                                        &dummy,
                                        0,
                                        i,
                                        sigp_stop);
                        } while (ccode == sigp_busy);
                }
        }
}

static inline void do_store_status(void)
{
        unsigned long low_core_addr;
        unsigned long dummy;
        int i;

        /* store status of all processors in their lowcores (real 0) */
        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() != i) {
                        int ccode;
                        low_core_addr = (unsigned long) get_cpu_lowcore(i);
                        do {
                                ccode = signal_processor_ps(
                                        &dummy,
                                        low_core_addr,
                                        i,
                                        sigp_store_status_at_address);
                        } while (ccode == sigp_busy);
                }
        }
}
/*
 * This function sends a 'stop' sigp to all other CPUs in the system.
 * It goes straight through.
 */
void smp_send_stop(void)
{
        /* write magic number to zero page (absolute 0) */
        get_cpu_lowcore(smp_processor_id())->panic_magic = __PANIC_MAGIC;

        /* stop other processors. */
        do_send_stop();

        /* store status of other processors. */
        do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
static volatile unsigned long cpu_restart_map;

static void do_machine_restart(void * __unused)
{
        clear_bit(smp_processor_id(), &cpu_restart_map);
        if (smp_processor_id() == 0) {
                /* Wait for all other cpus to enter do_machine_restart. */
                while (cpu_restart_map != 0);
                /* Store status of other cpus. */
                do_store_status();
                /*
                 * Finally call reipl. Because we waited for all other
                 * cpus to enter this function we know that they do
                 * not hold any s390irq-locks (the cpus have been
                 * interrupted by an external interrupt and s390irq
                 * locks are always held disabled).
                 */
                reipl(S390_lowcore.ipl_device);
        }
        signal_processor(smp_processor_id(), sigp_stop);
}

void machine_restart_smp(char * __unused)
{
        cpu_restart_map = cpu_online_map;
        smp_call_function(do_machine_restart, NULL, 0, 0);
        do_machine_restart(NULL);
}
static void do_machine_halt(void * __unused)
{
        if (smp_processor_id() == 0) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                        cpcmd(vmhalt_cmd, NULL, 0);
                signal_processor(smp_processor_id(),
                                 sigp_stop_and_store_status);
        }
        for (;;)
                enabled_wait();
}

void machine_halt_smp(void)
{
        smp_call_function(do_machine_halt, NULL, 0, 0);
        do_machine_halt(NULL);
}

static void do_machine_power_off(void * __unused)
{
        if (smp_processor_id() == 0) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                        cpcmd(vmpoff_cmd, NULL, 0);
                signal_processor(smp_processor_id(),
                                 sigp_stop_and_store_status);
        }
        for (;;)
                enabled_wait();
}

void machine_power_off_smp(void)
{
        smp_call_function(do_machine_power_off, NULL, 0, 0);
        do_machine_power_off(NULL);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
        unsigned long bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */

        /* atomically fetch and clear the pending bit mask */
        do {
                bits = atomic_read(&S390_lowcore.ext_call_fast);
        } while (atomic_compare_and_swap(bits, 0, &S390_lowcore.ext_call_fast));

        if (test_bit(ec_call_function, &bits))
                do_call_function();
}
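/*
 * Sketch (hypothetical, example only): a new bit signal would extend the
 * dispatch above in the same way as ec_call_function. The ec_ptlb value
 * and its handler are invented here purely for illustration.
 */
#if 0	/* example only */
static void do_ptlb_signal(void)
{
        local_flush_tlb();
}

/* ... and in do_ext_call_interrupt():
 *	if (test_bit(ec_ptlb, &bits))
 *		do_ptlb_signal();
 */
#endif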
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
        struct _lowcore *lowcore = get_cpu_lowcore(cpu);
        sigp_ccode ccode;

        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        atomic_set_mask(1 << sig, &lowcore->ext_call_fast);
        ccode = signal_processor(cpu, sigp_external_call);
        return ccode;
}
/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
        struct _lowcore *lowcore;
        int i;

        for (i = 0; i < smp_num_cpus; i++) {
                if (smp_processor_id() == i)
                        continue;
                lowcore = get_cpu_lowcore(i);
                /*
                 * Set signaling bit in lowcore of target cpu and kick it
                 */
                atomic_set_mask(1 << sig, &lowcore->ext_call_fast);
                while (signal_processor(i, sigp_external_call) == sigp_busy)
                        udelay(10);
        }
}
/*
 * this function sends a 'purge tlb' signal to all other CPUs
 * and flushes the local TLB as well.
 */
void smp_ptlb_callback(void *info)
{
        local_flush_tlb();
}

void smp_ptlb_all(void)
{
        smp_call_function(smp_ptlb_callback, NULL, 0, 1);
        local_flush_tlb();
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct {
        __u16 start_ctl;
        __u16 end_ctl;
        __u32 orvals[16];
        __u32 andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
        ec_creg_mask_parms *pp;
        u32 cregs[16];
        int i;

        pp = (ec_creg_mask_parms *) info;
        /* store control registers start_ctl..end_ctl into cregs[]; the
           "ex" executes an stctl whose register range is patched in at
           run time from the (start_ctl<<4)+end_ctl operand */
        asm volatile ("   bras  1,0f\n"
                      "   stctl 0,0,0(%0)\n"
                      "0: ex    %1,0(1)\n"
                      : : "a" (cregs+pp->start_ctl),
                          "a" ((pp->start_ctl<<4) + pp->end_ctl)
                      : "memory", "1" );
        for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        /* load the modified values back, again via a patched lctl */
        asm volatile ("   bras  1,0f\n"
                      "   lctl 0,0,0(%0)\n"
                      "0: ex    %1,0(1)\n"
                      : : "a" (cregs+pp->start_ctl),
                          "a" ((pp->start_ctl<<4) + pp->end_ctl)
                      : "memory", "1" );
        return;
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 1 << bit;
                parms.andvals[cr] = 0xFFFFFFFF;
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        __ctl_set_bit(cr, bit);
}
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 0x00000000;
                parms.andvals[cr] = ~(1 << bit);
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        __ctl_clear_bit(cr, bit);
}
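/*
 * Usage sketch (hypothetical, example only): the calling convention for
 * the helpers above. The control register and bit number below are made
 * up purely for illustration; real bit assignments are machine defined.
 */
#if 0	/* example only */
#define HYPOTHETICAL_CR0_BIT	17

static void enable_feature_everywhere(void)
{
        /* sets the bit on all other cpus via smp_call_function() and
           on the calling cpu via __ctl_set_bit() */
        smp_ctl_set_bit(0, HYPOTHETICAL_CR0_BIT);
}

static void disable_feature_everywhere(void)
{
        smp_ctl_clear_bit(0, HYPOTHETICAL_CR0_BIT);
}
#endif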
/*
 * Let's check how many CPUs we have.
 */

void smp_count_cpus(void)
{
        int curr_cpu;

        current->processor = 0;
        smp_num_cpus = 1;
        for (curr_cpu = 0;
             curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) {
                if ((__u16) curr_cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[smp_num_cpus] = (__u16) curr_cpu;
                if (signal_processor(smp_num_cpus, sigp_sense) ==
                    sigp_not_operational)
                        continue;
                smp_num_cpus++;
        }
        printk("Detected %d CPUs\n", (int) smp_num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
}
/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern int pfault_init(void);
extern int pfault_token(void);

int __init start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        /* Print info about this processor */
        print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id())->cpu_data);
        /* Wait for completion of smp startup */
        while (!atomic_read(&smp_commenced))
                /* nothing */ ;
        /* init per CPU timer */
        init_cpu_timer();
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();
        /* cpu_idle will call schedule for us */
        return cpu_idle(NULL);
}
/*
 * The restart interrupt handler jumps to start_secondary directly
 * without the detour over initialize_secondary. We define it here
 * so that the linker doesn't complain.
 */
void __init initialize_secondary(void)
{
}
static int __init fork_by_hand(void)
{
        struct pt_regs regs;
        /* We don't care about the psw and regs settings since we'll never
           reschedule the forked task. */
        memset(&regs, 0, sizeof(struct pt_regs));
        return do_fork(CLONE_VM | CLONE_PID, 0, &regs, 0);
}
static void __init do_boot_cpu(int cpu)
{
        struct task_struct *idle;
        struct _lowcore *cpu_lowcore;

        /* We can't use kernel_thread since we must _avoid_ rescheduling
           the child. */
        if (fork_by_hand() < 0)
                panic("failed fork for CPU %d", cpu);

        /*
         * We remove it from the pidhash and the runqueue
         * once we got the process:
         */
        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d", cpu);
        idle->processor = cpu;
        idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */

        del_from_runqueue(idle);
        unhash_process(idle);
        init_tasks[cpu] = idle;

        cpu_lowcore = get_cpu_lowcore(cpu);
        cpu_lowcore->save_area[15] = idle->thread.ksp;
        cpu_lowcore->kernel_stack = (__u32) idle + 8192;
        /* save the boot cpu's control and access registers into the new
           lowcore so the secondary starts with a copy of them */
        __asm__ __volatile__("la    1,%0\n\t"
                             "stctl 0,15,0(1)\n\t"
                             "la    1,%1\n\t"
                             "stam  0,15,0(1)"
                             : "=m" (cpu_lowcore->cregs_save_area[0]),
                               "=m" (cpu_lowcore->access_regs_save_area[0])
                             : : "memory", "1");

        eieio();
        signal_processor(cpu, sigp_restart);
        /* Mark this cpu as online */
        set_bit(cpu, &cpu_online_map);
}
/*
 * Architecture specific routine called by the kernel just before init is
 * fired off. This allows the BP to have everything in order [we hope].
 * At the end of this all the APs will hit the system scheduler and off
 * we go. Each AP will jump through the kernel init into idle(). At this
 * point the scheduler will one day take over and give them jobs to do.
 * smp_callin is a standard routine we use to track CPUs as they power up.
 */

void __init smp_commence(void)
{
        /*
         * Lets the callins below out of their loop.
         */
        atomic_set(&smp_commenced, 1);
}
/*
 * Cycle through the processors, sending sigp_restart to boot each.
 */

void __init smp_boot_cpus(void)
{
        unsigned long async_stack;
        sigp_ccode ccode;
        int i;

        /* request the 0x1202 external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
        smp_count_cpus();
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

        /*
         * Initialize the logical to physical CPU number mapping
         */
        print_cpu_info(&safe_get_cpu_lowcore(0)->cpu_data);

        for (i = 0; i < smp_num_cpus; i++) {
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_page(GFP_KERNEL | GFP_DMA);
                if (lowcore_ptr[i] == NULL)
                        panic("smp_boot_cpus failed to "
                              "allocate prefix memory\n");
                async_stack = __get_free_pages(GFP_KERNEL, 1);
                if (async_stack == 0)
                        panic("smp_boot_cpus failed to allocate "
                              "asynchronous interrupt stack\n");

                memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
                lowcore_ptr[i]->async_stack = async_stack + (2 * PAGE_SIZE);
                /*
                 * Most of the parameters are set up when the cpu is
                 * started up.
                 */
                if (smp_processor_id() == i)
                        set_prefix((u32) lowcore_ptr[i]);
                else {
                        ccode = signal_processor_p((u32)(lowcore_ptr[i]),
                                                   i, sigp_set_prefix);
                        if (ccode)
                                /* if this gets troublesome I'll have to do
                                 * something about it. */
                                printk("smp_boot_cpus: sigp_set_prefix for "
                                       "cpu %d returned ccode %d.\n",
                                       (int) i, (int) ccode);
                        else
                                do_boot_cpu(i);
                }
        }
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(smp_call_function);