/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */

/*
 * MIPSCPU_INT_BASE is identically defined in both
 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
 * but as yet there's no properly organized include structure that
 * will ensure that the right *int.h file will be included for a
 * given platform build.
 */

#define MIPSCPU_INT_BASE	16

#define MIPS_CPU_IPI_IRQ	1
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
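
/*
 * Usage sketch for the locking macros above (the variable names are a
 * convention this file assumes; the macros do not declare them): the
 * caller must have "flags" and "mtflags" in scope, e.g.
 *
 *	unsigned long flags;
 *	unsigned int mtflags;
 *
 *	LOCK_MT_PRA();
 *	... manipulate per-TC state via MFTR/MTTR ...
 *	UNLOCK_MT_PRA();
 *
 * LOCK_MT_PRA() disables multithreading within the local VPE (dmt),
 * while LOCK_CORE_PRA() disables all VPEs in the core (dvpe).
 */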
/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
struct smtc_ipi_q freeIPIq;
/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
void setup_cross_vpe_interrupts(void);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int vpelimit = 0;
static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);
	return 1;
}

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);
	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}
__setup("maxvpes=", maxvpes);
__setup("maxtcs=", maxtcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
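
/*
 * Example (the values here are illustrative only): booting with
 *
 *	maxvpes=1 maxtcs=4 ipibufs=64 nostlb asidmask=0x3f
 *
 * would limit SMTC to one VPE and four TCs, size the free IPI message
 * pool at 64 buffers instead of NR_CPUS * IPIBUF_PER_CPU, inhibit TLB
 * sharing, and narrow the live ASID field to six bits.
 */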
/* Enable additional debug checks before going into CPU idle loop */
#define SMTC_IDLE_HOOK_DEBUG
#ifdef SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);
int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}
/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();
	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}
					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC );
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}
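
/*
 * Worked example of the sizing logic above (illustrative numbers):
 * on a core with two VPEs whose Config1 MMUSize-1 fields (bits 30:25)
 * each read 31, each VPE contributes 32 entry pairs, for a shared
 * total of 64 - which is also the MIPS32 Index ceiling clamped just
 * before the printk.
 */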
/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset.
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */
int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TCs is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}
/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind TC to VPE */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}
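
/*
 * For orientation (a reading of the MIPS MT ASE spec, not a statement
 * from this code's author): in smtc_tc_setup() above, clearing TKSU
 * forces the TC to kernel mode, clearing IXMT makes it eligible to
 * take interrupts, and setting TCSTATUS_A activates it; the TC stays
 * parked via TCHalt until smtc_boot_secondary() releases it.
 */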
void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;
	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb)
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

#ifdef SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* SMTC_IDLE_HOOK_DEBUG */
	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */

	/* Set up shared TLB */
	smtc_configure_tlb();
	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);

		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");
	/* Set up coprocessor affinity CPU mask(s) */
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	if (nvpe > 1)
		setup_cross_vpe_interrupts();

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
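
/*
 * Worked example of the TC distribution arithmetic in the function
 * above: with ntc = 5 usable TCs and nvpe = 2 VPEs, tcpervpe = 2 and
 * slop = 1, so the loop gives TCs 0-2 to VPE 0 (its share plus one
 * residual TC) and TCs 3-4 to VPE 1.
 */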
/*
 * Set up the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */

void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)idle->thread_info);

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the boot TC. Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
void smtc_cpus_done(void)
{
}
/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in irq_hwmask.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
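
/*
 * Usage sketch (illustrative values, not from any particular board
 * file): a platform wiring a device to CPU interrupt line 2 might
 * register its handler as
 *
 *	setup_irq_smtc(MIPSCPU_INT_BASE + 2, &my_dev_irqaction,
 *		       0x100 << 2);
 *
 * where the hwmask picks out the corresponding IM bit of c0_Status,
 * letting do_IRQ mask and unmask exactly that line under SMTC.
 */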
/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}
/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	"	sync						\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}

/* No longer used in IPI dispatch, but retained for future recycling */

static __inline__ int atomic_postclear(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	or	%1, $0, $0				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	"	sync						\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
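
/*
 * Delivery paths taken by smtc_send_ipi(), summarized from the code
 * above: a cross-VPE target always has its message queued, followed by
 * a C_SW1 "doorbell" raised in the target VPE's Cause register; a
 * same-VPE target is halted and inspected, receiving direct register
 * frame injection via post_direct_ipi() if it is not interrupt-exempt
 * (IXMT), and a queued message otherwise. Redundant SMTC_CLOCK_TICK
 * messages are coalesced through ipi_timer_latch.
 */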
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in the kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
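
/*
 * Decoding the TCRestart test above: 0x42000020 is the encoding of
 * the MIPS32 WAIT instruction (COP0 major opcode, CO bit, function
 * field 0x20), and the 0xfe00003f mask compares exactly those fields.
 * The 0x80000000 check confirms the restart address lies in a kernel
 * segment before the word it points to is dereferenced.
 */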
static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}


static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}
void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}
/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(int vpe)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;
	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}
/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi;

void setup_cross_vpe_interrupts(void)
{
	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	irq_ipi.handler = ipi_interrupt;
	irq_ipi.flags = IRQF_DISABLED;
	irq_ipi.name = "SMTC_IPI";

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
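
/*
 * End-to-end path of a cross-VPE IPI, as wired up above: the sender
 * sets SW1 in the target VPE's Cause register (smtc_send_ipi), which
 * raises CPU interrupt line MIPS_CPU_IPI_IRQ on that VPE; the vectored
 * handler ipi_irq_dispatch() feeds do_IRQ(cpu_ipi_irq), which invokes
 * ipi_interrupt() to drain or directly post the queued messages.
 */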
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

void smtc_ipi_replay(void)
{
	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[smp_processor_id()].depth > 0) {
		struct smtc_ipi *pipi;
		extern void self_ipi(struct smtc_ipi *);

		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
			self_ipi(pipi);
			smtc_cpu_stats[smp_processor_id()].selfipis++;
		}
	}
}
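
/*
 * Assumed interplay with CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY: when that
 * option is enabled, local_irq_restore() is expected to call
 * smtc_ipi_replay() as interrupts are re-enabled, so the deferred
 * queue is drained immediately instead of waiting for the idle hook.
 */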
void smtc_idle_loop_hook(void)
{
#ifdef SMTC_IDLE_HOOK_DEBUG
	int im;
	unsigned long flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768];	/* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = cpu_data[smp_processor_id()].vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* SMTC_IDLE_HOOK_DEBUG */

	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	smtc_ipi_replay();
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}
/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb;
	int i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt)
						write_tc_c0_tchalt(TCHALT_H);
					mips_ihb();
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
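
/*
 * Informal summary of the ASID bookkeeping above: smtc_live_asid[][]
 * holds, per TLB and per ASID value, a bitmask of CPUs (TCs) last
 * observed running with that ASID; the do/while generator loops until
 * it finds an ASID with an empty mask, briefly halting peer TCs at
 * generation rollover to sample the ASID fields of their TCStatus.
 */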
/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}
/*
 * Support for single-threading cache flush operations.
 */

int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}