#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <asm/cpu.h>
#include <asm/processor.h>
* This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
*/
-/*
- * MIPSCPU_INT_BASE is identically defined in both
- * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
- * but as yet there's no properly organized include structure that
- * will ensure that the right *int.h file will be included for a
- * given platform build.
- */
-
-#define MIPSCPU_INT_BASE 16
-
#define MIPS_CPU_IPI_IRQ 1
#define LOCK_MT_PRA() \
#define IPIBUF_PER_CPU 4
-struct smtc_ipi_q IPIQ[NR_CPUS];
-struct smtc_ipi_q freeIPIq;
+static struct smtc_ipi_q IPIQ[NR_CPUS];
+static struct smtc_ipi_q freeIPIq;
/* Forward declarations */
-void ipi_decode(struct pt_regs *, struct smtc_ipi *);
-void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-void setup_cross_vpe_interrupts(void);
+void ipi_decode(struct smtc_ipi *);
+static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
+static void setup_cross_vpe_interrupts(void);
void init_smtc_stats(void);
/* Global SMTC Status */
static int __init asidmask_set(char *str)
{
get_option(&str, &asidmask);
- switch(asidmask) {
+ switch (asidmask) {
case 0x1:
case 0x3:
case 0x7:
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
-/* Enable additional debug checks before going into CPU idle loop */
-#define SMTC_IDLE_HOOK_DEBUG
-
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
static int hang_trig = 0;
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
* Configure shared TLB - VPC configuration bit must be set by caller
*/
-void smtc_configure_tlb(void)
+static void smtc_configure_tlb(void)
{
int i, tlbsiz, vpes;
unsigned long mvpconf0;
/*
* Only count if the MMU Type indicated is TLB
*/
- if(((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
+ if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
config1val = read_vpe_c0_config1();
tlbsiz += ((config1val >> 25) & 0x3f) + 1;
}
}
}
write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
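+ /* clear the CP0 execution hazard so the STLB setting has taken effect before we go on */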
+ ehb();
/*
* Setup kernel data structures to use software total,
* of their initialization in smtc_cpu_setup().
*/
- tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */
- cpu_data[0].tlbsize = tlbsiz;
+ /* MIPS32 limits TLB indices to 64 */
+ if (tlbsiz > 64)
+ tlbsiz = 64;
+ cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
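+ /*
+ * e.g. two VPEs each reporting a 32-entry TLB (Config1.MMUSize = 31)
+ * sum to tlbsiz = 64, right at the MIPS32 index limit enforced above.
+ */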
smtc_status |= SMTC_TLB_SHARED;
+ local_flush_tlb_all();
printk("TLB of %d entry pairs shared by %d VPEs\n",
tlbsiz, vpes);
dvpe();
dmt();
- freeIPIq.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&freeIPIq.lock);
/*
* We probably don't have as many VPEs as we do SMP "CPUs",
*/
for (i=0; i<NR_CPUS; i++) {
IPIQ[i].head = IPIQ[i].tail = NULL;
- IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
ipi_timer_latch[i] = 0;
}
printk("ASID mask value override to 0x%x\n", asidmask);
/* Temporary */
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
if (hang_trig)
printk("Logic Analyser Trigger on suspected TC hang\n");
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
write_vpe_c0_compare(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
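+ /* also copy Count so the secondary VPE's timebase starts in step with the boot VPE */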
+ write_vpe_c0_count(read_c0_count());
}
/* enable multi-threading within VPE */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
/* Set up coprocessor affinity CPU mask(s) */
for (tc = 0; tc < ntc; tc++) {
- if(cpu_data[tc].options & MIPS_CPU_FPU)
+ if (cpu_data[tc].options & MIPS_CPU_FPU)
cpu_set(tc, mt_fpu_cpumask);
}
{
/*
* Start timer on secondary VPEs if necessary.
- * mips_timer_setup should already have been invoked by init/main
+ * plat_timer_setup has already been invoked by init/main
* on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
* SMTC init code assigns TCs consecutively and in ascending order
* across available VPEs.
*/
- if(((read_c0_tcbind() & TCBIND_CURTC) != 0)
- && ((read_c0_tcbind() & TCBIND_CURVPE)
+ if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
+ ((read_c0_tcbind() & TCBIND_CURVPE)
!= cpu_data[smp_processor_id() - 1].vpe_id)){
write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
}
* the VPE.
*/
-void smtc_ipi_qdump(void)
+static void smtc_ipi_qdump(void)
{
int i;
return result;
}
-/* No longer used in IPI dispatch, but retained for future recycling */
-
-static __inline__ int atomic_postclear(unsigned int *pv)
-{
- unsigned long result;
-
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ll %0, %2 \n"
- " or %1, $0, $0 \n"
- " sc %1, %2 \n"
- " beqz %1, 1b \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (*pv)
- : "m" (*pv)
- : "memory");
-
- return result;
-}
-
-
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
int tcstatus;
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
/* Try to reduce redundant timer interrupt messages */
- if(type == SMTC_CLOCK_TICK) {
- if(atomic_postincrement(&ipi_timer_latch[cpu])!=0) {
+ if (type == SMTC_CLOCK_TICK) {
+ if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
smtc_ipi_nq(&freeIPIq, pipi);
return;
}
/*
* Send IPI message to Halted TC, TargTC/TargVPE already having been set
*/
-void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
+static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
struct pt_regs *kstack;
unsigned long tcstatus;
* CU bit of Status is indicator that TC was
* already running on a kernel stack...
*/
- if(tcstatus & ST0_CU0) {
+ if (tcstatus & ST0_CU0) {
/* Note that this "- 1" is pointer arithmetic */
kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
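+ /* i.e. leave room for one struct pt_regs trap frame below the TC's current SP */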
} else {
write_tc_c0_tcrestart(__smtc_ipi_vector);
}
-void ipi_resched_interrupt(struct pt_regs *regs)
+static void ipi_resched_interrupt(void)
{
/* Return from interrupt should be enough to cause scheduler check */
}
-void ipi_call_interrupt(struct pt_regs *regs)
+static void ipi_call_interrupt(void)
{
/* Invoke generic function invocation code in smp.c */
smp_call_function_interrupt();
}
-void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi)
+void ipi_decode(struct smtc_ipi *pipi)
{
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
- case SMTC_CLOCK_TICK:
- /* Invoke Clock "Interrupt" */
- ipi_timer_latch[dest_copy] = 0;
-#ifdef SMTC_IDLE_HOOK_DEBUG
- clock_hang_reported[dest_copy] = 0;
-#endif /* SMTC_IDLE_HOOK_DEBUG */
- local_timer_interrupt(0, NULL, regs);
+ case SMTC_CLOCK_TICK:
+ /* Invoke Clock "Interrupt" */
+ ipi_timer_latch[dest_copy] = 0;
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+ clock_hang_reported[dest_copy] = 0;
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
+ local_timer_interrupt(0, NULL);
+ break;
+ case LINUX_SMP_IPI:
+ switch ((int)arg_copy) {
+ case SMP_RESCHEDULE_YOURSELF:
+ ipi_resched_interrupt();
break;
- case LINUX_SMP_IPI:
- switch ((int)arg_copy) {
- case SMP_RESCHEDULE_YOURSELF:
- ipi_resched_interrupt(regs);
- break;
- case SMP_CALL_FUNCTION:
- ipi_call_interrupt(regs);
- break;
- default:
- printk("Impossible SMTC IPI Argument 0x%x\n",
- (int)arg_copy);
- break;
- }
+ case SMP_CALL_FUNCTION:
+ ipi_call_interrupt();
break;
default:
- printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
+ printk("Impossible SMTC IPI Argument 0x%x\n",
+ (int)arg_copy);
break;
+ }
+ break;
+ default:
+ printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
+ break;
}
}
-void deferred_smtc_ipi(struct pt_regs *regs)
+void deferred_smtc_ipi(void)
{
struct smtc_ipi *pipi;
unsigned long flags;
* Test is not atomic, but much faster than a dequeue,
* and the vast majority of invocations will have a null queue.
*/
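+ /* An IPI queued after this unlocked test simply waits for the next pass. */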
- if(IPIQ[q].head != NULL) {
+ if (IPIQ[q].head != NULL) {
while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
/* ipi_decode() should be called with interrupts off */
local_irq_save(flags);
- ipi_decode(regs, pipi);
+ ipi_decode(pipi);
local_irq_restore(flags);
}
}
* interrupts.
*/
-static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
+static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
-static irqreturn_t ipi_interrupt(int irq, void *dev_idm, struct pt_regs *regs)
+static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
int my_vpe = cpu_data[smp_processor_id()].vpe_id;
int my_tc = cpu_data[smp_processor_id()].tc_id;
* with interrupts off
*/
local_irq_save(flags);
- ipi_decode(regs, pipi);
+ ipi_decode(pipi);
local_irq_restore(flags);
}
}
return IRQ_HANDLED;
}
-static void ipi_irq_dispatch(struct pt_regs *regs)
+static void ipi_irq_dispatch(void)
{
- do_IRQ(cpu_ipi_irq, regs);
+ do_IRQ(cpu_ipi_irq);
}
static struct irqaction irq_ipi;
-void setup_cross_vpe_interrupts(void)
+static void setup_cross_vpe_interrupts(void)
{
if (!cpu_has_vint)
panic("SMTC Kernel requires Vectored Interupt support");
set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
irq_ipi.handler = ipi_interrupt;
- irq_ipi.flags = SA_INTERRUPT;
+ irq_ipi.flags = IRQF_DISABLED;
irq_ipi.name = "SMTC_IPI";
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
+ set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
* SMTC-specific hacks invoked from elsewhere in the kernel.
*/
+void smtc_ipi_replay(void)
+{
+ /*
+ * To the extent that we've ever turned interrupts off,
+ * we may have accumulated deferred IPIs. This is subtle.
+ * If we use the smtc_ipi_qdepth() macro, we'll get an
+ * exact number - but we'll also disable interrupts
+ * and create a window of failure where a new IPI gets
+ * queued after we test the depth but before we re-enable
+ * interrupts. So long as IXMT never gets set, however,
+ * we should be OK: If we pick up something and dispatch
+ * it here, that's great. If we see nothing, but concurrent
+ * with this operation, another TC sends us an IPI, IXMT
+ * is clear, and we'll handle it as a real pseudo-interrupt
+ * and not a pseudo-pseudo interrupt.
+ */
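+ /* Drain the queue completely; each deferred IPI is re-dispatched locally via self_ipi(). */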
+ if (IPIQ[smp_processor_id()].depth > 0) {
+ struct smtc_ipi *pipi;
+ extern void self_ipi(struct smtc_ipi *);
+
+ while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
+ self_ipi(pipi);
+ smtc_cpu_stats[smp_processor_id()].selfipis++;
+ }
+ }
+}
+
+EXPORT_SYMBOL(smtc_ipi_replay);
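+
+/*
+ * Note: smtc_ipi_replay() is exported because, with "Instant Replay"
+ * configured, deferred IPIs are expected to be replayed as soon as a TC
+ * re-enables interrupts (presumably from the interrupt-restore path,
+ * which modular code also uses), rather than waiting for the idle hook
+ * below.
+ */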
+
void smtc_idle_loop_hook(void)
{
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
int im;
int flags;
int mtflags;
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
+
/*
- * To the extent that we've ever turned interrupts off,
- * we may have accumulated deferred IPIs. This is subtle.
- * If we use the smtc_ipi_qdepth() macro, we'll get an
- * exact number - but we'll also disable interrupts
- * and create a window of failure where a new IPI gets
- * queued after we test the depth but before we re-enable
- * interrupts. So long as IXMT never gets set, however,
- * we should be OK: If we pick up something and dispatch
- * it here, that's great. If we see nothing, but concurrent
- * with this operation, another TC sends us an IPI, IXMT
- * is clear, and we'll handle it as a real pseudo-interrupt
- * and not a pseudo-pseudo interrupt.
+ * Replay any accumulated deferred IPIs. If "Instant Replay"
+ * is in use, there should never be any.
*/
- if (IPIQ[smp_processor_id()].depth > 0) {
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
-
- if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
- self_ipi(pipi);
- smtc_cpu_stats[smp_processor_id()].selfipis++;
- }
- }
+#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+ smtc_ipi_replay();
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
void smtc_soft_dump(void)
* It would be nice to be able to use a spinlock here,
* but this is invoked from within TLB flush routines
* that protect themselves with DVPE, so if a lock is
- * held by another TC, it'll never be freed.
+ * held by another TC, it'll never be freed.
*
* DVPE/DMT must not be done with interrupts enabled,
* so even so most callers will already have disabled
tlb_read();
ehb();
ehi = read_c0_entryhi();
- if((ehi & ASID_MASK) == asid) {
+ if ((ehi & ASID_MASK) == asid) {
/*
* Invalidate only entries with specified ASID,
* making sure all entries differ.
* Support for single-threading cache flush operations.
*/
-int halt_state_save[NR_CPUS];
+static int halt_state_save[NR_CPUS];
/*
* To really, really be sure that nothing is being done