diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c9d2b5147c..4901f0a37f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
+ * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
  * Copyright (C) 1995, 1996 Paul M. Antoine
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
@@ -42,6 +42,7 @@
 #include
 #include
 
+extern asmlinkage void handle_int(void);
 extern asmlinkage void handle_tlbm(void);
 extern asmlinkage void handle_tlbl(void);
 extern asmlinkage void handle_tlbs(void);
@@ -279,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
 {
 	static int die_counter;
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+#ifdef CONFIG_MIPS_MT_SMTC
+	mips_mt_regdump(dvpret);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	spin_unlock_irq(&die_lock);
@@ -548,6 +556,8 @@ asmlinkage void do_ov(struct pt_regs *regs)
 {
 	siginfo_t info;
 
+	die_if_kernel("Integer overflow", regs);
+
 	info.si_code = FPE_INTOVF;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
@@ -574,7 +584,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 }
 #endif
 
 /*
- * Unimplemented operation exception.  If we've got the full 
+ * Unimplemented operation exception.  If we've got the full
  * software emulator on-board, let's use it...
  *
  * Force FPU to dump state into task/thread context.  We're
@@ -748,12 +758,43 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 					       &current->thread.fpu.soft);
 		if (sig)
 			force_sig(sig, current);
+#ifdef CONFIG_MIPS_MT_FPAFF
+		else {
+			/*
+			 * MIPS MT processors may have fewer FPU contexts
+			 * than CPU threads. If we've emulated more than
+			 * some threshold number of instructions, force
+			 * migration to a "CPU" that has FP support.
+			 */
+			if (mt_fpemul_threshold > 0
+			    && (current->thread.emulated_fp++
+			    > mt_fpemul_threshold)) {
+				/*
+				 * If there's no FPU present, or if the
+				 * application has already restricted
+				 * the allowed set to exclude any CPUs
+				 * with FPUs, we'll skip the procedure.
+				 */
+				if (cpus_intersects(current->cpus_allowed,
+				    mt_fpu_cpumask)) {
+					cpumask_t tmask;
+
+					cpus_and(tmask,
+						current->thread.user_cpus_allowed,
+						mt_fpu_cpumask);
+					set_cpus_allowed(current, tmask);
+					current->thread.mflags |= MF_FPUBOUND;
+				}
+			}
+		}
+#endif /* CONFIG_MIPS_MT_FPAFF */
 	}
 
 	return;
 
 	case 2:
 	case 3:
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
 		break;
 	}
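The FPAFF hunk above narrows the faulting thread's affinity to the FPU-equipped subset of CPUs, but only when that subset actually intersects the thread's allowed set. A minimal userspace sketch of the same intersect-then-apply pattern, assuming a caller-supplied fpu_capable_mask (hypothetical; the kernel code uses mt_fpu_cpumask and set_cpus_allowed()):

#define _GNU_SOURCE
#include <sched.h>

static int bind_to_fpu_cpus(cpu_set_t *fpu_capable_mask)
{
	cpu_set_t allowed, target;
	int i;

	if (sched_getaffinity(0, sizeof(allowed), &allowed))
		return -1;

	/* Intersect the current affinity with the FPU-capable set,
	 * as cpus_and() does in the hunk above. */
	CPU_ZERO(&target);
	for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &allowed) && CPU_ISSET(i, fpu_capable_mask))
			CPU_SET(i, &target);

	/* Mirror the cpus_intersects() guard: never apply an empty set. */
	if (CPU_COUNT(&target) == 0)
		return 0;	/* nothing usable; keep current affinity */

	return sched_setaffinity(0, sizeof(target), &target);
}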
@@ -791,6 +832,34 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 
 asmlinkage void do_mt(struct pt_regs *regs)
 {
+	int subcode;
+
+	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+			>> VPECONTROL_EXCPT_SHIFT;
+	switch (subcode) {
+	case 0:
+		printk(KERN_ERR "Thread Underflow\n");
+		break;
+	case 1:
+		printk(KERN_ERR "Thread Overflow\n");
+		break;
+	case 2:
+		printk(KERN_ERR "Invalid YIELD Qualifier\n");
+		break;
+	case 3:
+		printk(KERN_ERR "Gating Storage Exception\n");
+		break;
+	case 4:
+		printk(KERN_ERR "YIELD Scheduler Exception\n");
+		break;
+	case 5:
+		printk(KERN_ERR "Gating Storage Scheduler Exception\n");
+		break;
+	default:
+		printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+			subcode);
+		break;
+	}
 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
 
 	force_sig(SIGILL, current);
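do_mt() maps the VPEControl.EXCPT subcode to a diagnostic message; the switch above could equally be table-driven, which keeps the subcode-to-name mapping in one place. A self-contained sketch of that alternative (mt_report and mt_excpt_names are illustrative names, not from the patch):

#include <stdio.h>

static const char *const mt_excpt_names[] = {
	"Thread Underflow",
	"Thread Overflow",
	"Invalid YIELD Qualifier",
	"Gating Storage Exception",
	"YIELD Scheduler Exception",
	"Gating Storage Scheduler Exception",
};

static void mt_report(unsigned int subcode)
{
	/* Subcodes 0-5 have defined meanings; anything else is unknown. */
	if (subcode < sizeof(mt_excpt_names) / sizeof(mt_excpt_names[0]))
		fprintf(stderr, "%s\n", mt_excpt_names[subcode]);
	else
		fprintf(stderr, "*** UNKNOWN THREAD EXCEPTION %u ***\n",
			subcode);
}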
@@ -926,7 +997,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
  */
 void nmi_exception_handler(struct pt_regs *regs)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+	bust_spinlocks(1);
 	printk("NMI taken!!!!\n");
+	mips_mt_regdump(dvpret);
+#else
+	bust_spinlocks(1);
+	printk("NMI taken!!!!\n");
+#endif /* CONFIG_MIPS_MT_SMTC */
 	die("NMI", regs);
 	while(1) ;
 }
@@ -958,27 +1037,29 @@ void *set_except_vector(int n, void *addr)
 
 #ifdef CONFIG_CPU_MIPSR2
 /*
- * Shadow register allocation
+ * MIPSR2 shadow register set allocation
+ * FIXME: SMP...
  */
 
-/* MIPSR2 shadow register sets */
-struct shadow_registers {
-	spinlock_t sr_lock;	/*  */
-	int sr_supported;	/* Number of shadow register sets supported */
-	int sr_allocated;	/* Bitmap of allocated shadow registers */
+static struct shadow_registers {
+	/*
+	 * Number of shadow register sets supported
+	 */
+	unsigned long sr_supported;
+	/*
+	 * Bitmap of allocated shadow registers
+	 */
+	unsigned long sr_allocated;
 } shadow_registers;
 
-void mips_srs_init(void)
+static void mips_srs_init(void)
 {
 #ifdef CONFIG_CPU_MIPSR2_SRS
 	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
-	printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported);
-#else
-	shadow_registers.sr_supported = 1;
+	printk(KERN_INFO "%d MIPSR2 register sets available\n",
+	       shadow_registers.sr_supported);
 #endif
 	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
-	spin_lock_init(&shadow_registers.sr_lock);
 }
 
 int mips_srs_max(void)
@@ -986,38 +1067,30 @@ int mips_srs_max(void)
 {
 	return shadow_registers.sr_supported;
 }
 
-int mips_srs_alloc (void)
+int mips_srs_alloc(void)
 {
 	struct shadow_registers *sr = &shadow_registers;
-	unsigned long flags;
 	int set;
 
-	spin_lock_irqsave(&sr->sr_lock, flags);
+again:
+	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
+	if (set >= sr->sr_supported)
+		return -1;
 
-	for (set = 0; set < sr->sr_supported; set++) {
-		if ((sr->sr_allocated & (1 << set)) == 0) {
-			sr->sr_allocated |= 1 << set;
-			spin_unlock_irqrestore(&sr->sr_lock, flags);
-			return set;
-		}
-	}
+	if (test_and_set_bit(set, &sr->sr_allocated))
+		goto again;
 
-	/* None available */
-	spin_unlock_irqrestore(&sr->sr_lock, flags);
-	return -1;
+	return set;
 }
 
-void mips_srs_free (int set)
+void mips_srs_free(int set)
 {
 	struct shadow_registers *sr = &shadow_registers;
-	unsigned long flags;
 
-	spin_lock_irqsave(&sr->sr_lock, flags);
-	sr->sr_allocated &= ~(1 << set);
-	spin_unlock_irqrestore(&sr->sr_lock, flags);
+	clear_bit(set, &sr->sr_allocated);
 }
 
-void *set_vi_srs_handler (int n, void *addr, int srs)
+static void *set_vi_srs_handler(int n, void *addr, int srs)
 {
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
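The rewritten mips_srs_alloc() above replaces the spinlock with a lock-free find-bit/test_and_set retry loop: if another thread claims the bit between the scan and the atomic set, the loser simply rescans. A compilable userspace sketch of the same pattern, with GCC __atomic builtins standing in for the kernel's atomic bitops (alloc_set/free_set are illustrative names):

static unsigned long set_allocated = 1UL;	/* set 0 reserved, as above */
static unsigned long sets_supported = 4;

static int alloc_set(void)
{
	unsigned long old, bit;

again:
	old = __atomic_load_n(&set_allocated, __ATOMIC_RELAXED);
	for (bit = 0; bit < sets_supported; bit++)
		if (!(old & (1UL << bit)))
			break;
	if (bit >= sets_supported)
		return -1;			/* none free */

	/* Another thread may have claimed the bit since the scan; retry. */
	if (__atomic_fetch_or(&set_allocated, 1UL << bit,
			      __ATOMIC_ACQ_REL) & (1UL << bit))
		goto again;

	return (int)bit;
}

static void free_set(int bit)
{
	__atomic_fetch_and(&set_allocated, ~(1UL << bit), __ATOMIC_RELEASE);
}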
+ */ + extern char except_vec_vi_mori; + const int mori_offset = &except_vec_vi_mori - &except_vec_vi; +#endif /* CONFIG_MIPS_MT_SMTC */ const int handler_len = &except_vec_vi_end - &except_vec_vi; const int lui_offset = &except_vec_vi_lui - &except_vec_vi; const int ori_offset = &except_vec_vi_ori - &except_vec_vi; @@ -1071,6 +1151,12 @@ void *set_vi_srs_handler (int n, void *addr, int srs) } memcpy (b, &except_vec_vi, handler_len); +#ifdef CONFIG_MIPS_MT_SMTC + if (n > 7) + printk("Vector index %d exceeds SMTC maximum\n", n); + w = (u32 *)(b + mori_offset); + *w = (*w & 0xffff0000) | (0x100 << n); +#endif /* CONFIG_MIPS_MT_SMTC */ w = (u32 *)(b + lui_offset); *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); w = (u32 *)(b + ori_offset); @@ -1093,9 +1179,9 @@ void *set_vi_srs_handler (int n, void *addr, int srs) return (void *)old_handler; } -void *set_vi_handler (int n, void *addr) +void *set_vi_handler(int n, void *addr) { - return set_vi_srs_handler (n, addr, 0); + return set_vi_srs_handler(n, addr, 0); } #endif @@ -1111,8 +1197,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc); extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); +#ifdef CONFIG_SMP +static int smp_save_fp_context(struct sigcontext *sc) +{ + return cpu_has_fpu + ? _save_fp_context(sc) + : fpu_emulator_save_context(sc); +} + +static int smp_restore_fp_context(struct sigcontext *sc) +{ + return cpu_has_fpu + ? _restore_fp_context(sc) + : fpu_emulator_restore_context(sc); +} +#endif + static inline void signal_init(void) { +#ifdef CONFIG_SMP + /* For now just do the cpu_has_fpu check when the functions are invoked */ + save_fp_context = smp_save_fp_context; + restore_fp_context = smp_restore_fp_context; +#else if (cpu_has_fpu) { save_fp_context = _save_fp_context; restore_fp_context = _restore_fp_context; @@ -1120,6 +1227,7 @@ static inline void signal_init(void) save_fp_context = fpu_emulator_save_context; restore_fp_context = fpu_emulator_restore_context; } +#endif } #ifdef CONFIG_MIPS32_COMPAT @@ -1156,6 +1264,20 @@ void __init per_cpu_trap_init(void) { unsigned int cpu = smp_processor_id(); unsigned int status_set = ST0_CU0; +#ifdef CONFIG_MIPS_MT_SMTC + int secondaryTC = 0; + int bootTC = (cpu == 0); + + /* + * Only do per_cpu_trap_init() for first TC of Each VPE. + * Note that this hack assumes that the SMTC init code + * assigns TCs consecutively and in ascending order. + */ + + if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && + ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) + secondaryTC = 1; +#endif /* CONFIG_MIPS_MT_SMTC */ /* * Disable coprocessors and select 32-bit or 64-bit addressing @@ -1178,6 +1300,10 @@ void __init per_cpu_trap_init(void) write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ #endif +#ifdef CONFIG_MIPS_MT_SMTC + if (!secondaryTC) { +#endif /* CONFIG_MIPS_MT_SMTC */ + /* * Interrupt handling. 
@@ -1111,8 +1197,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
 
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _save_fp_context(sc)
+	       : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _restore_fp_context(sc)
+	       : fpu_emulator_restore_context(sc);
+}
+#endif
+
 static inline void signal_init(void)
 {
+#ifdef CONFIG_SMP
+	/* For now just do the cpu_has_fpu check when the functions are invoked */
+	save_fp_context = smp_save_fp_context;
+	restore_fp_context = smp_restore_fp_context;
+#else
 	if (cpu_has_fpu) {
 		save_fp_context = _save_fp_context;
 		restore_fp_context = _restore_fp_context;
@@ -1120,6 +1227,7 @@ static inline void signal_init(void)
 		save_fp_context = fpu_emulator_save_context;
 		restore_fp_context = fpu_emulator_restore_context;
 	}
+#endif
 }
 
 #ifdef CONFIG_MIPS32_COMPAT
@@ -1156,6 +1264,20 @@ void __init per_cpu_trap_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+#ifdef CONFIG_MIPS_MT_SMTC
+	int secondaryTC = 0;
+	int bootTC = (cpu == 0);
+
+	/*
+	 * Only do per_cpu_trap_init() for first TC of each VPE.
+	 * Note that this hack assumes that the SMTC init code
+	 * assigns TCs consecutively and in ascending order.
+	 */
+
+	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
+	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
+		secondaryTC = 1;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/*
 	 * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1178,6 +1300,10 @@ void __init per_cpu_trap_init(void)
 	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (!secondaryTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	/*
 	 * Interrupt handling.
 	 */
@@ -1194,6 +1320,9 @@ void __init per_cpu_trap_init(void)
 		} else
 			set_c0_cause(CAUSEF_IV);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 	TLBMISS_HANDLER_SETUP();
@@ -1203,8 +1332,14 @@ void __init per_cpu_trap_init(void)
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
 
-	cpu_cache_init();
-	tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (bootTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+		cpu_cache_init();
+		tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /* Install CPU exception handler */
@@ -1276,7 +1411,7 @@ void __init trap_init(void)
 	if (cpu_has_veic || cpu_has_vint) {
 		int nvec = cpu_has_veic ? 64 : 8;
 		for (i = 0; i < nvec; i++)
-			set_vi_handler (i, NULL);
+			set_vi_handler(i, NULL);
 	}
 	else if (cpu_has_divec)
 		set_handler(0x200, &except_vec4, 0x8);
@@ -1295,6 +1430,7 @@ void __init trap_init(void)
 	if (board_be_init)
 		board_be_init();
 
+	set_except_vector(0, handle_int);
 	set_except_vector(1, handle_tlbm);
 	set_except_vector(2, handle_tlbl);
 	set_except_vector(3, handle_tlbs);
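One pattern worth calling out from the signal_init() change above: the UP build tests cpu_has_fpu once at init and installs a direct implementation, while the SMP build installs a wrapper that re-tests on every call, since on a mixed MT system the calling thread may currently be running on a CPU without FP hardware. A standalone sketch of that dispatch choice (all names here are illustrative stubs, not the kernel's):

#include <stdbool.h>

struct sigcontext;

/* Stubs standing in for the real save paths and capability test. */
static bool cpu_has_fpu_now(void) { return true; }
static int hw_save(struct sigcontext *sc) { (void)sc; return 0; }
static int emul_save(struct sigcontext *sc) { (void)sc; return 0; }

static int (*save_fp)(struct sigcontext *sc);

/* SMP path: decide per call, because the executing CPU can change. */
static int save_dispatch(struct sigcontext *sc)
{
	return cpu_has_fpu_now() ? hw_save(sc) : emul_save(sc);
}

static void fp_dispatch_init(bool smp)
{
	if (smp)
		save_fp = save_dispatch;	/* late binding */
	else
		save_fp = cpu_has_fpu_now() ? hw_save : emul_save;
	/* callers then invoke save_fp(sc) without caring which was chosen */
}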