X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;ds=sidebyside;f=include%2Fasm-arm%2Fsystem.h;h=6f8e6a69dc5f4334391dc03ff6ef53b3d1bd032a;hb=1f8a6b658a943b4f04a1fc7b3a420360202c86cd;hp=0947cbf9b69a3394bd3cba0803a112ece0c183c1;hpb=a4b47ab9464a8200528fad3101668abdd7379cf9;p=powerpc.git

diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 0947cbf9b6..6f8e6a69dc 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -3,6 +3,7 @@
 
 #ifdef __KERNEL__
 
+#include <asm/memory.h>
 
 #define CPU_ARCH_UNKNOWN	0
 #define CPU_ARCH_ARMv3		1
@@ -13,6 +14,7 @@
 #define CPU_ARCH_ARMv5TE	6
 #define CPU_ARCH_ARMv5TEJ	7
 #define CPU_ARCH_ARMv6		8
+#define CPU_ARCH_ARMv7		9
 
 /*
  * CR1 bits (CP#15 CR1)
@@ -46,6 +48,7 @@
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
 
+#ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)					\
 	({						\
 		unsigned int __val;			\
@@ -55,6 +58,9 @@
 		    : "cc");				\
 		__val;					\
 	})
+#else
+#define read_cpuid(reg) (processor_id)
+#endif
 
 /*
  * This is used to ensure the compiler did actually allocate the register we
@@ -69,6 +75,9 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
+#include <linux/irqflags.h>
+
+#define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
 struct task_struct;
@@ -85,7 +94,7 @@ void die(const char *msg, struct pt_regs *regs, int err)
 		__attribute__((noreturn));
 
 struct siginfo;
-void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
 		unsigned long err, unsigned long trap);
 
 void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
@@ -95,8 +104,6 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
-#define tas(ptr) (xchg((ptr),1))
-
 extern asmlinkage void __backtrace(void);
 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
 
@@ -135,23 +142,6 @@ static inline int cpu_is_xsc3(void)
 #define	cpu_is_xscale()	1
 #endif
 
-#define set_cr(x)					\
-	__asm__ __volatile__(				\
-	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
-	: : "r" (x) : "cc")
-
-#define get_cr()					\
-	({						\
-	unsigned int __val;				\
-	__asm__ __volatile__(				\
-	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
-	: "=r" (__val) : : "cc");			\
-	__val;						\
-	})
-
-extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
-extern unsigned long cr_alignment;	/* defined in entry-armv.S */
-
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)
 #define UDBG_BADABORT	(1 << 2)
@@ -166,18 +156,85 @@ extern unsigned int user_debug;
 #define vectors_high()	(0)
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 6
-#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-                                    : : "r" (0) : "memory")
+#if __LINUX_ARM_ARCH__ >= 7
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+                                    : : "r" (0) : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+                                    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+                                    : : "r" (0) : "memory")
 #else
-#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+                                    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#ifndef CONFIG_SMP
+#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#else
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
+#endif
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
+extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
+extern unsigned long cr_alignment;	/* defined in entry-armv.S */
+
+static inline unsigned int get_cr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+#ifndef CONFIG_SMP
+extern void adjust_cr(unsigned long mask, unsigned long set);
+#endif
+
+#define CPACC_FULL(n)		(3 << (n * 2))
+#define CPACC_SVC(n)		(1 << (n * 2))
+#define CPACC_DISABLE(n)	(0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+	  : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
 /*
  * switch_mm() may do a full cache flush over the context switch,
  * so enable interrupts over the context switch to avoid high
@@ -207,147 +264,6 @@ static inline void sched_cacheflush(void)
 {
 }
 
-/*
- * CPU interrupt mask handling.
- */
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define local_irq_save(x)					\
-	({							\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ local_irq_save\n"	\
-	"cpsid	i"						\
-	: "=r" (x) : : "memory", "cc");				\
-	})
-
-#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
-#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
-#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
-#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")
-
-#else
-
-/*
- * Save the current interrupt enable state & disable IRQs
- */
-#define local_irq_save(x)					\
-	({							\
-		unsigned long temp;				\
-		(void) (&temp == &x);				\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ local_irq_save\n"	\
-"	orr	%1, %0, #128\n"					\
-"	msr	cpsr_c, %1"					\
-	: "=r" (x), "=r" (temp)				\
-	:							\
-	: "memory", "cc");					\
-	})
-
-/*
- * Enable IRQs
- */
-#define local_irq_enable()					\
-	({							\
-		unsigned long temp;				\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ local_irq_enable\n"	\
-"	bic	%0, %0, #128\n"					\
-"	msr	cpsr_c, %0"					\
-	: "=r" (temp)						\
-	:							\
-	: "memory", "cc");					\
-	})
-
-/*
- * Disable IRQs
- */
-#define local_irq_disable()					\
-	({							\
-		unsigned long temp;				\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ local_irq_disable\n"	\
-"	orr	%0, %0, #128\n"					\
-"	msr	cpsr_c, %0"					\
-	: "=r" (temp)						\
-	:							\
-	: "memory", "cc");					\
-	})
-
-/*
- * Enable FIQs
- */
-#define local_fiq_enable()					\
-	({							\
-		unsigned long temp;				\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ stf\n"		\
-"	bic	%0, %0, #64\n"					\
-"	msr	cpsr_c, %0"					\
-	: "=r" (temp)						\
-	:							\
-	: "memory", "cc");					\
-	})
-
-/*
- * Disable FIQs
- */
-#define local_fiq_disable()					\
-	({							\
-		unsigned long temp;				\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ clf\n"		\
-"	orr	%0, %0, #64\n"					\
-"	msr	cpsr_c, %0"					\
-	: "=r" (temp)						\
-	:							\
-	: "memory", "cc");					\
-	})
-
-#endif
-
-/*
- * Save the current interrupt enable state.
- */
-#define local_save_flags(x)					\
-	({							\
-	__asm__ __volatile__(					\
-	"mrs	%0, cpsr		@ local_save_flags"	\
-	: "=r" (x) : : "memory", "cc");				\
-	})
-
-/*
- * restore saved IRQ & FIQ state
- */
-#define local_irq_restore(x)					\
-	__asm__ __volatile__(					\
-	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
-	:							\
-	: "r" (x)						\
-	: "memory", "cc")
-
-#define irqs_disabled()						\
-({								\
-	unsigned long flags;					\
-	local_save_flags(flags);				\
-	(int)(flags & PSR_I_BIT);				\
-})
-
-#ifdef CONFIG_SMP
-
-#define smp_mb()		mb()
-#define smp_rmb()		rmb()
-#define smp_wmb()		wmb()
-#define smp_read_barrier_depends()		read_barrier_depends()
-
-#else
-
-#define smp_mb()		barrier()
-#define smp_rmb()		barrier()
-#define smp_wmb()		barrier()
-#define smp_read_barrier_depends()		do { } while(0)
-
-#endif /* CONFIG_SMP */
-
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -405,17 +321,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #error SMP is not supported on this platform
 #endif
 	case 1:
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		ret = *(volatile unsigned char *)ptr;
 		*(volatile unsigned char *)ptr = x;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 		break;
 
 	case 4:
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		ret = *(volatile unsigned long *)ptr;
 		*(volatile unsigned long *)ptr = x;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 		break;
 #else
 	case 1: