diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index b5da0b851e..d9bf53653b 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -4,11 +4,9 @@
 #ifndef _ASM_POWERPC_SYSTEM_H
 #define _ASM_POWERPC_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 
 #include <asm/hw_irq.h>
-#include <asm/ppc_asm.h>
 #include <asm/atomic.h>
 
 /*
@@ -43,6 +41,7 @@
 #define set_mb(var, value)	do { var = value; mb(); } while (0)
 #define set_wmb(var, value)	do { var = value; wmb(); } while (0)
 
+#ifdef __KERNEL__
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
@@ -55,7 +54,6 @@
 #define smp_read_barrier_depends()	do { } while(0)
 #endif /* CONFIG_SMP */
 
-#ifdef __KERNEL__
 struct task_struct;
 struct pt_regs;
 
@@ -135,6 +133,14 @@
 extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
@@ -177,9 +183,20 @@
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
+extern unsigned long klimit;
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
@@ -195,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
@@ -215,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
@@ -270,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
@@ -289,12 +306,12 @@
 
 #ifdef CONFIG_PPC64
 static __inline__ unsigned long
-__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 {
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
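
A note on the barrier change running through the hunks above: lwsync orders
prior loads as well as prior stores against the subsequent lwarx/stwcx.
sequence, whereas eieio orders only stores to cacheable memory, so
LWSYNC_ON_SMP gives the atomics proper acquire/release-style semantics.
The calling contract of cmpxchg()/xchg() is unchanged. As a minimal sketch
of that contract -- illustrative only, not part of the patch; my_counter and
inc_below() are hypothetical names, and it assumes this tree's
<asm/system.h> is in scope -- a typical retry loop looks like this:

	static unsigned int my_counter;

	/*
	 * Hypothetical example: atomically increment my_counter unless it
	 * has already reached 'limit'.  cmpxchg() returns the value found
	 * at the address, so the store happened only when that value is
	 * the 'old' the computation was based on.
	 */
	static int inc_below(unsigned int limit)
	{
		unsigned int old, new;

		do {
			old = my_counter;
			if (old >= limit)
				return 0;	/* already at the limit */
			new = old + 1;
		} while (cmpxchg(&my_counter, old, new) != old);

		return 1;	/* our compare-and-swap won the race */
	}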
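
Similarly, the smp_*() macros that move under #ifdef __KERNEL__ above expand
to real barriers only on CONFIG_SMP builds, which is exactly what the classic
publish/consume pairing relies on. Another illustrative sketch, not from the
patch -- payload, ready, publish() and try_consume() are hypothetical names:

	static int payload;
	static int ready;

	static void publish(int value)
	{
		payload = value;	/* write the data first */
		smp_wmb();		/* order the data before the flag on SMP */
		ready = 1;
	}

	static int try_consume(int *out)
	{
		if (!ready)
			return 0;
		smp_rmb();		/* order the flag read before the data read */
		*out = payload;
		return 1;
	}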