X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=include%2Fasm-powerpc%2Fsystem.h;h=5341b75c75cb6937ac3372e059346fa2dc606727;hb=7d450e00711bf8c72fc781677f7abc08d227578e;hp=3536a5cd7a2d62fdcd3df5a58be8f4cafeb87714;hpb=333c47c847c90aaefde8b593054d9344106333b5;p=powerpc.git

diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 3536a5cd7a..c6569516ba 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -4,11 +4,9 @@
 #ifndef _ASM_POWERPC_SYSTEM_H
 #define _ASM_POWERPC_SYSTEM_H
 
-#include
 #include
 
 #include
-#include
 #include
 
 /*
@@ -43,6 +41,7 @@
 #define set_mb(var, value)	do { var = value; mb(); } while (0)
 #define set_wmb(var, value)	do { var = value; wmb(); } while (0)
 
+#ifdef __KERNEL__
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
@@ -55,7 +54,6 @@
 #define smp_read_barrier_depends()	do { } while(0)
 #endif /* CONFIG_SMP */
 
-#ifdef __KERNEL__
 struct task_struct;
 struct pt_regs;
 
@@ -135,6 +133,14 @@ extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
@@ -165,6 +171,8 @@ extern u32 booke_wdt_period;
 
 /* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
 extern unsigned char e2a(unsigned char);
+extern unsigned char* strne2a(unsigned char *dest,
+		const unsigned char *src, size_t n);
 
 struct device_node;
 extern void note_scsi_host(struct device_node *, void *);
@@ -177,9 +185,20 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
+extern unsigned long klimit;
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
 
@@ -195,14 +214,14 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
 	bne-	1b"
 	ISYNC_ON_SMP
-	: "=&r" (prev), "=m" (*(volatile unsigned int *)p)
-	: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+	: "r" (p), "r" (val)
 	: "cc", "memory");
 
 	return prev;
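
[Note: the lwarx/stwcx. pair above is PowerPC's load-reserve/store-conditional
loop: lwarx reserves the word at p, stwcx. stores only if the reservation still
holds, and bne- retries otherwise. With LWSYNC_ON_SMP before the loop and
ISYNC_ON_SMP after it, the exchange orders prior stores before it and later
accesses after it, so it behaves as a full-barrier atomic. A minimal user-space
sketch of the same acquire+release exchange, using GCC's portable __atomic
builtins instead of the kernel's hand-coded asm; the variable names here are
illustrative, not from the patch:]

#include <stdio.h>

static unsigned int word;	/* stands in for *(unsigned int *)p */

int main(void)
{
	/* Atomically store 1 and fetch the previous value, with
	 * acquire+release ordering, like __xchg_u32 above. */
	unsigned int prev = __atomic_exchange_n(&word, 1U, __ATOMIC_ACQ_REL);

	printf("prev = %u, now = %u\n", prev, word);
	return 0;
}
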
@@ -215,14 +234,14 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
 	bne-	1b"
 	ISYNC_ON_SMP
-	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
-	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+	: "r" (p), "r" (val)
 	: "cc", "memory");
 
 	return prev;
@@ -270,7 +289,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
@@ -280,8 +299,8 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	ISYNC_ON_SMP
 	"\n\
 2:"
-	: "=&r" (prev), "=m" (*p)
-	: "r" (p), "r" (old), "r" (new), "m" (*p)
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
 	: "cc", "memory");
 
 	return prev;
@@ -294,7 +313,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
@@ -303,8 +322,8 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	ISYNC_ON_SMP
 	"\n\
 2:"
-	: "=&r" (prev), "=m" (*p)
-	: "r" (p), "r" (old), "r" (new), "m" (*p)
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
 	: "cc", "memory");
 
 	return prev;
@@ -346,8 +365,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
  */
-#define NET_IP_ALIGN	0
+#define NET_IP_ALIGN	0
+#define NET_SKB_PAD	L1_CACHE_BYTES
 
 #endif
 
 #define arch_align_stack(x) (x)
@@ -407,5 +429,9 @@ static inline void create_function_call(unsigned long addr, void * func)
 	create_branch(addr, func_addr, BRANCH_SET_LINK);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYSTEM_H */
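
[Note: __cmpxchg_u32 and __cmpxchg_u64 above implement compare-and-swap: the
new value is stored at p only if the current value equals old, and the value
actually found is returned. Callers build lock-free updates out of this by
retrying until their swap wins. A small user-space sketch of that retry
pattern, using GCC's portable __atomic builtin rather than the kernel's PPC
asm; the counter and helper names are illustrative only:]

#include <stdio.h>

static unsigned int counter;	/* illustrative shared variable */

static void atomic_inc(unsigned int *p)
{
	unsigned int old = *p;

	/* On failure the builtin reloads 'old' with the value it found,
	 * so we simply retry until our old -> old + 1 swap succeeds. */
	while (!__atomic_compare_exchange_n(p, &old, old + 1,
					    0 /* strong CAS */,
					    __ATOMIC_ACQ_REL,
					    __ATOMIC_RELAXED))
		;
}

int main(void)
{
	atomic_inc(&counter);
	printf("counter = %u\n", counter);
	return 0;
}
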