#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kdev_t.h>

#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>
/*
 * Memory barriers.
 *
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb.  For rmb the lighter-weight
 * lwsync is sufficient, since it orders loads with respect to later
 * loads and stores; only mb needs the stronger but slower sync
 * instruction, which additionally orders stores before later loads.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
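
/*
 * Illustrative sketch only (not part of the original header): a classic
 * message-passing pair.  The names `buf', `flag' and `data' are
 * hypothetical.
 *
 *	Producer:			Consumer:
 *	buf = data;			while (!flag)
 *	wmb();					;
 *	flag = 1;			rmb();
 *					data = buf;
 *
 * wmb() keeps the store to buf ahead of the store to flag; rmb() keeps
 * the load of flag ahead of the load of buf, so the consumer never sees
 * flag set while reading a stale buf.
 */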
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__("" : : : "memory")
#endif /* CONFIG_SMP */
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs *regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int _get_PVR(void);
extern long _get_L2CR(void);
extern void _set_L2CR(unsigned long);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern void cacheable_memzero(void *p, unsigned int nb);
#define prepare_to_switch()	do { } while (0)
#define switch_to(prev, next, last)	_switch_to((prev), (next), &(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
		       struct task_struct **);

extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern void dump_regs(struct pt_regs *);
#ifndef CONFIG_SMP

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(flags)	__save_flags(flags)
#define restore_flags(flags)	__restore_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
#define save_and_sti(flags)	__save_and_sti(flags)

#else /* CONFIG_SMP */

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(x)		((x) = __global_save_flags())
#define restore_flags(x)	__global_restore_flags(x)

/*
 * No trailing semicolon inside the macro body: one would silently
 * break `if (cond) save_and_cli(flags); else ...' constructs.
 */
#define save_and_cli(x)		do { save_flags(x); cli(); } while (0)
#define save_and_sti(x)		do { save_flags(x); sti(); } while (0)

#endif /* !CONFIG_SMP */
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_set(flags)		__save_and_sti(flags)
#define local_irq_restore(flags)	__restore_flags(flags)
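
/*
 * Typical usage (illustrative sketch only):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// disable, remembering prior state
 *	... touch data an interrupt handler also touches ...
 *	local_irq_restore(flags);	// put the old enable state back
 *
 * Unlike local_irq_disable()/local_irq_enable(), the save/restore pair
 * nests safely, because it restores the previous interrupt-enable state
 * instead of unconditionally re-enabling interrupts.
 */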
static __inline__ int __is_processor(unsigned long pv)
{
	unsigned long pvr;

	/* Read the processor version register (PVR, SPR 0x11F). */
	asm volatile("mfspr %0, 0x11F" : "=r" (pvr));
	return PVR_VER(pvr) == pv;
}
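
/*
 * Usage sketch (illustrative only; the PV_POWER4 constant and the
 * helper below are assumptions, not part of this header):
 *
 *	if (__is_processor(PV_POWER4))
 *		apply_power4_workaround();
 */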
/*
 * Atomic exchange.
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	/*
	 * lwarx/stwcx. loop: load-reserve the old value, try to store
	 * val, and retry if the reservation was lost in between.  The
	 * EIEIO_ON_SMP/ISYNC_ON_SMP macros (from asm/memory.h) supply
	 * the release/acquire ordering on SMP builds.
	 */
	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%3		# __xchg_u32\n\
	stwcx.	%2,0,%3\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return dummy;
}
static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	/* Same reservation loop as __xchg_u32, using the 64-bit
	   ldarx/stdcx. pair. */
	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%3		# __xchg_u64\n\
	stdcx.	%2,0,%3\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return dummy;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr)	(xchg((ptr), 1))
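
/*
 * Illustrative sketch only (`lock' is a hypothetical variable): a crude
 * test-and-set spinlock built on tas()/xchg().
 *
 *	static volatile int lock;
 *
 *	while (tas(&lock))		// atomically store 1; loop while
 *		;			// the previous value was already 1
 *	... critical section ...
 *	lock = 0;			// a real unlock also needs a barrier
 *
 * xchg() returns the previous contents of *ptr, which is what makes the
 * test-and-set race-free.
 */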
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	int prev;

	/*
	 * Load-reserve *p; bail out to label 2 if it doesn't equal old,
	 * otherwise try to store new, retrying if the reservation was
	 * lost.  Either way, the observed value is returned in prev.
	 */
	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	/* Same compare-and-store loop as __cmpxchg_u32, using the
	   64-bit ldarx/stdcx. pair and a doubleword compare. */
	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
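
/*
 * Illustrative sketch only (`counter' is a hypothetical variable): the
 * standard cmpxchg retry loop for a lock-free read-modify-write.
 *
 *	static volatile long counter;
 *	long old, new;
 *
 *	do {
 *		old = counter;		// snapshot the current value
 *		new = old + 1;		// compute the update
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg() returns the value it observed; the store happened only if
 * that value equals `old', so the loop retries after any racing update.
 */

#endif /* __PPC64_SYSTEM_H */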