/*
 * [PATCH] i386: Convert PDA into the percpu section
 * include/asm-i386/percpu.h
 */
1 #ifndef __ARCH_I386_PERCPU__
2 #define __ARCH_I386_PERCPU__
3
4 #ifdef __ASSEMBLY__
5
6 /*
7  * PER_CPU finds an address of a per-cpu variable.
8  *
9  * Args:
10  *    var - variable name
11  *    reg - 32bit register
12  *
13  * The resulting address is stored in the "reg" argument.
14  *
15  * Example:
16  *    PER_CPU(cpu_gdt_descr, %ebx)
17  */
18 #ifdef CONFIG_SMP
19 #define PER_CPU(var, reg)                       \
20         movl %fs:per_cpu__this_cpu_off, reg;            \
21         addl $per_cpu__##var, reg
22 #else /* ! SMP */
23 #define PER_CPU(var, reg) \
24         movl $per_cpu__##var, reg;
25 #endif  /* SMP */
26
27 #else /* ...!ASSEMBLY */
28
29 /*
30  * PER_CPU finds an address of a per-cpu variable.
31  *
32  * Args:
33  *    var - variable name
34  *    cpu - 32bit register containing the current CPU number
35  *
36  * The resulting address is stored in the "cpu" argument.
37  *
38  * Example:
39  *    PER_CPU(cpu_gdt_descr, %ebx)
40  */
41 #ifdef CONFIG_SMP
42 /* Same as generic implementation except for optimized local access. */
43 #define __GENERIC_PER_CPU
44
45 /* This is used for other cpus to find our section. */
46 extern unsigned long __per_cpu_offset[];
47
48 /* Separate out the type, so (int[3], foo) works. */
49 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
50 #define DEFINE_PER_CPU(type, name) \
51     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
52
53 /* We can use this directly for local CPU (faster). */
54 DECLARE_PER_CPU(unsigned long, this_cpu_off);
55
56 /* var is in discarded region: offset to particular copy we want */
57 #define per_cpu(var, cpu) (*({                          \
58         extern int simple_indentifier_##var(void);      \
59         RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
60
61 #define __raw_get_cpu_var(var) (*({                                     \
62         extern int simple_indentifier_##var(void);                      \
63         RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off));     \
64 }))
65
66 #define __get_cpu_var(var) __raw_get_cpu_var(var)
67
68 /* A macro to avoid #include hell... */
69 #define percpu_modcopy(pcpudst, src, size)                      \
70 do {                                                            \
71         unsigned int __i;                                       \
72         for_each_possible_cpu(__i)                              \
73                 memcpy((pcpudst)+__per_cpu_offset[__i],         \
74                        (src), (size));                          \
75 } while (0)
76
77 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
78 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
79
80 /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
81 #define __percpu_seg "%%fs:"
82 #else  /* !SMP */
83 #include <asm-generic/percpu.h>
84 #define __percpu_seg ""
85 #endif  /* SMP */
86
87 /* For arch-specific code, we can use direct single-insn ops (they
88  * don't give an lvalue though). */
89 extern void __bad_percpu_size(void);
90
91 #define percpu_to_op(op,var,val)                                \
92         do {                                                    \
93                 typedef typeof(var) T__;                        \
94                 if (0) { T__ tmp__; tmp__ = (val); }            \
95                 switch (sizeof(var)) {                          \
96                 case 1:                                         \
97                         asm(op "b %1,"__percpu_seg"%0"          \
98                             : "+m" (var)                        \
99                             :"ri" ((T__)val));                  \
100                         break;                                  \
101                 case 2:                                         \
102                         asm(op "w %1,"__percpu_seg"%0"          \
103                             : "+m" (var)                        \
104                             :"ri" ((T__)val));                  \
105                         break;                                  \
106                 case 4:                                         \
107                         asm(op "l %1,"__percpu_seg"%0"          \
108                             : "+m" (var)                        \
109                             :"ri" ((T__)val));                  \
110                         break;                                  \
111                 default: __bad_percpu_size();                   \
112                 }                                               \
113         } while (0)
114
115 #define percpu_from_op(op,var)                                  \
116         ({                                                      \
117                 typeof(var) ret__;                              \
118                 switch (sizeof(var)) {                          \
119                 case 1:                                         \
120                         asm(op "b "__percpu_seg"%1,%0"          \
121                             : "=r" (ret__)                      \
122                             : "m" (var));                       \
123                         break;                                  \
124                 case 2:                                         \
125                         asm(op "w "__percpu_seg"%1,%0"          \
126                             : "=r" (ret__)                      \
127                             : "m" (var));                       \
128                         break;                                  \
129                 case 4:                                         \
130                         asm(op "l "__percpu_seg"%1,%0"          \
131                             : "=r" (ret__)                      \
132                             : "m" (var));                       \
133                         break;                                  \
134                 default: __bad_percpu_size();                   \
135                 }                                               \
136                 ret__; })
137
138 #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
139 #define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
140 #define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
141 #define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
142 #define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
143 #endif /* !__ASSEMBLY__ */
144
145 #endif /* __ARCH_I386_PERCPU__ */