[PATCH] zoned vm counters: conversion of nr_unstable to per zone counter
include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting.  One instance per CPU.  Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 * any time safely (these protect the instance from modification by an
 * interrupt).
 * - The __xxx_page_state variants can be used safely when interrupts are
 * disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 * modified from process context and protected from preemption, or only
 * modified from interrupt context.  In this case, the field should be
 * commented here.
 */
struct page_state {
        unsigned long pgpgin;           /* Disk reads */
        unsigned long pgpgout;          /* Disk writes */
        unsigned long pswpin;           /* swap reads */
        unsigned long pswpout;          /* swap writes */

        unsigned long pgalloc_high;     /* page allocations */
        unsigned long pgalloc_normal;
        unsigned long pgalloc_dma32;
        unsigned long pgalloc_dma;

        unsigned long pgfree;           /* page freeings */
        unsigned long pgactivate;       /* pages moved inactive->active */
        unsigned long pgdeactivate;     /* pages moved active->inactive */

        unsigned long pgfault;          /* faults (major+minor) */
        unsigned long pgmajfault;       /* faults (major only) */

        unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
        unsigned long pgrefill_normal;
        unsigned long pgrefill_dma32;
        unsigned long pgrefill_dma;

        unsigned long pgsteal_high;     /* total highmem pages reclaimed */
        unsigned long pgsteal_normal;
        unsigned long pgsteal_dma32;
        unsigned long pgsteal_dma;

        unsigned long pgscan_kswapd_high;/* highmem pages scanned by kswapd */
        unsigned long pgscan_kswapd_normal;
        unsigned long pgscan_kswapd_dma32;
        unsigned long pgscan_kswapd_dma;

        unsigned long pgscan_direct_high;/* highmem pages scanned by direct reclaim */
        unsigned long pgscan_direct_normal;
        unsigned long pgscan_direct_dma32;
        unsigned long pgscan_direct_dma;

        unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
        unsigned long slabs_scanned;    /* slab objects scanned */
        unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
        unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
        unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
        unsigned long allocstall;       /* direct reclaim calls */

        unsigned long pgrotated;        /* pages rotated to tail of the LRU */
        unsigned long nr_bounce;        /* pages for bounce buffers */
};

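/*
 * Illustrative sketch, not part of this header: a reader such as the
 * /proc/vmstat code can take an approximate snapshot of all counters,
 * summed over the cpus, with get_full_page_state() and then pick out
 * individual fields.
 *
 *	struct page_state ps;
 *
 *	get_full_page_state(&ps);
 *	printk("pgpgin %lu pgpgout %lu\n", ps.pgpgin, ps.pgpgout);
 */
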
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define read_page_state(member) \
        read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta)   \
        mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta) \
        __mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)          mod_page_state(member, 1UL)
#define dec_page_state(member)          mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta)    mod_page_state(member, (delta))
#define sub_page_state(member,delta)    mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)        __mod_page_state(member, 1UL)
#define __dec_page_state(member)        __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta)  __mod_page_state(member, (delta))
#define __sub_page_state(member,delta)  __mod_page_state(member, 0UL - (delta))

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

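/*
 * Illustrative sketch, not part of this header: the fault path can
 * bump the event counters like this.  inc_page_state() is safe in any
 * context; the __ variants may only be used under the rules in the
 * comment above struct page_state.  major_fault is a hypothetical
 * flag standing in for the caller's state.
 *
 *	inc_page_state(pgfault);
 *	if (major_fault)
 *		inc_page_state(pgmajfault);
 */
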
#define state_zone_offset(zone, member)                                 \
({                                                                      \
        unsigned offset;                                                \
        if (is_highmem(zone))                                           \
                offset = offsetof(struct page_state, member##_high);    \
        else if (is_normal(zone))                                       \
                offset = offsetof(struct page_state, member##_normal);  \
        else if (is_dma32(zone))                                        \
                offset = offsetof(struct page_state, member##_dma32);   \
        else                                                            \
                offset = offsetof(struct page_state, member##_dma);     \
        offset;                                                         \
})

#define __mod_page_state_zone(zone, member, delta)                      \
 do {                                                                   \
        __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
 } while (0)

#define mod_page_state_zone(zone, member, delta)                        \
 do {                                                                   \
        mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
 } while (0)

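/*
 * Illustrative sketch, not part of this header: reclaim can account
 * per zone events through the _zone macros, which select the _dma,
 * _dma32, _normal or _high field that matches the zone.  The names
 * zone, nr_reclaimed and nr_scanned below are hypothetical locals.
 *
 *	mod_page_state_zone(zone, pgsteal, nr_reclaimed);
 *	__mod_page_state_zone(zone, pgscan_direct, nr_scanned);
 */
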
DECLARE_PER_CPU(struct page_state, page_states);

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

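/*
 * Illustrative sketch, assuming the NR_UNSTABLE_NFS zone_stat_item
 * that this patch series adds: writeback throttling can now read a
 * single global counter instead of summing per cpu page_state
 * instances.  The dirty_limit check and the congestion wait are
 * hypothetical stand-ins for the caller's reaction.
 *
 *	if (global_page_state(NR_UNSTABLE_NFS) > dirty_limit)
 *		blk_congestion_wait(WRITE, HZ/10);
 */
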
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif

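/*
 * Illustrative sketch, not part of this header: on NUMA a node's share
 * of a counter is the sum over that node's zones.  nid and the
 * NR_UNSTABLE_NFS item are assumptions here.
 *
 *	unsigned long nr = node_page_state(nid, NR_UNSTABLE_NFS);
 */
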
#define __add_zone_page_state(__z, __i, __d)    \
                __mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)    \
                __mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

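/*
 * Illustrative sketch, not part of this header: a caller that already
 * runs with interrupts off may use the cheaper __ variants; everybody
 * else must use the interrupt safe forms.  NR_UNSTABLE_NFS is assumed
 * from the rest of this patch series.
 *
 *	local_irq_save(flags);
 *	__inc_zone_page_state(page, NR_UNSTABLE_NFS);
 *	local_irq_restore(flags);
 */
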
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_inc(&page_zone(page)->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_dec(&page_zone(page)->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif

#endif /* _LINUX_VMSTAT_H */